vallium: initial import of the vulkan frontend
author    Dave Airlie <airlied@redhat.com>
          Fri, 19 Jun 2020 06:40:27 +0000 (16:40 +1000)
committer Dave Airlie <airlied@redhat.com>
          Mon, 17 Aug 2020 04:31:47 +0000 (14:31 +1000)
This is the initial import of the vallium frontend for gallium.
It is only good enough to run the triangle and gears demos
(incorrectly) from the Sascha Willems Vulkan demos.

After this, improvements are mostly on the llvmpipe side.

It contains an implementation of the Vulkan API that is mapped
onto the gallium API, and it is suitable only for SOFTWARE drivers.

Command buffers are recorded into malloc'ed memory and later
played back against the gallium API. The command buffers are
mostly just Vulkan API marshalling, but in some places the
information is processed before being put into the command buffer
(render pass handling, for example).

Execution happens on a separate "graphics" thread, against the gallium API.

There is only a single queue, which wraps a single gallium context.

Resources are allocated via the new resource/memory APIs.
Shaders are created via the context and bound/unbound in the
second thread.

(This is not suitable for HW drivers for several reasons: memory
management, software fallbacks on many paths, and the pointless
CPU-side queue.)
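
For reference, the playback side (val_execute.c, not quoted below) is
essentially a loop over the recorded entries that issues the matching
gallium calls on the queue's single context. A minimal sketch, with
hypothetical handler names:

   static void execute_cmds(struct val_cmd_buffer *cmd_buffer,
                            struct rendering_state *state)
   {
      struct val_cmd_buffer_entry *cmd;

      LIST_FOR_EACH_ENTRY(cmd, &cmd_buffer->cmds, cmd_link) {
         switch (cmd->cmd_type) {
         case VAL_CMD_BIND_PIPELINE:
            handle_pipeline(cmd, state);  /* bind shaders/state on the context */
            break;
         case VAL_CMD_DRAW:
            handle_draw(cmd, state);      /* pipe_context::draw_vbo */
            break;
         /* ... one case per val_cmds value ... */
         default:
            break;
         }
      }
   }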

v2: drop mesa_icd, drop cpp_args, drop extra flags, change meson config (Eric)
v2.1: use meson-gallium job

meson pieces:
Reviewed-by: Eric Engestrom <eric@engestrom.ch>
overall:
Acked-by: Roland Scheidegger <sroland@vmware.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6082>

30 files changed:
.gitlab-ci.yml
meson.build
meson_options.txt
src/gallium/frontends/vallium/meson.build [new file with mode: 0644]
src/gallium/frontends/vallium/val_cmd_buffer.c [new file with mode: 0644]
src/gallium/frontends/vallium/val_conv.h [new file with mode: 0644]
src/gallium/frontends/vallium/val_descriptor_set.c [new file with mode: 0644]
src/gallium/frontends/vallium/val_device.c [new file with mode: 0644]
src/gallium/frontends/vallium/val_entrypoints_gen.py [new file with mode: 0644]
src/gallium/frontends/vallium/val_execute.c [new file with mode: 0644]
src/gallium/frontends/vallium/val_extensions.py [new file with mode: 0644]
src/gallium/frontends/vallium/val_formats.c [new file with mode: 0644]
src/gallium/frontends/vallium/val_image.c [new file with mode: 0644]
src/gallium/frontends/vallium/val_lower_input_attachments.c [new file with mode: 0644]
src/gallium/frontends/vallium/val_lower_vulkan_resource.c [new file with mode: 0644]
src/gallium/frontends/vallium/val_lower_vulkan_resource.h [new file with mode: 0644]
src/gallium/frontends/vallium/val_pass.c [new file with mode: 0644]
src/gallium/frontends/vallium/val_pipeline.c [new file with mode: 0644]
src/gallium/frontends/vallium/val_pipeline_cache.c [new file with mode: 0644]
src/gallium/frontends/vallium/val_private.h [new file with mode: 0644]
src/gallium/frontends/vallium/val_query.c [new file with mode: 0644]
src/gallium/frontends/vallium/val_util.c [new file with mode: 0644]
src/gallium/frontends/vallium/val_wsi.c [new file with mode: 0644]
src/gallium/frontends/vallium/val_wsi.h [new file with mode: 0644]
src/gallium/frontends/vallium/val_wsi_wayland.c [new file with mode: 0644]
src/gallium/frontends/vallium/val_wsi_x11.c [new file with mode: 0644]
src/gallium/meson.build
src/gallium/targets/vallium/meson.build [new file with mode: 0644]
src/gallium/targets/vallium/target.c [new file with mode: 0644]
src/gallium/targets/vallium/val_icd.py [new file with mode: 0644]

index c53a712b9b1ee04331388b795046d9962408289b..ed6dabce0baa2487ee24bad5c9751ee45eb7cccf 100644 (file)
@@ -550,6 +550,7 @@ meson-gallium:
       -D gallium-nine=true
       -D gallium-opencl=disabled
     GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,freedreno,swr,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink"
+    VULKAN_DRIVERS: swrast
     EXTRA_OPTION: >
       -D osmesa=gallium
       -D tools=all
index c76748b7162890ecbee54a1bec36935fde9caaf4..c3511c9652c5ec70c814ca67f702ad787c11a2fa 100644 (file)
@@ -243,9 +243,9 @@ _vulkan_drivers = get_option('vulkan-drivers')
 if _vulkan_drivers.contains('auto')
   if system_has_kms_drm
     if host_machine.cpu_family().startswith('x86')
-      _vulkan_drivers = ['amd', 'intel']
+      _vulkan_drivers = ['amd', 'intel', 'swrast']
     elif ['arm', 'aarch64'].contains(host_machine.cpu_family())
-      _vulkan_drivers = []
+      _vulkan_drivers = ['swrast']
     else
       error('Unknown architecture @0@. Please pass -Dvulkan-drivers to set driver options. Patches gladly accepted to fix this.'.format(
             host_machine.cpu_family()))
@@ -262,8 +262,12 @@ endif
 with_intel_vk = _vulkan_drivers.contains('intel')
 with_amd_vk = _vulkan_drivers.contains('amd')
 with_freedreno_vk = _vulkan_drivers.contains('freedreno')
+with_swrast_vk = _vulkan_drivers.contains('swrast')
 with_any_vk = _vulkan_drivers.length() != 0
 
+if with_swrast_vk and not with_gallium_softpipe
+  error('swrast vulkan requires gallium swrast')
+endif
 if with_dri_swrast and (with_gallium_softpipe or with_gallium_swr)
   error('Only one swrast provider can be built')
 endif
index 2d39d13b6adfcdefe06640f245cfcefdeb38c5cc..a0cf4abca9282cb34823b701711faa5c7c190e37 100644 (file)
@@ -166,7 +166,7 @@ option(
   'vulkan-drivers',
   type : 'array',
   value : ['auto'],
-  choices : ['auto', 'amd', 'freedreno', 'intel'],
+  choices : ['auto', 'amd', 'freedreno', 'intel', 'swrast'],
   description : 'List of vulkan drivers to build. If this is set to auto all drivers applicable to the target OS/architecture will be built'
 )
 option(
diff --git a/src/gallium/frontends/vallium/meson.build b/src/gallium/frontends/vallium/meson.build
new file mode 100644 (file)
index 0000000..f0afa89
--- /dev/null
@@ -0,0 +1,66 @@
+
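+# Generate val_entrypoints.[ch] and val_extensions.[ch] from the Vulkan
+# registry XML with the python generators in this directory.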
+val_entrypoints = custom_target(
+  'val_entrypoints.[ch]',
+  input : ['val_entrypoints_gen.py', vk_api_xml],
+  output : ['val_entrypoints.h', 'val_entrypoints.c'],
+  command : [
+    prog_python, '@INPUT0@', '--xml', '@INPUT1@', '--outdir',
+    meson.current_build_dir()
+  ],
+  depend_files : files('val_extensions.py'),
+)
+
+val_extensions_c = custom_target(
+  'val_extensions.c',
+  input : ['val_extensions.py', vk_api_xml],
+  output : ['val_extensions.c', 'val_extensions.h'],
+  command : [
+    prog_python, '@INPUT0@', '--xml', '@INPUT1@', '--out-c', '@OUTPUT0@',
+    '--out-h', '@OUTPUT1@'
+  ],
+)
+
+libval_files = files(
+    'val_device.c',
+    'val_cmd_buffer.c',
+    'val_descriptor_set.c',
+    'val_execute.c',
+    'val_util.c',
+    'val_image.c',
+    'val_formats.c',
+    'val_lower_vulkan_resource.c',
+    'val_lower_vulkan_resource.h',
+    'val_lower_input_attachments.c',
+    'val_pass.c',
+    'val_pipeline.c',
+    'val_pipeline_cache.c',
+    'val_query.c',
+    'val_wsi.c')
+
+val_deps = []
+val_flags = []
+
+if with_platform_x11
+  val_deps += dep_xcb_dri3
+  val_flags += [
+    '-DVK_USE_PLATFORM_XCB_KHR',
+    '-DVK_USE_PLATFORM_XLIB_KHR',
+  ]
+  libval_files += files('val_wsi_x11.c')
+endif
+
+if with_platform_wayland
+  val_deps += dep_wayland_client
+  val_flags += '-DVK_USE_PLATFORM_WAYLAND_KHR'
+  libval_files += files('val_wsi_wayland.c')
+endif
+
+libvallium_st = static_library(
+  'vallium_st',
+  [libval_files, val_entrypoints, val_extensions_c ],
+  link_with : [ libvulkan_wsi ],
+  c_args : [ val_flags ],
+  gnu_symbol_visibility : 'hidden',
+  include_directories : [ inc_include, inc_src, inc_util, inc_gallium, inc_compiler, inc_gallium_aux, inc_vulkan_wsi ],
+  dependencies : [ idep_nir, idep_mesautil, idep_vulkan_util ]
+)
diff --git a/src/gallium/frontends/vallium/val_cmd_buffer.c b/src/gallium/frontends/vallium/val_cmd_buffer.c
new file mode 100644 (file)
index 0000000..fd0f8cd
--- /dev/null
@@ -0,0 +1,1419 @@
+/*
+ * Copyright © 2019 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "val_private.h"
+#include "pipe/p_context.h"
+
+static VkResult val_create_cmd_buffer(
+   struct val_device *                         device,
+   struct val_cmd_pool *                       pool,
+   VkCommandBufferLevel                        level,
+   VkCommandBuffer*                            pCommandBuffer)
+{
+   struct val_cmd_buffer *cmd_buffer;
+
+   cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
+                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (cmd_buffer == NULL)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vk_object_base_init(&device->vk, &cmd_buffer->base,
+                       VK_OBJECT_TYPE_COMMAND_BUFFER);
+   cmd_buffer->device = device;
+   cmd_buffer->pool = pool;
+   list_inithead(&cmd_buffer->cmds);
+   cmd_buffer->status = VAL_CMD_BUFFER_STATUS_INITIAL;
+   if (pool) {
+      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
+   } else {
+      /* Init the pool_link so we can safely call list_del when we destroy
+       * the command buffer
+       */
+      list_inithead(&cmd_buffer->pool_link);
+   }
+   *pCommandBuffer = val_cmd_buffer_to_handle(cmd_buffer);
+
+   return VK_SUCCESS;
+}
+
+static void
+val_cmd_buffer_free_all_cmds(struct val_cmd_buffer *cmd_buffer)
+{
+   struct val_cmd_buffer_entry *tmp, *cmd;
+   LIST_FOR_EACH_ENTRY_SAFE(cmd, tmp, &cmd_buffer->cmds, cmd_link) {
+      list_del(&cmd->cmd_link);
+      vk_free(&cmd_buffer->pool->alloc, cmd);
+   }
+}
+
+static VkResult val_reset_cmd_buffer(struct val_cmd_buffer *cmd_buffer)
+{
+   val_cmd_buffer_free_all_cmds(cmd_buffer);
+   list_inithead(&cmd_buffer->cmds);
+   cmd_buffer->status = VAL_CMD_BUFFER_STATUS_INITIAL;
+   return VK_SUCCESS;
+}
+
+VkResult val_AllocateCommandBuffers(
+   VkDevice                                    _device,
+   const VkCommandBufferAllocateInfo*          pAllocateInfo,
+   VkCommandBuffer*                            pCommandBuffers)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_cmd_pool, pool, pAllocateInfo->commandPool);
+
+   VkResult result = VK_SUCCESS;
+   uint32_t i;
+
+   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
+
+      if (!list_is_empty(&pool->free_cmd_buffers)) {
+         struct val_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct val_cmd_buffer, pool_link);
+
+         list_del(&cmd_buffer->pool_link);
+         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
+
+         result = val_reset_cmd_buffer(cmd_buffer);
+         cmd_buffer->level = pAllocateInfo->level;
+
+         pCommandBuffers[i] = val_cmd_buffer_to_handle(cmd_buffer);
+      } else {
+         result = val_create_cmd_buffer(device, pool, pAllocateInfo->level,
+                                        &pCommandBuffers[i]);
+         if (result != VK_SUCCESS)
+            break;
+      }
+   }
+
+   if (result != VK_SUCCESS) {
+      val_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
+                             i, pCommandBuffers);
+      memset(pCommandBuffers, 0,
+             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
+   }
+
+   return result;
+}
+
+static void
+val_cmd_buffer_destroy(struct val_cmd_buffer *cmd_buffer)
+{
+   val_cmd_buffer_free_all_cmds(cmd_buffer);
+   list_del(&cmd_buffer->pool_link);
+   vk_object_base_finish(&cmd_buffer->base);
+   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
+}
+
+void val_FreeCommandBuffers(
+   VkDevice                                    device,
+   VkCommandPool                               commandPool,
+   uint32_t                                    commandBufferCount,
+   const VkCommandBuffer*                      pCommandBuffers)
+{
+   for (uint32_t i = 0; i < commandBufferCount; i++) {
+      VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
+
+      if (cmd_buffer) {
+         if (cmd_buffer->pool) {
+            list_del(&cmd_buffer->pool_link);
+            list_addtail(&cmd_buffer->pool_link, &cmd_buffer->pool->free_cmd_buffers);
+         } else
+            val_cmd_buffer_destroy(cmd_buffer);
+      }
+   }
+}
+
+VkResult val_ResetCommandBuffer(
+   VkCommandBuffer                             commandBuffer,
+   VkCommandBufferResetFlags                   flags)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+
+   return val_reset_cmd_buffer(cmd_buffer);
+}
+
+VkResult val_BeginCommandBuffer(
+   VkCommandBuffer                             commandBuffer,
+   const VkCommandBufferBeginInfo*             pBeginInfo)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VkResult result;
+   if (cmd_buffer->status != VAL_CMD_BUFFER_STATUS_INITIAL) {
+      result = val_reset_cmd_buffer(cmd_buffer);
+      if (result != VK_SUCCESS)
+         return result;
+   }
+   cmd_buffer->status = VAL_CMD_BUFFER_STATUS_RECORDING;
+   return VK_SUCCESS;
+}
+
+VkResult val_EndCommandBuffer(
+   VkCommandBuffer                             commandBuffer)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   cmd_buffer->status = VAL_CMD_BUFFER_STATUS_EXECUTABLE;
+   return VK_SUCCESS;
+}
+
+VkResult val_CreateCommandPool(
+   VkDevice                                    _device,
+   const VkCommandPoolCreateInfo*              pCreateInfo,
+   const VkAllocationCallbacks*                pAllocator,
+   VkCommandPool*                              pCmdPool)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   struct val_cmd_pool *pool;
+
+   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
+                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (pool == NULL)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vk_object_base_init(&device->vk, &pool->base,
+                       VK_OBJECT_TYPE_COMMAND_POOL);
+   if (pAllocator)
+      pool->alloc = *pAllocator;
+   else
+      pool->alloc = device->alloc;
+
+   list_inithead(&pool->cmd_buffers);
+   list_inithead(&pool->free_cmd_buffers);
+
+   *pCmdPool = val_cmd_pool_to_handle(pool);
+
+   return VK_SUCCESS;
+}
+
+void val_DestroyCommandPool(
+   VkDevice                                    _device,
+   VkCommandPool                               commandPool,
+   const VkAllocationCallbacks*                pAllocator)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_cmd_pool, pool, commandPool);
+
+   if (!pool)
+      return;
+
+   list_for_each_entry_safe(struct val_cmd_buffer, cmd_buffer,
+                            &pool->cmd_buffers, pool_link) {
+      val_cmd_buffer_destroy(cmd_buffer);
+   }
+
+   list_for_each_entry_safe(struct val_cmd_buffer, cmd_buffer,
+                            &pool->free_cmd_buffers, pool_link) {
+      val_cmd_buffer_destroy(cmd_buffer);
+   }
+
+   vk_object_base_finish(&pool->base);
+   vk_free2(&device->alloc, pAllocator, pool);
+}
+
+VkResult val_ResetCommandPool(
+   VkDevice                                    device,
+   VkCommandPool                               commandPool,
+   VkCommandPoolResetFlags                     flags)
+{
+   VAL_FROM_HANDLE(val_cmd_pool, pool, commandPool);
+   VkResult result;
+
+   list_for_each_entry(struct val_cmd_buffer, cmd_buffer,
+                       &pool->cmd_buffers, pool_link) {
+      result = val_reset_cmd_buffer(cmd_buffer);
+      if (result != VK_SUCCESS)
+         return result;
+   }
+   return VK_SUCCESS;
+}
+
+void val_TrimCommandPool(
+   VkDevice                                    device,
+   VkCommandPool                               commandPool,
+   VkCommandPoolTrimFlags                      flags)
+{
+   VAL_FROM_HANDLE(val_cmd_pool, pool, commandPool);
+
+   if (!pool)
+      return;
+
+   list_for_each_entry_safe(struct val_cmd_buffer, cmd_buffer,
+                            &pool->free_cmd_buffers, pool_link) {
+      val_cmd_buffer_destroy(cmd_buffer);
+   }
+}
+
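+/* Command entries are recorded as a fixed header plus an optional inline
+ * payload: callers that need per-command data (regions, attachments, ...)
+ * request extra_size bytes, which live directly after the entry struct. */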
+static struct val_cmd_buffer_entry *cmd_buf_entry_alloc_size(struct val_cmd_buffer *cmd_buffer,
+                                                             uint32_t extra_size,
+                                                             enum val_cmds type)
+{
+   struct val_cmd_buffer_entry *cmd;
+   uint32_t cmd_size = sizeof(*cmd) + extra_size;
+   cmd = vk_alloc(&cmd_buffer->pool->alloc,
+                  cmd_size,
+                  8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (!cmd)
+      return NULL;
+
+   cmd->cmd_type = type;
+   return cmd;
+}
+
+static struct val_cmd_buffer_entry *cmd_buf_entry_alloc(struct val_cmd_buffer *cmd_buffer,
+                                                        enum val_cmds type)
+{
+   return cmd_buf_entry_alloc_size(cmd_buffer, 0, type);
+}
+
+static void cmd_buf_queue(struct val_cmd_buffer *cmd_buffer,
+                          struct val_cmd_buffer_entry *cmd)
+{
+   list_addtail(&cmd->cmd_link, &cmd_buffer->cmds);
+}
+
+static void
+state_setup_attachments(struct val_attachment_state *attachments,
+                        struct val_render_pass *pass,
+                        const VkClearValue *clear_values)
+{
+   for (uint32_t i = 0; i < pass->attachment_count; ++i) {
+      struct val_render_pass_attachment *att = &pass->attachments[i];
+      VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
+      VkImageAspectFlags clear_aspects = 0;
+      if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
+         /* color attachment */
+         if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
+            clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
+         }
+      } else {
+         /* depthstencil attachment */
+         if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
+             att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
+            clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
+            if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
+                att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE)
+               clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
+         }
+         if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
+             att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
+            clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
+         }
+      }
+      attachments[i].pending_clear_aspects = clear_aspects;
+      if (clear_values)
+         attachments[i].clear_value = clear_values[i];
+   }
+}
+
+void val_CmdBeginRenderPass(
+   VkCommandBuffer                             commandBuffer,
+   const VkRenderPassBeginInfo*                pRenderPassBegin,
+   VkSubpassContents                           contents)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_render_pass, pass, pRenderPassBegin->renderPass);
+   VAL_FROM_HANDLE(val_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
+   struct val_cmd_buffer_entry *cmd;
+   uint32_t cmd_size = pass->attachment_count * sizeof(struct val_attachment_state);
+
+   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_BEGIN_RENDER_PASS);
+   if (!cmd)
+      return;
+
+   cmd->u.begin_render_pass.render_pass = pass;
+   cmd->u.begin_render_pass.framebuffer = framebuffer;
+   cmd->u.begin_render_pass.render_area = pRenderPassBegin->renderArea;
+
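+   /* The per-attachment clear state lives in the inline payload allocated
+    * after the command entry. */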
+   cmd->u.begin_render_pass.attachments = (struct val_attachment_state *)(cmd + 1);
+   state_setup_attachments(cmd->u.begin_render_pass.attachments, pass, pRenderPassBegin->pClearValues);
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdNextSubpass(
+   VkCommandBuffer                             commandBuffer,
+   VkSubpassContents                           contents)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_NEXT_SUBPASS);
+   if (!cmd)
+      return;
+
+   cmd->u.next_subpass.contents = contents;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdBindVertexBuffers(
+   VkCommandBuffer                             commandBuffer,
+   uint32_t                                    firstBinding,
+   uint32_t                                    bindingCount,
+   const VkBuffer*                             pBuffers,
+   const VkDeviceSize*                         pOffsets)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   struct val_cmd_buffer_entry *cmd;
+   struct val_buffer **buffers;
+   VkDeviceSize *offsets;
+   int i;
+   uint32_t cmd_size = bindingCount * sizeof(struct val_buffer *) + bindingCount * sizeof(VkDeviceSize);
+
+   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_BIND_VERTEX_BUFFERS);
+   if (!cmd)
+      return;
+
+   cmd->u.vertex_buffers.first = firstBinding;
+   cmd->u.vertex_buffers.binding_count = bindingCount;
+
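+   /* Inline payload layout: bindingCount buffer pointers followed by
+    * bindingCount offsets. */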
+   buffers = (struct val_buffer **)(cmd + 1);
+   offsets = (VkDeviceSize *)(buffers + bindingCount);
+   for (i = 0; i < bindingCount; i++) {
+      buffers[i] = val_buffer_from_handle(pBuffers[i]);
+      offsets[i] = pOffsets[i];
+   }
+   cmd->u.vertex_buffers.buffers = buffers;
+   cmd->u.vertex_buffers.offsets = offsets;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdBindPipeline(
+   VkCommandBuffer                             commandBuffer,
+   VkPipelineBindPoint                         pipelineBindPoint,
+   VkPipeline                                  _pipeline)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_pipeline, pipeline, _pipeline);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_BIND_PIPELINE);
+   if (!cmd)
+      return;
+
+   cmd->u.pipeline.bind_point = pipelineBindPoint;
+   cmd->u.pipeline.pipeline = pipeline;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdBindDescriptorSets(
+   VkCommandBuffer                             commandBuffer,
+   VkPipelineBindPoint                         pipelineBindPoint,
+   VkPipelineLayout                            _layout,
+   uint32_t                                    firstSet,
+   uint32_t                                    descriptorSetCount,
+   const VkDescriptorSet*                      pDescriptorSets,
+   uint32_t                                    dynamicOffsetCount,
+   const uint32_t*                             pDynamicOffsets)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_pipeline_layout, layout, _layout);
+   struct val_cmd_buffer_entry *cmd;
+   struct val_descriptor_set **sets;
+   uint32_t *offsets;
+   int i;
+   uint32_t cmd_size = descriptorSetCount * sizeof(struct val_descriptor_set *) + dynamicOffsetCount * sizeof(uint32_t);
+
+   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_BIND_DESCRIPTOR_SETS);
+   if (!cmd)
+      return;
+
+   cmd->u.descriptor_sets.bind_point = pipelineBindPoint;
+   cmd->u.descriptor_sets.layout = layout;
+   cmd->u.descriptor_sets.first = firstSet;
+   cmd->u.descriptor_sets.count = descriptorSetCount;
+
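+   /* Inline payload layout: descriptorSetCount set pointers followed by
+    * dynamicOffsetCount dynamic offsets. */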
+   sets = (struct val_descriptor_set **)(cmd + 1);
+   for (i = 0; i < descriptorSetCount; i++) {
+      sets[i] = val_descriptor_set_from_handle(pDescriptorSets[i]);
+   }
+   cmd->u.descriptor_sets.sets = sets;
+
+   cmd->u.descriptor_sets.dynamic_offset_count = dynamicOffsetCount;
+   offsets = (uint32_t *)(sets + descriptorSetCount);
+   for (i = 0; i < dynamicOffsetCount; i++)
+      offsets[i] = pDynamicOffsets[i];
+   cmd->u.descriptor_sets.dynamic_offsets = offsets;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdDraw(
+   VkCommandBuffer                             commandBuffer,
+   uint32_t                                    vertexCount,
+   uint32_t                                    instanceCount,
+   uint32_t                                    firstVertex,
+   uint32_t                                    firstInstance)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_DRAW);
+   if (!cmd)
+      return;
+
+   cmd->u.draw.vertex_count = vertexCount;
+   cmd->u.draw.instance_count = instanceCount;
+   cmd->u.draw.first_vertex = firstVertex;
+   cmd->u.draw.first_instance = firstInstance;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdEndRenderPass(
+   VkCommandBuffer                             commandBuffer)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_END_RENDER_PASS);
+   if (!cmd)
+      return;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdSetViewport(
+   VkCommandBuffer                             commandBuffer,
+   uint32_t                                    firstViewport,
+   uint32_t                                    viewportCount,
+   const VkViewport*                           pViewports)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   struct val_cmd_buffer_entry *cmd;
+   int i;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_SET_VIEWPORT);
+   if (!cmd)
+      return;
+
+   cmd->u.set_viewport.first_viewport = firstViewport;
+   cmd->u.set_viewport.viewport_count = viewportCount;
+   for (i = 0; i < viewportCount; i++)
+      cmd->u.set_viewport.viewports[i] = pViewports[i];
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdSetScissor(
+   VkCommandBuffer                             commandBuffer,
+   uint32_t                                    firstScissor,
+   uint32_t                                    scissorCount,
+   const VkRect2D*                             pScissors)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   struct val_cmd_buffer_entry *cmd;
+   int i;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_SET_SCISSOR);
+   if (!cmd)
+      return;
+
+   cmd->u.set_scissor.first_scissor = firstScissor;
+   cmd->u.set_scissor.scissor_count = scissorCount;
+   for (i = 0; i < scissorCount; i++)
+      cmd->u.set_scissor.scissors[i] = pScissors[i];
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdSetLineWidth(
+   VkCommandBuffer                             commandBuffer,
+   float                                       lineWidth)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_SET_LINE_WIDTH);
+   if (!cmd)
+      return;
+
+   cmd->u.set_line_width.line_width = lineWidth;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdSetDepthBias(
+   VkCommandBuffer                             commandBuffer,
+   float                                       depthBiasConstantFactor,
+   float                                       depthBiasClamp,
+   float                                       depthBiasSlopeFactor)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_SET_DEPTH_BIAS);
+   if (!cmd)
+      return;
+
+   cmd->u.set_depth_bias.constant_factor = depthBiasConstantFactor;
+   cmd->u.set_depth_bias.clamp = depthBiasClamp;
+   cmd->u.set_depth_bias.slope_factor = depthBiasSlopeFactor;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdSetBlendConstants(
+   VkCommandBuffer                             commandBuffer,
+   const float                                 blendConstants[4])
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_SET_BLEND_CONSTANTS);
+   if (!cmd)
+      return;
+
+   memcpy(cmd->u.set_blend_constants.blend_constants, blendConstants, 4 * sizeof(float));
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdSetDepthBounds(
+   VkCommandBuffer                             commandBuffer,
+   float                                       minDepthBounds,
+   float                                       maxDepthBounds)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_SET_DEPTH_BOUNDS);
+   if (!cmd)
+      return;
+
+   cmd->u.set_depth_bounds.min_depth = minDepthBounds;
+   cmd->u.set_depth_bounds.max_depth = maxDepthBounds;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdSetStencilCompareMask(
+   VkCommandBuffer                             commandBuffer,
+   VkStencilFaceFlags                          faceMask,
+   uint32_t                                    compareMask)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_SET_STENCIL_COMPARE_MASK);
+   if (!cmd)
+      return;
+
+   cmd->u.stencil_vals.face_mask = faceMask;
+   cmd->u.stencil_vals.value = compareMask;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdSetStencilWriteMask(
+   VkCommandBuffer                             commandBuffer,
+   VkStencilFaceFlags                          faceMask,
+   uint32_t                                    writeMask)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_SET_STENCIL_WRITE_MASK);
+   if (!cmd)
+      return;
+
+   cmd->u.stencil_vals.face_mask = faceMask;
+   cmd->u.stencil_vals.value = writeMask;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+
+void val_CmdSetStencilReference(
+   VkCommandBuffer                             commandBuffer,
+   VkStencilFaceFlags                          faceMask,
+   uint32_t                                    reference)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_SET_STENCIL_REFERENCE);
+   if (!cmd)
+      return;
+
+   cmd->u.stencil_vals.face_mask = faceMask;
+   cmd->u.stencil_vals.value = reference;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdPushConstants(
+   VkCommandBuffer                             commandBuffer,
+   VkPipelineLayout                            layout,
+   VkShaderStageFlags                          stageFlags,
+   uint32_t                                    offset,
+   uint32_t                                    size,
+   const void*                                 pValues)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   struct val_cmd_buffer_entry *cmd;
+
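+   /* size - 4: the entry's push_constants payload is assumed to already
+    * provide the first 4 bytes of inline storage (see val_private.h). */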
+   cmd = cmd_buf_entry_alloc_size(cmd_buffer, (size - 4), VAL_CMD_PUSH_CONSTANTS);
+   if (!cmd)
+      return;
+
+   cmd->u.push_constants.stage = stageFlags;
+   cmd->u.push_constants.offset = offset;
+   cmd->u.push_constants.size = size;
+   memcpy(cmd->u.push_constants.val, pValues, size);
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdBindIndexBuffer(
+   VkCommandBuffer                             commandBuffer,
+   VkBuffer                                    _buffer,
+   VkDeviceSize                                offset,
+   VkIndexType                                 indexType)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_buffer, buffer, _buffer);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_BIND_INDEX_BUFFER);
+   if (!cmd)
+      return;
+
+   cmd->u.index_buffer.buffer = buffer;
+   cmd->u.index_buffer.offset = offset;
+   cmd->u.index_buffer.index_type = indexType;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdDrawIndexed(
+   VkCommandBuffer                             commandBuffer,
+   uint32_t                                    indexCount,
+   uint32_t                                    instanceCount,
+   uint32_t                                    firstIndex,
+   int32_t                                     vertexOffset,
+   uint32_t                                    firstInstance)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_DRAW_INDEXED);
+   if (!cmd)
+      return;
+
+   cmd->u.draw_indexed.index_count = indexCount;
+   cmd->u.draw_indexed.instance_count = instanceCount;
+   cmd->u.draw_indexed.first_index = firstIndex;
+   cmd->u.draw_indexed.vertex_offset = vertexOffset;
+   cmd->u.draw_indexed.first_instance = firstInstance;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdDrawIndirect(
+   VkCommandBuffer                             commandBuffer,
+   VkBuffer                                    _buffer,
+   VkDeviceSize                                offset,
+   uint32_t                                    drawCount,
+   uint32_t                                    stride)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_buffer, buf, _buffer);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_DRAW_INDIRECT);
+   if (!cmd)
+      return;
+
+   cmd->u.draw_indirect.offset = offset;
+   cmd->u.draw_indirect.buffer = buf;
+   cmd->u.draw_indirect.draw_count = drawCount;
+   cmd->u.draw_indirect.stride = stride;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdDrawIndexedIndirect(
+   VkCommandBuffer                             commandBuffer,
+   VkBuffer                                    _buffer,
+   VkDeviceSize                                offset,
+   uint32_t                                    drawCount,
+   uint32_t                                    stride)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_buffer, buf, _buffer);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_DRAW_INDEXED_INDIRECT);
+   if (!cmd)
+      return;
+
+   cmd->u.draw_indirect.offset = offset;
+   cmd->u.draw_indirect.buffer = buf;
+   cmd->u.draw_indirect.draw_count = drawCount;
+   cmd->u.draw_indirect.stride = stride;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdDispatch(
+   VkCommandBuffer                             commandBuffer,
+   uint32_t                                    x,
+   uint32_t                                    y,
+   uint32_t                                    z)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_DISPATCH);
+   if (!cmd)
+      return;
+
+   cmd->u.dispatch.x = x;
+   cmd->u.dispatch.y = y;
+   cmd->u.dispatch.z = z;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdDispatchIndirect(
+   VkCommandBuffer                             commandBuffer,
+   VkBuffer                                    _buffer,
+   VkDeviceSize                                offset)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_DISPATCH_INDIRECT);
+   if (!cmd)
+      return;
+
+   cmd->u.dispatch_indirect.buffer = val_buffer_from_handle(_buffer);
+   cmd->u.dispatch_indirect.offset = offset;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdExecuteCommands(
+   VkCommandBuffer                             commandBuffer,
+   uint32_t                                    commandBufferCount,
+   const VkCommandBuffer*                      pCmdBuffers)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   struct val_cmd_buffer_entry *cmd;
+   uint32_t cmd_size = commandBufferCount * sizeof(struct val_cmd_buffer *);
+
+   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_EXECUTE_COMMANDS);
+   if (!cmd)
+      return;
+
+   cmd->u.execute_commands.command_buffer_count = commandBufferCount;
+   for (unsigned i = 0; i < commandBufferCount; i++)
+      cmd->u.execute_commands.cmd_buffers[i] = val_cmd_buffer_from_handle(pCmdBuffers[i]);
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdSetEvent(VkCommandBuffer commandBuffer,
+                     VkEvent _event,
+                     VkPipelineStageFlags stageMask)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_event, event, _event);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_SET_EVENT);
+   if (!cmd)
+      return;
+
+   cmd->u.event_set.event = event;
+   cmd->u.event_set.value = true;
+   cmd->u.event_set.flush = !!(stageMask == VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdResetEvent(VkCommandBuffer commandBuffer,
+                       VkEvent _event,
+                       VkPipelineStageFlags stageMask)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_event, event, _event);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_SET_EVENT);
+   if (!cmd)
+      return;
+
+   cmd->u.event_set.event = event;
+   cmd->u.event_set.value = false;
+   cmd->u.event_set.flush = !!(stageMask == VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
+
+   cmd_buf_queue(cmd_buffer, cmd);
+
+}
+
+void val_CmdWaitEvents(VkCommandBuffer commandBuffer,
+                       uint32_t eventCount,
+                       const VkEvent* pEvents,
+                       VkPipelineStageFlags srcStageMask,
+                       VkPipelineStageFlags dstStageMask,
+                       uint32_t memoryBarrierCount,
+                       const VkMemoryBarrier* pMemoryBarriers,
+                       uint32_t bufferMemoryBarrierCount,
+                       const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+                       uint32_t imageMemoryBarrierCount,
+                       const VkImageMemoryBarrier* pImageMemoryBarriers)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   struct val_cmd_buffer_entry *cmd;
+   uint32_t cmd_size = 0;
+
+   cmd_size += eventCount * sizeof(struct val_event *);
+   cmd_size += memoryBarrierCount * sizeof(VkMemoryBarrier);
+   cmd_size += bufferMemoryBarrierCount * sizeof(VkBufferMemoryBarrier);
+   cmd_size += imageMemoryBarrierCount * sizeof(VkImageMemoryBarrier);
+
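+   /* Space is reserved for the event pointers and all three barrier arrays,
+    * but only the event pointers are filled in below (see TODO). */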
+   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_WAIT_EVENTS);
+   if (!cmd)
+      return;
+
+   cmd->u.wait_events.src_stage_mask = srcStageMask;
+   cmd->u.wait_events.dst_stage_mask = dstStageMask;
+   cmd->u.wait_events.event_count = eventCount;
+   cmd->u.wait_events.events = (struct val_event **)(cmd + 1);
+   for (unsigned i = 0; i < eventCount; i++)
+      cmd->u.wait_events.events[i] = val_event_from_handle(pEvents[i]);
+   cmd->u.wait_events.memory_barrier_count = memoryBarrierCount;
+   cmd->u.wait_events.buffer_memory_barrier_count = bufferMemoryBarrierCount;
+   cmd->u.wait_events.image_memory_barrier_count = imageMemoryBarrierCount;
+
+   /* TODO: finish this off */
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+
+void val_CmdCopyBufferToImage(
+   VkCommandBuffer                             commandBuffer,
+   VkBuffer                                    srcBuffer,
+   VkImage                                     destImage,
+   VkImageLayout                               destImageLayout,
+   uint32_t                                    regionCount,
+   const VkBufferImageCopy*                    pRegions)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_buffer, src_buffer, srcBuffer);
+   VAL_FROM_HANDLE(val_image, dst_image, destImage);
+   struct val_cmd_buffer_entry *cmd;
+   uint32_t cmd_size = regionCount * sizeof(VkBufferImageCopy);
+
+   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_COPY_BUFFER_TO_IMAGE);
+   if (!cmd)
+      return;
+
+   cmd->u.buffer_to_img.src = src_buffer;
+   cmd->u.buffer_to_img.dst = dst_image;
+   cmd->u.buffer_to_img.dst_layout = destImageLayout;
+   cmd->u.buffer_to_img.region_count = regionCount;
+
+   {
+      VkBufferImageCopy *regions;
+
+      regions = (VkBufferImageCopy *)(cmd + 1);
+      memcpy(regions, pRegions, regionCount * sizeof(VkBufferImageCopy));
+      cmd->u.buffer_to_img.regions = regions;
+   }
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdCopyImageToBuffer(
+   VkCommandBuffer                             commandBuffer,
+   VkImage                                     srcImage,
+   VkImageLayout                               srcImageLayout,
+   VkBuffer                                    destBuffer,
+   uint32_t                                    regionCount,
+   const VkBufferImageCopy*                    pRegions)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_image, src_image, srcImage);
+   VAL_FROM_HANDLE(val_buffer, dst_buffer, destBuffer);
+   struct val_cmd_buffer_entry *cmd;
+   uint32_t cmd_size = regionCount * sizeof(VkBufferImageCopy);
+
+   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_COPY_IMAGE_TO_BUFFER);
+   if (!cmd)
+      return;
+
+   cmd->u.img_to_buffer.src = src_image;
+   cmd->u.img_to_buffer.dst = dst_buffer;
+   cmd->u.img_to_buffer.src_layout = srcImageLayout;
+   cmd->u.img_to_buffer.region_count = regionCount;
+
+   {
+      VkBufferImageCopy *regions;
+
+      regions = (VkBufferImageCopy *)(cmd + 1);
+      memcpy(regions, pRegions, regionCount * sizeof(VkBufferImageCopy));
+      cmd->u.img_to_buffer.regions = regions;
+   }
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdCopyImage(
+   VkCommandBuffer                             commandBuffer,
+   VkImage                                     srcImage,
+   VkImageLayout                               srcImageLayout,
+   VkImage                                     destImage,
+   VkImageLayout                               destImageLayout,
+   uint32_t                                    regionCount,
+   const VkImageCopy*                          pRegions)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_image, src_image, srcImage);
+   VAL_FROM_HANDLE(val_image, dest_image, destImage);
+   struct val_cmd_buffer_entry *cmd;
+   uint32_t cmd_size = regionCount * sizeof(VkImageCopy);
+
+   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_COPY_IMAGE);
+   if (!cmd)
+      return;
+
+   cmd->u.copy_image.src = src_image;
+   cmd->u.copy_image.dst = dest_image;
+   cmd->u.copy_image.src_layout = srcImageLayout;
+   cmd->u.copy_image.dst_layout = destImageLayout;
+   cmd->u.copy_image.region_count = regionCount;
+
+   {
+      VkImageCopy *regions;
+
+      regions = (VkImageCopy *)(cmd + 1);
+      memcpy(regions, pRegions, regionCount * sizeof(VkImageCopy));
+      cmd->u.copy_image.regions = regions;
+   }
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+
+void val_CmdCopyBuffer(
+   VkCommandBuffer                             commandBuffer,
+   VkBuffer                                    srcBuffer,
+   VkBuffer                                    destBuffer,
+   uint32_t                                    regionCount,
+   const VkBufferCopy*                         pRegions)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_buffer, src_buffer, srcBuffer);
+   VAL_FROM_HANDLE(val_buffer, dest_buffer, destBuffer);
+   struct val_cmd_buffer_entry *cmd;
+   uint32_t cmd_size = regionCount * sizeof(VkBufferCopy);
+
+   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_COPY_BUFFER);
+   if (!cmd)
+      return;
+
+   cmd->u.copy_buffer.src = src_buffer;
+   cmd->u.copy_buffer.dst = dest_buffer;
+   cmd->u.copy_buffer.region_count = regionCount;
+
+   {
+      VkBufferCopy *regions;
+
+      regions = (VkBufferCopy *)(cmd + 1);
+      memcpy(regions, pRegions, regionCount * sizeof(VkBufferCopy));
+      cmd->u.copy_buffer.regions = regions;
+   }
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdBlitImage(
+   VkCommandBuffer                             commandBuffer,
+   VkImage                                     srcImage,
+   VkImageLayout                               srcImageLayout,
+   VkImage                                     destImage,
+   VkImageLayout                               destImageLayout,
+   uint32_t                                    regionCount,
+   const VkImageBlit*                          pRegions,
+   VkFilter                                    filter)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_image, src_image, srcImage);
+   VAL_FROM_HANDLE(val_image, dest_image, destImage);
+   struct val_cmd_buffer_entry *cmd;
+   uint32_t cmd_size = regionCount * sizeof(VkImageBlit);
+
+   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_BLIT_IMAGE);
+   if (!cmd)
+      return;
+
+   cmd->u.blit_image.src = src_image;
+   cmd->u.blit_image.dst = dest_image;
+   cmd->u.blit_image.src_layout = srcImageLayout;
+   cmd->u.blit_image.dst_layout = destImageLayout;
+   cmd->u.blit_image.filter = filter;
+   cmd->u.blit_image.region_count = regionCount;
+
+   {
+      VkImageBlit *regions;
+
+      regions = (VkImageBlit *)(cmd + 1);
+      memcpy(regions, pRegions, regionCount * sizeof(VkImageBlit));
+      cmd->u.blit_image.regions = regions;
+   }
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdClearAttachments(
+   VkCommandBuffer                             commandBuffer,
+   uint32_t                                    attachmentCount,
+   const VkClearAttachment*                    pAttachments,
+   uint32_t                                    rectCount,
+   const VkClearRect*                          pRects)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   struct val_cmd_buffer_entry *cmd;
+   uint32_t cmd_size = attachmentCount * sizeof(VkClearAttachment) + rectCount * sizeof(VkClearRect);
+
+   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_CLEAR_ATTACHMENTS);
+   if (!cmd)
+      return;
+
+   cmd->u.clear_attachments.attachment_count = attachmentCount;
+   cmd->u.clear_attachments.attachments = (VkClearAttachment *)(cmd + 1);
+   for (unsigned i = 0; i < attachmentCount; i++)
+      cmd->u.clear_attachments.attachments[i] = pAttachments[i];
+   cmd->u.clear_attachments.rect_count = rectCount;
+   cmd->u.clear_attachments.rects = (VkClearRect *)(cmd->u.clear_attachments.attachments + attachmentCount);
+   for (unsigned i = 0; i < rectCount; i++)
+      cmd->u.clear_attachments.rects[i] = pRects[i];
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdFillBuffer(
+   VkCommandBuffer                             commandBuffer,
+   VkBuffer                                    dstBuffer,
+   VkDeviceSize                                dstOffset,
+   VkDeviceSize                                fillSize,
+   uint32_t                                    data)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_buffer, dst_buffer, dstBuffer);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_FILL_BUFFER);
+   if (!cmd)
+      return;
+
+   cmd->u.fill_buffer.buffer = dst_buffer;
+   cmd->u.fill_buffer.offset = dstOffset;
+   cmd->u.fill_buffer.fill_size = fillSize;
+   cmd->u.fill_buffer.data = data;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdUpdateBuffer(
+   VkCommandBuffer                             commandBuffer,
+   VkBuffer                                    dstBuffer,
+   VkDeviceSize                                dstOffset,
+   VkDeviceSize                                dataSize,
+   const void*                                 pData)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_buffer, dst_buffer, dstBuffer);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc_size(cmd_buffer, dataSize, VAL_CMD_UPDATE_BUFFER);
+   if (!cmd)
+      return;
+
+   cmd->u.update_buffer.buffer = dst_buffer;
+   cmd->u.update_buffer.offset = dstOffset;
+   cmd->u.update_buffer.data_size = dataSize;
+   memcpy(cmd->u.update_buffer.data, pData, dataSize);
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdClearColorImage(
+   VkCommandBuffer                             commandBuffer,
+   VkImage                                     image_h,
+   VkImageLayout                               imageLayout,
+   const VkClearColorValue*                    pColor,
+   uint32_t                                    rangeCount,
+   const VkImageSubresourceRange*              pRanges)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_image, image, image_h);
+   struct val_cmd_buffer_entry *cmd;
+   uint32_t cmd_size = rangeCount * sizeof(VkImageSubresourceRange);
+
+   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_CLEAR_COLOR_IMAGE);
+   if (!cmd)
+      return;
+
+   cmd->u.clear_color_image.image = image;
+   cmd->u.clear_color_image.layout = imageLayout;
+   cmd->u.clear_color_image.clear_val = *pColor;
+   cmd->u.clear_color_image.range_count = rangeCount;
+   cmd->u.clear_color_image.ranges = (VkImageSubresourceRange *)(cmd + 1);
+   for (unsigned i = 0; i < rangeCount; i++)
+      cmd->u.clear_color_image.ranges[i] = pRanges[i];
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdClearDepthStencilImage(
+   VkCommandBuffer                             commandBuffer,
+   VkImage                                     image_h,
+   VkImageLayout                               imageLayout,
+   const VkClearDepthStencilValue*             pDepthStencil,
+   uint32_t                                    rangeCount,
+   const VkImageSubresourceRange*              pRanges)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_image, image, image_h);
+   struct val_cmd_buffer_entry *cmd;
+   uint32_t cmd_size = rangeCount * sizeof(VkImageSubresourceRange);
+
+   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_CLEAR_DEPTH_STENCIL_IMAGE);
+   if (!cmd)
+      return;
+
+   cmd->u.clear_ds_image.image = image;
+   cmd->u.clear_ds_image.layout = imageLayout;
+   cmd->u.clear_ds_image.clear_val = *pDepthStencil;
+   cmd->u.clear_ds_image.range_count = rangeCount;
+   cmd->u.clear_ds_image.ranges = (VkImageSubresourceRange *)(cmd + 1);
+   for (unsigned i = 0; i < rangeCount; i++)
+      cmd->u.clear_ds_image.ranges[i] = pRanges[i];
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+
+void val_CmdResolveImage(
+   VkCommandBuffer                             commandBuffer,
+   VkImage                                     srcImage,
+   VkImageLayout                               srcImageLayout,
+   VkImage                                     destImage,
+   VkImageLayout                               destImageLayout,
+   uint32_t                                    regionCount,
+   const VkImageResolve*                       regions)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_image, src_image, srcImage);
+   VAL_FROM_HANDLE(val_image, dst_image, destImage);
+   struct val_cmd_buffer_entry *cmd;
+   uint32_t cmd_size = regionCount * sizeof(VkImageResolve);
+
+   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_RESOLVE_IMAGE);
+   if (!cmd)
+      return;
+
+   cmd->u.resolve_image.src = src_image;
+   cmd->u.resolve_image.dst = dst_image;
+   cmd->u.resolve_image.src_layout = srcImageLayout;
+   cmd->u.resolve_image.dst_layout = destImageLayout;
+   cmd->u.resolve_image.region_count = regionCount;
+   cmd->u.resolve_image.regions = (VkImageResolve *)(cmd + 1);
+   for (unsigned i = 0; i < regionCount; i++)
+      cmd->u.resolve_image.regions[i] = regions[i];
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdResetQueryPool(
+   VkCommandBuffer                             commandBuffer,
+   VkQueryPool                                 queryPool,
+   uint32_t                                    firstQuery,
+   uint32_t                                    queryCount)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_query_pool, query_pool, queryPool);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_RESET_QUERY_POOL);
+   if (!cmd)
+      return;
+
+   cmd->u.query.pool = query_pool;
+   cmd->u.query.query = firstQuery;
+   cmd->u.query.index = queryCount;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdBeginQueryIndexedEXT(
+   VkCommandBuffer                             commandBuffer,
+   VkQueryPool                                 queryPool,
+   uint32_t                                    query,
+   VkQueryControlFlags                         flags,
+   uint32_t                                    index)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_query_pool, query_pool, queryPool);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_BEGIN_QUERY);
+   if (!cmd)
+      return;
+
+   cmd->u.query.pool = query_pool;
+   cmd->u.query.query = query;
+   cmd->u.query.index = index;
+   cmd->u.query.precise = true;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdBeginQuery(
+   VkCommandBuffer                             commandBuffer,
+   VkQueryPool                                 queryPool,
+   uint32_t                                    query,
+   VkQueryControlFlags                         flags)
+{
+   val_CmdBeginQueryIndexedEXT(commandBuffer, queryPool, query, flags, 0);
+}
+
+void val_CmdEndQueryIndexedEXT(
+   VkCommandBuffer                             commandBuffer,
+   VkQueryPool                                 queryPool,
+   uint32_t                                    query,
+   uint32_t                                    index)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_query_pool, query_pool, queryPool);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_END_QUERY);
+   if (!cmd)
+      return;
+
+   cmd->u.query.pool = query_pool;
+   cmd->u.query.query = query;
+   cmd->u.query.index = index;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdEndQuery(
+   VkCommandBuffer                             commandBuffer,
+   VkQueryPool                                 queryPool,
+   uint32_t                                    query)
+{
+   val_CmdEndQueryIndexedEXT(commandBuffer, queryPool, query, 0);
+}
+
+void val_CmdWriteTimestamp(
+   VkCommandBuffer                             commandBuffer,
+   VkPipelineStageFlagBits                     pipelineStage,
+   VkQueryPool                                 queryPool,
+   uint32_t                                    query)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_query_pool, query_pool, queryPool);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_WRITE_TIMESTAMP);
+   if (!cmd)
+      return;
+
+   cmd->u.query.pool = query_pool;
+   cmd->u.query.query = query;
+   cmd->u.query.flush = !(pipelineStage == VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdCopyQueryPoolResults(
+   VkCommandBuffer                             commandBuffer,
+   VkQueryPool                                 queryPool,
+   uint32_t                                    firstQuery,
+   uint32_t                                    queryCount,
+   VkBuffer                                    dstBuffer,
+   VkDeviceSize                                dstOffset,
+   VkDeviceSize                                stride,
+   VkQueryResultFlags                          flags)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   VAL_FROM_HANDLE(val_query_pool, query_pool, queryPool);
+   VAL_FROM_HANDLE(val_buffer, buffer, dstBuffer);
+   struct val_cmd_buffer_entry *cmd;
+
+   cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_COPY_QUERY_POOL_RESULTS);
+   if (!cmd)
+      return;
+
+   cmd->u.copy_query_pool_results.pool = query_pool;
+   cmd->u.copy_query_pool_results.first_query = firstQuery;
+   cmd->u.copy_query_pool_results.query_count = queryCount;
+   cmd->u.copy_query_pool_results.dst = buffer;
+   cmd->u.copy_query_pool_results.dst_offset = dstOffset;
+   cmd->u.copy_query_pool_results.stride = stride;
+   cmd->u.copy_query_pool_results.flags = flags;
+
+   cmd_buf_queue(cmd_buffer, cmd);
+}
+
+void val_CmdPipelineBarrier(
+   VkCommandBuffer                             commandBuffer,
+   VkPipelineStageFlags                        srcStageMask,
+   VkPipelineStageFlags                        destStageMask,
+   VkBool32                                    byRegion,
+   uint32_t                                    memoryBarrierCount,
+   const VkMemoryBarrier*                      pMemoryBarriers,
+   uint32_t                                    bufferMemoryBarrierCount,
+   const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
+   uint32_t                                    imageMemoryBarrierCount,
+   const VkImageMemoryBarrier*                 pImageMemoryBarriers)
+{
+   VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+   struct val_cmd_buffer_entry *cmd;
+   uint32_t cmd_size = 0;
+
+   cmd_size += memoryBarrierCount * sizeof(VkMemoryBarrier);
+   cmd_size += bufferMemoryBarrierCount * sizeof(VkBufferMemoryBarrier);
+   cmd_size += imageMemoryBarrierCount * sizeof(VkImageMemoryBarrier);
+
+   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_PIPELINE_BARRIER);
+   if (!cmd)
+      return;
+
+   cmd->u.pipeline_barrier.src_stage_mask = srcStageMask;
+   cmd->u.pipeline_barrier.dst_stage_mask = destStageMask;
+   cmd->u.pipeline_barrier.by_region = byRegion;
+   cmd->u.pipeline_barrier.memory_barrier_count = memoryBarrierCount;
+   cmd->u.pipeline_barrier.buffer_memory_barrier_count = bufferMemoryBarrierCount;
+   cmd->u.pipeline_barrier.image_memory_barrier_count = imageMemoryBarrierCount;
+
+   /* TODO: copy the memory/buffer/image barrier structs into the entry
+    * (space for them is already reserved above). */
+   cmd_buf_queue(cmd_buffer, cmd);
+}
diff --git a/src/gallium/frontends/vallium/val_conv.h b/src/gallium/frontends/vallium/val_conv.h
new file mode 100644 (file)
index 0000000..5efc078
--- /dev/null
@@ -0,0 +1,198 @@
+/*
+ * Copyright © 2019 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
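+/* Inline helpers that translate Vulkan enums into the corresponding gallium
+ * PIPE_* values.
+ */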
+static inline unsigned vk_cull_to_pipe(uint32_t vk_cull)
+{
+   /* VkCullModeFlags and PIPE_FACE_* use the same values, so pass through */
+   return vk_cull;
+}
+
+static inline unsigned vk_polygon_mode_to_pipe(uint32_t vk_poly_mode)
+{
+   /* VkPolygonMode and PIPE_POLYGON_MODE_* use the same values, so pass through */
+   return vk_poly_mode;
+}
+
+static inline unsigned vk_conv_stencil_op(uint32_t vk_stencil_op)
+{
+   switch (vk_stencil_op) {
+   case VK_STENCIL_OP_KEEP:
+      return PIPE_STENCIL_OP_KEEP;
+   case VK_STENCIL_OP_ZERO:
+      return PIPE_STENCIL_OP_ZERO;
+   case VK_STENCIL_OP_REPLACE:
+      return PIPE_STENCIL_OP_REPLACE;
+   case VK_STENCIL_OP_INCREMENT_AND_CLAMP:
+      return PIPE_STENCIL_OP_INCR;
+   case VK_STENCIL_OP_DECREMENT_AND_CLAMP:
+      return PIPE_STENCIL_OP_DECR;
+   case VK_STENCIL_OP_INVERT:
+      return PIPE_STENCIL_OP_INVERT;
+   case VK_STENCIL_OP_INCREMENT_AND_WRAP:
+      return PIPE_STENCIL_OP_INCR_WRAP;
+   case VK_STENCIL_OP_DECREMENT_AND_WRAP:
+      return PIPE_STENCIL_OP_DECR_WRAP;
+   default:
+      assert(0);
+      return 0;
+   }
+}
+
+static inline unsigned vk_conv_topology(VkPrimitiveTopology topology)
+{
+   switch (topology) {
+   case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
+      return PIPE_PRIM_POINTS;
+   case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
+      return PIPE_PRIM_LINES;
+   case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
+      return PIPE_PRIM_LINE_STRIP;
+   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
+      return PIPE_PRIM_TRIANGLES;
+   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
+      return PIPE_PRIM_TRIANGLE_STRIP;
+   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
+      return PIPE_PRIM_TRIANGLE_FAN;
+   case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
+      return PIPE_PRIM_LINES_ADJACENCY;
+   case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
+      return PIPE_PRIM_LINE_STRIP_ADJACENCY;
+   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
+      return PIPE_PRIM_TRIANGLES_ADJACENCY;
+   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
+      return PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
+   case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
+      return PIPE_PRIM_PATCHES;
+   default:
+      assert(0);
+      return 0;
+   }
+}
+
+static inline unsigned vk_conv_wrap_mode(enum VkSamplerAddressMode addr_mode)
+{
+   switch (addr_mode) {
+   case VK_SAMPLER_ADDRESS_MODE_REPEAT:
+      return PIPE_TEX_WRAP_REPEAT;
+   case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
+      return PIPE_TEX_WRAP_MIRROR_REPEAT;
+   case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
+      return PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+   case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
+      return PIPE_TEX_WRAP_CLAMP_TO_BORDER;
+   case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
+      return PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE;
+   default:
+      assert(0);
+      return 0;
+   }
+}
+
+static inline unsigned vk_conv_blend_factor(enum VkBlendFactor vk_factor)
+{
+   switch (vk_factor) {
+   case VK_BLEND_FACTOR_ZERO:
+      return PIPE_BLENDFACTOR_ZERO;
+   case VK_BLEND_FACTOR_ONE:
+      return PIPE_BLENDFACTOR_ONE;
+   case VK_BLEND_FACTOR_SRC_COLOR:
+      return PIPE_BLENDFACTOR_SRC_COLOR;
+   case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
+      return PIPE_BLENDFACTOR_INV_SRC_COLOR;
+   case VK_BLEND_FACTOR_DST_COLOR:
+      return PIPE_BLENDFACTOR_DST_COLOR;
+   case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
+      return PIPE_BLENDFACTOR_INV_DST_COLOR;
+   case VK_BLEND_FACTOR_SRC_ALPHA:
+      return PIPE_BLENDFACTOR_SRC_ALPHA;
+   case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
+      return PIPE_BLENDFACTOR_INV_SRC_ALPHA;
+   case VK_BLEND_FACTOR_DST_ALPHA:
+      return PIPE_BLENDFACTOR_DST_ALPHA;
+   case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
+      return PIPE_BLENDFACTOR_INV_DST_ALPHA;
+   case VK_BLEND_FACTOR_CONSTANT_COLOR:
+      return PIPE_BLENDFACTOR_CONST_COLOR;
+   case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
+      return PIPE_BLENDFACTOR_INV_CONST_COLOR;
+   case VK_BLEND_FACTOR_CONSTANT_ALPHA:
+      return PIPE_BLENDFACTOR_CONST_ALPHA;
+   case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
+      return PIPE_BLENDFACTOR_INV_CONST_ALPHA;
+   case VK_BLEND_FACTOR_SRC1_COLOR:
+      return PIPE_BLENDFACTOR_SRC1_COLOR;
+   case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
+      return PIPE_BLENDFACTOR_INV_SRC1_COLOR;
+   case VK_BLEND_FACTOR_SRC1_ALPHA:
+      return PIPE_BLENDFACTOR_SRC1_ALPHA;
+   case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
+      return PIPE_BLENDFACTOR_INV_SRC1_ALPHA;
+   case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
+      return PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE;
+   default:
+      assert(0);
+      return 0;
+   }
+}
+
+static inline unsigned vk_conv_blend_func(enum VkBlendOp op)
+{
+   switch (op) {
+   case VK_BLEND_OP_ADD:
+      return PIPE_BLEND_ADD;
+   case VK_BLEND_OP_SUBTRACT:
+      return PIPE_BLEND_SUBTRACT;
+   case VK_BLEND_OP_REVERSE_SUBTRACT:
+      return PIPE_BLEND_REVERSE_SUBTRACT;
+   case VK_BLEND_OP_MIN:
+      return PIPE_BLEND_MIN;
+   case VK_BLEND_OP_MAX:
+      return PIPE_BLEND_MAX;
+   default:
+      assert(0);
+      return 0;
+   }
+}
+
+static inline enum pipe_swizzle vk_conv_swizzle(VkComponentSwizzle swiz)
+{
+   switch (swiz) {
+   case VK_COMPONENT_SWIZZLE_ZERO:
+      return PIPE_SWIZZLE_0;
+   case VK_COMPONENT_SWIZZLE_ONE:
+      return PIPE_SWIZZLE_1;
+   case VK_COMPONENT_SWIZZLE_R:
+      return PIPE_SWIZZLE_X;
+   case VK_COMPONENT_SWIZZLE_G:
+      return PIPE_SWIZZLE_Y;
+   case VK_COMPONENT_SWIZZLE_B:
+      return PIPE_SWIZZLE_Z;
+   case VK_COMPONENT_SWIZZLE_A:
+      return PIPE_SWIZZLE_W;
+   case VK_COMPONENT_SWIZZLE_IDENTITY:
+   default:
+      return PIPE_SWIZZLE_NONE;
+   }
+}
diff --git a/src/gallium/frontends/vallium/val_descriptor_set.c b/src/gallium/frontends/vallium/val_descriptor_set.c
new file mode 100644 (file)
index 0000000..82c1129
--- /dev/null
@@ -0,0 +1,501 @@
+/*
+ * Copyright © 2019 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "val_private.h"
+#include "vk_util.h"
+#include "u_math.h"
+
+VkResult val_CreateDescriptorSetLayout(
+    VkDevice                                    _device,
+    const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkDescriptorSetLayout*                      pSetLayout)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   struct val_descriptor_set_layout *set_layout;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
+   uint32_t max_binding = 0;
+   uint32_t immutable_sampler_count = 0;
+   for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
+      max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);
+      if (pCreateInfo->pBindings[j].pImmutableSamplers)
+         immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
+   }
+
+   size_t size = sizeof(struct val_descriptor_set_layout) +
+                 (max_binding + 1) * sizeof(set_layout->binding[0]) +
+                 immutable_sampler_count * sizeof(struct val_sampler *);
+
+   set_layout = vk_zalloc2(&device->alloc, pAllocator, size, 8,
+                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (!set_layout)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vk_object_base_init(&device->vk, &set_layout->base,
+                       VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT);
+   /* We just allocate all the samplers at the end of the struct */
+   struct val_sampler **samplers =
+      (struct val_sampler **)&set_layout->binding[max_binding + 1];
+
+   set_layout->binding_count = max_binding + 1;
+   set_layout->shader_stages = 0;
+   set_layout->size = 0;
+
+   uint32_t dynamic_offset_count = 0;
+
+   for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
+      const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[j];
+      uint32_t b = binding->binding;
+
+      set_layout->binding[b].array_size = binding->descriptorCount;
+      set_layout->binding[b].descriptor_index = set_layout->size;
+      set_layout->binding[b].type = binding->descriptorType;
+      set_layout->binding[b].valid = true;
+      set_layout->size += binding->descriptorCount;
+
+      for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < MESA_SHADER_STAGES; stage++) {
+         set_layout->binding[b].stage[stage].const_buffer_index = -1;
+         set_layout->binding[b].stage[stage].shader_buffer_index = -1;
+         set_layout->binding[b].stage[stage].sampler_index = -1;
+         set_layout->binding[b].stage[stage].sampler_view_index = -1;
+         set_layout->binding[b].stage[stage].image_index = -1;
+      }
+
+      if (binding->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
+          binding->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
+         set_layout->binding[b].dynamic_index = dynamic_offset_count;
+         dynamic_offset_count += binding->descriptorCount;
+      }
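+      /* The two switches below hand each binding a contiguous slot index in
+       * the matching per-stage gallium resource counters (samplers, constant
+       * buffers, shader buffers, images, sampler views). */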
+      switch (binding->descriptorType) {
+      case VK_DESCRIPTOR_TYPE_SAMPLER:
+      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+         val_foreach_stage(s, binding->stageFlags) {
+            set_layout->binding[b].stage[s].sampler_index = set_layout->stage[s].sampler_count;
+            set_layout->stage[s].sampler_count += binding->descriptorCount;
+         }
+         break;
+      default:
+         break;
+      }
+
+      switch (binding->descriptorType) {
+      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+         val_foreach_stage(s, binding->stageFlags) {
+            set_layout->binding[b].stage[s].const_buffer_index = set_layout->stage[s].const_buffer_count;
+            set_layout->stage[s].const_buffer_count += binding->descriptorCount;
+         }
+         break;
+      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+         val_foreach_stage(s, binding->stageFlags) {
+            set_layout->binding[b].stage[s].shader_buffer_index = set_layout->stage[s].shader_buffer_count;
+            set_layout->stage[s].shader_buffer_count += binding->descriptorCount;
+         }
+         break;
+
+      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+         val_foreach_stage(s, binding->stageFlags) {
+            set_layout->binding[b].stage[s].image_index = set_layout->stage[s].image_count;
+            set_layout->stage[s].image_count += binding->descriptorCount;
+         }
+         break;
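+      /* combined image/samplers consume a sampler slot above and a sampler
+       * view slot here */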
+      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+         val_foreach_stage(s, binding->stageFlags) {
+            set_layout->binding[b].stage[s].sampler_view_index = set_layout->stage[s].sampler_view_count;
+            set_layout->stage[s].sampler_view_count += binding->descriptorCount;
+         }
+         break;
+      default:
+         break;
+      }
+
+      if (binding->pImmutableSamplers) {
+         set_layout->binding[b].immutable_samplers = samplers;
+         samplers += binding->descriptorCount;
+
+         for (uint32_t i = 0; i < binding->descriptorCount; i++)
+            set_layout->binding[b].immutable_samplers[i] =
+               val_sampler_from_handle(binding->pImmutableSamplers[i]);
+      } else {
+         set_layout->binding[b].immutable_samplers = NULL;
+      }
+
+      set_layout->shader_stages |= binding->stageFlags;
+   }
+
+   set_layout->dynamic_offset_count = dynamic_offset_count;
+
+   *pSetLayout = val_descriptor_set_layout_to_handle(set_layout);
+
+   return VK_SUCCESS;
+}
+
+void val_DestroyDescriptorSetLayout(
+    VkDevice                                    _device,
+    VkDescriptorSetLayout                       _set_layout,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_descriptor_set_layout, set_layout, _set_layout);
+
+   if (!_set_layout)
+     return;
+   vk_object_base_finish(&set_layout->base);
+   vk_free2(&device->alloc, pAllocator, set_layout);
+}
+
+VkResult val_CreatePipelineLayout(
+    VkDevice                                    _device,
+    const VkPipelineLayoutCreateInfo*           pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkPipelineLayout*                           pPipelineLayout)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   struct val_pipeline_layout *layout;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);
+
+   layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
+                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (layout == NULL)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vk_object_base_init(&device->vk, &layout->base,
+                       VK_OBJECT_TYPE_PIPELINE_LAYOUT);
+   layout->num_sets = pCreateInfo->setLayoutCount;
+
+   for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
+      VAL_FROM_HANDLE(val_descriptor_set_layout, set_layout,
+                      pCreateInfo->pSetLayouts[set]);
+      layout->set[set].layout = set_layout;
+   }
+
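+   /* the push constant block spans the furthest extent of all declared
+    * ranges, rounded up to a multiple of 16 bytes */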
+   layout->push_constant_size = 0;
+   for (unsigned i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
+      const VkPushConstantRange *range = pCreateInfo->pPushConstantRanges + i;
+      layout->push_constant_size = MAX2(layout->push_constant_size,
+                                        range->offset + range->size);
+   }
+   layout->push_constant_size = align(layout->push_constant_size, 16);
+   *pPipelineLayout = val_pipeline_layout_to_handle(layout);
+
+   return VK_SUCCESS;
+}
+
+void val_DestroyPipelineLayout(
+    VkDevice                                    _device,
+    VkPipelineLayout                            _pipelineLayout,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_pipeline_layout, pipeline_layout, _pipelineLayout);
+
+   if (!_pipelineLayout)
+     return;
+   vk_object_base_finish(&pipeline_layout->base);
+   vk_free2(&device->alloc, pAllocator, pipeline_layout);
+}
+
+VkResult
+val_descriptor_set_create(struct val_device *device,
+                          const struct val_descriptor_set_layout *layout,
+                          struct val_descriptor_set **out_set)
+{
+   struct val_descriptor_set *set;
+   size_t size = sizeof(*set) + layout->size * sizeof(set->descriptors[0]);
+
+   set = vk_alloc(&device->alloc /* XXX: Use the pool */, size, 8,
+                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (!set)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   /* A descriptor set may not be 100% filled. Clear the set so we can
+    * later detect holes in it.
+    */
+   memset(set, 0, size);
+
+   vk_object_base_init(&device->vk, &set->base,
+                       VK_OBJECT_TYPE_DESCRIPTOR_SET);
+   set->layout = layout;
+
+   /* Go through and fill out immutable samplers if we have any */
+   struct val_descriptor *desc = set->descriptors;
+   for (uint32_t b = 0; b < layout->binding_count; b++) {
+      if (layout->binding[b].immutable_samplers) {
+         for (uint32_t i = 0; i < layout->binding[b].array_size; i++)
+            desc[i].sampler = layout->binding[b].immutable_samplers[i];
+      }
+      desc += layout->binding[b].array_size;
+   }
+
+   *out_set = set;
+
+   return VK_SUCCESS;
+}
+
+void
+val_descriptor_set_destroy(struct val_device *device,
+                           struct val_descriptor_set *set)
+{
+   vk_object_base_finish(&set->base);
+   vk_free(&device->alloc, set);
+}
+
+VkResult val_AllocateDescriptorSets(
+    VkDevice                                    _device,
+    const VkDescriptorSetAllocateInfo*          pAllocateInfo,
+    VkDescriptorSet*                            pDescriptorSets)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_descriptor_pool, pool, pAllocateInfo->descriptorPool);
+   VkResult result = VK_SUCCESS;
+   struct val_descriptor_set *set;
+   uint32_t i;
+
+   for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
+      VAL_FROM_HANDLE(val_descriptor_set_layout, layout,
+                      pAllocateInfo->pSetLayouts[i]);
+
+      result = val_descriptor_set_create(device, layout, &set);
+      if (result != VK_SUCCESS)
+         break;
+
+      list_addtail(&set->link, &pool->sets);
+      pDescriptorSets[i] = val_descriptor_set_to_handle(set);
+   }
+
+   if (result != VK_SUCCESS)
+      val_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
+                             i, pDescriptorSets);
+
+   return result;
+}
+
+VkResult val_FreeDescriptorSets(
+    VkDevice                                    _device,
+    VkDescriptorPool                            descriptorPool,
+    uint32_t                                    count,
+    const VkDescriptorSet*                      pDescriptorSets)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   for (uint32_t i = 0; i < count; i++) {
+      VAL_FROM_HANDLE(val_descriptor_set, set, pDescriptorSets[i]);
+
+      if (!set)
+         continue;
+      list_del(&set->link);
+      val_descriptor_set_destroy(device, set);
+   }
+   return VK_SUCCESS;
+}
+
+void val_UpdateDescriptorSets(
+    VkDevice                                    _device,
+    uint32_t                                    descriptorWriteCount,
+    const VkWriteDescriptorSet*                 pDescriptorWrites,
+    uint32_t                                    descriptorCopyCount,
+    const VkCopyDescriptorSet*                  pDescriptorCopies)
+{
+   for (uint32_t i = 0; i < descriptorWriteCount; i++) {
+      const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
+      VAL_FROM_HANDLE(val_descriptor_set, set, write->dstSet);
+      const struct val_descriptor_set_binding_layout *bind_layout =
+         &set->layout->binding[write->dstBinding];
+      struct val_descriptor *desc =
+         &set->descriptors[bind_layout->descriptor_index];
+      desc += write->dstArrayElement;
+
+      switch (write->descriptorType) {
+      case VK_DESCRIPTOR_TYPE_SAMPLER:
+         for (uint32_t j = 0; j < write->descriptorCount; j++) {
+            VAL_FROM_HANDLE(val_sampler, sampler,
+                            write->pImageInfo[j].sampler);
+
+            desc[j] = (struct val_descriptor) {
+               .type = VK_DESCRIPTOR_TYPE_SAMPLER,
+               .sampler = sampler,
+            };
+         }
+         break;
+
+      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+         for (uint32_t j = 0; j < write->descriptorCount; j++) {
+            VAL_FROM_HANDLE(val_image_view, iview,
+                            write->pImageInfo[j].imageView);
+            VAL_FROM_HANDLE(val_sampler, sampler,
+                            write->pImageInfo[j].sampler);
+
+            desc[j].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+            desc[j].image_view = iview;
+
+            /* If this descriptor has an immutable sampler, we don't want
+             * to stomp on it.
+             */
+            if (sampler)
+               desc[j].sampler = sampler;
+         }
+         break;
+
+      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+         for (uint32_t j = 0; j < write->descriptorCount; j++) {
+            VAL_FROM_HANDLE(val_image_view, iview,
+                            write->pImageInfo[j].imageView);
+
+            desc[j] = (struct val_descriptor) {
+               .type = write->descriptorType,
+               .image_view = iview,
+            };
+         }
+         break;
+
+      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+         for (uint32_t j = 0; j < write->descriptorCount; j++) {
+            VAL_FROM_HANDLE(val_buffer_view, bview,
+                            write->pTexelBufferView[j]);
+
+            desc[j] = (struct val_descriptor) {
+               .type = write->descriptorType,
+               .buffer_view = bview,
+            };
+         }
+         break;
+
+      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+         for (uint32_t j = 0; j < write->descriptorCount; j++) {
+            assert(write->pBufferInfo[j].buffer);
+            VAL_FROM_HANDLE(val_buffer, buffer, write->pBufferInfo[j].buffer);
+            assert(buffer);
+            desc[j] = (struct val_descriptor) {
+               .type = write->descriptorType,
+               .buf.offset = write->pBufferInfo[j].offset,
+               .buf.buffer = buffer,
+               .buf.range = write->pBufferInfo[j].range,
+            };
+         }
+         break;
+
+      default:
+         break;
+      }
+   }
+
+   for (uint32_t i = 0; i < descriptorCopyCount; i++) {
+      const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
+      VAL_FROM_HANDLE(val_descriptor_set, src, copy->srcSet);
+      VAL_FROM_HANDLE(val_descriptor_set, dst, copy->dstSet);
+
+      const struct val_descriptor_set_binding_layout *src_layout =
+         &src->layout->binding[copy->srcBinding];
+      struct val_descriptor *src_desc =
+         &src->descriptors[src_layout->descriptor_index];
+      src_desc += copy->srcArrayElement;
+
+      const struct val_descriptor_set_binding_layout *dst_layout =
+         &dst->layout->binding[copy->dstBinding];
+      struct val_descriptor *dst_desc =
+         &dst->descriptors[dst_layout->descriptor_index];
+      dst_desc += copy->dstArrayElement;
+
+      for (uint32_t j = 0; j < copy->descriptorCount; j++)
+         dst_desc[j] = src_desc[j];
+   }
+}
+
+VkResult val_CreateDescriptorPool(
+    VkDevice                                    _device,
+    const VkDescriptorPoolCreateInfo*           pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkDescriptorPool*                           pDescriptorPool)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   struct val_descriptor_pool *pool;
+   size_t size = sizeof(struct val_descriptor_pool);
+   pool = vk_zalloc2(&device->alloc, pAllocator, size, 8,
+                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (!pool)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vk_object_base_init(&device->vk, &pool->base,
+                       VK_OBJECT_TYPE_DESCRIPTOR_POOL);
+   pool->flags = pCreateInfo->flags;
+   list_inithead(&pool->sets);
+   *pDescriptorPool = val_descriptor_pool_to_handle(pool);
+   return VK_SUCCESS;
+}
+
+static void val_reset_descriptor_pool(struct val_device *device,
+                                      struct val_descriptor_pool *pool)
+{
+   struct val_descriptor_set *set, *tmp;
+   LIST_FOR_EACH_ENTRY_SAFE(set, tmp, &pool->sets, link) {
+      list_del(&set->link);
+      vk_free(&device->alloc, set);
+   }
+}
+
+void val_DestroyDescriptorPool(
+    VkDevice                                    _device,
+    VkDescriptorPool                            _pool,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_descriptor_pool, pool, _pool);
+
+   if (!_pool)
+      return;
+
+   val_reset_descriptor_pool(device, pool);
+   vk_object_base_finish(&pool->base);
+   vk_free2(&device->alloc, pAllocator, pool);
+}
+
+VkResult val_ResetDescriptorPool(
+    VkDevice                                    _device,
+    VkDescriptorPool                            _pool,
+    VkDescriptorPoolResetFlags                  flags)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_descriptor_pool, pool, _pool);
+
+   val_reset_descriptor_pool(device, pool);
+   return VK_SUCCESS;
+}
+
+void val_GetDescriptorSetLayoutSupport(VkDevice device,
+                                       const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
+                                       VkDescriptorSetLayoutSupport* pSupport)
+{
+   /* not implemented yet: pSupport is left untouched */
+}
diff --git a/src/gallium/frontends/vallium/val_device.c b/src/gallium/frontends/vallium/val_device.c
new file mode 100644 (file)
index 0000000..b04a369
--- /dev/null
@@ -0,0 +1,1702 @@
+/*
+ * Copyright © 2019 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "val_private.h"
+
+#include "pipe-loader/pipe_loader.h"
+#include "git_sha1.h"
+#include "vk_util.h"
+#include "pipe/p_state.h"
+#include "pipe/p_context.h"
+#include "frontend/drisw_api.h"
+
+#include "compiler/glsl_types.h"
+#include "util/u_inlines.h"
+#include "util/os_memory.h"
+#include "util/u_thread.h"
+#include "util/u_atomic.h"
+#include "util/timespec.h"
+
+static VkResult
+val_physical_device_init(struct val_physical_device *device,
+                         struct val_instance *instance,
+                         struct pipe_loader_device *pld)
+{
+   VkResult result;
+   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+   device->instance = instance;
+   device->pld = pld;
+
+   device->pscreen = pipe_loader_create_screen(device->pld);
+   if (!device->pscreen)
+      return vk_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   fprintf(stderr, "WARNING: vallium/llvmpipe is not a conformant vulkan implementation, testing use only.\n");
+
+   device->max_images = device->pscreen->get_shader_param(device->pscreen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_SHADER_IMAGES);
+   val_physical_device_get_supported_extensions(device, &device->supported_extensions);
+   result = val_init_wsi(device);
+   if (result != VK_SUCCESS) {
+      vk_error(instance, result);
+      goto fail;
+   }
+
+   return VK_SUCCESS;
+ fail:
+   return result;
+}
+
+static void
+val_physical_device_finish(struct val_physical_device *device)
+{
+   val_finish_wsi(device);
+   device->pscreen->destroy(device->pscreen);
+}
+
+static void *
+default_alloc_func(void *pUserData, size_t size, size_t align,
+                   VkSystemAllocationScope allocationScope)
+{
+   return os_malloc_aligned(size, align);
+}
+
+static void *
+default_realloc_func(void *pUserData, void *pOriginal, size_t size,
+                     size_t align, VkSystemAllocationScope allocationScope)
+{
+   return realloc(pOriginal, size);
+}
+
+static void
+default_free_func(void *pUserData, void *pMemory)
+{
+   os_free_aligned(pMemory);
+}
+
+static const VkAllocationCallbacks default_alloc = {
+   .pUserData = NULL,
+   .pfnAllocation = default_alloc_func,
+   .pfnReallocation = default_realloc_func,
+   .pfnFree = default_free_func,
+};
+
+VkResult val_CreateInstance(
+   const VkInstanceCreateInfo*                 pCreateInfo,
+   const VkAllocationCallbacks*                pAllocator,
+   VkInstance*                                 pInstance)
+{
+   struct val_instance *instance;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
+
+   uint32_t client_version;
+   if (pCreateInfo->pApplicationInfo &&
+       pCreateInfo->pApplicationInfo->apiVersion != 0) {
+      client_version = pCreateInfo->pApplicationInfo->apiVersion;
+   } else {
+      client_version = VK_API_VERSION_1_0;
+   }
+
+   instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
+                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+   if (!instance)
+      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vk_object_base_init(NULL, &instance->base, VK_OBJECT_TYPE_INSTANCE);
+
+   if (pAllocator)
+      instance->alloc = *pAllocator;
+   else
+      instance->alloc = default_alloc;
+
+   instance->apiVersion = client_version;
+   instance->physicalDeviceCount = -1;
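+   /* -1 marks the physical device as not yet probed; enumeration happens
+    * lazily in val_EnumeratePhysicalDevices */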
+
+   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
+      int idx;
+      for (idx = 0; idx < VAL_INSTANCE_EXTENSION_COUNT; idx++) {
+         if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i],
+                     val_instance_extensions[idx].extensionName))
+            break;
+      }
+
+      if (idx >= VAL_INSTANCE_EXTENSION_COUNT ||
+          !val_instance_extensions_supported.extensions[idx]) {
+         vk_free2(&default_alloc, pAllocator, instance);
+         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
+      }
+      instance->enabled_extensions.extensions[idx] = true;
+   }
+
+   bool unchecked = instance->debug_flags & VAL_DEBUG_ALL_ENTRYPOINTS;
+   for (unsigned i = 0; i < ARRAY_SIZE(instance->dispatch.entrypoints); i++) {
+      /* Vulkan requires that entrypoints for extensions which have
+       * not been enabled must not be advertised.
+       */
+      if (!unchecked &&
+          !val_instance_entrypoint_is_enabled(i, instance->apiVersion,
+                                              &instance->enabled_extensions)) {
+         instance->dispatch.entrypoints[i] = NULL;
+      } else {
+         instance->dispatch.entrypoints[i] =
+            val_instance_dispatch_table.entrypoints[i];
+      }
+   }
+
+   for (unsigned i = 0; i < ARRAY_SIZE(instance->physical_device_dispatch.entrypoints); i++) {
+      /* Vulkan requires that entrypoints for extensions which have
+       * not been enabled must not be advertised.
+       */
+      if (!unchecked &&
+          !val_physical_device_entrypoint_is_enabled(i, instance->apiVersion,
+                                                     &instance->enabled_extensions)) {
+         instance->physical_device_dispatch.entrypoints[i] = NULL;
+      } else {
+         instance->physical_device_dispatch.entrypoints[i] =
+            val_physical_device_dispatch_table.entrypoints[i];
+      }
+   }
+
+   for (unsigned i = 0; i < ARRAY_SIZE(instance->device_dispatch.entrypoints); i++) {
+      /* Vulkan requires that entrypoints for extensions which have
+       * not been enabled must not be advertised.
+       */
+      if (!unchecked &&
+          !val_device_entrypoint_is_enabled(i, instance->apiVersion,
+                                            &instance->enabled_extensions, NULL)) {
+         instance->device_dispatch.entrypoints[i] = NULL;
+      } else {
+         instance->device_dispatch.entrypoints[i] =
+            val_device_dispatch_table.entrypoints[i];
+      }
+   }
+
+   //   _mesa_locale_init();
+   glsl_type_singleton_init_or_ref();
+   //   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
+
+   *pInstance = val_instance_to_handle(instance);
+
+   return VK_SUCCESS;
+}
+
+void val_DestroyInstance(
+   VkInstance                                  _instance,
+   const VkAllocationCallbacks*                pAllocator)
+{
+   VAL_FROM_HANDLE(val_instance, instance, _instance);
+
+   if (!instance)
+      return;
+   glsl_type_singleton_decref();
+   if (instance->physicalDeviceCount > 0)
+      val_physical_device_finish(&instance->physicalDevice);
+   //   _mesa_locale_fini();
+
+   pipe_loader_release(&instance->devs, instance->num_devices);
+
+   vk_object_base_finish(&instance->base);
+   vk_free(&instance->alloc, instance);
+}
+
+static void val_get_image(struct dri_drawable *dri_drawable,
+                          int x, int y, unsigned width, unsigned height, unsigned stride,
+                          void *data)
+{
+
+}
+
+static void val_put_image(struct dri_drawable *dri_drawable,
+                          void *data, unsigned width, unsigned height)
+{
+   fprintf(stderr, "put image %dx%d\n", width, height);
+}
+
+static void val_put_image2(struct dri_drawable *dri_drawable,
+                           void *data, int x, int y, unsigned width, unsigned height,
+                           unsigned stride)
+{
+   fprintf(stderr, "put image 2 %d,%d %dx%d\n", x, y, width, height);
+}
+
+static struct drisw_loader_funcs val_sw_lf = {
+   .get_image = val_get_image,
+   .put_image = val_put_image,
+   .put_image2 = val_put_image2,
+};
+
+VkResult val_EnumeratePhysicalDevices(
+   VkInstance                                  _instance,
+   uint32_t*                                   pPhysicalDeviceCount,
+   VkPhysicalDevice*                           pPhysicalDevices)
+{
+   VAL_FROM_HANDLE(val_instance, instance, _instance);
+   VkResult result;
+
+   if (instance->physicalDeviceCount < 0) {
+
+      /* sw only for now */
+      instance->num_devices = pipe_loader_sw_probe(NULL, 0);
+
+      assert(instance->num_devices == 1);
+
+      pipe_loader_sw_probe_dri(&instance->devs, &val_sw_lf);
+
+      result = val_physical_device_init(&instance->physicalDevice,
+                                        instance, &instance->devs[0]);
+      if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
+         instance->physicalDeviceCount = 0;
+      } else if (result == VK_SUCCESS) {
+         instance->physicalDeviceCount = 1;
+      } else {
+         return result;
+      }
+   }
+
+   if (!pPhysicalDevices) {
+      *pPhysicalDeviceCount = instance->physicalDeviceCount;
+   } else if (*pPhysicalDeviceCount >= 1) {
+      pPhysicalDevices[0] = val_physical_device_to_handle(&instance->physicalDevice);
+      *pPhysicalDeviceCount = 1;
+   } else {
+      *pPhysicalDeviceCount = 0;
+   }
+
+   return VK_SUCCESS;
+}
+
+void val_GetPhysicalDeviceFeatures(
+   VkPhysicalDevice                            physicalDevice,
+   VkPhysicalDeviceFeatures*                   pFeatures)
+{
+   VAL_FROM_HANDLE(val_physical_device, pdevice, physicalDevice);
+   bool indirect = false;//pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_GLSL_FEATURE_LEVEL) >= 400;
+   memset(pFeatures, 0, sizeof(*pFeatures));
+   *pFeatures = (VkPhysicalDeviceFeatures) {
+      .robustBufferAccess                       = true,
+      .fullDrawIndexUint32                      = true,
+      .imageCubeArray                           = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_CUBE_MAP_ARRAY) != 0),
+      .independentBlend                         = true,
+      .geometryShader                           = (pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_GEOMETRY, PIPE_SHADER_CAP_MAX_INSTRUCTIONS) != 0),
+      .tessellationShader                       = (pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_TESS_EVAL, PIPE_SHADER_CAP_MAX_INSTRUCTIONS) != 0),
+      .sampleRateShading                        = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_SAMPLE_SHADING) != 0),
+      .dualSrcBlend                             = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS) != 0),
+      .logicOp                                  = true,
+      .multiDrawIndirect                        = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MULTI_DRAW_INDIRECT) != 0),
+      .drawIndirectFirstInstance                = true,
+      .depthClamp                               = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_DEPTH_CLIP_DISABLE) != 0),
+      .depthBiasClamp                           = true,
+      .fillModeNonSolid                         = true,
+      .depthBounds                              = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_DEPTH_BOUNDS_TEST) != 0),
+      .wideLines                                = false,
+      .largePoints                              = true,
+      .alphaToOne                               = true,
+      .multiViewport                            = true,
+      .samplerAnisotropy                        = false, /* FINISHME */
+      .textureCompressionETC2                   = false,
+      .textureCompressionASTC_LDR               = false,
+      .textureCompressionBC                     = true,
+      .occlusionQueryPrecise                    = true,
+      .pipelineStatisticsQuery                  = false,
+      .vertexPipelineStoresAndAtomics           = (pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_VERTEX, PIPE_SHADER_CAP_MAX_SHADER_BUFFERS) != 0),
+      .fragmentStoresAndAtomics                 = (pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_SHADER_BUFFERS) != 0),
+      .shaderTessellationAndGeometryPointSize   = true,
+      .shaderImageGatherExtended                = true,
+      .shaderStorageImageExtendedFormats        = false,
+      .shaderStorageImageMultisample            = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_TEXTURE_MULTISAMPLE) != 0),
+      .shaderUniformBufferArrayDynamicIndexing  = indirect,
+      .shaderSampledImageArrayDynamicIndexing   = indirect,
+      .shaderStorageBufferArrayDynamicIndexing  = indirect,
+      .shaderStorageImageArrayDynamicIndexing   = indirect,
+      .shaderStorageImageReadWithoutFormat      = false,
+      .shaderStorageImageWriteWithoutFormat     = true,
+      .shaderClipDistance                       = true,
+      .shaderCullDistance                       = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_CULL_DISTANCE) == 1),
+      .shaderFloat64                            = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_DOUBLES) == 1),
+      .shaderInt64                              = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_INT64) == 1),
+      .shaderInt16                              = true,
+      .variableMultisampleRate                  = false,
+      .inheritedQueries                         = false,
+   };
+}
+
+void val_GetPhysicalDeviceFeatures2(
+   VkPhysicalDevice                            physicalDevice,
+   VkPhysicalDeviceFeatures2                  *pFeatures)
+{
+   val_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
+
+   vk_foreach_struct(ext, pFeatures->pNext) {
+      switch (ext->sType) {
+      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
+         VkPhysicalDeviceVariablePointersFeatures *features = (void *)ext;
+         features->variablePointers = true;
+         features->variablePointersStorageBuffer = true;
+         break;
+      }
+      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
+         VkPhysicalDevice16BitStorageFeatures *features =
+            (VkPhysicalDevice16BitStorageFeatures*)ext;
+         features->storageBuffer16BitAccess = true;
+         features->uniformAndStorageBuffer16BitAccess = true;
+         features->storagePushConstant16 = true;
+         features->storageInputOutput16 = false;
+         break;
+      }
+      default:
+         break;
+      }
+   }
+}
+
+void
+val_device_get_cache_uuid(void *uuid)
+{
+   memset(uuid, 0, VK_UUID_SIZE);
+   snprintf(uuid, VK_UUID_SIZE, "val-%s", MESA_GIT_SHA1 + 4);
+}
+
+void val_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
+                                     VkPhysicalDeviceProperties *pProperties)
+{
+   VAL_FROM_HANDLE(val_physical_device, pdevice, physicalDevice);
+
+   VkSampleCountFlags sample_counts = VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT;
+
+   uint64_t grid_size[3], block_size[3];
+   uint64_t max_threads_per_block, max_local_size;
+
+   pdevice->pscreen->get_compute_param(pdevice->pscreen, PIPE_SHADER_IR_NIR,
+                                       PIPE_COMPUTE_CAP_MAX_GRID_SIZE, grid_size);
+   pdevice->pscreen->get_compute_param(pdevice->pscreen, PIPE_SHADER_IR_NIR,
+                                       PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE, block_size);
+   pdevice->pscreen->get_compute_param(pdevice->pscreen, PIPE_SHADER_IR_NIR,
+                                       PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK,
+                                       &max_threads_per_block);
+   pdevice->pscreen->get_compute_param(pdevice->pscreen, PIPE_SHADER_IR_NIR,
+                                       PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE,
+                                       &max_local_size);
+
+   VkPhysicalDeviceLimits limits = {
+      .maxImageDimension1D                      = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_2D_SIZE),
+      .maxImageDimension2D                      = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_2D_SIZE),
+      .maxImageDimension3D                      = (1 << pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_3D_LEVELS)),
+      .maxImageDimensionCube                    = (1 << pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS)),
+      .maxImageArrayLayers                      = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS),
+      .maxTexelBufferElements                   = 128 * 1024 * 1024,
+      .maxUniformBufferRange                    = pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_CONST_BUFFER_SIZE),
+      .maxStorageBufferRange                    = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_SHADER_BUFFER_SIZE),
+      .maxPushConstantsSize                     = MAX_PUSH_CONSTANTS_SIZE,
+      .maxMemoryAllocationCount                 = 4096,
+      .maxSamplerAllocationCount                = 32 * 1024,
+      .bufferImageGranularity                   = 64, /* A cache line */
+      .sparseAddressSpaceSize                   = 0,
+      .maxBoundDescriptorSets                   = MAX_SETS,
+      .maxPerStageDescriptorSamplers            = 32,
+      .maxPerStageDescriptorUniformBuffers      = pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_CONST_BUFFERS),
+      .maxPerStageDescriptorStorageBuffers      = pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_SHADER_BUFFERS),
+      .maxPerStageDescriptorSampledImages       = pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS),
+      .maxPerStageDescriptorStorageImages       = pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_SHADER_IMAGES - 8),
+      .maxPerStageDescriptorInputAttachments    = 8,
+      .maxPerStageResources                     = 128,
+      .maxDescriptorSetSamplers                 = 32 * 1024,
+      .maxDescriptorSetUniformBuffers           = 256,
+      .maxDescriptorSetUniformBuffersDynamic    = 256,
+      .maxDescriptorSetStorageBuffers           = 256,
+      .maxDescriptorSetStorageBuffersDynamic    = 256,
+      .maxDescriptorSetSampledImages            = 256,
+      .maxDescriptorSetStorageImages            = 256,
+      .maxDescriptorSetInputAttachments         = 256,
+      .maxVertexInputAttributes                 = 32,
+      .maxVertexInputBindings                   = 32,
+      .maxVertexInputAttributeOffset            = 2047,
+      .maxVertexInputBindingStride              = 2048,
+      .maxVertexOutputComponents                = 128,
+      .maxTessellationGenerationLevel           = 64,
+      .maxTessellationPatchSize                 = 32,
+      .maxTessellationControlPerVertexInputComponents = 128,
+      .maxTessellationControlPerVertexOutputComponents = 128,
+      .maxTessellationControlPerPatchOutputComponents = 128,
+      .maxTessellationControlTotalOutputComponents = 4096,
+      .maxTessellationEvaluationInputComponents = 128,
+      .maxTessellationEvaluationOutputComponents = 128,
+      .maxGeometryShaderInvocations             = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_GS_INVOCATIONS),
+      .maxGeometryInputComponents               = 64,
+      .maxGeometryOutputComponents              = 128,
+      .maxGeometryOutputVertices                = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES),
+      .maxGeometryTotalOutputComponents         = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS),
+      .maxFragmentInputComponents               = 128,
+      .maxFragmentOutputAttachments             = 8,
+      .maxFragmentDualSrcAttachments            = 2,
+      .maxFragmentCombinedOutputResources       = 8,
+      .maxComputeSharedMemorySize               = max_local_size,
+      .maxComputeWorkGroupCount                 = { grid_size[0], grid_size[1], grid_size[2] },
+      .maxComputeWorkGroupInvocations           = max_threads_per_block,
+      .maxComputeWorkGroupSize = { block_size[0], block_size[1], block_size[2] },
+      .subPixelPrecisionBits                    = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_RASTERIZER_SUBPIXEL_BITS),
+      .subTexelPrecisionBits                    = 4 /* FIXME */,
+      .mipmapPrecisionBits                      = 4 /* FIXME */,
+      .maxDrawIndexedIndexValue                 = UINT32_MAX,
+      .maxDrawIndirectCount                     = UINT32_MAX,
+      .maxSamplerLodBias                        = 16,
+      .maxSamplerAnisotropy                     = 16,
+      .maxViewports                             = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_VIEWPORTS),
+      .maxViewportDimensions                    = { (1 << 14), (1 << 14) },
+      .viewportBoundsRange                      = { -16384.0, 16384.0 },
+      .viewportSubPixelBits                     = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_VIEWPORT_SUBPIXEL_BITS),
+      .minMemoryMapAlignment                    = 4096, /* A page */
+      .minTexelBufferOffsetAlignment            = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT),
+      .minUniformBufferOffsetAlignment          = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT),
+      .minStorageBufferOffsetAlignment          = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_SHADER_BUFFER_OFFSET_ALIGNMENT),
+      .minTexelOffset                           = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MIN_TEXEL_OFFSET),
+      .maxTexelOffset                           = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXEL_OFFSET),
+      .minTexelGatherOffset                     = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET),
+      .maxTexelGatherOffset                     = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET),
+      .minInterpolationOffset                   = -2, /* FIXME */
+      .maxInterpolationOffset                   = 2, /* FIXME */
+      .subPixelInterpolationOffsetBits          = 8, /* FIXME */
+      .maxFramebufferWidth                      = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_2D_SIZE),
+      .maxFramebufferHeight                     = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_2D_SIZE),
+      .maxFramebufferLayers                     = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS),
+      .framebufferColorSampleCounts             = sample_counts,
+      .framebufferDepthSampleCounts             = sample_counts,
+      .framebufferStencilSampleCounts           = sample_counts,
+      .framebufferNoAttachmentsSampleCounts     = sample_counts,
+      .maxColorAttachments                      = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_RENDER_TARGETS),
+      .sampledImageColorSampleCounts            = sample_counts,
+      .sampledImageIntegerSampleCounts          = sample_counts,
+      .sampledImageDepthSampleCounts            = sample_counts,
+      .sampledImageStencilSampleCounts          = sample_counts,
+      .storageImageSampleCounts                 = sample_counts,
+      .maxSampleMaskWords                       = 1,
+      .timestampComputeAndGraphics              = true,
+      .timestampPeriod                          = 1,
+      .maxClipDistances                         = 8,
+      .maxCullDistances                         = 8,
+      .maxCombinedClipAndCullDistances          = 8,
+      .discreteQueuePriorities                  = 2,
+      .pointSizeRange                           = { 0.0, pdevice->pscreen->get_paramf(pdevice->pscreen, PIPE_CAPF_MAX_POINT_WIDTH) },
+      .lineWidthRange                           = { 0.0, pdevice->pscreen->get_paramf(pdevice->pscreen, PIPE_CAPF_MAX_LINE_WIDTH) },
+      .pointSizeGranularity                     = (1.0 / 8.0),
+      .lineWidthGranularity                     = (1.0 / 128.0),
+      .strictLines                              = false, /* FINISHME */
+      .standardSampleLocations                  = true,
+      .optimalBufferCopyOffsetAlignment         = 128,
+      .optimalBufferCopyRowPitchAlignment       = 128,
+      .nonCoherentAtomSize                      = 64,
+   };
+
+   *pProperties = (VkPhysicalDeviceProperties) {
+      .apiVersion = VK_MAKE_VERSION(1, 0, 2),
+      .driverVersion = 1,
+      .vendorID = VK_VENDOR_ID_MESA,
+      .deviceID = 0,
+      .deviceType = VK_PHYSICAL_DEVICE_TYPE_CPU,
+      .limits = limits,
+      .sparseProperties = {0},
+   };
+
+   strcpy(pProperties->deviceName, pdevice->pscreen->get_name(pdevice->pscreen));
+   val_device_get_cache_uuid(pProperties->pipelineCacheUUID);
+}
+
+void val_GetPhysicalDeviceProperties2(
+   VkPhysicalDevice                            physicalDevice,
+   VkPhysicalDeviceProperties2                *pProperties)
+{
+   val_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
+
+   vk_foreach_struct(ext, pProperties->pNext) {
+      switch (ext->sType) {
+
+      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
+         VkPhysicalDeviceMaintenance3Properties *properties =
+            (VkPhysicalDeviceMaintenance3Properties*)ext;
+         properties->maxPerSetDescriptors = 1024;
+         properties->maxMemoryAllocationSize = (1u << 31);
+         break;
+      }
+      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR: {
+         VkPhysicalDeviceDriverPropertiesKHR *driver_props =
+            (VkPhysicalDeviceDriverPropertiesKHR *) ext;
+         driver_props->driverID = VK_DRIVER_ID_MESA_LLVMPIPE;
+         snprintf(driver_props->driverName, VK_MAX_DRIVER_NAME_SIZE_KHR, "llvmpipe");
+         snprintf(driver_props->driverInfo, VK_MAX_DRIVER_INFO_SIZE_KHR,
+                  "Mesa " PACKAGE_VERSION MESA_GIT_SHA1
+#ifdef MESA_LLVM_VERSION_STRING
+                  " (LLVM " MESA_LLVM_VERSION_STRING ")"
+#endif
+                 );
+         driver_props->conformanceVersion.major = 1;
+         driver_props->conformanceVersion.minor = 0;
+         driver_props->conformanceVersion.subminor = 0;
+         driver_props->conformanceVersion.patch = 0;
+         break;
+      }
+      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
+         VkPhysicalDevicePointClippingProperties *properties =
+            (VkPhysicalDevicePointClippingProperties*)ext;
+         properties->pointClippingBehavior = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
+         break;
+      }
+      default:
+         break;
+      }
+   }
+}
+
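+/* Only a single queue family is advertised, supporting graphics, compute and
+ * transfer on one queue. */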
+void val_GetPhysicalDeviceQueueFamilyProperties(
+   VkPhysicalDevice                            physicalDevice,
+   uint32_t*                                   pCount,
+   VkQueueFamilyProperties*                    pQueueFamilyProperties)
+{
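+   /* Standard two-call idiom: when pQueueFamilyProperties is NULL only the
+    * number of queue families (one) is reported.
+    */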
+   if (pQueueFamilyProperties == NULL) {
+      *pCount = 1;
+      return;
+   }
+
+   assert(*pCount >= 1);
+
+   *pQueueFamilyProperties = (VkQueueFamilyProperties) {
+      .queueFlags = VK_QUEUE_GRAPHICS_BIT |
+      VK_QUEUE_COMPUTE_BIT |
+      VK_QUEUE_TRANSFER_BIT,
+      .queueCount = 1,
+      .timestampValidBits = 64,
+      .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
+   };
+}
+
+void val_GetPhysicalDeviceMemoryProperties(
+   VkPhysicalDevice                            physicalDevice,
+   VkPhysicalDeviceMemoryProperties*           pMemoryProperties)
+{
+   pMemoryProperties->memoryTypeCount = 1;
+   pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
+      .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
+      VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
+      .heapIndex = 0,
+   };
+
+   pMemoryProperties->memoryHeapCount = 1;
+   pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
+      .size = 2ULL*1024*1024*1024,
+      .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
+   };
+}
+
+PFN_vkVoidFunction val_GetInstanceProcAddr(
+   VkInstance                                  _instance,
+   const char*                                 pName)
+{
+   VAL_FROM_HANDLE(val_instance, instance, _instance);
+
+   /* The Vulkan 1.0 spec for vkGetInstanceProcAddr has a table of exactly
+    * when we have to return valid function pointers, NULL, or it's left
+    * undefined.  See the table for exact details.
+    */
+   if (pName == NULL)
+      return NULL;
+
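+   /* The commands below must resolve even when no instance handle is
+    * provided, so look them up before touching the per-instance dispatch
+    * tables.
+    */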
+#define LOOKUP_VAL_ENTRYPOINT(entrypoint)               \
+   if (strcmp(pName, "vk" #entrypoint) == 0)            \
+      return (PFN_vkVoidFunction)val_##entrypoint
+
+   LOOKUP_VAL_ENTRYPOINT(EnumerateInstanceExtensionProperties);
+   LOOKUP_VAL_ENTRYPOINT(EnumerateInstanceLayerProperties);
+   LOOKUP_VAL_ENTRYPOINT(EnumerateInstanceVersion);
+   LOOKUP_VAL_ENTRYPOINT(CreateInstance);
+
+   /* GetInstanceProcAddr() can also be called with a NULL instance.
+    * See https://gitlab.khronos.org/vulkan/vulkan/issues/2057
+    */
+   LOOKUP_VAL_ENTRYPOINT(GetInstanceProcAddr);
+
+#undef LOOKUP_VAL_ENTRYPOINT
+
+   if (instance == NULL)
+      return NULL;
+
+   int idx = val_get_instance_entrypoint_index(pName);
+   if (idx >= 0)
+      return instance->dispatch.entrypoints[idx];
+
+   idx = val_get_physical_device_entrypoint_index(pName);
+   if (idx >= 0)
+      return instance->physical_device_dispatch.entrypoints[idx];
+
+   idx = val_get_device_entrypoint_index(pName);
+   if (idx >= 0)
+      return instance->device_dispatch.entrypoints[idx];
+
+   return NULL;
+}
+
+/* The loader wants us to expose a second GetInstanceProcAddr function
+ * to work around certain LD_PRELOAD issues seen in apps.
+ */
+PUBLIC
+VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
+   VkInstance                                  instance,
+   const char*                                 pName);
+
+PUBLIC
+VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
+   VkInstance                                  instance,
+   const char*                                 pName)
+{
+   return val_GetInstanceProcAddr(instance, pName);
+}
+
+PUBLIC
+VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetPhysicalDeviceProcAddr(
+   VkInstance                                  _instance,
+   const char*                                 pName);
+
+PUBLIC
+VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetPhysicalDeviceProcAddr(
+   VkInstance                                  _instance,
+   const char*                                 pName)
+{
+   VAL_FROM_HANDLE(val_instance, instance, _instance);
+
+   if (!pName || !instance)
+      return NULL;
+
+   int idx = val_get_physical_device_entrypoint_index(pName);
+   if (idx < 0)
+      return NULL;
+
+   return instance->physical_device_dispatch.entrypoints[idx];
+}
+
+PFN_vkVoidFunction val_GetDeviceProcAddr(
+   VkDevice                                    _device,
+   const char*                                 pName)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   if (!device || !pName)
+      return NULL;
+
+   int idx = val_get_device_entrypoint_index(pName);
+   if (idx < 0)
+      return NULL;
+
+   return device->dispatch.entrypoints[idx];
+}
+
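+/* Worker loop for the queue's execution thread: wait for work, pop one task
+ * at a time and execute its command buffers with the queue mutex released so
+ * new submissions aren't blocked; queue->count is decremented per task so
+ * queue_wait_idle() can poll for idleness.
+ */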
+static int queue_thread(void *data)
+{
+   struct val_queue *queue = data;
+
+   mtx_lock(&queue->m);
+   while (!queue->shutdown) {
+      struct val_queue_work *task;
+      while (list_is_empty(&queue->workqueue) && !queue->shutdown)
+         cnd_wait(&queue->new_work, &queue->m);
+
+      if (queue->shutdown)
+         break;
+
+      task = list_first_entry(&queue->workqueue, struct val_queue_work,
+                              list);
+
+      mtx_unlock(&queue->m);
+      /* Execute with the queue mutex released so new submissions aren't blocked. */
+      for (unsigned i = 0; i < task->cmd_buffer_count; i++) {
+         val_execute_cmds(queue->device, queue, task->fence, task->cmd_buffers[i]);
+      }
+      if (!task->cmd_buffer_count && task->fence)
+         task->fence->signaled = true;
+      p_atomic_dec(&queue->count);
+      mtx_lock(&queue->m);
+      list_del(&task->list);
+      free(task);
+   }
+   mtx_unlock(&queue->m);
+   return 0;
+}
+
+static VkResult
+val_queue_init(struct val_device *device, struct val_queue *queue)
+{
+   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+   queue->device = device;
+
+   queue->flags = 0;
+   queue->ctx = device->pscreen->context_create(device->pscreen, NULL, PIPE_CONTEXT_ROBUST_BUFFER_ACCESS);
+   list_inithead(&queue->workqueue);
+   p_atomic_set(&queue->count, 0);
+   mtx_init(&queue->m, mtx_plain);
+   queue->exec_thread = u_thread_create(queue_thread, queue);
+
+   return VK_SUCCESS;
+}
+
+static void
+val_queue_finish(struct val_queue *queue)
+{
+   mtx_lock(&queue->m);
+   queue->shutdown = true;
+   cnd_broadcast(&queue->new_work);
+   mtx_unlock(&queue->m);
+
+   thrd_join(queue->exec_thread, NULL);
+
+   cnd_destroy(&queue->new_work);
+   mtx_destroy(&queue->m);
+   queue->ctx->destroy(queue->ctx);
+}
+
+static int val_get_device_extension_index(const char *name)
+{
+   for (unsigned i = 0; i < VAL_DEVICE_EXTENSION_COUNT; ++i) {
+      if (strcmp(name, val_device_extensions[i].extensionName) == 0)
+         return i;
+   }
+   return -1;
+}
+
+static void
+val_device_init_dispatch(struct val_device *device)
+{
+   const struct val_instance *instance = device->physical_device->instance;
+   const struct val_device_dispatch_table *dispatch_table_layer = NULL;
+   bool unchecked = instance->debug_flags & VAL_DEBUG_ALL_ENTRYPOINTS;
+
+   for (unsigned i = 0; i < ARRAY_SIZE(device->dispatch.entrypoints); i++) {
+      /* Vulkan requires that entrypoints for extensions which have not been
+       * enabled must not be advertised.
+       */
+      if (!unchecked &&
+          !val_device_entrypoint_is_enabled(i, instance->apiVersion,
+                                            &instance->enabled_extensions,
+                                            &device->enabled_extensions)) {
+         device->dispatch.entrypoints[i] = NULL;
+      } else if (dispatch_table_layer &&
+                 dispatch_table_layer->entrypoints[i]) {
+         device->dispatch.entrypoints[i] =
+            dispatch_table_layer->entrypoints[i];
+      } else {
+         device->dispatch.entrypoints[i] =
+            val_device_dispatch_table.entrypoints[i];
+      }
+   }
+}
+
+VkResult val_CreateDevice(
+   VkPhysicalDevice                            physicalDevice,
+   const VkDeviceCreateInfo*                   pCreateInfo,
+   const VkAllocationCallbacks*                pAllocator,
+   VkDevice*                                   pDevice)
+{
+   VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
+   struct val_device *device;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
+
+   /* Check enabled features */
+   if (pCreateInfo->pEnabledFeatures) {
+      VkPhysicalDeviceFeatures supported_features;
+      val_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
+      VkBool32 *supported_feature = (VkBool32 *)&supported_features;
+      VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
+      unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
+      for (uint32_t i = 0; i < num_features; i++) {
+         if (enabled_feature[i] && !supported_feature[i])
+            return vk_error(physical_device->instance, VK_ERROR_FEATURE_NOT_PRESENT);
+      }
+   }
+
+   device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
+                       sizeof(*device), 8,
+                       VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+   if (!device)
+      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   device->instance = physical_device->instance;
+   device->physical_device = physical_device;
+
+   if (pAllocator)
+      device->alloc = *pAllocator;
+   else
+      device->alloc = physical_device->instance->alloc;
+
+   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
+      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
+      int index = val_get_device_extension_index(ext_name);
+      if (index < 0 || !physical_device->supported_extensions.extensions[index]) {
+         vk_free(&device->alloc, device);
+         return vk_error(physical_device->instance, VK_ERROR_EXTENSION_NOT_PRESENT);
+      }
+
+      device->enabled_extensions.extensions[index] = true;
+   }
+   val_device_init_dispatch(device);
+
+   mtx_init(&device->fence_lock, mtx_plain);
+   device->pscreen = physical_device->pscreen;
+
+   val_queue_init(device, &device->queue);
+
+   *pDevice = val_device_to_handle(device);
+
+   return VK_SUCCESS;
+}
+
+void val_DestroyDevice(
+   VkDevice                                    _device,
+   const VkAllocationCallbacks*                pAllocator)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+
+   val_queue_finish(&device->queue);
+   vk_free(&device->alloc, device);
+}
+
+VkResult val_EnumerateInstanceExtensionProperties(
+   const char*                                 pLayerName,
+   uint32_t*                                   pPropertyCount,
+   VkExtensionProperties*                      pProperties)
+{
+   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
+
+   for (int i = 0; i < VAL_INSTANCE_EXTENSION_COUNT; i++) {
+      if (val_instance_extensions_supported.extensions[i]) {
+         vk_outarray_append(&out, prop) {
+            *prop = val_instance_extensions[i];
+         }
+      }
+   }
+
+   return vk_outarray_status(&out);
+}
+
+VkResult val_EnumerateDeviceExtensionProperties(
+   VkPhysicalDevice                            physicalDevice,
+   const char*                                 pLayerName,
+   uint32_t*                                   pPropertyCount,
+   VkExtensionProperties*                      pProperties)
+{
+   VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
+
+   for (int i = 0; i < VAL_DEVICE_EXTENSION_COUNT; i++) {
+      if (device->supported_extensions.extensions[i]) {
+         vk_outarray_append(&out, prop) {
+            *prop = val_device_extensions[i];
+         }
+      }
+   }
+   return vk_outarray_status(&out);
+}
+
+VkResult val_EnumerateInstanceLayerProperties(
+   uint32_t*                                   pPropertyCount,
+   VkLayerProperties*                          pProperties)
+{
+   if (pProperties == NULL) {
+      *pPropertyCount = 0;
+      return VK_SUCCESS;
+   }
+
+   /* None supported at this time */
+   return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
+}
+
+VkResult val_EnumerateDeviceLayerProperties(
+   VkPhysicalDevice                            physicalDevice,
+   uint32_t*                                   pPropertyCount,
+   VkLayerProperties*                          pProperties)
+{
+   if (pProperties == NULL) {
+      *pPropertyCount = 0;
+      return VK_SUCCESS;
+   }
+
+   /* None supported at this time */
+   return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
+}
+
+void val_GetDeviceQueue2(
+   VkDevice                                    _device,
+   const VkDeviceQueueInfo2*                   pQueueInfo,
+   VkQueue*                                    pQueue)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   struct val_queue *queue;
+
+   queue = &device->queue;
+   if (pQueueInfo->flags != queue->flags) {
+      /* From the Vulkan 1.1.70 spec:
+       *
+       * "The queue returned by vkGetDeviceQueue2 must have the same
+       * flags value from this structure as that used at device
+       * creation time in a VkDeviceQueueCreateInfo instance. If no
+       * matching flags were specified at device creation time then
+       * pQueue will return VK_NULL_HANDLE."
+       */
+      *pQueue = VK_NULL_HANDLE;
+      return;
+   }
+
+   *pQueue = val_queue_to_handle(queue);
+}
+
+void val_GetDeviceQueue(
+   VkDevice                                    _device,
+   uint32_t                                    queueFamilyIndex,
+   uint32_t                                    queueIndex,
+   VkQueue*                                    pQueue)
+{
+   const VkDeviceQueueInfo2 info = (VkDeviceQueueInfo2) {
+      .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
+      .queueFamilyIndex = queueFamilyIndex,
+      .queueIndex = queueIndex
+   };
+
+   val_GetDeviceQueue2(_device, &info, pQueue);
+}
+
+VkResult val_QueueSubmit(
+   VkQueue                                     _queue,
+   uint32_t                                    submitCount,
+   const VkSubmitInfo*                         pSubmits,
+   VkFence                                     _fence)
+{
+   VAL_FROM_HANDLE(val_queue, queue, _queue);
+   VAL_FROM_HANDLE(val_fence, fence, _fence);
+
+   if (submitCount == 0)
+      goto just_signal_fence;
+   for (uint32_t i = 0; i < submitCount; i++) {
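+      /* A single allocation holds the work item plus its command buffer
+       * pointer array, which is placed directly after the struct (task + 1).
+       */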
+      uint32_t task_size = sizeof(struct val_queue_work) + pSubmits[i].commandBufferCount * sizeof(struct val_cmd_buffer *);
+      struct val_queue_work *task = malloc(task_size);
+
+      task->cmd_buffer_count = pSubmits[i].commandBufferCount;
+      task->fence = fence;
+      task->cmd_buffers = (struct val_cmd_buffer **)(task + 1);
+      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
+         task->cmd_buffers[j] = val_cmd_buffer_from_handle(pSubmits[i].pCommandBuffers[j]);
+      }
+
+      mtx_lock(&queue->m);
+      p_atomic_inc(&queue->count);
+      list_addtail(&task->list, &queue->workqueue);
+      cnd_signal(&queue->new_work);
+      mtx_unlock(&queue->m);
+   }
+   return VK_SUCCESS;
+ just_signal_fence:
+   if (fence)
+      fence->signaled = true;
+   return VK_SUCCESS;
+}
+
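+/* Poll until the execution thread has drained the queue: a timeout of 0 is a
+ * non-blocking check, UINT64_MAX waits forever, and any other value is a
+ * relative timeout in nanoseconds measured against the monotonic clock.
+ */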
+static VkResult queue_wait_idle(struct val_queue *queue, uint64_t timeout)
+{
+   if (timeout == 0)
+      return p_atomic_read(&queue->count) == 0 ? VK_SUCCESS : VK_TIMEOUT;
+   if (timeout == UINT64_MAX)
+      while (p_atomic_read(&queue->count))
+         usleep(100);
+   else {
+      struct timespec t, current;
+      clock_gettime(CLOCK_MONOTONIC, &current);
+      timespec_add_nsec(&t, &current, timeout);
+      bool timedout = false;
+      while (p_atomic_read(&queue->count) && !(timedout = timespec_passed(CLOCK_MONOTONIC, &t)))
+         usleep(10);
+      if (timedout)
+         return VK_TIMEOUT;
+   }
+   return VK_SUCCESS;
+}
+
+VkResult val_QueueWaitIdle(
+   VkQueue                                     _queue)
+{
+   VAL_FROM_HANDLE(val_queue, queue, _queue);
+
+   return queue_wait_idle(queue, UINT64_MAX);
+}
+
+VkResult val_DeviceWaitIdle(
+   VkDevice                                    _device)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+
+   return queue_wait_idle(&device->queue, UINT64_MAX);
+}
+
+VkResult val_AllocateMemory(
+   VkDevice                                    _device,
+   const VkMemoryAllocateInfo*                 pAllocateInfo,
+   const VkAllocationCallbacks*                pAllocator,
+   VkDeviceMemory*                             pMem)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   struct val_device_memory *mem;
+   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
+
+   if (pAllocateInfo->allocationSize == 0) {
+      /* Apparently, this is allowed */
+      *pMem = VK_NULL_HANDLE;
+      return VK_SUCCESS;
+   }
+
+   mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
+                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (mem == NULL)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vk_object_base_init(&device->vk, &mem->base,
+                       VK_OBJECT_TYPE_DEVICE_MEMORY);
+   mem->pmem = device->pscreen->allocate_memory(device->pscreen, pAllocateInfo->allocationSize);
+   if (!mem->pmem) {
+      vk_free2(&device->alloc, pAllocator, mem);
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+   }
+
+   mem->type_index = pAllocateInfo->memoryTypeIndex;
+
+   *pMem = val_device_memory_to_handle(mem);
+
+   return VK_SUCCESS;
+}
+
+void val_FreeMemory(
+   VkDevice                                    _device,
+   VkDeviceMemory                              _mem,
+   const VkAllocationCallbacks*                pAllocator)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_device_memory, mem, _mem);
+
+   if (mem == NULL)
+      return;
+
+   device->pscreen->free_memory(device->pscreen, mem->pmem);
+   vk_object_base_finish(&mem->base);
+   vk_free2(&device->alloc, pAllocator, mem);
+}
+
+VkResult val_MapMemory(
+   VkDevice                                    _device,
+   VkDeviceMemory                              _memory,
+   VkDeviceSize                                offset,
+   VkDeviceSize                                size,
+   VkMemoryMapFlags                            flags,
+   void**                                      ppData)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_device_memory, mem, _memory);
+   void *map;
+   if (mem == NULL) {
+      *ppData = NULL;
+      return VK_SUCCESS;
+   }
+
+   map = device->pscreen->map_memory(device->pscreen, mem->pmem);
+
+   *ppData = map + offset;
+   return VK_SUCCESS;
+}
+
+void val_UnmapMemory(
+   VkDevice                                    _device,
+   VkDeviceMemory                              _memory)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_device_memory, mem, _memory);
+
+   if (mem == NULL)
+      return;
+
+   device->pscreen->unmap_memory(device->pscreen, mem->pmem);
+}
+
+VkResult val_FlushMappedMemoryRanges(
+   VkDevice                                    _device,
+   uint32_t                                    memoryRangeCount,
+   const VkMappedMemoryRange*                  pMemoryRanges)
+{
+   return VK_SUCCESS;
+}
+
+VkResult val_InvalidateMappedMemoryRanges(
+   VkDevice                                    _device,
+   uint32_t                                    memoryRangeCount,
+   const VkMappedMemoryRange*                  pMemoryRanges)
+{
+   return VK_SUCCESS;
+}
+
+void val_GetBufferMemoryRequirements(
+   VkDevice                                    device,
+   VkBuffer                                    _buffer,
+   VkMemoryRequirements*                       pMemoryRequirements)
+{
+   VAL_FROM_HANDLE(val_buffer, buffer, _buffer);
+
+   /* The Vulkan spec (git aaed022) says:
+    *
+    *    memoryTypeBits is a bitfield and contains one bit set for every
+    *    supported memory type for the resource. The bit `1<<i` is set if and
+    *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
+    *    structure for the physical device is supported.
+    *
+    * We support exactly one memory type.
+    */
+   pMemoryRequirements->memoryTypeBits = 1;
+
+   pMemoryRequirements->size = buffer->total_size;
+   pMemoryRequirements->alignment = 64;
+}
+
+void val_GetBufferMemoryRequirements2(
+   VkDevice                                     device,
+   const VkBufferMemoryRequirementsInfo2       *pInfo,
+   VkMemoryRequirements2                       *pMemoryRequirements)
+{
+   val_GetBufferMemoryRequirements(device, pInfo->buffer,
+                                   &pMemoryRequirements->memoryRequirements);
+   vk_foreach_struct(ext, pMemoryRequirements->pNext) {
+      switch (ext->sType) {
+      case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
+         VkMemoryDedicatedRequirements *req =
+            (VkMemoryDedicatedRequirements *) ext;
+         req->requiresDedicatedAllocation = false;
+         req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
+         break;
+      }
+      default:
+         break;
+      }
+   }
+}
+
+void val_GetImageMemoryRequirements(
+   VkDevice                                    device,
+   VkImage                                     _image,
+   VkMemoryRequirements*                       pMemoryRequirements)
+{
+   VAL_FROM_HANDLE(val_image, image, _image);
+   pMemoryRequirements->memoryTypeBits = 1;
+
+   pMemoryRequirements->size = image->size;
+   pMemoryRequirements->alignment = image->alignment;
+}
+
+void val_GetImageMemoryRequirements2(
+   VkDevice                                    device,
+   const VkImageMemoryRequirementsInfo2       *pInfo,
+   VkMemoryRequirements2                      *pMemoryRequirements)
+{
+   val_GetImageMemoryRequirements(device, pInfo->image,
+                                  &pMemoryRequirements->memoryRequirements);
+
+   vk_foreach_struct(ext, pMemoryRequirements->pNext) {
+      switch (ext->sType) {
+      case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
+         VkMemoryDedicatedRequirements *req =
+            (VkMemoryDedicatedRequirements *) ext;
+         req->requiresDedicatedAllocation = false;
+         req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
+         break;
+      }
+      default:
+         break;
+      }
+   }
+}
+
+void val_GetImageSparseMemoryRequirements(
+   VkDevice                                    device,
+   VkImage                                     image,
+   uint32_t*                                   pSparseMemoryRequirementCount,
+   VkSparseImageMemoryRequirements*            pSparseMemoryRequirements)
+{
+   stub();
+}
+
+void val_GetImageSparseMemoryRequirements2(
+   VkDevice                                    device,
+   const VkImageSparseMemoryRequirementsInfo2* pInfo,
+   uint32_t* pSparseMemoryRequirementCount,
+   VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
+{
+   stub();
+}
+
+void val_GetDeviceMemoryCommitment(
+   VkDevice                                    device,
+   VkDeviceMemory                              memory,
+   VkDeviceSize*                               pCommittedMemoryInBytes)
+{
+   *pCommittedMemoryInBytes = 0;
+}
+
+VkResult val_BindBufferMemory2(VkDevice _device,
+                               uint32_t bindInfoCount,
+                               const VkBindBufferMemoryInfo *pBindInfos)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   for (uint32_t i = 0; i < bindInfoCount; ++i) {
+      VAL_FROM_HANDLE(val_device_memory, mem, pBindInfos[i].memory);
+      VAL_FROM_HANDLE(val_buffer, buffer, pBindInfos[i].buffer);
+
+      device->pscreen->resource_bind_backing(device->pscreen,
+                                             buffer->bo,
+                                             mem->pmem,
+                                             pBindInfos[i].memoryOffset);
+   }
+   return VK_SUCCESS;
+}
+
+VkResult val_BindBufferMemory(
+   VkDevice                                    _device,
+   VkBuffer                                    _buffer,
+   VkDeviceMemory                              _memory,
+   VkDeviceSize                                memoryOffset)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_device_memory, mem, _memory);
+   VAL_FROM_HANDLE(val_buffer, buffer, _buffer);
+
+   device->pscreen->resource_bind_backing(device->pscreen,
+                                          buffer->bo,
+                                          mem->pmem,
+                                          memoryOffset);
+   return VK_SUCCESS;
+}
+
+VkResult val_BindImageMemory2(VkDevice _device,
+                              uint32_t bindInfoCount,
+                              const VkBindImageMemoryInfo *pBindInfos)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   for (uint32_t i = 0; i < bindInfoCount; ++i) {
+      VAL_FROM_HANDLE(val_device_memory, mem, pBindInfos[i].memory);
+      VAL_FROM_HANDLE(val_image, image, pBindInfos[i].image);
+
+      device->pscreen->resource_bind_backing(device->pscreen,
+                                             image->bo,
+                                             mem->pmem,
+                                             pBindInfos[i].memoryOffset);
+   }
+   return VK_SUCCESS;
+}
+
+VkResult val_BindImageMemory(
+   VkDevice                                    _device,
+   VkImage                                     _image,
+   VkDeviceMemory                              _memory,
+   VkDeviceSize                                memoryOffset)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_device_memory, mem, _memory);
+   VAL_FROM_HANDLE(val_image, image, _image);
+
+   device->pscreen->resource_bind_backing(device->pscreen,
+                                          image->bo,
+                                          mem->pmem,
+                                          memoryOffset);
+   return VK_SUCCESS;
+}
+
+VkResult val_QueueBindSparse(
+   VkQueue                                     queue,
+   uint32_t                                    bindInfoCount,
+   const VkBindSparseInfo*                     pBindInfo,
+   VkFence                                     fence)
+{
+   stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
+}
+
+VkResult val_CreateFence(
+   VkDevice                                    _device,
+   const VkFenceCreateInfo*                    pCreateInfo,
+   const VkAllocationCallbacks*                pAllocator,
+   VkFence*                                    pFence)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   struct val_fence *fence;
+
+   fence = vk_alloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
+                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (fence == NULL)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vk_object_base_init(&device->vk, &fence->base, VK_OBJECT_TYPE_FENCE);
+   fence->signaled = pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT;
+
+   fence->handle = NULL;
+   *pFence = val_fence_to_handle(fence);
+
+   return VK_SUCCESS;
+}
+
+void val_DestroyFence(
+   VkDevice                                    _device,
+   VkFence                                     _fence,
+   const VkAllocationCallbacks*                pAllocator)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_fence, fence, _fence);
+
+   if (!_fence)
+      return;
+   if (fence->handle)
+      device->pscreen->fence_reference(device->pscreen, &fence->handle, NULL);
+
+   vk_object_base_finish(&fence->base);
+   vk_free2(&device->alloc, pAllocator, fence);
+}
+
+VkResult val_ResetFences(
+   VkDevice                                    _device,
+   uint32_t                                    fenceCount,
+   const VkFence*                              pFences)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   for (unsigned i = 0; i < fenceCount; i++) {
+      struct val_fence *fence = val_fence_from_handle(pFences[i]);
+
+      fence->signaled = false;
+
+      mtx_lock(&device->fence_lock);
+      if (fence->handle)
+         device->pscreen->fence_reference(device->pscreen, &fence->handle, NULL);
+      mtx_unlock(&device->fence_lock);
+   }
+   return VK_SUCCESS;
+}
+
+VkResult val_GetFenceStatus(
+   VkDevice                                    _device,
+   VkFence                                     _fence)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_fence, fence, _fence);
+
+   if (fence->signaled)
+      return VK_SUCCESS;
+
+   mtx_lock(&device->fence_lock);
+
+   if (!fence->handle) {
+      mtx_unlock(&device->fence_lock);
+      return VK_NOT_READY;
+   }
+
+   bool signalled = device->pscreen->fence_finish(device->pscreen,
+                                                  NULL,
+                                                  fence->handle,
+                                                  0);
+   mtx_unlock(&device->fence_lock);
+   if (signalled)
+      return VK_SUCCESS;
+   else
+      return VK_NOT_READY;
+}
+
+VkResult val_CreateFramebuffer(
+   VkDevice                                    _device,
+   const VkFramebufferCreateInfo*              pCreateInfo,
+   const VkAllocationCallbacks*                pAllocator,
+   VkFramebuffer*                              pFramebuffer)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   struct val_framebuffer *framebuffer;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
+
+   size_t size = sizeof(*framebuffer) +
+      sizeof(struct val_image_view *) * pCreateInfo->attachmentCount;
+   framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
+                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (framebuffer == NULL)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vk_object_base_init(&device->vk, &framebuffer->base,
+                       VK_OBJECT_TYPE_FRAMEBUFFER);
+   framebuffer->attachment_count = pCreateInfo->attachmentCount;
+   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
+      VkImageView _iview = pCreateInfo->pAttachments[i];
+      framebuffer->attachments[i] = val_image_view_from_handle(_iview);
+   }
+
+   framebuffer->width = pCreateInfo->width;
+   framebuffer->height = pCreateInfo->height;
+   framebuffer->layers = pCreateInfo->layers;
+
+   *pFramebuffer = val_framebuffer_to_handle(framebuffer);
+
+   return VK_SUCCESS;
+}
+
+void val_DestroyFramebuffer(
+   VkDevice                                    _device,
+   VkFramebuffer                               _fb,
+   const VkAllocationCallbacks*                pAllocator)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_framebuffer, fb, _fb);
+
+   if (!fb)
+      return;
+   vk_object_base_finish(&fb->base);
+   vk_free2(&device->alloc, pAllocator, fb);
+}
+
+VkResult val_WaitForFences(
+   VkDevice                                    _device,
+   uint32_t                                    fenceCount,
+   const VkFence*                              pFences,
+   VkBool32                                    waitAll,
+   uint64_t                                    timeout)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+
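+   /* Drain the execution queue first; a fence that still has no pipe fence
+    * handle after that is counted as a timeout below.
+    */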
+   VkResult qret = queue_wait_idle(&device->queue, timeout);
+   bool timeout_status = false;
+   if (qret == VK_TIMEOUT)
+      return VK_TIMEOUT;
+
+   mtx_lock(&device->fence_lock);
+   for (unsigned i = 0; i < fenceCount; i++) {
+      struct val_fence *fence = val_fence_from_handle(pFences[i]);
+
+      if (fence->signaled)
+         continue;
+      if (!fence->handle) {
+         timeout_status |= true;
+         continue;
+      }
+      bool ret = device->pscreen->fence_finish(device->pscreen,
+                                               NULL,
+                                               fence->handle,
+                                               timeout);
+      if (ret && !waitAll) {
+         timeout_status = false;
+         break;
+      }
+
+      if (!ret)
+         timeout_status |= true;
+   }
+   mtx_unlock(&device->fence_lock);
+   return timeout_status ? VK_TIMEOUT : VK_SUCCESS;
+}
+
+VkResult val_CreateSemaphore(
+   VkDevice                                    _device,
+   const VkSemaphoreCreateInfo*                pCreateInfo,
+   const VkAllocationCallbacks*                pAllocator,
+   VkSemaphore*                                pSemaphore)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+
+   struct val_semaphore *sema = vk_alloc2(&device->alloc, pAllocator,
+                                          sizeof(*sema), 8,
+                                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+   if (!sema)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+   vk_object_base_init(&device->vk, &sema->base,
+                       VK_OBJECT_TYPE_SEMAPHORE);
+   *pSemaphore = val_semaphore_to_handle(sema);
+
+   return VK_SUCCESS;
+}
+
+void val_DestroySemaphore(
+   VkDevice                                    _device,
+   VkSemaphore                                 _semaphore,
+   const VkAllocationCallbacks*                pAllocator)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_semaphore, semaphore, _semaphore);
+
+   if (!_semaphore)
+      return;
+   vk_object_base_finish(&semaphore->base);
+   vk_free2(&device->alloc, pAllocator, semaphore);
+}
+
+VkResult val_CreateEvent(
+   VkDevice                                    _device,
+   const VkEventCreateInfo*                    pCreateInfo,
+   const VkAllocationCallbacks*                pAllocator,
+   VkEvent*                                    pEvent)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   struct val_event *event = vk_alloc2(&device->alloc, pAllocator,
+                                       sizeof(*event), 8,
+                                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+   if (!event)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vk_object_base_init(&device->vk, &event->base, VK_OBJECT_TYPE_EVENT);
+   *pEvent = val_event_to_handle(event);
+
+   return VK_SUCCESS;
+}
+
+void val_DestroyEvent(
+   VkDevice                                    _device,
+   VkEvent                                     _event,
+   const VkAllocationCallbacks*                pAllocator)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_event, event, _event);
+
+   if (!event)
+      return;
+
+   vk_object_base_finish(&event->base);
+   vk_free2(&device->alloc, pAllocator, event);
+}
+
+VkResult val_GetEventStatus(
+   VkDevice                                    _device,
+   VkEvent                                     _event)
+{
+   VAL_FROM_HANDLE(val_event, event, _event);
+   if (event->event_storage == 1)
+      return VK_EVENT_SET;
+   return VK_EVENT_RESET;
+}
+
+VkResult val_SetEvent(
+   VkDevice                                    _device,
+   VkEvent                                     _event)
+{
+   VAL_FROM_HANDLE(val_event, event, _event);
+   event->event_storage = 1;
+
+   return VK_SUCCESS;
+}
+
+VkResult val_ResetEvent(
+   VkDevice                                    _device,
+   VkEvent                                     _event)
+{
+   VAL_FROM_HANDLE(val_event, event, _event);
+   event->event_storage = 0;
+
+   return VK_SUCCESS;
+}
+
+VkResult val_CreateSampler(
+   VkDevice                                    _device,
+   const VkSamplerCreateInfo*                  pCreateInfo,
+   const VkAllocationCallbacks*                pAllocator,
+   VkSampler*                                  pSampler)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   struct val_sampler *sampler;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
+
+   sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
+                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (!sampler)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vk_object_base_init(&device->vk, &sampler->base,
+                       VK_OBJECT_TYPE_SAMPLER);
+   sampler->create_info = *pCreateInfo;
+   *pSampler = val_sampler_to_handle(sampler);
+
+   return VK_SUCCESS;
+}
+
+void val_DestroySampler(
+   VkDevice                                    _device,
+   VkSampler                                   _sampler,
+   const VkAllocationCallbacks*                pAllocator)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_sampler, sampler, _sampler);
+
+   if (!_sampler)
+      return;
+   vk_object_base_finish(&sampler->base);
+   vk_free2(&device->alloc, pAllocator, sampler);
+}
+
+VkResult val_CreatePrivateDataSlotEXT(
+   VkDevice                                    _device,
+   const VkPrivateDataSlotCreateInfoEXT*       pCreateInfo,
+   const VkAllocationCallbacks*                pAllocator,
+   VkPrivateDataSlotEXT*                       pPrivateDataSlot)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   return vk_private_data_slot_create(&device->vk, pCreateInfo, pAllocator,
+                                      pPrivateDataSlot);
+}
+
+void val_DestroyPrivateDataSlotEXT(
+   VkDevice                                    _device,
+   VkPrivateDataSlotEXT                        privateDataSlot,
+   const VkAllocationCallbacks*                pAllocator)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   vk_private_data_slot_destroy(&device->vk, privateDataSlot, pAllocator);
+}
+
+VkResult val_SetPrivateDataEXT(
+   VkDevice                                    _device,
+   VkObjectType                                objectType,
+   uint64_t                                    objectHandle,
+   VkPrivateDataSlotEXT                        privateDataSlot,
+   uint64_t                                    data)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   return vk_object_base_set_private_data(&device->vk, objectType,
+                                          objectHandle, privateDataSlot,
+                                          data);
+}
+
+void val_GetPrivateDataEXT(
+   VkDevice                                    _device,
+   VkObjectType                                objectType,
+   uint64_t                                    objectHandle,
+   VkPrivateDataSlotEXT                        privateDataSlot,
+   uint64_t*                                   pData)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   vk_object_base_get_private_data(&device->vk, objectType, objectHandle,
+                                   privateDataSlot, pData);
+}
diff --git a/src/gallium/frontends/vallium/val_entrypoints_gen.py b/src/gallium/frontends/vallium/val_entrypoints_gen.py
new file mode 100644 (file)
index 0000000..c44ac6f
--- /dev/null
@@ -0,0 +1,816 @@
+# coding=utf-8
+#
+# Copyright © 2015, 2017 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import argparse
+import copy
+import functools
+import math
+import os
+import xml.etree.ElementTree as et
+
+from collections import OrderedDict, namedtuple
+from mako.template import Template
+
+from val_extensions import *
+
+# We generate a static hash table for entry point lookup
+# (vkGetProcAddress). We use a linear congruential generator for our hash
+# function and a power-of-two size table. The prime numbers are determined
+# experimentally.
+
+# We currently don't use layers in val, but keep the machinery inherited
+# from anv anyway so it can be used for device groups later.
+LAYERS = [
+    'val'
+]
+
+TEMPLATE_H = Template("""\
+/* This file generated from ${filename}, don't edit directly. */
+
+struct val_instance_dispatch_table {
+   union {
+      void *entrypoints[${len(instance_entrypoints)}];
+      struct {
+      % for e in instance_entrypoints:
+        % if e.guard is not None:
+#ifdef ${e.guard}
+          PFN_${e.name} ${e.name};
+#else
+          void *${e.name};
+# endif
+        % else:
+          PFN_${e.name} ${e.name};
+        % endif
+      % endfor
+      };
+   };
+};
+
+struct val_physical_device_dispatch_table {
+   union {
+      void *entrypoints[${len(physical_device_entrypoints)}];
+      struct {
+      % for e in physical_device_entrypoints:
+        % if e.guard is not None:
+#ifdef ${e.guard}
+          PFN_${e.name} ${e.name};
+#else
+          void *${e.name};
+# endif
+        % else:
+          PFN_${e.name} ${e.name};
+        % endif
+      % endfor
+      };
+   };
+};
+
+struct val_device_dispatch_table {
+   union {
+      void *entrypoints[${len(device_entrypoints)}];
+      struct {
+      % for e in device_entrypoints:
+        % if e.guard is not None:
+#ifdef ${e.guard}
+          PFN_${e.name} ${e.name};
+#else
+          void *${e.name};
+# endif
+        % else:
+          PFN_${e.name} ${e.name};
+        % endif
+      % endfor
+      };
+   };
+};
+
+extern const struct val_instance_dispatch_table val_instance_dispatch_table;
+%for layer in LAYERS:
+extern const struct val_physical_device_dispatch_table ${layer}_physical_device_dispatch_table;
+%endfor
+%for layer in LAYERS:
+extern const struct val_device_dispatch_table ${layer}_device_dispatch_table;
+%endfor
+
+% for e in instance_entrypoints:
+  % if e.alias and e.alias.enabled:
+    <% continue %>
+  % endif
+  % if e.guard is not None:
+#ifdef ${e.guard}
+  % endif
+  ${e.return_type} ${e.prefixed_name('val')}(${e.decl_params()});
+  % if e.guard is not None:
+#endif // ${e.guard}
+  % endif
+% endfor
+
+% for e in physical_device_entrypoints:
+  % if e.alias:
+    <% continue %>
+  % endif
+  % if e.guard is not None:
+#ifdef ${e.guard}
+  % endif
+  % for layer in LAYERS:
+  ${e.return_type} ${e.prefixed_name(layer)}(${e.decl_params()});
+  % endfor
+  % if e.guard is not None:
+#endif // ${e.guard}
+  % endif
+% endfor
+
+% for e in device_entrypoints:
+  % if e.alias and e.alias.enabled:
+    <% continue %>
+  % endif
+  % if e.guard is not None:
+#ifdef ${e.guard}
+  % endif
+  % for layer in LAYERS:
+  ${e.return_type} ${e.prefixed_name(layer)}(${e.decl_params()});
+  % endfor
+  % if e.guard is not None:
+#endif // ${e.guard}
+  % endif
+% endfor
+""", output_encoding='utf-8')
+
+TEMPLATE_C = Template(u"""\
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/* This file generated from ${filename}, don't edit directly. */
+
+#include "val_private.h"
+
+#include "util/macros.h"
+struct string_map_entry {
+   uint32_t name;
+   uint32_t hash;
+   uint32_t num;
+};
+
+/* We use a big string constant to avoid lots of relocations from the entry
+ * point table to lots of little strings. The entries in the entry point table
+ * store the index into this big string.
+ */
+
+<%def name="strmap(strmap, prefix)">
+static const char ${prefix}_strings[] =
+% for s in strmap.sorted_strings:
+    "${s.string}\\0"
+% endfor
+;
+
+static const struct string_map_entry ${prefix}_string_map_entries[] = {
+% for s in strmap.sorted_strings:
+    { ${s.offset}, ${'{:0=#8x}'.format(s.hash)}, ${s.num} }, /* ${s.string} */
+% endfor
+};
+
+/* Hash table stats:
+ * size ${len(strmap.sorted_strings)} entries
+ * collisions entries:
+% for i in range(10):
+ *     ${i}${'+' if i == 9 else ' '}     ${strmap.collisions[i]}
+% endfor
+ */
+
+#define none 0xffff
+static const uint16_t ${prefix}_string_map[${strmap.hash_size}] = {
+% for e in strmap.mapping:
+    ${ '{:0=#6x}'.format(e) if e >= 0 else 'none' },
+% endfor
+};
+
+static int
+${prefix}_string_map_lookup(const char *str)
+{
+    static const uint32_t prime_factor = ${strmap.prime_factor};
+    static const uint32_t prime_step = ${strmap.prime_step};
+    const struct string_map_entry *e;
+    uint32_t hash, h;
+    uint16_t i;
+    const char *p;
+
+    hash = 0;
+    for (p = str; *p; p++)
+        hash = hash * prime_factor + *p;
+
+    h = hash;
+    while (1) {
+        i = ${prefix}_string_map[h & ${strmap.hash_mask}];
+        if (i == none)
+           return -1;
+        e = &${prefix}_string_map_entries[i];
+        if (e->hash == hash && strcmp(str, ${prefix}_strings + e->name) == 0)
+            return e->num;
+        h += prime_step;
+    }
+
+    return -1;
+}
+
+static const char *
+${prefix}_entry_name(int num)
+{
+   for (int i = 0; i < ARRAY_SIZE(${prefix}_string_map_entries); i++) {
+      if (${prefix}_string_map_entries[i].num == num)
+         return &${prefix}_strings[${prefix}_string_map_entries[i].name];
+   }
+   return NULL;
+}
+</%def>
+
+${strmap(instance_strmap, 'instance')}
+${strmap(physical_device_strmap, 'physical_device')}
+${strmap(device_strmap, 'device')}
+
+/* Weak aliases for all potential implementations. These will resolve to
+ * NULL if they're not defined, which lets the dispatch tables pick the
+ * correct entry point.
+ */
+
+% for e in instance_entrypoints:
+  % if e.alias and e.alias.enabled:
+    <% continue %>
+  % endif
+  % if e.guard is not None:
+#ifdef ${e.guard}
+  % endif
+  ${e.return_type} ${e.prefixed_name('val')}(${e.decl_params()}) __attribute__ ((weak));
+  % if e.guard is not None:
+#endif // ${e.guard}
+  % endif
+% endfor
+
+const struct val_instance_dispatch_table val_instance_dispatch_table = {
+% for e in instance_entrypoints:
+  % if e.guard is not None:
+#ifdef ${e.guard}
+  % endif
+  .${e.name} = ${e.prefixed_name('val')},
+  % if e.guard is not None:
+#endif // ${e.guard}
+  % endif
+% endfor
+};
+
+% for e in physical_device_entrypoints:
+  % if e.alias and e.alias.enabled:
+    <% continue %>
+  % endif
+  % if e.guard is not None:
+#ifdef ${e.guard}
+  % endif
+  ${e.return_type} ${e.prefixed_name('val')}(${e.decl_params()}) __attribute__ ((weak));
+  % if e.guard is not None:
+#endif // ${e.guard}
+  % endif
+% endfor
+
+const struct val_physical_device_dispatch_table val_physical_device_dispatch_table = {
+% for e in physical_device_entrypoints:
+  % if e.guard is not None:
+#ifdef ${e.guard}
+  % endif
+  .${e.name} = ${e.prefixed_name('val')},
+  % if e.guard is not None:
+#endif // ${e.guard}
+  % endif
+% endfor
+};
+
+
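+/* For the 'val' layer, each device-level entrypoint is emitted as a weak
+ * trampoline that dispatches through the owning device's dispatch table,
+ * keyed off the handle type of the entrypoint's first parameter.
+ */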
+% for layer in LAYERS:
+  % for e in device_entrypoints:
+    % if e.alias and e.alias.enabled:
+      <% continue %>
+    % endif
+    % if e.guard is not None:
+#ifdef ${e.guard}
+    % endif
+    % if layer == 'val':
+      ${e.return_type} __attribute__ ((weak))
+      ${e.prefixed_name('val')}(${e.decl_params()})
+      {
+        % if e.params[0].type == 'VkDevice':
+          VAL_FROM_HANDLE(val_device, val_device, ${e.params[0].name});
+          return val_device->dispatch.${e.name}(${e.call_params()});
+        % elif e.params[0].type == 'VkCommandBuffer':
+          VAL_FROM_HANDLE(val_cmd_buffer, val_cmd_buffer, ${e.params[0].name});
+          return val_cmd_buffer->device->dispatch.${e.name}(${e.call_params()});
+        % elif e.params[0].type == 'VkQueue':
+          VAL_FROM_HANDLE(val_queue, val_queue, ${e.params[0].name});
+          return val_queue->device->dispatch.${e.name}(${e.call_params()});
+        % else:
+          assert(!"Unhandled device child trampoline case: ${e.params[0].type}");
+        % endif
+      }
+    % else:
+      ${e.return_type} ${e.prefixed_name(layer)}(${e.decl_params()}) __attribute__ ((weak));
+    % endif
+    % if e.guard is not None:
+#endif // ${e.guard}
+    % endif
+  % endfor
+
+  const struct val_device_dispatch_table ${layer}_device_dispatch_table = {
+  % for e in device_entrypoints:
+    % if e.guard is not None:
+#ifdef ${e.guard}
+    % endif
+    .${e.name} = ${e.prefixed_name(layer)},
+    % if e.guard is not None:
+#endif // ${e.guard}
+    % endif
+  % endfor
+  };
+% endfor
+
+
+/** Return true if the core version or extension in which the given entrypoint
+ * is defined is enabled.
+ *
+ * Device extensions are always considered enabled at the instance level.
+ */
+bool
+val_instance_entrypoint_is_enabled(int index, uint32_t core_version,
+                                   const struct val_instance_extension_table *instance)
+{
+   switch (index) {
+% for e in instance_entrypoints:
+   case ${e.num}:
+      /* ${e.name} */
+   % if e.core_version:
+      return ${e.core_version.c_vk_version()} <= core_version;
+   % elif e.extensions:
+     % for ext in e.extensions:
+        % if ext.type == 'instance':
+      if (instance->${ext.name[3:]}) return true;
+        % else:
+      /* All device extensions are considered enabled at the instance level */
+      return true;
+        % endif
+     % endfor
+      return false;
+   % else:
+      return true;
+   % endif
+% endfor
+   default:
+      return false;
+   }
+}
+
+/** Return true if the core version or extension in which the given entrypoint
+ * is defined is enabled.
+ *
+ * Device extensions are always considered enabled at this level.
+ */
+bool
+val_physical_device_entrypoint_is_enabled(int index, uint32_t core_version,
+                                          const struct val_instance_extension_table *instance)
+{
+   switch (index) {
+% for e in physical_device_entrypoints:
+   case ${e.num}:
+      /* ${e.name} */
+   % if e.core_version:
+      return ${e.core_version.c_vk_version()} <= core_version;
+   % elif e.extensions:
+     % for ext in e.extensions:
+        % if ext.type == 'instance':
+      if (instance->${ext.name[3:]}) return true;
+        % else:
+      /* All device extensions are considered enabled at the instance level */
+      return true;
+        % endif
+     % endfor
+      return false;
+   % else:
+      return true;
+   % endif
+% endfor
+   default:
+      return false;
+   }
+}
+
+/** Return true if the core version or extension in which the given entrypoint
+ * is defined is enabled.
+ *
+ * If device is NULL, all device extensions are considered enabled.
+ */
+bool
+val_device_entrypoint_is_enabled(int index, uint32_t core_version,
+                                 const struct val_instance_extension_table *instance,
+                                 const struct val_device_extension_table *device)
+{
+   switch (index) {
+% for e in device_entrypoints:
+   case ${e.num}:
+      /* ${e.name} */
+   % if e.core_version:
+      return ${e.core_version.c_vk_version()} <= core_version;
+   % elif e.extensions:
+     % for ext in e.extensions:
+        % if ext.type == 'instance':
+           <% assert False %>
+        % else:
+      if (!device || device->${ext.name[3:]}) return true;
+        % endif
+     % endfor
+      return false;
+   % else:
+      return true;
+   % endif
+% endfor
+   default:
+      return false;
+   }
+}
+
+int
+val_get_instance_entrypoint_index(const char *name)
+{
+   return instance_string_map_lookup(name);
+}
+
+int
+val_get_physical_device_entrypoint_index(const char *name)
+{
+   return physical_device_string_map_lookup(name);
+}
+
+int
+val_get_device_entrypoint_index(const char *name)
+{
+   return device_string_map_lookup(name);
+}
+
+const char *
+val_get_instance_entry_name(int index)
+{
+   return instance_entry_name(index);
+}
+
+const char *
+val_get_physical_device_entry_name(int index)
+{
+   return physical_device_entry_name(index);
+}
+
+const char *
+val_get_device_entry_name(int index)
+{
+   return device_entry_name(index);
+}
+
+static void * __attribute__ ((noinline))
+val_resolve_device_entrypoint(uint32_t index)
+{
+    return val_device_dispatch_table.entrypoints[index];
+}
+
+void *
+val_lookup_entrypoint(const char *name)
+{
+   int idx = val_get_instance_entrypoint_index(name);
+   if (idx >= 0)
+      return val_instance_dispatch_table.entrypoints[idx];
+
+   idx = val_get_physical_device_entrypoint_index(name);
+   if (idx >= 0)
+      return val_physical_device_dispatch_table.entrypoints[idx];
+
+   idx = val_get_device_entrypoint_index(name);
+   if (idx >= 0)
+      return val_resolve_device_entrypoint(idx);
+
+   return NULL;
+}""", output_encoding='utf-8')
+
+U32_MASK = 2**32 - 1
+
+PRIME_FACTOR = 5024183
+PRIME_STEP = 19
+
+def round_to_pow2(x):
+    return 2**int(math.ceil(math.log(x, 2)))
+
+class StringIntMapEntry(object):
+    def __init__(self, string, num):
+        self.string = string
+        self.num = num
+
+        # Calculate the same hash value that we will calculate in C.
+        h = 0
+        for c in string:
+            h = ((h * PRIME_FACTOR) + ord(c)) & U32_MASK
+        self.hash = h
+
+        self.offset = None
+
+class StringIntMap(object):
+    def __init__(self):
+        self.baked = False
+        self.strings = dict()
+
+    def add_string(self, string, num):
+        assert not self.baked
+        assert string not in self.strings
+        assert num >= 0 and num < 2**31
+        self.strings[string] = StringIntMapEntry(string, num)
+
+    def bake(self):
+        self.sorted_strings = \
+            sorted(self.strings.values(), key=lambda x: x.string)
+        offset = 0
+        for entry in self.sorted_strings:
+            entry.offset = offset
+            offset += len(entry.string) + 1
+
+        # Save off some values that we'll need in C
+        self.hash_size = round_to_pow2(len(self.strings) * 1.25)
+        self.hash_mask = self.hash_size - 1
+        self.prime_factor = PRIME_FACTOR
+        self.prime_step = PRIME_STEP
+
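+        # Open addressing: on a collision keep probing with a fixed prime
+        # step; probe depths are tallied so the template can emit the
+        # "Hash table stats" comment into the generated C file.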
+        self.mapping = [-1] * self.hash_size
+        self.collisions = [0] * 10
+        for idx, s in enumerate(self.sorted_strings):
+            level = 0
+            h = s.hash
+            while self.mapping[h & self.hash_mask] >= 0:
+                h = h + PRIME_STEP
+                level = level + 1
+            self.collisions[min(level, 9)] += 1
+            self.mapping[h & self.hash_mask] = idx
+
+EntrypointParam = namedtuple('EntrypointParam', 'type name decl')
+
+class EntrypointBase(object):
+    def __init__(self, name):
+        self.name = name
+        self.alias = None
+        self.guard = None
+        self.enabled = False
+        self.num = None
+        # Extensions which require this entrypoint
+        self.core_version = None
+        self.extensions = []
+
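+    # Map a Vulkan command name to a driver symbol, e.g. prefixed_name('val')
+    # turns 'vkCreateDevice' into 'val_CreateDevice'.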
+    def prefixed_name(self, prefix):
+        assert self.name.startswith('vk')
+        return prefix + '_' + self.name[2:]
+
+class Entrypoint(EntrypointBase):
+    def __init__(self, name, return_type, params, guard = None):
+        super(Entrypoint, self).__init__(name)
+        self.return_type = return_type
+        self.params = params
+        self.guard = guard
+
+    def is_physical_device_entrypoint(self):
+        return self.params[0].type in ('VkPhysicalDevice', )
+
+    def is_device_entrypoint(self):
+        return self.params[0].type in ('VkDevice', 'VkCommandBuffer', 'VkQueue')
+
+    def decl_params(self):
+        return ', '.join(p.decl for p in self.params)
+
+    def call_params(self):
+        return ', '.join(p.name for p in self.params)
+
+class EntrypointAlias(EntrypointBase):
+    def __init__(self, name, entrypoint):
+        super(EntrypointAlias, self).__init__(name)
+        self.alias = entrypoint
+
+    def is_physical_device_entrypoint(self):
+        return self.alias.is_physical_device_entrypoint()
+
+    def is_device_entrypoint(self):
+        return self.alias.is_device_entrypoint()
+
+    def prefixed_name(self, prefix):
+        if self.alias.enabled:
+            return self.alias.prefixed_name(prefix)
+        return super(EntrypointAlias, self).prefixed_name(prefix)
+
+    @property
+    def params(self):
+        return self.alias.params
+
+    @property
+    def return_type(self):
+        return self.alias.return_type
+
+    def decl_params(self):
+        return self.alias.decl_params()
+
+    def call_params(self):
+        return self.alias.call_params()
+
+def get_entrypoints(doc, entrypoints_to_defines):
+    """Extract the entry points from the registry."""
+    entrypoints = OrderedDict()
+
+    for command in doc.findall('./commands/command'):
+        if 'alias' in command.attrib:
+            alias = command.attrib['name']
+            target = command.attrib['alias']
+            entrypoints[alias] = EntrypointAlias(alias, entrypoints[target])
+        else:
+            name = command.find('./proto/name').text
+            ret_type = command.find('./proto/type').text
+            params = [EntrypointParam(
+                type = p.find('./type').text,
+                name = p.find('./name').text,
+                decl = ''.join(p.itertext())
+            ) for p in command.findall('./param')]
+            guard = entrypoints_to_defines.get(name)
+            # They really need to be unique
+            assert name not in entrypoints
+            entrypoints[name] = Entrypoint(name, ret_type, params, guard)
+
+    for feature in doc.findall('./feature'):
+        assert feature.attrib['api'] == 'vulkan'
+        version = VkVersion(feature.attrib['number'])
+        if version > MAX_API_VERSION:
+            continue
+
+        for command in feature.findall('./require/command'):
+            e = entrypoints[command.attrib['name']]
+            e.enabled = True
+            assert e.core_version is None
+            e.core_version = version
+
+    supported_exts = dict((ext.name, ext) for ext in EXTENSIONS)
+    for extension in doc.findall('./extensions/extension'):
+        ext_name = extension.attrib['name']
+        if ext_name not in supported_exts:
+            continue
+
+        ext = supported_exts[ext_name]
+        ext.type = extension.attrib['type']
+
+        for command in extension.findall('./require/command'):
+            e = entrypoints[command.attrib['name']]
+            e.enabled = True
+            assert e.core_version is None
+            e.extensions.append(ext)
+
+    # If the base command is not enabled, turn the alias into a standalone
+    # entrypoint rather than aliasing a disabled command.
+    for e in entrypoints.values():
+        if e.alias and not e.alias.enabled:
+            e_clone = copy.deepcopy(e.alias)
+            e_clone.enabled = True
+            e_clone.name = e.name
+            entrypoints[e.name] = e_clone
+
+    return [e for e in entrypoints.values() if e.enabled]
+
+
+def get_entrypoints_defines(doc):
+    """Maps entry points to extension defines."""
+    entrypoints_to_defines = {}
+
+    platform_define = {}
+    for platform in doc.findall('./platforms/platform'):
+        name = platform.attrib['name']
+        define = platform.attrib['protect']
+        platform_define[name] = define
+
+    for extension in doc.findall('./extensions/extension[@platform]'):
+        platform = extension.attrib['platform']
+        define = platform_define[platform]
+
+        for entrypoint in extension.findall('./require/command'):
+            fullname = entrypoint.attrib['name']
+            entrypoints_to_defines[fullname] = define
+
+    return entrypoints_to_defines
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--outdir', help='Where to write the files.',
+                        required=True)
+    parser.add_argument('--xml',
+                        help='Vulkan API XML file.',
+                        required=True,
+                        action='append',
+                        dest='xml_files')
+    args = parser.parse_args()
+
+    entrypoints = []
+
+    for filename in args.xml_files:
+        doc = et.parse(filename)
+        entrypoints += get_entrypoints(doc, get_entrypoints_defines(doc))
+
+    device_entrypoints = []
+    physical_device_entrypoints = []
+    instance_entrypoints = []
+    for e in entrypoints:
+        if e.is_device_entrypoint():
+            device_entrypoints.append(e)
+        elif e.is_physical_device_entrypoint():
+            physical_device_entrypoints.append(e)
+        else:
+            instance_entrypoints.append(e)
+
+    device_strmap = StringIntMap()
+    for num, e in enumerate(device_entrypoints):
+        device_strmap.add_string(e.name, num)
+        e.num = num
+    device_strmap.bake()
+
+    physical_device_strmap = StringIntMap()
+    for num, e in enumerate(physical_device_entrypoints):
+        physical_device_strmap.add_string(e.name, num)
+        e.num = num
+    physical_device_strmap.bake()
+
+    instance_strmap = StringIntMap()
+    for num, e in enumerate(instance_entrypoints):
+        instance_strmap.add_string(e.name, num)
+        e.num = num
+    instance_strmap.bake()
+
+    # For val_entrypoints.h we generate a val_EntryPoint() prototype per
+    # entry point.
+    try:
+        with open(os.path.join(args.outdir, 'val_entrypoints.h'), 'wb') as f:
+            f.write(TEMPLATE_H.render(instance_entrypoints=instance_entrypoints,
+                                      physical_device_entrypoints=physical_device_entrypoints,
+                                      device_entrypoints=device_entrypoints,
+                                      LAYERS=LAYERS,
+                                      filename=os.path.basename(__file__)))
+        with open(os.path.join(args.outdir, 'val_entrypoints.c'), 'wb') as f:
+            f.write(TEMPLATE_C.render(instance_entrypoints=instance_entrypoints,
+                                      physical_device_entrypoints=physical_device_entrypoints,
+                                      device_entrypoints=device_entrypoints,
+                                      LAYERS=LAYERS,
+                                      instance_strmap=instance_strmap,
+                                      physical_device_strmap=physical_device_strmap,
+                                      device_strmap=device_strmap,
+                                      filename=os.path.basename(__file__)))
+    except Exception:
+        # If anything goes wrong and Python is running in debug mode, pull in
+        # mako's helpers to print a readable stack trace and exit with status 1;
+        # otherwise just re-raise the exception.
+        if __debug__:
+            import sys
+            from mako import exceptions
+            sys.stderr.write(exceptions.text_error_template().render() + '\n')
+            sys.exit(1)
+        raise
+
+
+if __name__ == '__main__':
+    main()
diff --git a/src/gallium/frontends/vallium/val_execute.c b/src/gallium/frontends/vallium/val_execute.c
new file mode 100644 (file)
index 0000000..16699bf
--- /dev/null
@@ -0,0 +1,2471 @@
+/*
+ * Copyright © 2019 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/* use a gallium context to execute a command buffer */
+
+#include "val_private.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+#include "val_conv.h"
+
+#include "pipe/p_shader_tokens.h"
+#include "tgsi/tgsi_text.h"
+#include "tgsi/tgsi_parse.h"
+
+#include "util/format/u_format.h"
+#include "util/u_surface.h"
+#include "util/u_sampler.h"
+#include "util/u_box.h"
+#include "util/u_inlines.h"
+#include "util/format/u_format_zs.h"
+
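+/* All of the gallium state needed to replay a command buffer, along with
+ * dirty flags so CSOs and state are only re-emitted when something changed.
+ */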
+struct rendering_state {
+   struct pipe_context *pctx;
+
+   bool blend_dirty;
+   bool rs_dirty;
+   bool dsa_dirty;
+   bool stencil_ref_dirty;
+   bool clip_state_dirty;
+   bool blend_color_dirty;
+   bool ve_dirty;
+   bool vb_dirty;
+   bool constbuf_dirty[PIPE_SHADER_TYPES];
+   bool pcbuf_dirty[PIPE_SHADER_TYPES];
+   bool vp_dirty;
+   bool scissor_dirty;
+   bool ib_dirty;
+   bool sample_mask_dirty;
+   bool min_samples_dirty;
+   struct pipe_draw_indirect_info indirect_info;
+   struct pipe_draw_info info;
+
+   struct pipe_grid_info dispatch_info;
+   struct pipe_framebuffer_state framebuffer;
+
+   struct pipe_blend_state blend_state;
+   void *blend_handle;
+   struct pipe_rasterizer_state rs_state;
+   void *rast_handle;
+   struct pipe_depth_stencil_alpha_state dsa_state;
+   void *dsa_handle;
+
+   struct pipe_blend_color blend_color;
+   struct pipe_stencil_ref stencil_ref;
+   struct pipe_clip_state clip_state;
+
+   int num_scissors;
+   struct pipe_scissor_state scissors[16];
+
+   int num_viewports;
+   struct pipe_viewport_state viewports[16];
+
+   ubyte index_size;
+   unsigned index_offset;
+   struct pipe_resource *index_buffer;
+   struct pipe_constant_buffer pc_buffer[PIPE_SHADER_TYPES];
+   struct pipe_constant_buffer const_buffer[PIPE_SHADER_TYPES][16];
+   int num_const_bufs[PIPE_SHADER_TYPES];
+   int num_vb;
+   unsigned start_vb;
+   struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
+   int num_ve;
+   struct pipe_vertex_element ve[PIPE_MAX_ATTRIBS];
+
+   struct pipe_sampler_view *sv[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
+   int num_sampler_views[PIPE_SHADER_TYPES];
+   struct pipe_sampler_state ss[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
+   int num_sampler_states[PIPE_SHADER_TYPES];
+   bool sv_dirty[PIPE_SHADER_TYPES];
+   bool ss_dirty[PIPE_SHADER_TYPES];
+
+   struct pipe_image_view iv[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_IMAGES];
+   int num_shader_images[PIPE_SHADER_TYPES];
+   struct pipe_shader_buffer sb[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_BUFFERS];
+   int num_shader_buffers[PIPE_SHADER_TYPES];
+   bool iv_dirty[PIPE_SHADER_TYPES];
+   bool sb_dirty[PIPE_SHADER_TYPES];
+   void *ss_cso[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
+   void *velems_cso;
+
+   uint8_t push_constants[128 * 4];
+
+   struct val_render_pass *pass;
+   uint32_t subpass;
+   struct val_framebuffer *vk_framebuffer;
+   VkRect2D render_area;
+
+   uint32_t sample_mask;
+   unsigned min_samples;
+
+   struct val_attachment_state *attachments;
+};
+
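+/* Flush any dirty compute-stage state (images, push/constant buffers, SSBOs,
+ * sampler views and sampler states) to the gallium context.
+ */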
+static void emit_compute_state(struct rendering_state *state)
+{
+   if (state->iv_dirty[PIPE_SHADER_COMPUTE]) {
+      state->pctx->set_shader_images(state->pctx, PIPE_SHADER_COMPUTE,
+                                     0, state->num_shader_images[PIPE_SHADER_COMPUTE],
+                                     state->iv[PIPE_SHADER_COMPUTE]);
+      state->iv_dirty[PIPE_SHADER_COMPUTE] = false;
+   }
+
+   if (state->pcbuf_dirty[PIPE_SHADER_COMPUTE]) {
+      state->pctx->set_constant_buffer(state->pctx, PIPE_SHADER_COMPUTE,
+                                       0, &state->pc_buffer[PIPE_SHADER_COMPUTE]);
+      state->pcbuf_dirty[PIPE_SHADER_COMPUTE] = false;
+   }
+
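+   /* Constant buffer slot 0 holds the push constants bound above, so API
+    * uniform buffers start at slot 1.
+    */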
+   if (state->constbuf_dirty[PIPE_SHADER_COMPUTE]) {
+      for (unsigned i = 0; i < state->num_const_bufs[PIPE_SHADER_COMPUTE]; i++)
+         state->pctx->set_constant_buffer(state->pctx, PIPE_SHADER_COMPUTE,
+                                          i + 1, &state->const_buffer[PIPE_SHADER_COMPUTE][i]);
+      state->constbuf_dirty[PIPE_SHADER_COMPUTE] = false;
+   }
+
+   if (state->sb_dirty[PIPE_SHADER_COMPUTE]) {
+      state->pctx->set_shader_buffers(state->pctx, PIPE_SHADER_COMPUTE,
+                                      0, state->num_shader_buffers[PIPE_SHADER_COMPUTE],
+                                      state->sb[PIPE_SHADER_COMPUTE], 0);
+      state->sb_dirty[PIPE_SHADER_COMPUTE] = false;
+   }
+
+   if (state->sv_dirty[PIPE_SHADER_COMPUTE]) {
+      state->pctx->set_sampler_views(state->pctx, PIPE_SHADER_COMPUTE, 0, state->num_sampler_views[PIPE_SHADER_COMPUTE],
+                                     state->sv[PIPE_SHADER_COMPUTE]);
+      state->sv_dirty[PIPE_SHADER_COMPUTE] = false;
+   }
+
+   if (state->ss_dirty[PIPE_SHADER_COMPUTE]) {
+      for (unsigned i = 0; i < state->num_sampler_states[PIPE_SHADER_COMPUTE]; i++) {
+         if (state->ss_cso[PIPE_SHADER_COMPUTE][i])
+            state->pctx->delete_sampler_state(state->pctx, state->ss_cso[PIPE_SHADER_COMPUTE][i]);
+         state->ss_cso[PIPE_SHADER_COMPUTE][i] = state->pctx->create_sampler_state(state->pctx, &state->ss[PIPE_SHADER_COMPUTE][i]);
+      }
+      state->pctx->bind_sampler_states(state->pctx, PIPE_SHADER_COMPUTE, 0, state->num_sampler_states[PIPE_SHADER_COMPUTE], state->ss_cso[PIPE_SHADER_COMPUTE]);
+      state->ss_dirty[PIPE_SHADER_COMPUTE] = false;
+   }
+}
+
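+/* Re-create and re-bind any CSOs (blend, rasterizer, DSA, vertex elements,
+ * samplers) whose source state changed, and set all other dirty state.
+ */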
+static void emit_state(struct rendering_state *state)
+{
+   int sh;
+   if (state->blend_dirty) {
+      if (state->blend_handle) {
+         state->pctx->bind_blend_state(state->pctx, NULL);
+         state->pctx->delete_blend_state(state->pctx, state->blend_handle);
+      }
+      state->blend_handle = state->pctx->create_blend_state(state->pctx,
+                                                            &state->blend_state);
+      state->pctx->bind_blend_state(state->pctx, state->blend_handle);
+
+      state->blend_dirty = false;
+   }
+
+   if (state->rs_dirty) {
+      if (state->rast_handle) {
+         state->pctx->bind_rasterizer_state(state->pctx, NULL);
+         state->pctx->delete_rasterizer_state(state->pctx, state->rast_handle);
+      }
+      state->rast_handle = state->pctx->create_rasterizer_state(state->pctx,
+                                                                &state->rs_state);
+      state->pctx->bind_rasterizer_state(state->pctx, state->rast_handle);
+      state->rs_dirty = false;
+   }
+
+   if (state->dsa_dirty) {
+      if (state->dsa_handle) {
+         state->pctx->bind_depth_stencil_alpha_state(state->pctx, NULL);
+         state->pctx->delete_depth_stencil_alpha_state(state->pctx, state->dsa_handle);
+      }
+      state->dsa_handle = state->pctx->create_depth_stencil_alpha_state(state->pctx,
+                                                                        &state->dsa_state);
+      state->pctx->bind_depth_stencil_alpha_state(state->pctx, state->dsa_handle);
+
+      state->dsa_dirty = false;
+   }
+
+   if (state->sample_mask_dirty) {
+      state->pctx->set_sample_mask(state->pctx, state->sample_mask);
+      state->sample_mask_dirty = false;
+   }
+
+   if (state->min_samples_dirty) {
+      state->pctx->set_min_samples(state->pctx, state->min_samples);
+      state->min_samples_dirty = false;
+   }
+
+   if (state->blend_color_dirty) {
+      state->pctx->set_blend_color(state->pctx, &state->blend_color);
+      state->blend_color_dirty = false;
+   }
+
+   if (state->stencil_ref_dirty) {
+      state->pctx->set_stencil_ref(state->pctx, &state->stencil_ref);
+      state->stencil_ref_dirty = false;
+   }
+
+   if (state->vb_dirty) {
+      state->pctx->set_vertex_buffers(state->pctx, state->start_vb,
+                                      state->num_vb, state->vb);
+      state->vb_dirty = false;
+   }
+
+   if (state->ve_dirty) {
+      void *ve = NULL;
+      if (state->velems_cso)
+         ve = state->velems_cso;
+
+      state->velems_cso = state->pctx->create_vertex_elements_state(state->pctx, state->num_ve,
+                                                                    state->ve);
+      state->pctx->bind_vertex_elements_state(state->pctx, state->velems_cso);
+
+      if (ve)
+         state->pctx->delete_vertex_elements_state(state->pctx, ve);
+   }
+
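+   /* As on the compute path, constant buffer slot 0 is the push-constant
+    * buffer, so API uniform buffers are bound starting at slot 1.
+    */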
+   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
+      if (state->constbuf_dirty[sh]) {
+         for (unsigned idx = 0; idx < state->num_const_bufs[sh]; idx++)
+            state->pctx->set_constant_buffer(state->pctx, sh,
+                                             idx + 1, &state->const_buffer[sh][idx]);
+      }
+      state->constbuf_dirty[sh] = false;
+   }
+
+   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
+      if (state->pcbuf_dirty[sh]) {
+         state->pctx->set_constant_buffer(state->pctx, sh,
+                                          0, &state->pc_buffer[sh]);
+      }
+   }
+
+   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
+      if (state->sb_dirty[sh]) {
+         state->pctx->set_shader_buffers(state->pctx, sh,
+                                         0, state->num_shader_buffers[sh],
+                                         state->sb[sh], 0);
+      }
+   }
+
+   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
+      if (state->iv_dirty[sh]) {
+         state->pctx->set_shader_images(state->pctx, sh,
+                                        0, state->num_shader_images[sh],
+                                        state->iv[sh]);
+      }
+   }
+
+   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
+
+      if (!state->sv_dirty[sh])
+         continue;
+
+      state->pctx->set_sampler_views(state->pctx, sh, 0, state->num_sampler_views[sh],
+                                     state->sv[sh]);
+      state->sv_dirty[sh] = false;
+   }
+
+   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
+      int i;
+      if (!state->ss_dirty[sh])
+         continue;
+
+      for (i = 0; i < state->num_sampler_states[sh]; i++) {
+         if (state->ss_cso[sh][i])
+            state->pctx->delete_sampler_state(state->pctx, state->ss_cso[sh][i]);
+         state->ss_cso[sh][i] = state->pctx->create_sampler_state(state->pctx, &state->ss[sh][i]);
+      }
+
+      state->pctx->bind_sampler_states(state->pctx, sh, 0, state->num_sampler_states[sh], state->ss_cso[sh]);
+   }
+
+   if (state->vp_dirty) {
+      state->pctx->set_viewport_states(state->pctx, 0, state->num_viewports, state->viewports);
+      state->vp_dirty = false;
+   }
+
+   if (state->scissor_dirty) {
+      state->pctx->set_scissor_states(state->pctx, 0, state->num_scissors, state->scissors);
+      state->scissor_dirty = false;
+   }
+}
+
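+/* Bind the compute shader and remember its workgroup size for later dispatches. */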
+static void handle_compute_pipeline(struct val_cmd_buffer_entry *cmd,
+                                    struct rendering_state *state)
+{
+   struct val_pipeline *pipeline = cmd->u.pipeline.pipeline;
+
+   state->dispatch_info.block[0] = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.cs.local_size[0];
+   state->dispatch_info.block[1] = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.cs.local_size[1];
+   state->dispatch_info.block[2] = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.cs.local_size[2];
+   state->pctx->bind_compute_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_COMPUTE]);
+}
+
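+/* Convert a Vulkan viewport into gallium's scale/translate form; since
+ * clip_halfz is used, z in [0,1] maps to minDepth + z * (maxDepth - minDepth).
+ */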
+static void
+get_viewport_xform(const VkViewport *viewport,
+                   float scale[3], float translate[3])
+{
+   float x = viewport->x;
+   float y = viewport->y;
+   float half_width = 0.5f * viewport->width;
+   float half_height = 0.5f * viewport->height;
+   double n = viewport->minDepth;
+   double f = viewport->maxDepth;
+
+   scale[0] = half_width;
+   translate[0] = half_width + x;
+   scale[1] = half_height;
+   translate[1] = half_height + y;
+
+   scale[2] = (f - n);
+   translate[2] = n;
+}
+
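+/* Translate a graphics pipeline's shaders and fixed-function create-info into
+ * gallium state, skipping anything the pipeline marks as dynamic.
+ */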
+static void handle_graphics_pipeline(struct val_cmd_buffer_entry *cmd,
+                                     struct rendering_state *state)
+{
+   struct val_pipeline *pipeline = cmd->u.pipeline.pipeline;
+   bool dynamic_states[VK_DYNAMIC_STATE_STENCIL_REFERENCE+1];
+   unsigned fb_samples = 0;
+
+   memset(dynamic_states, 0, sizeof(dynamic_states));
+   if (pipeline->graphics_create_info.pDynamicState)
+   {
+      const VkPipelineDynamicStateCreateInfo *dyn = pipeline->graphics_create_info.pDynamicState;
+      int i;
+      for (i = 0; i < dyn->dynamicStateCount; i++) {
+         if (dyn->pDynamicStates[i] > VK_DYNAMIC_STATE_STENCIL_REFERENCE)
+            continue;
+         dynamic_states[dyn->pDynamicStates[i]] = true;
+      }
+   }
+
+   bool has_stage[PIPE_SHADER_TYPES] = { false };
+
+   state->pctx->bind_gs_state(state->pctx, NULL);
+   if (state->pctx->bind_tcs_state)
+      state->pctx->bind_tcs_state(state->pctx, NULL);
+   if (state->pctx->bind_tes_state)
+      state->pctx->bind_tes_state(state->pctx, NULL);
+   {
+      int i;
+      for (i = 0; i < pipeline->graphics_create_info.stageCount; i++) {
+         const VkPipelineShaderStageCreateInfo *sh = &pipeline->graphics_create_info.pStages[i];
+         switch (sh->stage) {
+         case VK_SHADER_STAGE_FRAGMENT_BIT:
+            state->pctx->bind_fs_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_FRAGMENT]);
+            has_stage[PIPE_SHADER_FRAGMENT] = true;
+            break;
+         case VK_SHADER_STAGE_VERTEX_BIT:
+            state->pctx->bind_vs_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_VERTEX]);
+            has_stage[PIPE_SHADER_VERTEX] = true;
+            break;
+         case VK_SHADER_STAGE_GEOMETRY_BIT:
+            state->pctx->bind_gs_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_GEOMETRY]);
+            has_stage[PIPE_SHADER_GEOMETRY] = true;
+            break;
+         case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
+            state->pctx->bind_tcs_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_TESS_CTRL]);
+            has_stage[PIPE_SHADER_TESS_CTRL] = true;
+            break;
+         case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
+            state->pctx->bind_tes_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_TESS_EVAL]);
+            has_stage[PIPE_SHADER_TESS_EVAL] = true;
+            break;
+         default:
+            assert(0);
+            break;
+         }
+      }
+   }
+
+   /* there should always be a dummy fs. */
+   if (!has_stage[PIPE_SHADER_FRAGMENT])
+      state->pctx->bind_fs_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_FRAGMENT]);
+   if (state->pctx->bind_gs_state && !has_stage[PIPE_SHADER_GEOMETRY])
+      state->pctx->bind_gs_state(state->pctx, NULL);
+   if (state->pctx->bind_tcs_state && !has_stage[PIPE_SHADER_TESS_CTRL])
+      state->pctx->bind_tcs_state(state->pctx, NULL);
+   if (state->pctx->bind_tes_state && !has_stage[PIPE_SHADER_TESS_EVAL])
+      state->pctx->bind_tes_state(state->pctx, NULL);
+
+   /* rasterization state */
+   if (pipeline->graphics_create_info.pRasterizationState) {
+      const VkPipelineRasterizationStateCreateInfo *rsc = pipeline->graphics_create_info.pRasterizationState;
+      state->rs_state.depth_clip_near = state->rs_state.depth_clip_far = !rsc->depthClampEnable;
+      state->rs_state.rasterizer_discard = rsc->rasterizerDiscardEnable;
+      state->rs_state.front_ccw = (rsc->frontFace == VK_FRONT_FACE_COUNTER_CLOCKWISE);
+      state->rs_state.cull_face = vk_cull_to_pipe(rsc->cullMode);
+      state->rs_state.fill_front = vk_polygon_mode_to_pipe(rsc->polygonMode);
+      state->rs_state.fill_back = vk_polygon_mode_to_pipe(rsc->polygonMode);
+      state->rs_state.point_size_per_vertex = true;
+      state->rs_state.flatshade_first = true;
+      state->rs_state.point_quad_rasterization = true;
+      state->rs_state.clip_halfz = true;
+      state->rs_state.half_pixel_center = true;
+      state->rs_state.scissor = true;
+
+      if (!dynamic_states[VK_DYNAMIC_STATE_LINE_WIDTH])
+         state->rs_state.line_width = rsc->lineWidth;
+
+      if (!dynamic_states[VK_DYNAMIC_STATE_DEPTH_BIAS]) {
+         state->rs_state.offset_units = rsc->depthBiasConstantFactor;
+         state->rs_state.offset_scale = rsc->depthBiasSlopeFactor;
+         state->rs_state.offset_clamp = rsc->depthBiasClamp;
+      }
+      state->rs_dirty = true;
+   }
+
+   if (pipeline->graphics_create_info.pMultisampleState) {
+      const VkPipelineMultisampleStateCreateInfo *ms = pipeline->graphics_create_info.pMultisampleState;
+      state->rs_state.multisample = ms->rasterizationSamples > 1;
+      state->sample_mask = ms->pSampleMask ? ms->pSampleMask[0] : 0xffffffff;
+      state->blend_state.alpha_to_coverage = ms->alphaToCoverageEnable;
+      state->blend_state.alpha_to_one = ms->alphaToOneEnable;
+      state->blend_dirty = true;
+      state->rs_dirty = true;
+      state->min_samples = 1;
+      state->sample_mask_dirty = true;
+      fb_samples = ms->rasterizationSamples;
+      if (ms->sampleShadingEnable) {
+         state->min_samples = ceil(ms->rasterizationSamples * ms->minSampleShading);
+         if (state->min_samples > 1)
+            state->min_samples = ms->rasterizationSamples;
+         if (state->min_samples < 1)
+            state->min_samples = 1;
+      }
+      if (pipeline->force_min_sample)
+         state->min_samples = ms->rasterizationSamples;
+      state->min_samples_dirty = true;
+   } else {
+      state->rs_state.multisample = false;
+      state->blend_state.alpha_to_coverage = false;
+      state->blend_state.alpha_to_one = false;
+      state->rs_dirty = true;
+   }
+
+   if (pipeline->graphics_create_info.pDepthStencilState) {
+      const VkPipelineDepthStencilStateCreateInfo *dsa = pipeline->graphics_create_info.pDepthStencilState;
+
+      state->dsa_state.depth.enabled = dsa->depthTestEnable;
+      state->dsa_state.depth.writemask = dsa->depthWriteEnable;
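+      /* VkCompareOp values match PIPE_FUNC_* exactly, so they are used directly. */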
+      state->dsa_state.depth.func = dsa->depthCompareOp;
+      state->dsa_state.depth.bounds_test = dsa->depthBoundsTestEnable;
+
+      if (!dynamic_states[VK_DYNAMIC_STATE_DEPTH_BOUNDS]) {
+         state->dsa_state.depth.bounds_min = dsa->minDepthBounds;
+         state->dsa_state.depth.bounds_max = dsa->maxDepthBounds;
+      }
+
+      state->dsa_state.stencil[0].enabled = dsa->stencilTestEnable;
+      state->dsa_state.stencil[0].func = dsa->front.compareOp;
+      state->dsa_state.stencil[0].fail_op = vk_conv_stencil_op(dsa->front.failOp);
+      state->dsa_state.stencil[0].zpass_op = vk_conv_stencil_op(dsa->front.passOp);
+      state->dsa_state.stencil[0].zfail_op = vk_conv_stencil_op(dsa->front.depthFailOp);
+
+      state->dsa_state.stencil[1].enabled = dsa->stencilTestEnable;
+      state->dsa_state.stencil[1].func = dsa->back.compareOp;
+      state->dsa_state.stencil[1].fail_op = vk_conv_stencil_op(dsa->back.failOp);
+      state->dsa_state.stencil[1].zpass_op = vk_conv_stencil_op(dsa->back.passOp);
+      state->dsa_state.stencil[1].zfail_op = vk_conv_stencil_op(dsa->back.depthFailOp);
+
+      if (!dynamic_states[VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK]) {
+         state->dsa_state.stencil[0].valuemask = dsa->front.compareMask;
+         state->dsa_state.stencil[1].valuemask = dsa->back.compareMask;
+      }
+
+      if (!dynamic_states[VK_DYNAMIC_STATE_STENCIL_WRITE_MASK]) {
+         state->dsa_state.stencil[0].writemask = dsa->front.writeMask;
+         state->dsa_state.stencil[1].writemask = dsa->back.writeMask;
+      }
+
+      if (dsa->stencilTestEnable) {
+         if (!dynamic_states[VK_DYNAMIC_STATE_STENCIL_REFERENCE]) {
+            state->stencil_ref.ref_value[0] = dsa->front.reference;
+            state->stencil_ref.ref_value[1] = dsa->back.reference;
+            state->stencil_ref_dirty = true;
+         }
+      }
+
+      state->dsa_dirty = true;
+   }
+
+   if (pipeline->graphics_create_info.pColorBlendState) {
+      const VkPipelineColorBlendStateCreateInfo *cb = pipeline->graphics_create_info.pColorBlendState;
+      int i;
+      if (cb->attachmentCount > 1)
+         state->blend_state.independent_blend_enable = true;
+      for (i = 0; i < cb->attachmentCount; i++) {
+         state->blend_state.rt[i].colormask = cb->pAttachments[i].colorWriteMask;
+         state->blend_state.rt[i].blend_enable = cb->pAttachments[i].blendEnable;
+         state->blend_state.rt[i].rgb_func = vk_conv_blend_func(cb->pAttachments[i].colorBlendOp);
+         state->blend_state.rt[i].rgb_src_factor = vk_conv_blend_factor(cb->pAttachments[i].srcColorBlendFactor);
+         state->blend_state.rt[i].rgb_dst_factor = vk_conv_blend_factor(cb->pAttachments[i].dstColorBlendFactor);
+         state->blend_state.rt[i].alpha_func = vk_conv_blend_func(cb->pAttachments[i].alphaBlendOp);
+         state->blend_state.rt[i].alpha_src_factor = vk_conv_blend_factor(cb->pAttachments[i].srcAlphaBlendFactor);
+         state->blend_state.rt[i].alpha_dst_factor = vk_conv_blend_factor(cb->pAttachments[i].dstAlphaBlendFactor);
+
+         /* At least llvmpipe applies the blend factors before the blend function,
+          * regardless of which function is used (as i965 hardware does), so for
+          * MIN/MAX the factors have to be forced to ONE.
+          */
+         if (cb->pAttachments[i].colorBlendOp == VK_BLEND_OP_MIN ||
+             cb->pAttachments[i].colorBlendOp == VK_BLEND_OP_MAX) {
+            state->blend_state.rt[i].rgb_src_factor = PIPE_BLENDFACTOR_ONE;
+            state->blend_state.rt[i].rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
+         }
+
+         if (cb->pAttachments[i].alphaBlendOp == VK_BLEND_OP_MIN ||
+             cb->pAttachments[i].alphaBlendOp == VK_BLEND_OP_MAX) {
+            state->blend_state.rt[i].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
+            state->blend_state.rt[i].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
+         }
+      }
+      state->blend_dirty = true;
+      if (!dynamic_states[VK_DYNAMIC_STATE_BLEND_CONSTANTS]) {
+         memcpy(state->blend_color.color, cb->blendConstants, 4 * sizeof(float));
+         state->blend_color_dirty = true;
+      }
+   }
+
+   {
+      const VkPipelineVertexInputStateCreateInfo *vi = pipeline->graphics_create_info.pVertexInputState;
+      int i;
+
+      for (i = 0; i < vi->vertexBindingDescriptionCount; i++) {
+         state->vb[i].stride = vi->pVertexBindingDescriptions[i].stride;
+      }
+
+      int max_location = -1;
+      for (i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
+         unsigned location = vi->pVertexAttributeDescriptions[i].location;
+         state->ve[location].src_offset = vi->pVertexAttributeDescriptions[i].offset;
+         state->ve[location].vertex_buffer_index = vi->pVertexAttributeDescriptions[i].binding;
+         state->ve[location].src_format = vk_format_to_pipe(vi->pVertexAttributeDescriptions[i].format);
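+         /* VK_VERTEX_INPUT_RATE_VERTEX/INSTANCE are 0/1, which map directly to
+          * gallium instance divisors of 0 (per-vertex) and 1 (per-instance).
+          */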
+         state->ve[location].instance_divisor = vi->pVertexBindingDescriptions[vi->pVertexAttributeDescriptions[i].binding].inputRate;
+
+         if ((int)location > max_location)
+            max_location = location;
+      }
+      state->num_ve = max_location + 1;
+      state->vb_dirty = true;
+      state->ve_dirty = true;
+   }
+
+   {
+      const VkPipelineInputAssemblyStateCreateInfo *ia = pipeline->graphics_create_info.pInputAssemblyState;
+
+      state->info.mode = vk_conv_topology(ia->topology);
+      state->info.primitive_restart = ia->primitiveRestartEnable;
+   }
+
+   if (pipeline->graphics_create_info.pTessellationState) {
+      const VkPipelineTessellationStateCreateInfo *ts = pipeline->graphics_create_info.pTessellationState;
+      state->info.vertices_per_patch = ts->patchControlPoints;
+   } else
+      state->info.vertices_per_patch = 0;
+
+   if (pipeline->graphics_create_info.pViewportState) {
+      const VkPipelineViewportStateCreateInfo *vpi = pipeline->graphics_create_info.pViewportState;
+      int i;
+
+      state->num_viewports = vpi->viewportCount;
+      state->num_scissors = vpi->scissorCount;
+      state->vp_dirty = true;
+      if (!dynamic_states[VK_DYNAMIC_STATE_VIEWPORT]) {
+         for (i = 0; i < vpi->viewportCount; i++)
+            get_viewport_xform(&vpi->pViewports[i], state->viewports[i].scale, state->viewports[i].translate);
+         state->vp_dirty = true;
+      }
+      if (!dynamic_states[VK_DYNAMIC_STATE_SCISSOR]) {
+         for (i = 0; i < vpi->scissorCount; i++) {
+            const VkRect2D *ss = &vpi->pScissors[i];
+            state->scissors[i].minx = ss->offset.x;
+            state->scissors[i].miny = ss->offset.y;
+            state->scissors[i].maxx = ss->offset.x + ss->extent.width;
+            state->scissors[i].maxy = ss->offset.y + ss->extent.height;
+            state->scissor_dirty = true;
+         }
+
+      }
+   }
+
+   if (fb_samples != state->framebuffer.samples) {
+      state->framebuffer.samples = fb_samples;
+      state->pctx->set_framebuffer_state(state->pctx, &state->framebuffer);
+   }
+}
+
+static void handle_pipeline(struct val_cmd_buffer_entry *cmd,
+                            struct rendering_state *state)
+{
+   struct val_pipeline *pipeline = cmd->u.pipeline.pipeline;
+   if (pipeline->is_compute_pipeline)
+      handle_compute_pipeline(cmd, state);
+   else
+      handle_graphics_pipeline(cmd, state);
+}
+
+static void handle_vertex_buffers(struct val_cmd_buffer_entry *cmd,
+                                  struct rendering_state *state)
+{
+   int i;
+   struct val_cmd_bind_vertex_buffers *vcb = &cmd->u.vertex_buffers;
+   for (i = 0; i < vcb->binding_count; i++) {
+      int idx = i + vcb->first;
+
+      state->vb[idx].buffer_offset = vcb->offsets[i];
+      state->vb[idx].buffer.resource = vcb->buffers[i]->bo;
+   }
+   if (vcb->first < state->start_vb)
+      state->start_vb = vcb->first;
+   if (vcb->first + vcb->binding_count >= state->num_vb)
+      state->num_vb = vcb->first + vcb->binding_count;
+   state->vb_dirty = true;
+}
+
+struct dyn_info {
+   struct {
+      uint16_t const_buffer_count;
+      uint16_t shader_buffer_count;
+      uint16_t sampler_count;
+      uint16_t sampler_view_count;
+      uint16_t image_count;
+   } stage[MESA_SHADER_STAGES];
+
+   uint32_t dyn_index;
+   const uint32_t *dynamic_offsets;
+   uint32_t dynamic_offset_count;
+};
+
+static void fill_sampler(struct pipe_sampler_state *ss,
+                         struct val_sampler *samp)
+{
+   ss->wrap_s = vk_conv_wrap_mode(samp->create_info.addressModeU);
+   ss->wrap_t = vk_conv_wrap_mode(samp->create_info.addressModeV);
+   ss->wrap_r = vk_conv_wrap_mode(samp->create_info.addressModeW);
+   ss->min_img_filter = samp->create_info.minFilter == VK_FILTER_LINEAR ? PIPE_TEX_FILTER_LINEAR : PIPE_TEX_FILTER_NEAREST;
+   ss->min_mip_filter = samp->create_info.mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR ? PIPE_TEX_MIPFILTER_LINEAR : PIPE_TEX_MIPFILTER_NEAREST;
+   ss->mag_img_filter = samp->create_info.magFilter == VK_FILTER_LINEAR ? PIPE_TEX_FILTER_LINEAR : PIPE_TEX_FILTER_NEAREST;
+   ss->min_lod = samp->create_info.minLod;
+   ss->max_lod = samp->create_info.maxLod;
+   ss->lod_bias = samp->create_info.mipLodBias;
+   ss->max_anisotropy = samp->create_info.maxAnisotropy;
+   ss->normalized_coords = !samp->create_info.unnormalizedCoordinates;
+   ss->compare_mode = samp->create_info.compareEnable ? PIPE_TEX_COMPARE_R_TO_TEXTURE : PIPE_TEX_COMPARE_NONE;
+   ss->compare_func = samp->create_info.compareOp;
+   ss->seamless_cube_map = true;
+
+   switch (samp->create_info.borderColor) {
+   case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK:
+   case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK:
+   default:
+      memset(ss->border_color.f, 0, 4 * sizeof(float));
+      break;
+   case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK:
+      ss->border_color.f[0] = ss->border_color.f[1] = ss->border_color.f[2] = 0.0f;
+      ss->border_color.f[3] = 1.0f;
+      break;
+   case VK_BORDER_COLOR_INT_OPAQUE_BLACK:
+      ss->border_color.i[0] = ss->border_color.i[1] = ss->border_color.i[2] = 0;
+      ss->border_color.i[3] = 1;
+      break;
+   case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE:
+      ss->border_color.f[0] = ss->border_color.f[1] = ss->border_color.f[2] = 1.0f;
+      ss->border_color.f[3] = 1.0f;
+      break;
+   case VK_BORDER_COLOR_INT_OPAQUE_WHITE:
+      ss->border_color.i[0] = ss->border_color.i[1] = ss->border_color.i[2] = 1;
+      ss->border_color.i[3] = 1;
+      break;
+   }
+}
+
+static void fill_sampler_stage(struct rendering_state *state,
+                               struct dyn_info *dyn_info,
+                               gl_shader_stage stage,
+                               enum pipe_shader_type p_stage,
+                               int array_idx,
+                               const struct val_descriptor *descriptor,
+                               const struct val_descriptor_set_binding_layout *binding)
+{
+   int ss_idx = binding->stage[stage].sampler_index;
+   if (ss_idx == -1)
+      return;
+   ss_idx += array_idx;
+   ss_idx += dyn_info->stage[stage].sampler_count;
+   fill_sampler(&state->ss[p_stage][ss_idx], descriptor->sampler);
+   if (state->num_sampler_states[p_stage] <= ss_idx)
+      state->num_sampler_states[p_stage] = ss_idx + 1;
+   state->ss_dirty[p_stage] = true;
+}
+
+static void fill_sampler_view_stage(struct rendering_state *state,
+                                    struct dyn_info *dyn_info,
+                                    gl_shader_stage stage,
+                                    enum pipe_shader_type p_stage,
+                                    int array_idx,
+                                    const struct val_descriptor *descriptor,
+                                    const struct val_descriptor_set_binding_layout *binding)
+{
+   int sv_idx = binding->stage[stage].sampler_view_index;
+   if (sv_idx == -1)
+      return;
+   sv_idx += array_idx;
+   sv_idx += dyn_info->stage[stage].sampler_view_count;
+   struct val_image_view *iv = descriptor->image_view;
+   struct pipe_sampler_view templ;
+
+   enum pipe_format pformat;
+   if (iv->subresourceRange.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT)
+      pformat = vk_format_to_pipe(iv->format);
+   else if (iv->subresourceRange.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT)
+      pformat = util_format_stencil_only(vk_format_to_pipe(iv->format));
+   else
+      pformat = vk_format_to_pipe(iv->format);
+   u_sampler_view_default_template(&templ,
+                                   iv->image->bo,
+                                   pformat);
+   if (iv->view_type == VK_IMAGE_VIEW_TYPE_1D)
+      templ.target = PIPE_TEXTURE_1D;
+   if (iv->view_type == VK_IMAGE_VIEW_TYPE_2D)
+      templ.target = PIPE_TEXTURE_2D;
+   if (iv->view_type == VK_IMAGE_VIEW_TYPE_CUBE)
+      templ.target = PIPE_TEXTURE_CUBE;
+   templ.u.tex.first_layer = iv->subresourceRange.baseArrayLayer;
+   templ.u.tex.last_layer = iv->subresourceRange.baseArrayLayer + val_get_layerCount(iv->image, &iv->subresourceRange) - 1;
+   templ.u.tex.first_level = iv->subresourceRange.baseMipLevel;
+   templ.u.tex.last_level = iv->subresourceRange.baseMipLevel + val_get_levelCount(iv->image, &iv->subresourceRange) - 1;
+   if (iv->components.r != VK_COMPONENT_SWIZZLE_IDENTITY)
+      templ.swizzle_r = vk_conv_swizzle(iv->components.r);
+   if (iv->components.g != VK_COMPONENT_SWIZZLE_IDENTITY)
+      templ.swizzle_g = vk_conv_swizzle(iv->components.g);
+   if (iv->components.b != VK_COMPONENT_SWIZZLE_IDENTITY)
+      templ.swizzle_b = vk_conv_swizzle(iv->components.b);
+   if (iv->components.a != VK_COMPONENT_SWIZZLE_IDENTITY)
+      templ.swizzle_a = vk_conv_swizzle(iv->components.a);
+
+   if (util_format_is_depth_or_stencil(templ.format)) {
+      templ.swizzle_r = PIPE_SWIZZLE_X;
+      templ.swizzle_g = PIPE_SWIZZLE_0;
+      templ.swizzle_b = PIPE_SWIZZLE_0;
+   }
+
+   if (state->sv[p_stage][sv_idx])
+      pipe_sampler_view_reference(&state->sv[p_stage][sv_idx], NULL);
+   state->sv[p_stage][sv_idx] = state->pctx->create_sampler_view(state->pctx, iv->image->bo, &templ);
+   if (state->num_sampler_views[p_stage] <= sv_idx)
+      state->num_sampler_views[p_stage] = sv_idx + 1;
+   state->sv_dirty[p_stage] = true;
+}
+
+static void fill_sampler_buffer_view_stage(struct rendering_state *state,
+                                           struct dyn_info *dyn_info,
+                                           gl_shader_stage stage,
+                                           enum pipe_shader_type p_stage,
+                                           int array_idx,
+                                           const struct val_descriptor *descriptor,
+                                           const struct val_descriptor_set_binding_layout *binding)
+{
+   int sv_idx = binding->stage[stage].sampler_view_index;
+   if (sv_idx == -1)
+      return;
+   sv_idx += array_idx;
+   sv_idx += dyn_info->stage[stage].sampler_view_count;
+   struct val_buffer_view *bv = descriptor->buffer_view;
+   struct pipe_sampler_view templ;
+   memset(&templ, 0, sizeof(templ));
+   templ.target = PIPE_BUFFER;
+   templ.swizzle_r = PIPE_SWIZZLE_X;
+   templ.swizzle_g = PIPE_SWIZZLE_Y;
+   templ.swizzle_b = PIPE_SWIZZLE_Z;
+   templ.swizzle_a = PIPE_SWIZZLE_W;
+   templ.format = bv->pformat;
+   templ.u.buf.offset = bv->offset + bv->buffer->offset;
+   templ.u.buf.size = bv->range == VK_WHOLE_SIZE ? (bv->buffer->size - bv->offset) : bv->range;
+   templ.texture = bv->buffer->bo;
+   templ.context = state->pctx;
+
+   if (state->sv[p_stage][sv_idx])
+      pipe_sampler_view_reference(&state->sv[p_stage][sv_idx], NULL);
+   state->sv[p_stage][sv_idx] = state->pctx->create_sampler_view(state->pctx, bv->buffer->bo, &templ);
+   if (state->num_sampler_views[p_stage] <= sv_idx)
+      state->num_sampler_views[p_stage] = sv_idx + 1;
+   state->sv_dirty[p_stage] = true;
+}
+
+static void fill_image_view_stage(struct rendering_state *state,
+                                  struct dyn_info *dyn_info,
+                                  gl_shader_stage stage,
+                                  enum pipe_shader_type p_stage,
+                                  int array_idx,
+                                  const struct val_descriptor *descriptor,
+                                  const struct val_descriptor_set_binding_layout *binding)
+{
+   struct val_image_view *iv = descriptor->image_view;
+   int idx = binding->stage[stage].image_index;
+   if (idx == -1)
+      return;
+   idx += array_idx;
+   idx += dyn_info->stage[stage].image_count;
+   state->iv[p_stage][idx].resource = iv->image->bo;
+   if (iv->subresourceRange.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT)
+      state->iv[p_stage][idx].format = vk_format_to_pipe(iv->format);
+   else if (iv->subresourceRange.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT)
+      state->iv[p_stage][idx].format = util_format_stencil_only(vk_format_to_pipe(iv->format));
+   else
+      state->iv[p_stage][idx].format = vk_format_to_pipe(iv->format);
+   state->iv[p_stage][idx].u.tex.first_layer = iv->subresourceRange.baseArrayLayer;
+   state->iv[p_stage][idx].u.tex.last_layer = iv->subresourceRange.baseArrayLayer + val_get_layerCount(iv->image, &iv->subresourceRange) - 1;
+   state->iv[p_stage][idx].u.tex.level = iv->subresourceRange.baseMipLevel;
+   if (state->num_shader_images[p_stage] <= idx)
+      state->num_shader_images[p_stage] = idx + 1;
+   state->iv_dirty[p_stage] = true;
+}
+
+static void fill_image_buffer_view_stage(struct rendering_state *state,
+                                         struct dyn_info *dyn_info,
+                                         gl_shader_stage stage,
+                                         enum pipe_shader_type p_stage,
+                                         int array_idx,
+                                         const struct val_descriptor *descriptor,
+                                         const struct val_descriptor_set_binding_layout *binding)
+{
+   struct val_buffer_view *bv = descriptor->buffer_view;
+   int idx = binding->stage[stage].image_index;
+   if (idx == -1)
+      return;
+   idx += array_idx;
+   idx += dyn_info->stage[stage].image_count;
+   state->iv[p_stage][idx].resource = bv->buffer->bo;
+   state->iv[p_stage][idx].format = bv->pformat;
+   state->iv[p_stage][idx].u.buf.offset = bv->offset + bv->buffer->offset;
+   state->iv[p_stage][idx].u.buf.size = bv->range == VK_WHOLE_SIZE ? (bv->buffer->size - bv->offset) : bv->range;
+   if (state->num_shader_images[p_stage] <= idx)
+      state->num_shader_images[p_stage] = idx + 1;
+   state->iv_dirty[p_stage] = true;
+}
+
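+/* Bind a single descriptor into the per-stage gallium slots.  The slot index is
+ * the layout's per-stage base index plus the array element, offset by the slots
+ * already consumed by previously bound sets (tracked in dyn_info).
+ */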
+static void handle_descriptor(struct rendering_state *state,
+                              struct dyn_info *dyn_info,
+                              const struct val_descriptor_set_binding_layout *binding,
+                              gl_shader_stage stage,
+                              enum pipe_shader_type p_stage,
+                              int array_idx,
+                              const struct val_descriptor *descriptor)
+{
+   bool is_dynamic = descriptor->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
+      descriptor->type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
+
+   switch (descriptor->type) {
+   case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+   case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
+      fill_image_view_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
+      break;
+   }
+   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: {
+      int idx = binding->stage[stage].const_buffer_index;
+      if (idx == -1)
+         return;
+      idx += array_idx;
+      idx += dyn_info->stage[stage].const_buffer_count;
+      state->const_buffer[p_stage][idx].buffer = descriptor->buf.buffer->bo;
+      state->const_buffer[p_stage][idx].buffer_offset = descriptor->buf.offset + descriptor->buf.buffer->offset;
+      if (is_dynamic) {
+         uint32_t offset = dyn_info->dynamic_offsets[dyn_info->dyn_index + binding->dynamic_index + array_idx];
+         state->const_buffer[p_stage][idx].buffer_offset += offset;
+      }
+      if (descriptor->buf.range == VK_WHOLE_SIZE)
+         state->const_buffer[p_stage][idx].buffer_size = descriptor->buf.buffer->bo->width0 - state->const_buffer[p_stage][idx].buffer_offset;
+      else
+         state->const_buffer[p_stage][idx].buffer_size = descriptor->buf.range;
+      if (state->num_const_bufs[p_stage] <= idx)
+         state->num_const_bufs[p_stage] = idx + 1;
+      state->constbuf_dirty[p_stage] = true;
+      break;
+   }
+   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
+      int idx = binding->stage[stage].shader_buffer_index;
+      if (idx == -1)
+         return;
+      idx += array_idx;
+      idx += dyn_info->stage[stage].shader_buffer_count;
+      state->sb[p_stage][idx].buffer = descriptor->buf.buffer->bo;
+      state->sb[p_stage][idx].buffer_offset = descriptor->buf.offset + descriptor->buf.buffer->offset;
+      if (is_dynamic) {
+         uint32_t offset = dyn_info->dynamic_offsets[dyn_info->dyn_index + binding->dynamic_index + array_idx];
+         state->sb[p_stage][idx].buffer_offset += offset;
+      }
+      if (descriptor->buf.range == VK_WHOLE_SIZE)
+         state->sb[p_stage][idx].buffer_size = descriptor->buf.buffer->bo->width0 - state->sb[p_stage][idx].buffer_offset;
+      else
+         state->sb[p_stage][idx].buffer_size = descriptor->buf.range;
+      if (state->num_shader_buffers[p_stage] <= idx)
+         state->num_shader_buffers[p_stage] = idx + 1;
+      state->sb_dirty[p_stage] = true;
+      break;
+   }
+   case VK_DESCRIPTOR_TYPE_SAMPLER:
+      if (!descriptor->sampler)
+         return;
+      fill_sampler_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
+      break;
+   case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+      fill_sampler_view_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
+      break;
+   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+      fill_sampler_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
+      fill_sampler_view_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
+      break;
+   case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+      fill_sampler_buffer_view_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
+      break;
+   case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+      fill_image_buffer_view_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
+      break;
+   default:
+      fprintf(stderr, "Unhandled descriptor set %d\n", descriptor->type);
+      break;
+   }
+}
+
+static void handle_set_stage(struct rendering_state *state,
+                             struct dyn_info *dyn_info,
+                             const struct val_descriptor_set *set,
+                             gl_shader_stage stage,
+                             enum pipe_shader_type p_stage)
+{
+   int j;
+   for (j = 0; j < set->layout->binding_count; j++) {
+      const struct val_descriptor_set_binding_layout *binding;
+      const struct val_descriptor *descriptor;
+      binding = &set->layout->binding[j];
+
+      if (binding->valid) {
+         for (int i = 0; i < binding->array_size; i++) {
+            descriptor = &set->descriptors[binding->descriptor_index + i];
+            handle_descriptor(state, dyn_info, binding, stage, p_stage, i, descriptor);
+         }
+      }
+   }
+}
+
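+/* Advance the per-stage slot counters (and optionally the dynamic offset index)
+ * past everything a descriptor set layout consumes.
+ */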
+static void increment_dyn_info(struct dyn_info *dyn_info,
+                               struct val_descriptor_set_layout *layout, bool inc_dyn)
+{
+   for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < MESA_SHADER_STAGES; stage++) {
+      dyn_info->stage[stage].const_buffer_count += layout->stage[stage].const_buffer_count;
+      dyn_info->stage[stage].shader_buffer_count += layout->stage[stage].shader_buffer_count;
+      dyn_info->stage[stage].sampler_count += layout->stage[stage].sampler_count;
+      dyn_info->stage[stage].sampler_view_count += layout->stage[stage].sampler_view_count;
+      dyn_info->stage[stage].image_count += layout->stage[stage].image_count;
+   }
+   if (inc_dyn)
+      dyn_info->dyn_index += layout->dynamic_offset_count;
+}
+
+static void handle_compute_descriptor_sets(struct val_cmd_buffer_entry *cmd,
+                                           struct dyn_info *dyn_info,
+                                           struct rendering_state *state)
+{
+   struct val_cmd_bind_descriptor_sets *bds = &cmd->u.descriptor_sets;
+   int i;
+
+   for (i = 0; i < bds->first; i++) {
+      increment_dyn_info(dyn_info, bds->layout->set[i].layout, false);
+   }
+   for (i = 0; i < bds->count; i++) {
+      const struct val_descriptor_set *set = bds->sets[i];
+
+      if (set->layout->shader_stages & VK_SHADER_STAGE_COMPUTE_BIT)
+         handle_set_stage(state, dyn_info, set, MESA_SHADER_COMPUTE, PIPE_SHADER_COMPUTE);
+      increment_dyn_info(dyn_info, bds->layout->set[bds->first + i].layout, true);
+   }
+}
+
+static void handle_descriptor_sets(struct val_cmd_buffer_entry *cmd,
+                                   struct rendering_state *state)
+{
+   struct val_cmd_bind_descriptor_sets *bds = &cmd->u.descriptor_sets;
+   int i;
+   struct dyn_info dyn_info;
+
+   dyn_info.dyn_index = 0;
+   dyn_info.dynamic_offsets = bds->dynamic_offsets;
+   dyn_info.dynamic_offset_count = bds->dynamic_offset_count;
+
+   memset(dyn_info.stage, 0, sizeof(dyn_info.stage));
+   if (bds->bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
+      handle_compute_descriptor_sets(cmd, &dyn_info, state);
+      return;
+   }
+
+   for (i = 0; i < bds->first; i++) {
+      increment_dyn_info(&dyn_info, bds->layout->set[i].layout, false);
+   }
+
+   for (i = 0; i < bds->count; i++) {
+      const struct val_descriptor_set *set = bds->sets[i];
+
+      if (set->layout->shader_stages & VK_SHADER_STAGE_VERTEX_BIT)
+         handle_set_stage(state, &dyn_info, set, MESA_SHADER_VERTEX, PIPE_SHADER_VERTEX);
+
+      if (set->layout->shader_stages & VK_SHADER_STAGE_FRAGMENT_BIT)
+         handle_set_stage(state, &dyn_info, set, MESA_SHADER_FRAGMENT, PIPE_SHADER_FRAGMENT);
+
+      if (set->layout->shader_stages & VK_SHADER_STAGE_GEOMETRY_BIT)
+         handle_set_stage(state, &dyn_info, set, MESA_SHADER_GEOMETRY, PIPE_SHADER_GEOMETRY);
+
+      if (set->layout->shader_stages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
+         handle_set_stage(state, &dyn_info, set, MESA_SHADER_TESS_CTRL, PIPE_SHADER_TESS_CTRL);
+
+      if (set->layout->shader_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
+         handle_set_stage(state, &dyn_info, set, MESA_SHADER_TESS_EVAL, PIPE_SHADER_TESS_EVAL);
+      increment_dyn_info(&dyn_info, bds->layout->set[bds->first + i].layout, true);
+   }
+}
+
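+/* Lazily create and cache a pipe_surface for an image view the first time it
+ * is needed as a framebuffer attachment or clear target.
+ */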
+static void add_img_view_surface(struct rendering_state *state,
+                                 struct val_image_view *imgv, VkFormat format, int width, int height)
+{
+   if (!imgv->surface) {
+      struct pipe_surface template;
+
+      memset(&template, 0, sizeof(struct pipe_surface));
+
+      template.format = vk_format_to_pipe(format);
+      template.width = width;
+      template.height = height;
+      template.u.tex.first_layer = imgv->subresourceRange.baseArrayLayer;
+      template.u.tex.last_layer = imgv->subresourceRange.baseArrayLayer + val_get_layerCount(imgv->image, &imgv->subresourceRange) - 1;
+      template.u.tex.level = imgv->subresourceRange.baseMipLevel;
+
+      if (template.format == PIPE_FORMAT_NONE)
+         return;
+      imgv->surface = state->pctx->create_surface(state->pctx,
+                                                  imgv->image->bo, &template);
+   }
+}
+
+static inline bool
+attachment_needs_clear(struct rendering_state *state,
+                       uint32_t a)
+{
+   return (a != VK_ATTACHMENT_UNUSED &&
+           state->attachments[a].pending_clear_aspects);
+}
+
+static bool
+subpass_needs_clear(struct rendering_state *state)
+{
+   uint32_t a;
+   struct val_subpass *subpass = &state->pass->subpasses[state->subpass];
+   for (uint32_t i = 0; i < subpass->color_count; i++) {
+      a = subpass->color_attachments[i].attachment;
+      if (attachment_needs_clear(state, a))
+         return true;
+   }
+   if (subpass->depth_stencil_attachment) {
+      a = subpass->depth_stencil_attachment->attachment;
+      if (attachment_needs_clear(state, a))
+         return true;
+   }
+   return false;
+}
+
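+/* Perform any pending attachment clears for the current subpass, restricted
+ * to the render area.
+ */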
+static void render_subpass_clear(struct rendering_state *state)
+{
+   struct val_subpass *subpass = &state->pass->subpasses[state->subpass];
+
+   if (!subpass_needs_clear(state))
+      return;
+
+   for (unsigned i = 0; i < subpass->color_count; i++) {
+      uint32_t a = subpass->color_attachments[i].attachment;
+
+      if (!attachment_needs_clear(state, a))
+         continue;
+
+      struct val_render_pass_attachment *att = &state->pass->attachments[a];
+      struct val_image_view *imgv = state->vk_framebuffer->attachments[a];
+
+      add_img_view_surface(state, imgv, att->format, state->framebuffer.width, state->framebuffer.height);
+
+      union pipe_color_union color_clear_val = { 0 };
+      const VkClearValue value = state->attachments[a].clear_value;
+      color_clear_val.ui[0] = value.color.uint32[0];
+      color_clear_val.ui[1] = value.color.uint32[1];
+      color_clear_val.ui[2] = value.color.uint32[2];
+      color_clear_val.ui[3] = value.color.uint32[3];
+      state->pctx->clear_render_target(state->pctx,
+                                       imgv->surface,
+                                       &color_clear_val,
+                                       state->render_area.offset.x, state->render_area.offset.y,
+                                       state->render_area.extent.width, state->render_area.extent.height,
+                                       false);
+
+      state->attachments[a].pending_clear_aspects = 0;
+   }
+
+   if (subpass->depth_stencil_attachment) {
+      uint32_t ds = subpass->depth_stencil_attachment->attachment;
+
+      if (!attachment_needs_clear(state, ds))
+         return;
+
+      struct val_render_pass_attachment *att = &state->pass->attachments[ds];
+      struct val_image_view *imgv = state->vk_framebuffer->attachments[ds];
+
+      add_img_view_surface(state, imgv, att->format, state->framebuffer.width, state->framebuffer.height);
+
+      if (util_format_is_depth_or_stencil(imgv->surface->format)) {
+         const struct util_format_description *desc = util_format_description(imgv->surface->format);
+         double dclear_val = 0;
+         uint32_t sclear_val = 0;
+         uint32_t ds_clear_flags = 0;
+
+         if (util_format_has_stencil(desc) && att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
+            ds_clear_flags |= PIPE_CLEAR_STENCIL;
+            sclear_val = state->attachments[ds].clear_value.depthStencil.stencil;
+         }
+         if (util_format_has_depth(desc) && att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
+            ds_clear_flags |= PIPE_CLEAR_DEPTH;
+            dclear_val = state->attachments[ds].clear_value.depthStencil.depth;
+         }
+
+         if (ds_clear_flags)
+            state->pctx->clear_depth_stencil(state->pctx,
+                                             imgv->surface,
+                                             ds_clear_flags,
+                                             dclear_val, sclear_val,
+                                             state->render_area.offset.x, state->render_area.offset.y,
+                                             state->render_area.extent.width, state->render_area.extent.height,
+                                             false);
+         state->attachments[ds].pending_clear_aspects = 0;
+      }
+   }
+}
+
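+/* At the end of a subpass, resolve its multisampled color attachments into
+ * the corresponding resolve attachments with a nearest-filter blit.
+ */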
+static void render_pass_resolve(struct rendering_state *state)
+{
+   struct val_subpass *subpass = &state->pass->subpasses[state->subpass];
+   if (!subpass->has_color_resolve)
+      return;
+   for (uint32_t i = 0; i < subpass->color_count; i++) {
+      struct val_subpass_attachment src_att = subpass->color_attachments[i];
+      struct val_subpass_attachment dst_att = subpass->resolve_attachments[i];
+
+      if (dst_att.attachment == VK_ATTACHMENT_UNUSED)
+         continue;
+
+      struct val_image_view *src_imgv = state->vk_framebuffer->attachments[src_att.attachment];
+      struct val_image_view *dst_imgv = state->vk_framebuffer->attachments[dst_att.attachment];
+
+      struct pipe_blit_info info;
+      memset(&info, 0, sizeof(info));
+
+      info.src.resource = src_imgv->image->bo;
+      info.dst.resource = dst_imgv->image->bo;
+      info.src.format = src_imgv->pformat;
+      info.dst.format = dst_imgv->pformat;
+      info.filter = PIPE_TEX_FILTER_NEAREST;
+      info.mask = PIPE_MASK_RGBA;
+      info.src.box.x = state->render_area.offset.x;
+      info.src.box.y = state->render_area.offset.y;
+      info.src.box.width = state->render_area.extent.width;
+      info.src.box.height = state->render_area.extent.height;
+      info.src.box.depth = state->vk_framebuffer->layers;
+
+      info.dst.box = info.src.box;
+
+      state->pctx->blit(state->pctx, &info);
+   }
+}
+
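+/* Enter a subpass: perform its pending clears, then bind its color and
+ * depth/stencil image views as the gallium framebuffer state.
+ */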
+static void begin_render_subpass(struct rendering_state *state,
+                                 int subpass_idx)
+{
+   state->subpass = subpass_idx;
+
+   render_subpass_clear(state);
+
+   state->framebuffer.nr_cbufs = 0;
+
+   struct val_subpass *subpass = &state->pass->subpasses[subpass_idx];
+   for (unsigned i = 0; i < subpass->color_count; i++) {
+      struct val_subpass_attachment *color_att = &subpass->color_attachments[i];
+      if (color_att->attachment != VK_ATTACHMENT_UNUSED) {
+         struct val_image_view *imgv = state->vk_framebuffer->attachments[color_att->attachment];
+
+         add_img_view_surface(state, imgv, state->pass->attachments[color_att->attachment].format, state->framebuffer.width, state->framebuffer.height);
+         state->framebuffer.cbufs[state->framebuffer.nr_cbufs] = imgv->surface;
+      } else
+         state->framebuffer.cbufs[state->framebuffer.nr_cbufs] = NULL;
+      state->framebuffer.nr_cbufs++;
+   }
+
+   if (subpass->depth_stencil_attachment) {
+      struct val_subpass_attachment *ds_att = subpass->depth_stencil_attachment;
+
+      if (ds_att->attachment != VK_ATTACHMENT_UNUSED) {
+         struct val_image_view *imgv = state->vk_framebuffer->attachments[ds_att->attachment];
+         add_img_view_surface(state, imgv, state->pass->attachments[ds_att->attachment].format, state->framebuffer.width, state->framebuffer.height);
+         state->framebuffer.zsbuf = imgv->surface;
+      }
+   }
+
+   state->pctx->set_framebuffer_state(state->pctx,
+                                      &state->framebuffer);
+}
+
+static void handle_begin_render_pass(struct val_cmd_buffer_entry *cmd,
+                                     struct rendering_state *state)
+{
+   state->pass = cmd->u.begin_render_pass.render_pass;
+   state->vk_framebuffer = cmd->u.begin_render_pass.framebuffer;
+   state->render_area = cmd->u.begin_render_pass.render_area;
+
+   state->attachments = cmd->u.begin_render_pass.attachments;
+
+   state->framebuffer.width = state->vk_framebuffer->width;
+   state->framebuffer.height = state->vk_framebuffer->height;
+   state->framebuffer.layers = state->vk_framebuffer->layers;
+
+   begin_render_subpass(state, 0);
+}
+
+static void handle_end_render_pass(struct val_cmd_buffer_entry *cmd,
+                                   struct rendering_state *state)
+{
+   state->pctx->flush(state->pctx, NULL, 0);
+
+   render_pass_resolve(state);
+
+   state->attachments = NULL;
+   state->pass = NULL;
+   state->subpass = 0;
+}
+
+static void handle_next_subpass(struct val_cmd_buffer_entry *cmd,
+                                struct rendering_state *state)
+{
+   state->pctx->flush(state->pctx, NULL, 0);
+   render_pass_resolve(state);
+   state->subpass++;
+   begin_render_subpass(state, state->subpass);
+}
+
+static void handle_draw(struct val_cmd_buffer_entry *cmd,
+                        struct rendering_state *state)
+{
+   state->info.index_size = 0;
+   state->info.indirect = NULL;
+   state->info.index.resource = NULL;
+   state->info.start = cmd->u.draw.first_vertex;
+   state->info.count = cmd->u.draw.vertex_count;
+   state->info.start_instance = cmd->u.draw.first_instance;
+   state->info.instance_count = cmd->u.draw.instance_count;
+   state->pctx->draw_vbo(state->pctx, &state->info);
+}
+
+static void handle_set_viewport(struct val_cmd_buffer_entry *cmd,
+                                struct rendering_state *state)
+{
+   int i;
+
+   for (i = 0; i < cmd->u.set_viewport.viewport_count; i++) {
+      int idx = i + cmd->u.set_viewport.first_viewport;
+      const VkViewport *vp = &cmd->u.set_viewport.viewports[i];
+      get_viewport_xform(vp, state->viewports[idx].scale, state->viewports[idx].translate);
+   }
+   state->vp_dirty = true;
+}
+
+static void handle_set_scissor(struct val_cmd_buffer_entry *cmd,
+                               struct rendering_state *state)
+{
+   int i;
+
+   for (i = 0; i < cmd->u.set_scissor.scissor_count; i++) {
+      int idx = i + cmd->u.set_scissor.first_scissor;
+      const VkRect2D *ss = &cmd->u.set_scissor.scissors[i];
+      state->scissors[idx].minx = ss->offset.x;
+      state->scissors[idx].miny = ss->offset.y;
+      state->scissors[idx].maxx = ss->offset.x + ss->extent.width;
+      state->scissors[idx].maxy = ss->offset.y + ss->extent.height;
+   }
+   state->scissor_dirty = true;
+}
+
+static void handle_set_line_width(struct val_cmd_buffer_entry *cmd,
+                                  struct rendering_state *state)
+{
+   state->rs_state.line_width = cmd->u.set_line_width.line_width;
+   state->rs_dirty = true;
+}
+
+static void handle_set_depth_bias(struct val_cmd_buffer_entry *cmd,
+                                  struct rendering_state *state)
+{
+   state->rs_state.offset_units = cmd->u.set_depth_bias.constant_factor;
+   state->rs_state.offset_scale = cmd->u.set_depth_bias.slope_factor;
+   state->rs_state.offset_clamp = cmd->u.set_depth_bias.clamp;
+   state->rs_dirty = true;
+}
+
+static void handle_set_blend_constants(struct val_cmd_buffer_entry *cmd,
+                                       struct rendering_state *state)
+{
+   memcpy(state->blend_color.color, cmd->u.set_blend_constants.blend_constants, 4 * sizeof(float));
+   state->blend_color_dirty = true;
+}
+
+static void handle_set_depth_bounds(struct val_cmd_buffer_entry *cmd,
+                                    struct rendering_state *state)
+{
+   state->dsa_state.depth.bounds_min = cmd->u.set_depth_bounds.min_depth;
+   state->dsa_state.depth.bounds_max = cmd->u.set_depth_bounds.max_depth;
+   state->dsa_dirty = true;
+}
+
+static void handle_set_stencil_compare_mask(struct val_cmd_buffer_entry *cmd,
+                                            struct rendering_state *state)
+{
+   if (cmd->u.stencil_vals.face_mask & VK_STENCIL_FACE_FRONT_BIT)
+      state->dsa_state.stencil[0].valuemask = cmd->u.stencil_vals.value;
+   if (cmd->u.stencil_vals.face_mask & VK_STENCIL_FACE_BACK_BIT)
+      state->dsa_state.stencil[1].valuemask = cmd->u.stencil_vals.value;
+   state->dsa_dirty = true;
+}
+
+static void handle_set_stencil_write_mask(struct val_cmd_buffer_entry *cmd,
+                                          struct rendering_state *state)
+{
+   if (cmd->u.stencil_vals.face_mask & VK_STENCIL_FACE_FRONT_BIT)
+      state->dsa_state.stencil[0].writemask = cmd->u.stencil_vals.value;
+   if (cmd->u.stencil_vals.face_mask & VK_STENCIL_FACE_BACK_BIT)
+      state->dsa_state.stencil[1].writemask = cmd->u.stencil_vals.value;
+   state->dsa_dirty = true;
+}
+
+static void handle_set_stencil_reference(struct val_cmd_buffer_entry *cmd,
+                                         struct rendering_state *state)
+{
+   if (cmd->u.stencil_vals.face_mask & VK_STENCIL_FACE_FRONT_BIT)
+      state->stencil_ref.ref_value[0] = cmd->u.stencil_vals.value;
+   if (cmd->u.stencil_vals.face_mask & VK_STENCIL_FACE_BACK_BIT)
+      state->stencil_ref.ref_value[1] = cmd->u.stencil_vals.value;
+   state->stencil_ref_dirty = true;
+}
+
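+/* CPU copy of a rectangle between mismatched depth/stencil formats,
+ * unpacking/packing the depth or stencil plane with the util_format helpers.
+ */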
+static void
+copy_depth_rect(ubyte * dst,
+                enum pipe_format dst_format,
+                unsigned dst_stride,
+                unsigned dst_x,
+                unsigned dst_y,
+                unsigned width,
+                unsigned height,
+                const ubyte * src,
+                enum pipe_format src_format,
+                int src_stride,
+                unsigned src_x,
+                unsigned src_y)
+{
+   int src_stride_pos = src_stride < 0 ? -src_stride : src_stride;
+   int src_blocksize = util_format_get_blocksize(src_format);
+   int src_blockwidth = util_format_get_blockwidth(src_format);
+   int src_blockheight = util_format_get_blockheight(src_format);
+   int dst_blocksize = util_format_get_blocksize(dst_format);
+   int dst_blockwidth = util_format_get_blockwidth(dst_format);
+   int dst_blockheight = util_format_get_blockheight(dst_format);
+
+   assert(src_blocksize > 0);
+   assert(src_blockwidth > 0);
+   assert(src_blockheight > 0);
+
+   dst_x /= dst_blockwidth;
+   dst_y /= dst_blockheight;
+   width = (width + src_blockwidth - 1)/src_blockwidth;
+   height = (height + src_blockheight - 1)/src_blockheight;
+   src_x /= src_blockwidth;
+   src_y /= src_blockheight;
+
+   dst += dst_x * dst_blocksize;
+   src += src_x * src_blocksize;
+   dst += dst_y * dst_stride;
+   src += src_y * src_stride_pos;
+
+   if (dst_format == PIPE_FORMAT_S8_UINT) {
+      if (src_format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
+         util_format_z32_float_s8x24_uint_unpack_s_8uint(dst, dst_stride,
+                                                         src, src_stride,
+                                                         width, height);
+      } else if (src_format == PIPE_FORMAT_Z24_UNORM_S8_UINT) {
+         util_format_z24_unorm_s8_uint_unpack_s_8uint(dst, dst_stride,
+                                                      src, src_stride,
+                                                      width, height);
+      }
+   } else if (dst_format == PIPE_FORMAT_Z24X8_UNORM) {
+      util_format_z24_unorm_s8_uint_unpack_z24(dst, dst_stride,
+                                               src, src_stride,
+                                               width, height);
+   } else if (dst_format == PIPE_FORMAT_Z32_FLOAT) {
+      if (src_format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
+         util_format_z32_float_s8x24_uint_unpack_z_float((float *)dst, dst_stride,
+                                                         src, src_stride,
+                                                         width, height);
+      }
+   } else if (dst_format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
+      if (src_format == PIPE_FORMAT_Z32_FLOAT)
+         util_format_z32_float_s8x24_uint_pack_z_float(dst, dst_stride,
+                                                       (float *)src, src_stride,
+                                                       width, height);
+      else if (src_format == PIPE_FORMAT_S8_UINT)
+         util_format_z32_float_s8x24_uint_pack_s_8uint(dst, dst_stride,
+                                                       src, src_stride,
+                                                       width, height);
+   } else if (dst_format == PIPE_FORMAT_Z24_UNORM_S8_UINT) {
+      if (src_format == PIPE_FORMAT_S8_UINT)
+         util_format_z24_unorm_s8_uint_pack_s_8uint(dst, dst_stride,
+                                                    src, src_stride,
+                                                    width, height);
+      if (src_format == PIPE_FORMAT_Z24X8_UNORM)
+         util_format_z24_unorm_s8_uint_pack_z24(dst, dst_stride,
+                                                src, src_stride,
+                                                width, height);
+   }
+}
+
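+/* Apply copy_depth_rect to every slice of a 3D box. */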
+static void
+copy_depth_box(ubyte *dst,
+               enum pipe_format dst_format,
+               unsigned dst_stride, unsigned dst_slice_stride,
+               unsigned dst_x, unsigned dst_y, unsigned dst_z,
+               unsigned width, unsigned height, unsigned depth,
+               const ubyte * src,
+               enum pipe_format src_format,
+               int src_stride, unsigned src_slice_stride,
+               unsigned src_x, unsigned src_y, unsigned src_z)
+{
+   unsigned z;
+   dst += dst_z * dst_slice_stride;
+   src += src_z * src_slice_stride;
+   for (z = 0; z < depth; ++z) {
+      copy_depth_rect(dst,
+                      dst_format,
+                      dst_stride,
+                      dst_x, dst_y,
+                      width, height,
+                      src,
+                      src_format,
+                      src_stride,
+                      src_x, src_y);
+
+      dst += dst_slice_stride;
+      src += src_slice_stride;
+   }
+}
+
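+/* vkCmdCopyImageToBuffer: map both resources and copy each region on the
+ * CPU, taking the depth/stencil conversion path when only one aspect of a
+ * combined format is requested.
+ */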
+static void handle_copy_image_to_buffer(struct val_cmd_buffer_entry *cmd,
+                                        struct rendering_state *state)
+{
+   int i;
+   struct val_cmd_copy_image_to_buffer *copycmd = &cmd->u.img_to_buffer;
+   struct pipe_box box, dbox;
+   struct pipe_transfer *src_t, *dst_t;
+   ubyte *src_data, *dst_data;
+
+   state->pctx->flush(state->pctx, NULL, 0);
+
+   for (i = 0; i < copycmd->region_count; i++) {
+
+      box.x = copycmd->regions[i].imageOffset.x;
+      box.y = copycmd->regions[i].imageOffset.y;
+      box.z = copycmd->src->type == VK_IMAGE_TYPE_3D ? copycmd->regions[i].imageOffset.z : copycmd->regions[i].imageSubresource.baseArrayLayer;
+      box.width = copycmd->regions[i].imageExtent.width;
+      box.height = copycmd->regions[i].imageExtent.height;
+      box.depth = copycmd->src->type == VK_IMAGE_TYPE_3D ? copycmd->regions[i].imageExtent.depth : copycmd->regions[i].imageSubresource.layerCount;
+
+      src_data = state->pctx->transfer_map(state->pctx,
+                                           copycmd->src->bo,
+                                           copycmd->regions[i].imageSubresource.mipLevel,
+                                           PIPE_TRANSFER_READ,
+                                           &box,
+                                           &src_t);
+
+      dbox.x = copycmd->regions[i].bufferOffset;
+      dbox.y = 0;
+      dbox.z = 0;
+      dbox.width = copycmd->dst->bo->width0;
+      dbox.height = 1;
+      dbox.depth = 1;
+      dst_data = state->pctx->transfer_map(state->pctx,
+                                           copycmd->dst->bo,
+                                           0,
+                                           PIPE_TRANSFER_WRITE,
+                                           &dbox,
+                                           &dst_t);
+
+      enum pipe_format src_format = copycmd->src->bo->format;
+      enum pipe_format dst_format = src_format;
+      if (util_format_is_depth_or_stencil(src_format)) {
+         if (copycmd->regions[i].imageSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) {
+            dst_format = util_format_get_depth_only(src_format);
+         } else if (copycmd->regions[i].imageSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) {
+            dst_format = PIPE_FORMAT_S8_UINT;
+         }
+      }
+
+      unsigned buffer_row_len = util_format_get_stride(dst_format, copycmd->regions[i].bufferRowLength);
+      if (buffer_row_len == 0)
+         buffer_row_len = util_format_get_stride(dst_format, copycmd->regions[i].imageExtent.width);
+      unsigned buffer_image_height = copycmd->regions[i].bufferImageHeight;
+      if (buffer_image_height == 0)
+         buffer_image_height = copycmd->regions[i].imageExtent.height;
+
+      if (src_format != dst_format) {
+         copy_depth_box(dst_data, dst_format,
+                        buffer_row_len, buffer_row_len * buffer_image_height,
+                        0, 0, 0,
+                        copycmd->regions[i].imageExtent.width,
+                        copycmd->regions[i].imageExtent.height,
+                        box.depth,
+                        src_data, src_format, src_t->stride, src_t->layer_stride, 0, 0, 0);
+      } else {
+         util_copy_box((ubyte *)dst_data, src_format,
+                       buffer_row_len, buffer_row_len * buffer_image_height,
+                       0, 0, 0,
+                       copycmd->regions[i].imageExtent.width,
+                       copycmd->regions[i].imageExtent.height,
+                       box.depth,
+                       src_data, src_t->stride, src_t->layer_stride, 0, 0, 0);
+      }
+      state->pctx->transfer_unmap(state->pctx, src_t);
+      state->pctx->transfer_unmap(state->pctx, dst_t);
+   }
+}
+
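+/* vkCmdCopyBufferToImage: the inverse of the copy above, packing buffer data
+ * into the image's depth/stencil format when a single aspect is written.
+ */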
+static void handle_copy_buffer_to_image(struct val_cmd_buffer_entry *cmd,
+                                        struct rendering_state *state)
+{
+   int i;
+   struct val_cmd_copy_buffer_to_image *copycmd = &cmd->u.buffer_to_img;
+   struct pipe_box box, sbox;
+   struct pipe_transfer *src_t, *dst_t;
+   void *src_data, *dst_data;
+
+   state->pctx->flush(state->pctx, NULL, 0);
+
+   for (i = 0; i < copycmd->region_count; i++) {
+
+      sbox.x = copycmd->regions[i].bufferOffset;
+      sbox.y = 0;
+      sbox.z = 0;
+      sbox.width = copycmd->src->bo->width0;
+      sbox.height = 1;
+      sbox.depth = 1;
+      src_data = state->pctx->transfer_map(state->pctx,
+                                           copycmd->src->bo,
+                                           0,
+                                           PIPE_TRANSFER_READ,
+                                           &sbox,
+                                           &src_t);
+
+
+      box.x = copycmd->regions[i].imageOffset.x;
+      box.y = copycmd->regions[i].imageOffset.y;
+      box.z = copycmd->dst->type == VK_IMAGE_TYPE_3D ? copycmd->regions[i].imageOffset.z : copycmd->regions[i].imageSubresource.baseArrayLayer;
+      box.width = copycmd->regions[i].imageExtent.width;
+      box.height = copycmd->regions[i].imageExtent.height;
+      box.depth = copycmd->dst->type == VK_IMAGE_TYPE_3D ? copycmd->regions[i].imageExtent.depth : copycmd->regions[i].imageSubresource.layerCount;
+
+      dst_data = state->pctx->transfer_map(state->pctx,
+                                           copycmd->dst->bo,
+                                           copycmd->regions[i].imageSubresource.mipLevel,
+                                           PIPE_TRANSFER_WRITE,
+                                           &box,
+                                           &dst_t);
+
+      enum pipe_format dst_format = copycmd->dst->bo->format;
+      enum pipe_format src_format = dst_format;
+      if (util_format_is_depth_or_stencil(dst_format)) {
+         if (copycmd->regions[i].imageSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) {
+            src_format = util_format_get_depth_only(copycmd->dst->bo->format);
+         } else if (copycmd->regions[i].imageSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) {
+            src_format = PIPE_FORMAT_S8_UINT;
+         }
+      }
+
+      unsigned buffer_row_len = util_format_get_stride(src_format, copycmd->regions[i].bufferRowLength);
+      if (buffer_row_len == 0)
+         buffer_row_len = util_format_get_stride(src_format, copycmd->regions[i].imageExtent.width);
+      unsigned buffer_image_height = copycmd->regions[i].bufferImageHeight;
+      if (buffer_image_height == 0)
+         buffer_image_height = copycmd->regions[i].imageExtent.height;
+
+      if (src_format != dst_format) {
+         copy_depth_box(dst_data, dst_format,
+                        dst_t->stride, dst_t->layer_stride,
+                        0, 0, 0,
+                        copycmd->regions[i].imageExtent.width,
+                        copycmd->regions[i].imageExtent.height,
+                        box.depth,
+                        src_data, src_format,
+                        buffer_row_len, buffer_row_len * buffer_image_height, 0, 0, 0);
+      } else {
+         util_copy_box(dst_data, dst_format,
+                       dst_t->stride, dst_t->layer_stride,
+                       0, 0, 0,
+                       copycmd->regions[i].imageExtent.width,
+                       copycmd->regions[i].imageExtent.height,
+                       box.depth,
+                       src_data,
+                       buffer_row_len, buffer_row_len * buffer_image_height, 0, 0, 0);
+      }
+      state->pctx->transfer_unmap(state->pctx, src_t);
+      state->pctx->transfer_unmap(state->pctx, dst_t);
+   }
+}
+
+static void handle_copy_image(struct val_cmd_buffer_entry *cmd,
+                              struct rendering_state *state)
+{
+   int i;
+   struct val_cmd_copy_image *copycmd = &cmd->u.copy_image;
+
+   state->pctx->flush(state->pctx, NULL, 0);
+
+   for (i = 0; i < copycmd->region_count; i++) {
+      struct pipe_box src_box;
+      src_box.x = copycmd->regions[i].srcOffset.x;
+      src_box.y = copycmd->regions[i].srcOffset.y;
+      src_box.z = copycmd->regions[i].srcOffset.z + copycmd->regions[i].srcSubresource.baseArrayLayer;
+      src_box.width = copycmd->regions[i].extent.width;
+      src_box.height = copycmd->regions[i].extent.height;
+      src_box.depth = copycmd->regions[i].extent.depth;
+
+      state->pctx->resource_copy_region(state->pctx, copycmd->dst->bo,
+                                        copycmd->regions[i].dstSubresource.mipLevel,
+                                        copycmd->regions[i].dstOffset.x,
+                                        copycmd->regions[i].dstOffset.y,
+                                        copycmd->regions[i].dstOffset.z + copycmd->regions[i].dstSubresource.baseArrayLayer,
+                                        copycmd->src->bo,
+                                        copycmd->regions[i].srcSubresource.mipLevel,
+                                        &src_box);
+   }
+}
+
+static void handle_copy_buffer(struct val_cmd_buffer_entry *cmd,
+                               struct rendering_state *state)
+{
+   int i;
+   struct val_cmd_copy_buffer *copycmd = &cmd->u.copy_buffer;
+
+   for (i = 0; i < copycmd->region_count; i++) {
+      struct pipe_box box = { 0 };
+      u_box_1d(copycmd->regions[i].srcOffset, copycmd->regions[i].size, &box);
+      state->pctx->resource_copy_region(state->pctx, copycmd->dst->bo, 0,
+                                        copycmd->regions[i].dstOffset, 0, 0,
+                                        copycmd->src->bo, 0, &box);
+   }
+}
+
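+/* vkCmdBlitImage via pipe->blit(): destination boxes are normalized to
+ * positive extents, and any mirroring is expressed through the (possibly
+ * negative) source box dimensions.
+ */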
+static void handle_blit_image(struct val_cmd_buffer_entry *cmd,
+                              struct rendering_state *state)
+{
+   int i;
+   struct val_cmd_blit_image *blitcmd = &cmd->u.blit_image;
+   struct pipe_blit_info info;
+
+   memset(&info, 0, sizeof(info));
+
+   state->pctx->flush(state->pctx, NULL, 0);
+   info.src.resource = blitcmd->src->bo;
+   info.dst.resource = blitcmd->dst->bo;
+   info.src.format = blitcmd->src->bo->format;
+   info.dst.format = blitcmd->dst->bo->format;
+   info.mask = util_format_is_depth_or_stencil(info.src.format) ? PIPE_MASK_ZS : PIPE_MASK_RGBA;
+   info.filter = blitcmd->filter == VK_FILTER_NEAREST ? PIPE_TEX_FILTER_NEAREST : PIPE_TEX_FILTER_LINEAR;
+   for (i = 0; i < blitcmd->region_count; i++) {
+      int srcX0, srcX1, srcY0, srcY1;
+      unsigned dstX0, dstX1, dstY0, dstY1;
+
+      srcX0 = blitcmd->regions[i].srcOffsets[0].x;
+      srcX1 = blitcmd->regions[i].srcOffsets[1].x;
+      srcY0 = blitcmd->regions[i].srcOffsets[0].y;
+      srcY1 = blitcmd->regions[i].srcOffsets[1].y;
+
+      dstX0 = blitcmd->regions[i].dstOffsets[0].x;
+      dstX1 = blitcmd->regions[i].dstOffsets[1].x;
+      dstY0 = blitcmd->regions[i].dstOffsets[0].y;
+      dstY1 = blitcmd->regions[i].dstOffsets[1].y;
+
+      if (dstX0 < dstX1) {
+         info.dst.box.x = dstX0;
+         info.src.box.x = srcX0;
+         info.dst.box.width = dstX1 - dstX0;
+         info.src.box.width = srcX1 - srcX0;
+      } else {
+         info.dst.box.x = dstX1;
+         info.src.box.x = srcX1;
+         info.dst.box.width = dstX0 - dstX1;
+         info.src.box.width = srcX0 - srcX1;
+      }
+
+      if (dstY0 < dstY1) {
+         info.dst.box.y = dstY0;
+         info.src.box.y = srcY0;
+         info.dst.box.height = dstY1 - dstY0;
+         info.src.box.height = srcY1 - srcY0;
+      } else {
+         info.dst.box.y = dstY1;
+         info.src.box.y = srcY1;
+         info.dst.box.height = dstY0 - dstY1;
+         info.src.box.height = srcY0 - srcY1;
+      }
+      info.src.level = blitcmd->regions[i].srcSubresource.mipLevel;
+      info.src.box.z = blitcmd->regions[i].srcOffsets[0].z + blitcmd->regions[i].srcSubresource.baseArrayLayer;
+      if (blitcmd->src->bo->target == PIPE_TEXTURE_3D)
+         info.src.box.depth = blitcmd->regions[i].srcOffsets[1].z - blitcmd->regions[i].srcOffsets[0].z;
+      else
+         info.src.box.depth = blitcmd->regions[i].srcSubresource.layerCount;
+
+      info.dst.level = blitcmd->regions[i].dstSubresource.mipLevel;
+      info.dst.box.z = blitcmd->regions[i].dstOffsets[0].z + blitcmd->regions[i].dstSubresource.baseArrayLayer;
+      if (blitcmd->dst->bo->target == PIPE_TEXTURE_3D)
+         info.dst.box.depth = blitcmd->regions[i].dstOffsets[1].z - blitcmd->regions[i].dstOffsets[0].z;
+      else
+         info.dst.box.depth = blitcmd->regions[i].dstSubresource.layerCount;
+      state->pctx->blit(state->pctx, &info);
+   }
+}
+
+static void handle_fill_buffer(struct val_cmd_buffer_entry *cmd,
+                               struct rendering_state *state)
+{
+   struct val_cmd_fill_buffer *fillcmd = &cmd->u.fill_buffer;
+   uint32_t *dst;
+   struct pipe_transfer *dst_t;
+   struct pipe_box box;
+   uint32_t size = fillcmd->fill_size;
+
+   if (fillcmd->fill_size == VK_WHOLE_SIZE)
+      size = fillcmd->buffer->bo->width0 - fillcmd->offset;
+
+   u_box_1d(fillcmd->offset, size, &box);
+   dst = state->pctx->transfer_map(state->pctx,
+                                   fillcmd->buffer->bo,
+                                   0,
+                                   PIPE_TRANSFER_WRITE,
+                                   &box,
+                                   &dst_t);
+
+   for (unsigned i = 0; i < size / 4; i++)
+      dst[i] = fillcmd->data;
+   state->pctx->transfer_unmap(state->pctx, dst_t);
+}
+
+static void handle_update_buffer(struct val_cmd_buffer_entry *cmd,
+                                 struct rendering_state *state)
+{
+   struct val_cmd_update_buffer *updcmd = &cmd->u.update_buffer;
+   uint32_t *dst;
+   struct pipe_transfer *dst_t;
+   struct pipe_box box;
+
+   u_box_1d(updcmd->offset, updcmd->data_size, &box);
+   dst = state->pctx->transfer_map(state->pctx,
+                                   updcmd->buffer->bo,
+                                   0,
+                                   PIPE_TRANSFER_WRITE,
+                                   &box,
+                                   &dst_t);
+
+   memcpy(dst, updcmd->data, updcmd->data_size);
+   state->pctx->transfer_unmap(state->pctx, dst_t);
+}
+
+static void handle_draw_indexed(struct val_cmd_buffer_entry *cmd,
+                                struct rendering_state *state)
+{
+   state->info.indirect = NULL;
+   state->info.min_index = 0;
+   state->info.max_index = ~0;
+   state->info.index_size = state->index_size;
+   state->info.index.resource = state->index_buffer;
+   state->info.start = (state->index_offset / state->index_size) + cmd->u.draw_indexed.first_index;
+   state->info.count = cmd->u.draw_indexed.index_count;
+   state->info.start_instance = cmd->u.draw_indexed.first_instance;
+   state->info.instance_count = cmd->u.draw_indexed.instance_count;
+   state->info.index_bias = cmd->u.draw_indexed.vertex_offset;
+
+   if (state->info.primitive_restart) {
+      if (state->info.index_size == 4)
+         state->info.restart_index = 0xffffffff;
+      else
+         state->info.restart_index = 0xffff;
+   }
+
+   state->pctx->draw_vbo(state->pctx, &state->info);
+}
+
+static void handle_draw_indirect(struct val_cmd_buffer_entry *cmd,
+                                 struct rendering_state *state, bool indexed)
+{
+   if (indexed) {
+      state->info.index_size = state->index_size;
+      state->info.index.resource = state->index_buffer;
+      state->info.max_index = ~0;
+   } else
+      state->info.index_size = 0;
+   state->indirect_info.offset = cmd->u.draw_indirect.offset;
+   state->indirect_info.stride = cmd->u.draw_indirect.stride;
+   state->indirect_info.draw_count = cmd->u.draw_indirect.draw_count;
+   state->indirect_info.buffer = cmd->u.draw_indirect.buffer->bo;
+   state->info.indirect = &state->indirect_info;
+   state->pctx->draw_vbo(state->pctx, &state->info);
+}
+
+static void handle_index_buffer(struct val_cmd_buffer_entry *cmd,
+                                struct rendering_state *state)
+{
+   struct val_cmd_bind_index_buffer *ib = &cmd->u.index_buffer;
+   switch (ib->index_type) {
+   case VK_INDEX_TYPE_UINT16:
+      state->index_size = 2;
+      break;
+   case VK_INDEX_TYPE_UINT32:
+      state->index_size = 4;
+      break;
+   default:
+      break;
+   }
+   state->index_offset = ib->offset;
+   if (ib->buffer)
+      state->index_buffer = ib->buffer->bo;
+   else
+      state->index_buffer = NULL;
+
+   state->ib_dirty = true;
+}
+
+static void handle_dispatch(struct val_cmd_buffer_entry *cmd,
+                            struct rendering_state *state)
+{
+   state->dispatch_info.grid[0] = cmd->u.dispatch.x;
+   state->dispatch_info.grid[1] = cmd->u.dispatch.y;
+   state->dispatch_info.grid[2] = cmd->u.dispatch.z;
+   state->dispatch_info.indirect = NULL;
+   state->pctx->launch_grid(state->pctx, &state->dispatch_info);
+}
+
+static void handle_dispatch_indirect(struct val_cmd_buffer_entry *cmd,
+                                     struct rendering_state *state)
+{
+   state->dispatch_info.indirect = cmd->u.dispatch_indirect.buffer->bo;
+   state->dispatch_info.indirect_offset = cmd->u.dispatch_indirect.offset;
+   state->pctx->launch_grid(state->pctx, &state->dispatch_info);
+}
+
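+/* Push constants live in a CPU-side buffer that is bound as a 128-dword user
+ * constant buffer to every shader stage.
+ */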
+static void handle_push_constants(struct val_cmd_buffer_entry *cmd,
+                                  struct rendering_state *state)
+{
+   memcpy(state->push_constants + cmd->u.push_constants.offset, cmd->u.push_constants.val, cmd->u.push_constants.size);
+
+   state->pc_buffer[PIPE_SHADER_VERTEX].buffer_size = 128 * 4;
+   state->pc_buffer[PIPE_SHADER_VERTEX].buffer_offset = 0;
+   state->pc_buffer[PIPE_SHADER_VERTEX].user_buffer = state->push_constants;
+   state->pcbuf_dirty[PIPE_SHADER_VERTEX] = true;
+   state->pc_buffer[PIPE_SHADER_FRAGMENT].buffer_size = 128 * 4;
+   state->pc_buffer[PIPE_SHADER_FRAGMENT].buffer_offset = 0;
+   state->pc_buffer[PIPE_SHADER_FRAGMENT].user_buffer = state->push_constants;
+   state->pcbuf_dirty[PIPE_SHADER_FRAGMENT] = true;
+   state->pc_buffer[PIPE_SHADER_GEOMETRY].buffer_size = 128 * 4;
+   state->pc_buffer[PIPE_SHADER_GEOMETRY].buffer_offset = 0;
+   state->pc_buffer[PIPE_SHADER_GEOMETRY].user_buffer = state->push_constants;
+   state->pcbuf_dirty[PIPE_SHADER_GEOMETRY] = true;
+   state->pc_buffer[PIPE_SHADER_TESS_CTRL].buffer_size = 128 * 4;
+   state->pc_buffer[PIPE_SHADER_TESS_CTRL].buffer_offset = 0;
+   state->pc_buffer[PIPE_SHADER_TESS_CTRL].user_buffer = state->push_constants;
+   state->pcbuf_dirty[PIPE_SHADER_TESS_CTRL] = true;
+   state->pc_buffer[PIPE_SHADER_TESS_EVAL].buffer_size = 128 * 4;
+   state->pc_buffer[PIPE_SHADER_TESS_EVAL].buffer_offset = 0;
+   state->pc_buffer[PIPE_SHADER_TESS_EVAL].user_buffer = state->push_constants;
+   state->pcbuf_dirty[PIPE_SHADER_TESS_EVAL] = true;
+   state->pc_buffer[PIPE_SHADER_COMPUTE].buffer_size = 128 * 4;
+   state->pc_buffer[PIPE_SHADER_COMPUTE].buffer_offset = 0;
+   state->pc_buffer[PIPE_SHADER_COMPUTE].user_buffer = state->push_constants;
+   state->pcbuf_dirty[PIPE_SHADER_COMPUTE] = true;
+}
+
+static void val_execute_cmd_buffer(struct val_cmd_buffer *cmd_buffer,
+                                   struct rendering_state *state);
+
+static void handle_execute_commands(struct val_cmd_buffer_entry *cmd,
+                                    struct rendering_state *state)
+{
+   for (unsigned i = 0; i < cmd->u.execute_commands.command_buffer_count; i++) {
+      struct val_cmd_buffer *secondary_buf = cmd->u.execute_commands.cmd_buffers[i];
+      val_execute_cmd_buffer(secondary_buf, state);
+   }
+}
+
+static void handle_event_set(struct val_cmd_buffer_entry *cmd,
+                             struct rendering_state *state)
+{
+   struct val_event *event = cmd->u.event_set.event;
+
+   if (cmd->u.event_set.flush)
+      state->pctx->flush(state->pctx, NULL, 0);
+   event->event_storage = (cmd->u.event_set.value == true) ? 1 : 0;
+}
+
+static void handle_wait_events(struct val_cmd_buffer_entry *cmd,
+                               struct rendering_state *state)
+{
+   for (unsigned i = 0; i < cmd->u.wait_events.event_count; i++) {
+      struct val_event *event = cmd->u.wait_events.events[i];
+
+      while (event->event_storage != true);
+   }
+}
+
+static void handle_pipeline_barrier(struct val_cmd_buffer_entry *cmd,
+                                    struct rendering_state *state)
+{
+   /* why hello nail, I'm a hammer. - TODO */
+   state->pctx->flush(state->pctx, NULL, 0);
+}
+
+static void handle_begin_query(struct val_cmd_buffer_entry *cmd,
+                               struct rendering_state *state)
+{
+   struct val_cmd_query_cmd *qcmd = &cmd->u.query;
+   struct val_query_pool *pool = qcmd->pool;
+
+   if (!pool->queries[qcmd->query]) {
+      enum pipe_query_type qtype = pool->base_type;
+      if (qtype == PIPE_QUERY_OCCLUSION_COUNTER && !qcmd->precise)
+         qtype = PIPE_QUERY_OCCLUSION_PREDICATE;
+      pool->queries[qcmd->query] = state->pctx->create_query(state->pctx,
+                                                             qtype, qcmd->index);
+   }
+
+   state->pctx->begin_query(state->pctx, pool->queries[qcmd->query]);
+}
+
+static void handle_end_query(struct val_cmd_buffer_entry *cmd,
+                             struct rendering_state *state)
+{
+   struct val_cmd_query_cmd *qcmd = &cmd->u.query;
+   struct val_query_pool *pool = qcmd->pool;
+   assert(pool->queries[qcmd->query]);
+
+   state->pctx->end_query(state->pctx, pool->queries[qcmd->query]);
+}
+
+static void handle_reset_query_pool(struct val_cmd_buffer_entry *cmd,
+                                    struct rendering_state *state)
+{
+   struct val_cmd_query_cmd *qcmd = &cmd->u.query;
+   struct val_query_pool *pool = qcmd->pool;
+   for (unsigned i = qcmd->query; i < qcmd->query + qcmd->index; i++) {
+      if (pool->queries[i]) {
+         state->pctx->destroy_query(state->pctx, pool->queries[i]);
+         pool->queries[i] = NULL;
+      }
+   }
+}
+
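+/* Timestamps are implemented as PIPE_QUERY_TIMESTAMP queries; the flush flag
+ * submits previously recorded work before the timestamp is taken.
+ */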
+static void handle_write_timestamp(struct val_cmd_buffer_entry *cmd,
+                                   struct rendering_state *state)
+{
+   struct val_cmd_query_cmd *qcmd = &cmd->u.query;
+   struct val_query_pool *pool = qcmd->pool;
+   if (!pool->queries[qcmd->query]) {
+      pool->queries[qcmd->query] = state->pctx->create_query(state->pctx,
+                                                             PIPE_QUERY_TIMESTAMP, 0);
+   }
+
+   if (qcmd->flush)
+      state->pctx->flush(state->pctx, NULL, 0);
+   state->pctx->end_query(state->pctx, pool->queries[qcmd->query]);
+}
+
+static void handle_copy_query_pool_results(struct val_cmd_buffer_entry *cmd,
+                                           struct rendering_state *state)
+{
+   struct val_cmd_copy_query_pool_results *copycmd = &cmd->u.copy_query_pool_results;
+   struct val_query_pool *pool = copycmd->pool;
+
+   for (unsigned i = copycmd->first_query; i < copycmd->first_query + copycmd->query_count; i++) {
+      unsigned offset = copycmd->dst->offset + (copycmd->stride * (i - copycmd->first_query));
+      if (pool->queries[i]) {
+         if (copycmd->flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
+            state->pctx->get_query_result_resource(state->pctx,
+                                                   pool->queries[i],
+                                                   copycmd->flags & VK_QUERY_RESULT_WAIT_BIT,
+                                                   copycmd->flags & VK_QUERY_RESULT_64_BIT ? PIPE_QUERY_TYPE_U64 : PIPE_QUERY_TYPE_U32,
+                                                   -1,
+                                                   copycmd->dst->bo,
+                                                   offset + (copycmd->flags & VK_QUERY_RESULT_64_BIT ? 8 : 4));
+         state->pctx->get_query_result_resource(state->pctx,
+                                                pool->queries[i],
+                                                copycmd->flags & VK_QUERY_RESULT_WAIT_BIT,
+                                                copycmd->flags & VK_QUERY_RESULT_64_BIT ? PIPE_QUERY_TYPE_U64 : PIPE_QUERY_TYPE_U32,
+                                                0,
+                                                copycmd->dst->bo,
+                                                offset);
+      } else {
+         /* if no queries emitted yet, just reset the buffer to 0 so avail is reported correctly */
+         if (copycmd->flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
+            struct pipe_transfer *src_t;
+            uint32_t *map;
+
+            struct pipe_box box = {};
+            box.width = copycmd->stride * copycmd->query_count;
+            box.height = 1;
+            box.depth = 1;
+            map = state->pctx->transfer_map(state->pctx,
+                                            copycmd->dst->bo, 0, PIPE_TRANSFER_WRITE, &box,
+                                            &src_t);
+
+            memset(map, 0, box.width);
+            state->pctx->transfer_unmap(state->pctx, src_t);
+         }
+      }
+   }
+}
+
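+/* Pack a VkClearColorValue into raw texel data for clear_texture(): pure
+ * integer channels are packed by hand, everything else goes through
+ * util_pack_color().
+ */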
+static void pack_clear_color(enum pipe_format pformat, VkClearColorValue *in_val, uint32_t col_val[4])
+{
+   const struct util_format_description *desc = util_format_description(pformat);
+   col_val[0] = col_val[1] = col_val[2] = col_val[3] = 0;
+   for (unsigned c = 0; c < 4; c++) {
+      if (desc->swizzle[c] >= 4)
+         continue;
+      const struct util_format_channel_description *channel = &desc->channel[desc->swizzle[c]];
+      if (channel->size == 32) {
+         col_val[c] = in_val->uint32[c];
+         continue;
+      }
+      if (channel->pure_integer) {
+         uint64_t v = in_val->uint32[c] & ((1u << channel->size) - 1);
+         switch (channel->size) {
+         case 2:
+         case 8:
+         case 10:
+            col_val[0] |= (v << channel->shift);
+            break;
+         case 16:
+            col_val[c / 2] |= (v << (16 * (c % 2)));
+            break;
+         }
+      } else {
+         util_pack_color(in_val->float32, pformat, (union util_color *)col_val);
+         break;
+      }
+   }
+}
+
+static void handle_clear_color_image(struct val_cmd_buffer_entry *cmd,
+                                     struct rendering_state *state)
+{
+   struct val_image *image = cmd->u.clear_color_image.image;
+   uint32_t col_val[4];
+   pack_clear_color(image->bo->format, &cmd->u.clear_color_image.clear_val, col_val);
+   for (unsigned i = 0; i < cmd->u.clear_color_image.range_count; i++) {
+      VkImageSubresourceRange *range = &cmd->u.clear_color_image.ranges[i];
+      struct pipe_box box;
+      box.x = 0;
+      box.y = 0;
+      box.z = 0;
+
+      uint32_t level_count = val_get_levelCount(image, range);
+      for (unsigned j = range->baseMipLevel; j < range->baseMipLevel + level_count; j++) {
+         box.width = u_minify(image->bo->width0, j);
+         box.height = u_minify(image->bo->height0, j);
+         box.depth = 1;
+         if (image->bo->target == PIPE_TEXTURE_3D)
+            box.depth = u_minify(image->bo->depth0, j);
+         else if (image->bo->target == PIPE_TEXTURE_1D_ARRAY) {
+            box.y = range->baseArrayLayer;
+            box.height = val_get_layerCount(image, range);
+            box.depth = 1;
+         } else {
+            box.z = range->baseArrayLayer;
+            box.depth = val_get_layerCount(image, range);
+         }
+
+         state->pctx->clear_texture(state->pctx, image->bo,
+                                    j, &box, (void *)col_val);
+      }
+   }
+}
+
+static void handle_clear_ds_image(struct val_cmd_buffer_entry *cmd,
+                                  struct rendering_state *state)
+{
+   struct val_image *image = cmd->u.clear_ds_image.image;
+   uint64_t col_val;
+   col_val = util_pack64_z_stencil(image->bo->format, cmd->u.clear_ds_image.clear_val.depth, cmd->u.clear_ds_image.clear_val.stencil);
+   for (unsigned i = 0; i < cmd->u.clear_ds_image.range_count; i++) {
+      VkImageSubresourceRange *range = &cmd->u.clear_ds_image.ranges[i];
+      struct pipe_box box;
+      box.x = 0;
+      box.y = 0;
+      box.z = 0;
+
+      uint32_t level_count = val_get_levelCount(image, range);
+      for (unsigned j = range->baseMipLevel; j < range->baseMipLevel + level_count; j++) {
+         box.width = u_minify(image->bo->width0, j);
+         box.height = u_minify(image->bo->height0, j);
+         box.depth = 1;
+         if (image->bo->target == PIPE_TEXTURE_3D)
+            box.depth = u_minify(image->bo->depth0, j);
+         else if (image->bo->target == PIPE_TEXTURE_1D_ARRAY) {
+            box.y = range->baseArrayLayer;
+            box.height = val_get_layerCount(image, range);
+            box.depth = 1;
+         } else {
+            box.z = range->baseArrayLayer;
+            box.depth = val_get_layerCount(image, range);
+         }
+
+         state->pctx->clear_texture(state->pctx, image->bo,
+                                    j, &box, (void *)&col_val);
+      }
+   }
+}
+
+static void handle_clear_attachments(struct val_cmd_buffer_entry *cmd,
+                                     struct rendering_state *state)
+{
+   for (uint32_t a = 0; a < cmd->u.clear_attachments.attachment_count; a++) {
+      VkClearAttachment *att = &cmd->u.clear_attachments.attachments[a];
+      struct val_subpass *subpass = &state->pass->subpasses[state->subpass];
+      struct val_image_view *imgv;
+
+      if (att->aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
+         struct val_subpass_attachment *color_att = &subpass->color_attachments[att->colorAttachment];
+         if (!color_att || color_att->attachment == VK_ATTACHMENT_UNUSED)
+            continue;
+         imgv = state->vk_framebuffer->attachments[color_att->attachment];
+      } else {
+         struct val_subpass_attachment *ds_att = subpass->depth_stencil_attachment;
+         if (!ds_att || ds_att->attachment == VK_ATTACHMENT_UNUSED)
+            continue;
+         imgv = state->vk_framebuffer->attachments[ds_att->attachment];
+      }
+      uint32_t col_val[4];
+      if (util_format_is_depth_or_stencil(imgv->pformat)) {
+         uint64_t val = util_pack64_z_stencil(imgv->pformat, att->clearValue.depthStencil.depth, att->clearValue.depthStencil.stencil);
+         memcpy(col_val, &val, 8);
+      } else
+         pack_clear_color(imgv->pformat, &att->clearValue.color, col_val);
+      for (uint32_t r = 0; r < cmd->u.clear_attachments.rect_count; r++) {
+         struct pipe_box box;
+         VkClearRect *rect = &cmd->u.clear_attachments.rects[r];
+         box.x = rect->rect.offset.x;
+         box.y = rect->rect.offset.y;
+         box.z = imgv->subresourceRange.baseArrayLayer + rect->baseArrayLayer;
+         box.width = rect->rect.extent.width;
+         box.height = rect->rect.extent.height;
+         box.depth = rect->layerCount;
+
+         state->pctx->clear_texture(state->pctx, imgv->image->bo,
+                                    imgv->subresourceRange.baseMipLevel,
+                                    &box, col_val);
+      }
+   }
+}
+
+static void handle_resolve_image(struct val_cmd_buffer_entry *cmd,
+                                 struct rendering_state *state)
+{
+   int i;
+   struct val_cmd_resolve_image *resolvecmd = &cmd->u.resolve_image;
+   struct pipe_blit_info info;
+
+   memset(&info, 0, sizeof(info));
+
+   state->pctx->flush(state->pctx, NULL, 0);
+   info.src.resource = resolvecmd->src->bo;
+   info.dst.resource = resolvecmd->dst->bo;
+   info.src.format = resolvecmd->src->bo->format;
+   info.dst.format = resolvecmd->dst->bo->format;
+   info.mask = util_format_is_depth_or_stencil(info.src.format) ? PIPE_MASK_ZS : PIPE_MASK_RGBA;
+   info.filter = PIPE_TEX_FILTER_NEAREST;
+   for (i = 0; i < resolvecmd->region_count; i++) {
+      int srcX0, srcY0;
+      unsigned dstX0, dstY0;
+
+      srcX0 = resolvecmd->regions[i].srcOffset.x;
+      srcY0 = resolvecmd->regions[i].srcOffset.y;
+
+      dstX0 = resolvecmd->regions[i].dstOffset.x;
+      dstY0 = resolvecmd->regions[i].dstOffset.y;
+
+      info.dst.box.x = dstX0;
+      info.dst.box.y = dstY0;
+      info.src.box.x = srcX0;
+      info.src.box.y = srcY0;
+
+      info.dst.box.width = resolvecmd->regions[i].extent.width;
+      info.src.box.width = resolvecmd->regions[i].extent.width;
+      info.dst.box.height = resolvecmd->regions[i].extent.height;
+      info.src.box.height = resolvecmd->regions[i].extent.height;
+
+      info.dst.box.depth = resolvecmd->regions[i].dstSubresource.layerCount;
+      info.src.box.depth = resolvecmd->regions[i].srcSubresource.layerCount;
+
+      info.src.level = resolvecmd->regions[i].srcSubresource.mipLevel;
+      info.src.box.z = resolvecmd->regions[i].srcOffset.z + resolvecmd->regions[i].srcSubresource.baseArrayLayer;
+
+      info.dst.level = resolvecmd->regions[i].dstSubresource.mipLevel;
+      info.dst.box.z = resolvecmd->regions[i].dstOffset.z + resolvecmd->regions[i].dstSubresource.baseArrayLayer;
+
+      state->pctx->blit(state->pctx, &info);
+   }
+}
+
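+/* Replay a recorded command buffer (and any secondaries it executes) against
+ * the gallium context.
+ */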
+static void val_execute_cmd_buffer(struct val_cmd_buffer *cmd_buffer,
+                                   struct rendering_state *state)
+{
+   struct val_cmd_buffer_entry *cmd;
+
+   LIST_FOR_EACH_ENTRY(cmd, &cmd_buffer->cmds, cmd_link) {
+      switch (cmd->cmd_type) {
+      case VAL_CMD_BIND_PIPELINE:
+         handle_pipeline(cmd, state);
+         break;
+      case VAL_CMD_SET_VIEWPORT:
+         handle_set_viewport(cmd, state);
+         break;
+      case VAL_CMD_SET_SCISSOR:
+         handle_set_scissor(cmd, state);
+         break;
+      case VAL_CMD_SET_LINE_WIDTH:
+         handle_set_line_width(cmd, state);
+         break;
+      case VAL_CMD_SET_DEPTH_BIAS:
+         handle_set_depth_bias(cmd, state);
+         break;
+      case VAL_CMD_SET_BLEND_CONSTANTS:
+         handle_set_blend_constants(cmd, state);
+         break;
+      case VAL_CMD_SET_DEPTH_BOUNDS:
+         handle_set_depth_bounds(cmd, state);
+         break;
+      case VAL_CMD_SET_STENCIL_COMPARE_MASK:
+         handle_set_stencil_compare_mask(cmd, state);
+         break;
+      case VAL_CMD_SET_STENCIL_WRITE_MASK:
+         handle_set_stencil_write_mask(cmd, state);
+         break;
+      case VAL_CMD_SET_STENCIL_REFERENCE:
+         handle_set_stencil_reference(cmd, state);
+         break;
+      case VAL_CMD_BIND_DESCRIPTOR_SETS:
+         handle_descriptor_sets(cmd, state);
+         break;
+      case VAL_CMD_BIND_INDEX_BUFFER:
+         handle_index_buffer(cmd, state);
+         break;
+      case VAL_CMD_BIND_VERTEX_BUFFERS:
+         handle_vertex_buffers(cmd, state);
+         break;
+      case VAL_CMD_DRAW:
+         emit_state(state);
+         handle_draw(cmd, state);
+         break;
+      case VAL_CMD_DRAW_INDEXED:
+         emit_state(state);
+         handle_draw_indexed(cmd, state);
+         break;
+      case VAL_CMD_DRAW_INDIRECT:
+         emit_state(state);
+         handle_draw_indirect(cmd, state, false);
+         break;
+      case VAL_CMD_DRAW_INDEXED_INDIRECT:
+         emit_state(state);
+         handle_draw_indirect(cmd, state, true);
+         break;
+      case VAL_CMD_DISPATCH:
+         emit_compute_state(state);
+         handle_dispatch(cmd, state);
+         break;
+      case VAL_CMD_DISPATCH_INDIRECT:
+         emit_compute_state(state);
+         handle_dispatch_indirect(cmd, state);
+         break;
+      case VAL_CMD_COPY_BUFFER:
+         handle_copy_buffer(cmd, state);
+         break;
+      case VAL_CMD_COPY_IMAGE:
+         handle_copy_image(cmd, state);
+         break;
+      case VAL_CMD_BLIT_IMAGE:
+         handle_blit_image(cmd, state);
+         break;
+      case VAL_CMD_COPY_BUFFER_TO_IMAGE:
+         handle_copy_buffer_to_image(cmd, state);
+         break;
+      case VAL_CMD_COPY_IMAGE_TO_BUFFER:
+         handle_copy_image_to_buffer(cmd, state);
+         break;
+      case VAL_CMD_UPDATE_BUFFER:
+         handle_update_buffer(cmd, state);
+         break;
+      case VAL_CMD_FILL_BUFFER:
+         handle_fill_buffer(cmd, state);
+         break;
+      case VAL_CMD_CLEAR_COLOR_IMAGE:
+         handle_clear_color_image(cmd, state);
+         break;
+      case VAL_CMD_CLEAR_DEPTH_STENCIL_IMAGE:
+         handle_clear_ds_image(cmd, state);
+         break;
+      case VAL_CMD_CLEAR_ATTACHMENTS:
+         handle_clear_attachments(cmd, state);
+         break;
+      case VAL_CMD_RESOLVE_IMAGE:
+         handle_resolve_image(cmd, state);
+         break;
+      case VAL_CMD_SET_EVENT:
+      case VAL_CMD_RESET_EVENT:
+         handle_event_set(cmd, state);
+         break;
+      case VAL_CMD_WAIT_EVENTS:
+         handle_wait_events(cmd, state);
+         break;
+      case VAL_CMD_PIPELINE_BARRIER:
+         handle_pipeline_barrier(cmd, state);
+         break;
+      case VAL_CMD_BEGIN_QUERY:
+         handle_begin_query(cmd, state);
+         break;
+      case VAL_CMD_END_QUERY:
+         handle_end_query(cmd, state);
+         break;
+      case VAL_CMD_RESET_QUERY_POOL:
+         handle_reset_query_pool(cmd, state);
+         break;
+      case VAL_CMD_WRITE_TIMESTAMP:
+         handle_write_timestamp(cmd, state);
+         break;
+      case VAL_CMD_COPY_QUERY_POOL_RESULTS:
+         handle_copy_query_pool_results(cmd, state);
+         break;
+      case VAL_CMD_PUSH_CONSTANTS:
+         handle_push_constants(cmd, state);
+         break;
+      case VAL_CMD_BEGIN_RENDER_PASS:
+         handle_begin_render_pass(cmd, state);
+         break;
+      case VAL_CMD_NEXT_SUBPASS:
+         handle_next_subpass(cmd, state);
+         break;
+      case VAL_CMD_END_RENDER_PASS:
+         handle_end_render_pass(cmd, state);
+         break;
+      case VAL_CMD_EXECUTE_COMMANDS:
+         handle_execute_commands(cmd, state);
+         break;
+      }
+   }
+}
+
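+/* Queue-thread entry point: replay the command buffer into the queue's
+ * gallium context, hand back a fence handle if requested, then unbind and
+ * destroy all transient state objects created during execution.
+ */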
+VkResult val_execute_cmds(struct val_device *device,
+                          struct val_queue *queue,
+                          struct val_fence *fence,
+                          struct val_cmd_buffer *cmd_buffer)
+{
+   struct rendering_state state;
+   struct pipe_fence_handle *handle = NULL;
+   memset(&state, 0, sizeof(state));
+   state.pctx = queue->ctx;
+   state.blend_dirty = true;
+   state.dsa_dirty = true;
+   state.rs_dirty = true;
+   /* replay the recorded command buffer against the queue's gallium context */
+   val_execute_cmd_buffer(cmd_buffer, &state);
+
+   state.pctx->flush(state.pctx, fence ? &handle : NULL, 0);
+   if (fence) {
+      mtx_lock(&device->fence_lock);
+      fence->handle = handle;
+      mtx_unlock(&device->fence_lock);
+   }
+   state.start_vb = -1;
+   state.num_vb = 0;
+   state.pctx->set_vertex_buffers(state.pctx, 0, PIPE_MAX_ATTRIBS, NULL);
+   state.pctx->bind_vertex_elements_state(state.pctx, NULL);
+   state.pctx->bind_vs_state(state.pctx, NULL);
+   state.pctx->bind_fs_state(state.pctx, NULL);
+   state.pctx->bind_gs_state(state.pctx, NULL);
+   if (state.pctx->bind_tcs_state)
+      state.pctx->bind_tcs_state(state.pctx, NULL);
+   if (state.pctx->bind_tes_state)
+      state.pctx->bind_tes_state(state.pctx, NULL);
+   if (state.pctx->bind_compute_state)
+      state.pctx->bind_compute_state(state.pctx, NULL);
+   if (state.velems_cso)
+      state.pctx->delete_vertex_elements_state(state.pctx, state.velems_cso);
+
+   state.pctx->bind_rasterizer_state(state.pctx, NULL);
+   state.pctx->delete_rasterizer_state(state.pctx, state.rast_handle);
+   if (state.blend_handle) {
+      state.pctx->bind_blend_state(state.pctx, NULL);
+      state.pctx->delete_blend_state(state.pctx, state.blend_handle);
+   }
+
+   if (state.dsa_handle) {
+      state.pctx->bind_depth_stencil_alpha_state(state.pctx, NULL);
+      state.pctx->delete_depth_stencil_alpha_state(state.pctx, state.dsa_handle);
+   }
+
+   for (enum pipe_shader_type s = PIPE_SHADER_VERTEX; s < PIPE_SHADER_TYPES; s++) {
+      for (unsigned i = 0; i < PIPE_MAX_SAMPLERS; i++) {
+         if (state.sv[s][i])
+            pipe_sampler_view_reference(&state.sv[s][i], NULL);
+         if (state.ss_cso[s][i]) {
+            state.pctx->delete_sampler_state(state.pctx, state.ss_cso[s][i]);
+            state.ss_cso[s][i] = NULL;
+         }
+      }
+      state.pctx->bind_sampler_states(state.pctx, s, 0, PIPE_MAX_SAMPLERS, state.ss_cso[s]);
+
+      state.pctx->set_shader_images(state.pctx, s, 0, device->physical_device->max_images, NULL);
+   }
+
+   return VK_SUCCESS;
+}
diff --git a/src/gallium/frontends/vallium/val_extensions.py b/src/gallium/frontends/vallium/val_extensions.py
new file mode 100644 (file)
index 0000000..905d38b
--- /dev/null
@@ -0,0 +1,166 @@
+COPYRIGHT = """\
+/*
+ * Copyright 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+"""
+
+import argparse
+import os.path
+import re
+import sys
+
+VULKAN_UTIL = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../vulkan/util'))
+sys.path.append(VULKAN_UTIL)
+
+from vk_extensions import *
+from vk_extensions_gen import *
+
+MAX_API_VERSION = '1.1.107'
+
+# Supported API versions.  Each one is the maximum patch version for the given
+# version.  Versions come in increasing order, and each version is available
+# only if its "enable" condition is true and all previous versions are
+# available.
+# TODO: The patch version should be unified!
+API_VERSIONS = [
+    ApiVersion('1.0.68',  True),
+    ApiVersion('1.1.107', False),
+    ApiVersion('1.2.131', False),
+]
+
+MAX_API_VERSION = None # Computed later
+
+# On Android, we disable all surface and swapchain extensions. Android's Vulkan
+# loader implements VK_KHR_surface and VK_KHR_swapchain, and applications
+# cannot access the driver's implementation. Moreover, if the driver exposes
+# those extension strings, then tests dEQP-VK.api.info.instance.extensions
+# and dEQP-VK.api.info.device fail due to the duplicated strings.
+EXTENSIONS = [
+    Extension('VK_ANDROID_native_buffer',                 5, False),
+    Extension('VK_KHR_16bit_storage',                     1, False),
+    Extension('VK_KHR_bind_memory2',                      1, True),
+    Extension('VK_KHR_create_renderpass2',                1, False),
+    Extension('VK_KHR_dedicated_allocation',              1, True),
+    Extension('VK_KHR_depth_stencil_resolve',             1, False),
+    Extension('VK_KHR_descriptor_update_template',        1, False),
+    Extension('VK_KHR_device_group',                      1, False),
+    Extension('VK_KHR_device_group_creation',             1, False),
+    Extension('VK_KHR_draw_indirect_count',               1, False),
+    Extension('VK_KHR_driver_properties',                 1, True),
+    Extension('VK_KHR_external_fence',                    1, False),
+    Extension('VK_KHR_external_fence_capabilities',       1, True),
+    Extension('VK_KHR_external_fence_fd',                 1, False),
+    Extension('VK_KHR_external_memory',                   1, False),
+    Extension('VK_KHR_external_memory_capabilities',      1, True),
+    Extension('VK_KHR_external_memory_fd',                1, False),
+    Extension('VK_KHR_external_semaphore',                1, False),
+    Extension('VK_KHR_external_semaphore_capabilities',   1, True),
+    Extension('VK_KHR_external_semaphore_fd',             1, False),
+    Extension('VK_KHR_get_display_properties2',           1, 'VK_USE_PLATFORM_DISPLAY_KHR'),
+    Extension('VK_KHR_get_memory_requirements2',          1, True),
+    Extension('VK_KHR_get_physical_device_properties2',   1, True),
+    Extension('VK_KHR_get_surface_capabilities2',         1, 'VAL_HAS_SURFACE'),
+    Extension('VK_KHR_image_format_list',                 1, False),
+    Extension('VK_KHR_imageless_framebuffer',             1, False),
+    Extension('VK_KHR_incremental_present',               1, 'VAL_HAS_SURFACE'),
+    Extension('VK_KHR_maintenance1',                      1, True),
+    Extension('VK_KHR_maintenance2',                      1, False),
+    Extension('VK_KHR_maintenance3',                      1, False),
+    Extension('VK_KHR_pipeline_executable_properties',    1, False),
+    Extension('VK_KHR_push_descriptor',                   1, False),
+    Extension('VK_KHR_relaxed_block_layout',              1, True),
+    Extension('VK_KHR_sampler_mirror_clamp_to_edge',      1, True),
+    Extension('VK_KHR_sampler_ycbcr_conversion',          1, False),
+    Extension('VK_KHR_shader_atomic_int64',               1, False),
+    Extension('VK_KHR_shader_draw_parameters',            1, False),
+    Extension('VK_KHR_shader_float16_int8',               1, True),
+    Extension('VK_KHR_storage_buffer_storage_class',      1, True),
+    Extension('VK_KHR_surface',                          25, 'VAL_HAS_SURFACE'),
+    Extension('VK_KHR_surface_protected_capabilities',    1, 'VAL_HAS_SURFACE'),
+    Extension('VK_KHR_swapchain',                        68, 'VAL_HAS_SURFACE'),
+    Extension('VK_KHR_uniform_buffer_standard_layout',    1, False),
+    Extension('VK_KHR_variable_pointers',                 1, False),
+    Extension('VK_KHR_wayland_surface',                   6, 'VK_USE_PLATFORM_WAYLAND_KHR'),
+    Extension('VK_KHR_xcb_surface',                       6, 'VK_USE_PLATFORM_XCB_KHR'),
+    Extension('VK_KHR_xlib_surface',                      6, 'VK_USE_PLATFORM_XLIB_KHR'),
+    Extension('VK_KHR_multiview',                         1, False),
+    Extension('VK_KHR_display',                          23, 'VK_USE_PLATFORM_DISPLAY_KHR'),
+    Extension('VK_KHR_8bit_storage',                      1, False),
+    Extension('VK_EXT_direct_mode_display',               1, 'VK_USE_PLATFORM_DISPLAY_KHR'),
+    Extension('VK_EXT_acquire_xlib_display',              1, 'VK_USE_PLATFORM_XLIB_XRANDR_EXT'),
+    Extension('VK_EXT_buffer_device_address',             1, False),
+    Extension('VK_EXT_calibrated_timestamps',             1, False),
+    Extension('VK_EXT_conditional_rendering',             1, False),
+    Extension('VK_EXT_conservative_rasterization',        1, False),
+    Extension('VK_EXT_display_surface_counter',           1, 'VK_USE_PLATFORM_DISPLAY_KHR'),
+    Extension('VK_EXT_display_control',                   1, 'VK_USE_PLATFORM_DISPLAY_KHR'),
+    Extension('VK_EXT_debug_report',                      9, True),
+    Extension('VK_EXT_depth_clip_enable',                 1, False),
+    Extension('VK_EXT_depth_range_unrestricted',          1, False),
+    Extension('VK_EXT_descriptor_indexing',               2, False),
+    Extension('VK_EXT_discard_rectangles',                1, False),
+    Extension('VK_EXT_external_memory_dma_buf',           1, True),
+    Extension('VK_EXT_external_memory_host',              1, False),
+    Extension('VK_EXT_global_priority',                   1, False),
+    Extension('VK_EXT_host_query_reset',                  1, False),
+    Extension('VK_EXT_index_type_uint8',                  1, False),
+    Extension('VK_EXT_inline_uniform_block',              1, False),
+    Extension('VK_EXT_memory_budget',                     1, False),
+    Extension('VK_EXT_memory_priority',                   1, False),
+    Extension('VK_EXT_pci_bus_info',                      2, False),
+    Extension('VK_EXT_pipeline_creation_feedback',        1, False),
+    Extension('VK_EXT_post_depth_coverage',               1, False),
+    Extension('VK_EXT_private_data',                      1, True),
+    Extension('VK_EXT_queue_family_foreign',              1, False),
+    Extension('VK_EXT_sample_locations',                  1, False),
+    Extension('VK_EXT_sampler_filter_minmax',             1, False),
+    Extension('VK_EXT_scalar_block_layout',               1, False),
+    Extension('VK_EXT_shader_viewport_index_layer',       1, False),
+    Extension('VK_EXT_shader_stencil_export',             1, False),
+    Extension('VK_EXT_shader_subgroup_ballot',            1, False),
+    Extension('VK_EXT_shader_subgroup_vote',              1, False),
+    Extension('VK_EXT_transform_feedback',                1, False),
+    Extension('VK_EXT_vertex_attribute_divisor',          3, False),
+    Extension('VK_EXT_ycbcr_image_arrays',                1, False),
+    Extension('VK_GOOGLE_decorate_string',                1, True),
+    Extension('VK_GOOGLE_hlsl_functionality1',            1, True),
+]
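+
+# Each Extension(name, spec_version, enable) row above pairs the extension
+# string with the spec revision that will be advertised and an enable
+# condition.  A plain True/False enables or disables it unconditionally,
+# while a string such as 'VAL_HAS_SURFACE' or 'VK_USE_PLATFORM_XCB_KHR'
+# names a condition under which the extension is exposed; exactly how that
+# condition is evaluated is up to the shared vk_extensions_gen helpers
+# imported above.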
+
+MAX_API_VERSION = VkVersion('0.0.0')
+for version in API_VERSIONS:
+    version.version = VkVersion(version.version)
+    assert version.version > MAX_API_VERSION
+    MAX_API_VERSION = version.version
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--out-c', help='Output C file.', required=True)
+    parser.add_argument('--out-h', help='Output H file.', required=True)
+    parser.add_argument('--xml',
+                        help='Vulkan API XML file.',
+                        required=True,
+                        action='append',
+                        dest='xml_files')
+    args = parser.parse_args()
+
+    gen_extensions('val', args.xml_files, API_VERSIONS, MAX_API_VERSION, EXTENSIONS, args.out_c, args.out_h)
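+
+# This script is intended to be driven by the build system: one or more
+# Vulkan registry XML files go in, a C/H pair comes out.  gen_extensions()
+# itself lives in the shared src/vulkan/util generators imported above.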
diff --git a/src/gallium/frontends/vallium/val_formats.c b/src/gallium/frontends/vallium/val_formats.c
new file mode 100644 (file)
index 0000000..f0f9620
--- /dev/null
@@ -0,0 +1,442 @@
+/*
+ * Copyright © 2019 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "val_private.h"
+#include "util/format/u_format.h"
+#include "util/u_math.h"
+#define COMMON_NAME(x) [VK_FORMAT_##x] = PIPE_FORMAT_##x
+
+#define FLOAT_NAME(x) [VK_FORMAT_##x##_SFLOAT] = PIPE_FORMAT_##x##_FLOAT
+
+static enum pipe_format vk_format_to_pipe_table[VK_FORMAT_ASTC_12x12_SRGB_BLOCK + 1] = {
+   COMMON_NAME(R8_UNORM),
+   COMMON_NAME(R8G8_UNORM),
+   COMMON_NAME(R8G8B8_UNORM),
+   COMMON_NAME(R8G8B8A8_UNORM),
+
+   COMMON_NAME(R8_SNORM),
+   COMMON_NAME(R8G8_SNORM),
+   COMMON_NAME(R8G8B8_SNORM),
+   COMMON_NAME(R8G8B8A8_SNORM),
+
+   //   COMMON_NAME(R8_SRGB),
+   COMMON_NAME(R8G8B8_SRGB),
+   COMMON_NAME(R8G8B8A8_SRGB),
+
+   COMMON_NAME(B8G8R8A8_UNORM),
+   COMMON_NAME(B8G8R8A8_SRGB),
+
+   COMMON_NAME(R8_UINT),
+   COMMON_NAME(R8G8_UINT),
+   COMMON_NAME(R8G8B8_UINT),
+   COMMON_NAME(R8G8B8A8_UINT),
+
+   COMMON_NAME(R16_UINT),
+   COMMON_NAME(R16G16_UINT),
+   COMMON_NAME(R16G16B16_UINT),
+   COMMON_NAME(R16G16B16A16_UINT),
+
+   COMMON_NAME(R32_UINT),
+   COMMON_NAME(R32G32_UINT),
+   COMMON_NAME(R32G32B32_UINT),
+   COMMON_NAME(R32G32B32A32_UINT),
+
+   COMMON_NAME(R8_SINT),
+   COMMON_NAME(R8G8_SINT),
+   COMMON_NAME(R8G8B8_SINT),
+   COMMON_NAME(R8G8B8A8_SINT),
+
+   COMMON_NAME(R16_SINT),
+   COMMON_NAME(R16G16_SINT),
+   COMMON_NAME(R16G16B16_SINT),
+   COMMON_NAME(R16G16B16A16_SINT),
+
+   COMMON_NAME(R32_SINT),
+   COMMON_NAME(R32G32_SINT),
+   COMMON_NAME(R32G32B32_SINT),
+   COMMON_NAME(R32G32B32A32_SINT),
+
+   COMMON_NAME(R16_UNORM),
+   COMMON_NAME(R16G16_UNORM),
+   COMMON_NAME(R16G16B16A16_UNORM),
+
+   COMMON_NAME(R16_SNORM),
+   COMMON_NAME(R16G16_SNORM),
+   COMMON_NAME(R16G16B16A16_SNORM),
+   FLOAT_NAME(R16),
+   FLOAT_NAME(R16G16),
+   FLOAT_NAME(R16G16B16),
+   FLOAT_NAME(R16G16B16A16),
+
+   FLOAT_NAME(R32),
+   FLOAT_NAME(R32G32),
+   FLOAT_NAME(R32G32B32),
+   FLOAT_NAME(R32G32B32A32),
+
+   COMMON_NAME(S8_UINT),
+   [VK_FORMAT_UNDEFINED] = PIPE_FORMAT_NONE,
+   [VK_FORMAT_R5G6B5_UNORM_PACK16] = PIPE_FORMAT_B5G6R5_UNORM,
+   [VK_FORMAT_A1R5G5B5_UNORM_PACK16] = PIPE_FORMAT_B5G5R5A1_UNORM,
+   [VK_FORMAT_B4G4R4A4_UNORM_PACK16] = PIPE_FORMAT_A4R4G4B4_UNORM,
+   [VK_FORMAT_D16_UNORM] = PIPE_FORMAT_Z16_UNORM,
+
+   [VK_FORMAT_A8B8G8R8_UNORM_PACK32] = PIPE_FORMAT_R8G8B8A8_UNORM,
+   [VK_FORMAT_A8B8G8R8_SNORM_PACK32] = PIPE_FORMAT_R8G8B8A8_SNORM,
+   [VK_FORMAT_A8B8G8R8_UINT_PACK32] = PIPE_FORMAT_R8G8B8A8_UINT,
+   [VK_FORMAT_A8B8G8R8_SINT_PACK32] = PIPE_FORMAT_R8G8B8A8_SINT,
+   [VK_FORMAT_A8B8G8R8_SRGB_PACK32] = PIPE_FORMAT_R8G8B8A8_SRGB,
+
+   [VK_FORMAT_A2B10G10R10_UNORM_PACK32] = PIPE_FORMAT_R10G10B10A2_UNORM,
+   [VK_FORMAT_A2B10G10R10_UINT_PACK32] = PIPE_FORMAT_R10G10B10A2_UINT,
+
+   [VK_FORMAT_B10G11R11_UFLOAT_PACK32] = PIPE_FORMAT_R11G11B10_FLOAT,
+   [VK_FORMAT_E5B9G9R9_UFLOAT_PACK32] = PIPE_FORMAT_R9G9B9E5_FLOAT,
+
+   [VK_FORMAT_X8_D24_UNORM_PACK32] = PIPE_FORMAT_Z24X8_UNORM,
+   [VK_FORMAT_D32_SFLOAT] = PIPE_FORMAT_Z32_FLOAT,
+   [VK_FORMAT_D24_UNORM_S8_UINT] = PIPE_FORMAT_Z24_UNORM_S8_UINT,
+   [VK_FORMAT_D32_SFLOAT_S8_UINT] = PIPE_FORMAT_Z32_FLOAT_S8X24_UINT,
+
+   [VK_FORMAT_BC1_RGB_UNORM_BLOCK] = PIPE_FORMAT_DXT1_RGB,
+   [VK_FORMAT_BC1_RGBA_UNORM_BLOCK] = PIPE_FORMAT_DXT1_RGBA,
+   [VK_FORMAT_BC2_UNORM_BLOCK] = PIPE_FORMAT_DXT3_RGBA,
+   [VK_FORMAT_BC3_UNORM_BLOCK] = PIPE_FORMAT_DXT5_RGBA,
+   [VK_FORMAT_BC4_UNORM_BLOCK] = PIPE_FORMAT_RGTC1_UNORM,
+   [VK_FORMAT_BC5_UNORM_BLOCK] = PIPE_FORMAT_RGTC2_UNORM,
+
+   [VK_FORMAT_BC1_RGB_SRGB_BLOCK] = PIPE_FORMAT_DXT1_SRGB,
+   [VK_FORMAT_BC1_RGBA_SRGB_BLOCK] = PIPE_FORMAT_DXT1_SRGBA,
+   [VK_FORMAT_BC2_SRGB_BLOCK] = PIPE_FORMAT_DXT3_SRGBA,
+   [VK_FORMAT_BC3_SRGB_BLOCK] = PIPE_FORMAT_DXT5_SRGBA,
+
+   [VK_FORMAT_BC4_SNORM_BLOCK] = PIPE_FORMAT_RGTC1_SNORM,
+   [VK_FORMAT_BC5_SNORM_BLOCK] = PIPE_FORMAT_RGTC2_SNORM,
+
+   [VK_FORMAT_BC6H_UFLOAT_BLOCK] = PIPE_FORMAT_BPTC_RGB_UFLOAT,
+   [VK_FORMAT_BC6H_SFLOAT_BLOCK] = PIPE_FORMAT_BPTC_RGB_FLOAT,
+   [VK_FORMAT_BC7_UNORM_BLOCK] = PIPE_FORMAT_BPTC_RGBA_UNORM,
+   [VK_FORMAT_BC7_SRGB_BLOCK] = PIPE_FORMAT_BPTC_SRGBA,
+};
+
+enum pipe_format vk_format_to_pipe(VkFormat format)
+{
+   if (format > VK_FORMAT_ASTC_12x12_SRGB_BLOCK)
+      return PIPE_FORMAT_NONE;
+   return vk_format_to_pipe_table[format];
+}
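+
+/* Usage sketch: the table above is a direct VkFormat -> pipe_format map, so
+ * e.g. vk_format_to_pipe(VK_FORMAT_B8G8R8A8_UNORM) == PIPE_FORMAT_B8G8R8A8_UNORM
+ * and vk_format_to_pipe(VK_FORMAT_D24_UNORM_S8_UINT) == PIPE_FORMAT_Z24_UNORM_S8_UINT.
+ * Formats past VK_FORMAT_ASTC_12x12_SRGB_BLOCK, and any hole left zero in the
+ * table, resolve to PIPE_FORMAT_NONE, i.e. "unsupported".
+ */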
+
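+/* Derives the Vulkan format feature flags from what the gallium screen
+ * reports: depth/stencil-capable formats are handled first, then compressed
+ * formats (sampling/blit only), then everything else picks up buffer,
+ * sampling, render-target and blit features as the screen allows.
+ */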
+static void
+val_physical_device_get_format_properties(struct val_physical_device *physical_device,
+                                          VkFormat format,
+                                          VkFormatProperties *out_properties)
+{
+   enum pipe_format pformat = vk_format_to_pipe(format);
+   unsigned features = 0, buffer_features = 0;
+   if (pformat == PIPE_FORMAT_NONE) {
+     out_properties->linearTilingFeatures = 0;
+     out_properties->optimalTilingFeatures = 0;
+     out_properties->bufferFeatures = 0;
+     return;
+   }
+
+   if (physical_device->pscreen->is_format_supported(physical_device->pscreen, pformat,
+                                                     PIPE_TEXTURE_2D, 0, 0, PIPE_BIND_DEPTH_STENCIL)) {
+      out_properties->linearTilingFeatures = 0;
+      out_properties->optimalTilingFeatures = VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
+         VK_FORMAT_FEATURE_TRANSFER_SRC_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT |
+         VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT;
+
+      out_properties->bufferFeatures = 0;
+      return;
+   }
+
+   if (util_format_is_compressed(pformat)) {
+      if (physical_device->pscreen->is_format_supported(physical_device->pscreen, pformat,
+                                                        PIPE_TEXTURE_2D, 0, 0, PIPE_BIND_SAMPLER_VIEW)) {
+         features |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT;
+         features |= VK_FORMAT_FEATURE_BLIT_SRC_BIT;
+         features |= VK_FORMAT_FEATURE_TRANSFER_SRC_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT;
+         features |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
+      }
+      out_properties->linearTilingFeatures = features;
+      out_properties->optimalTilingFeatures = features;
+      out_properties->bufferFeatures = buffer_features;
+      return;
+   }
+   buffer_features = VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT;
+   if (!util_format_is_srgb(pformat) &&
+       physical_device->pscreen->is_format_supported(physical_device->pscreen, pformat,
+                                                     PIPE_BUFFER, 0, 0, PIPE_BIND_VERTEX_BUFFER)) {
+      buffer_features |= VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT;
+   }
+
+   if (physical_device->pscreen->is_format_supported(physical_device->pscreen, pformat,
+                                                     PIPE_BUFFER, 0, 0, PIPE_BIND_CONSTANT_BUFFER)) {
+      buffer_features |= VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT;
+   }
+
+   if (physical_device->pscreen->is_format_supported(physical_device->pscreen, pformat,
+                                                     PIPE_TEXTURE_2D, 0, 0, PIPE_BIND_SAMPLER_VIEW)) {
+      features |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT;
+      features |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
+   }
+
+   if (physical_device->pscreen->is_format_supported(physical_device->pscreen, pformat,
+                                                     PIPE_TEXTURE_2D, 0, 0, PIPE_BIND_RENDER_TARGET)) {
+      features |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT;
+      features |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT;
+      features |= VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT;
+   }
+
+   if (pformat == PIPE_FORMAT_R32_UINT || pformat == PIPE_FORMAT_R32_SINT) {
+      features |= VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT;
+      buffer_features |= VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT;
+   }
+
+   if (pformat == PIPE_FORMAT_R11G11B10_FLOAT || pformat == PIPE_FORMAT_R9G9B9E5_FLOAT)
+     features |= VK_FORMAT_FEATURE_BLIT_SRC_BIT;
+
+   features |= VK_FORMAT_FEATURE_TRANSFER_SRC_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT;
+   if (pformat == PIPE_FORMAT_B5G6R5_UNORM)
+     features |= VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT;
+   if ((pformat != PIPE_FORMAT_R9G9B9E5_FLOAT) && util_format_get_nr_components(pformat) != 3) {
+      features |= VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT;
+   }
+   out_properties->linearTilingFeatures = features;
+   out_properties->optimalTilingFeatures = features;
+   out_properties->bufferFeatures = buffer_features;
+   return;
+}
+
+void val_GetPhysicalDeviceFormatProperties(
+    VkPhysicalDevice                            physicalDevice,
+    VkFormat                                    format,
+    VkFormatProperties*                         pFormatProperties)
+{
+   VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
+
+   val_physical_device_get_format_properties(physical_device,
+                                             format,
+                                             pFormatProperties);
+}
+
+void val_GetPhysicalDeviceFormatProperties2(
+        VkPhysicalDevice                            physicalDevice,
+        VkFormat                                    format,
+        VkFormatProperties2*                        pFormatProperties)
+{
+   VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
+
+   val_physical_device_get_format_properties(physical_device,
+                                             format,
+                                             &pFormatProperties->formatProperties);
+}
+
+static VkResult val_get_image_format_properties(struct val_physical_device *physical_device,
+                                                const VkPhysicalDeviceImageFormatInfo2 *info,
+                                                VkImageFormatProperties *pImageFormatProperties)
+{
+   VkFormatProperties format_props;
+   VkFormatFeatureFlags format_feature_flags;
+   VkExtent3D maxExtent;
+   uint32_t maxMipLevels;
+   uint32_t maxArraySize;
+   VkSampleCountFlags sampleCounts = VK_SAMPLE_COUNT_1_BIT;
+   enum pipe_format pformat = vk_format_to_pipe(info->format);
+   val_physical_device_get_format_properties(physical_device, info->format,
+                                             &format_props);
+   if (info->tiling == VK_IMAGE_TILING_LINEAR) {
+      format_feature_flags = format_props.linearTilingFeatures;
+   } else if (info->tiling == VK_IMAGE_TILING_OPTIMAL) {
+      format_feature_flags = format_props.optimalTilingFeatures;
+   } else {
+      unreachable("bad VkImageTiling");
+   }
+
+   if (format_feature_flags == 0)
+      goto unsupported;
+
+   uint32_t max_2d_ext = physical_device->pscreen->get_param(physical_device->pscreen, PIPE_CAP_MAX_TEXTURE_2D_SIZE);
+   uint32_t max_layers = physical_device->pscreen->get_param(physical_device->pscreen, PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS);
+   switch (info->type) {
+   default:
+      unreachable("bad vkimage type\n");
+   case VK_IMAGE_TYPE_1D:
+      if (util_format_is_compressed(pformat))
+         goto unsupported;
+
+      maxExtent.width = max_2d_ext;
+      maxExtent.height = 1;
+      maxExtent.depth = 1;
+      maxMipLevels = util_logbase2(max_2d_ext);
+      maxArraySize = max_layers;
+      break;
+   case VK_IMAGE_TYPE_2D:
+      maxExtent.width = max_2d_ext;
+      maxExtent.height = max_2d_ext;
+      maxExtent.depth = 1;
+      maxMipLevels = util_logbase2(max_2d_ext);
+      maxArraySize = max_layers;
+      sampleCounts |= VK_SAMPLE_COUNT_4_BIT;
+      break;
+   case VK_IMAGE_TYPE_3D:
+      maxExtent.width = max_2d_ext;
+      maxExtent.height = max_2d_ext;
+      maxExtent.depth = (1 << physical_device->pscreen->get_param(physical_device->pscreen, PIPE_CAP_MAX_TEXTURE_3D_LEVELS));
+      maxMipLevels = util_logbase2(max_2d_ext);
+      maxArraySize = 1;
+      break;
+   }
+
+   if (info->usage & VK_IMAGE_USAGE_SAMPLED_BIT) {
+      if (!(format_feature_flags & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
+         goto unsupported;
+      }
+   }
+
+   if (info->usage & VK_IMAGE_USAGE_STORAGE_BIT) {
+      if (!(format_feature_flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
+         goto unsupported;
+      }
+   }
+
+   if (info->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) {
+      if (!(format_feature_flags & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
+         goto unsupported;
+      }
+   }
+
+   if (info->usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+      if (!(format_feature_flags & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
+         goto unsupported;
+      }
+   }
+
+   if (info->usage & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) {
+      if (!(format_feature_flags & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT)) {
+         goto unsupported;
+      }
+   }
+
+   if (info->usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) {
+      if (!(format_feature_flags & VK_FORMAT_FEATURE_TRANSFER_DST_BIT)) {
+         goto unsupported;
+      }
+   }
+
+   if (info->usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) {
+      if (!(format_feature_flags & (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT |
+                                    VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))) {
+         goto unsupported;
+      }
+   }
+
+   *pImageFormatProperties = (VkImageFormatProperties) {
+      .maxExtent = maxExtent,
+      .maxMipLevels = maxMipLevels,
+      .maxArrayLayers = maxArraySize,
+      .sampleCounts = sampleCounts,
+
+      /* FINISHME: Accurately calculate
+       * VkImageFormatProperties::maxResourceSize.
+       */
+      .maxResourceSize = UINT32_MAX,
+   };
+   return VK_SUCCESS;
+ unsupported:
+   *pImageFormatProperties = (VkImageFormatProperties) {
+      .maxExtent = { 0, 0, 0 },
+      .maxMipLevels = 0,
+      .maxArrayLayers = 0,
+      .sampleCounts = 0,
+      .maxResourceSize = 0,
+   };
+
+   return VK_ERROR_FORMAT_NOT_SUPPORTED;
+}
+
+VkResult val_GetPhysicalDeviceImageFormatProperties(
+    VkPhysicalDevice                            physicalDevice,
+    VkFormat                                    format,
+    VkImageType                                 type,
+    VkImageTiling                               tiling,
+    VkImageUsageFlags                           usage,
+    VkImageCreateFlags                          createFlags,
+    VkImageFormatProperties*                    pImageFormatProperties)
+{
+   VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
+
+   const VkPhysicalDeviceImageFormatInfo2 info = {
+      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2,
+      .pNext = NULL,
+      .format = format,
+      .type = type,
+      .tiling = tiling,
+      .usage = usage,
+      .flags = createFlags,
+   };
+
+   return val_get_image_format_properties(physical_device, &info,
+                                           pImageFormatProperties);
+}
+
+VkResult val_GetPhysicalDeviceImageFormatProperties2(
+        VkPhysicalDevice                            physicalDevice,
+        const VkPhysicalDeviceImageFormatInfo2     *base_info,
+        VkImageFormatProperties2                   *base_props)
+{
+   VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
+   VkResult result;
+   result = val_get_image_format_properties(physical_device, base_info,
+                                             &base_props->imageFormatProperties);
+   if (result != VK_SUCCESS)
+      return result;
+
+   return VK_SUCCESS;
+}
+
+void val_GetPhysicalDeviceSparseImageFormatProperties(
+    VkPhysicalDevice                            physicalDevice,
+    VkFormat                                    format,
+    VkImageType                                 type,
+    uint32_t                                    samples,
+    VkImageUsageFlags                           usage,
+    VkImageTiling                               tiling,
+    uint32_t*                                   pNumProperties,
+    VkSparseImageFormatProperties*              pProperties)
+{
+   /* Sparse images are not yet supported. */
+   *pNumProperties = 0;
+}
+
+void val_GetPhysicalDeviceSparseImageFormatProperties2(
+        VkPhysicalDevice                            physicalDevice,
+        const VkPhysicalDeviceSparseImageFormatInfo2 *pFormatInfo,
+        uint32_t                                   *pPropertyCount,
+        VkSparseImageFormatProperties2             *pProperties)
+{
+   /* Sparse images are not yet supported. */
+   *pPropertyCount = 0;
+}
diff --git a/src/gallium/frontends/vallium/val_image.c b/src/gallium/frontends/vallium/val_image.c
new file mode 100644 (file)
index 0000000..29a493e
--- /dev/null
@@ -0,0 +1,288 @@
+/*
+ * Copyright © 2019 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "val_private.h"
+#include "util/format/u_format.h"
+#include "util/u_inlines.h"
+#include "pipe/p_state.h"
+
+VkResult
+val_image_create(VkDevice _device,
+                 const struct val_image_create_info *create_info,
+                 const VkAllocationCallbacks* alloc,
+                 VkImage *pImage)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   const VkImageCreateInfo *pCreateInfo = create_info->vk_info;
+   struct val_image *image;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO);
+
+   image = vk_zalloc2(&device->alloc, alloc, sizeof(*image), 8,
+                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (image == NULL)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vk_object_base_init(&device->vk, &image->base, VK_OBJECT_TYPE_IMAGE);
+   image->alignment = 16;
+   image->type = pCreateInfo->imageType;
+   {
+      struct pipe_resource template;
+
+      memset(&template, 0, sizeof(template));
+
+      template.screen = device->pscreen;
+      switch (pCreateInfo->imageType) {
+      case VK_IMAGE_TYPE_1D:
+         template.target = pCreateInfo->arrayLayers > 1 ? PIPE_TEXTURE_1D_ARRAY : PIPE_TEXTURE_1D;
+         break;
+      default:
+      case VK_IMAGE_TYPE_2D:
+         template.target = pCreateInfo->arrayLayers > 1 ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
+         if (pCreateInfo->flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)
+            template.target = pCreateInfo->arrayLayers == 6 ? PIPE_TEXTURE_CUBE : PIPE_TEXTURE_CUBE_ARRAY;
+         break;
+      case VK_IMAGE_TYPE_3D:
+         template.target = PIPE_TEXTURE_3D;
+         break;
+      }
+
+      template.format = vk_format_to_pipe(pCreateInfo->format);
+      template.width0 = pCreateInfo->extent.width;
+      template.height0 = pCreateInfo->extent.height;
+      template.depth0 = pCreateInfo->extent.depth;
+      template.array_size = pCreateInfo->arrayLayers;
+      template.last_level = pCreateInfo->mipLevels - 1;
+      template.nr_samples = pCreateInfo->samples;
+      template.nr_storage_samples = pCreateInfo->samples;
+      if (create_info->bind_flags)
+         template.bind = create_info->bind_flags;
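+      /* The resource is created without backing storage; the size comes back
+       * via image->size and actual memory is expected to be attached later
+       * when the image is bound to a VkDeviceMemory allocation.
+       */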
+      image->bo = device->pscreen->resource_create_unbacked(device->pscreen,
+                                                            &template,
+                                                            &image->size);
+   }
+   *pImage = val_image_to_handle(image);
+
+   return VK_SUCCESS;
+}
+
+VkResult
+val_CreateImage(VkDevice device,
+                const VkImageCreateInfo *pCreateInfo,
+                const VkAllocationCallbacks *pAllocator,
+                VkImage *pImage)
+{
+   return val_image_create(device,
+      &(struct val_image_create_info) {
+         .vk_info = pCreateInfo,
+         .bind_flags = 0,
+      },
+      pAllocator,
+      pImage);
+}
+
+void
+val_DestroyImage(VkDevice _device, VkImage _image,
+                 const VkAllocationCallbacks *pAllocator)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_image, image, _image);
+
+   if (!_image)
+     return;
+   pipe_resource_reference(&image->bo, NULL);
+   vk_object_base_finish(&image->base);
+   vk_free2(&device->alloc, pAllocator, image);
+}
+
+VkResult
+val_CreateImageView(VkDevice _device,
+                    const VkImageViewCreateInfo *pCreateInfo,
+                    const VkAllocationCallbacks *pAllocator,
+                    VkImageView *pView)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_image, image, pCreateInfo->image);
+   struct val_image_view *view;
+
+   view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
+                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (view == NULL)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vk_object_base_init(&device->vk, &view->base,
+                       VK_OBJECT_TYPE_IMAGE_VIEW);
+   view->view_type = pCreateInfo->viewType;
+   view->format = pCreateInfo->format;
+   view->pformat = vk_format_to_pipe(pCreateInfo->format);
+   view->components = pCreateInfo->components;
+   view->subresourceRange = pCreateInfo->subresourceRange;
+   view->image = image;
+   view->surface = NULL;
+   *pView = val_image_view_to_handle(view);
+
+   return VK_SUCCESS;
+}
+
+void
+val_DestroyImageView(VkDevice _device, VkImageView _iview,
+                     const VkAllocationCallbacks *pAllocator)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_image_view, iview, _iview);
+
+   if (!_iview)
+     return;
+
+   pipe_surface_reference(&iview->surface, NULL);
+   vk_object_base_finish(&iview->base);
+   vk_free2(&device->alloc, pAllocator, iview);
+}
+
+void val_GetImageSubresourceLayout(
+    VkDevice                                    _device,
+    VkImage                                     _image,
+    const VkImageSubresource*                   pSubresource,
+    VkSubresourceLayout*                        pLayout)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_image, image, _image);
+   uint32_t stride, offset;
+   device->pscreen->resource_get_info(device->pscreen,
+                                      image->bo,
+                                      &stride, &offset);
+   pLayout->offset = offset;
+   pLayout->rowPitch = stride;
+   pLayout->arrayPitch = 0;
+   pLayout->size = image->size;
+   switch (pSubresource->aspectMask) {
+   case VK_IMAGE_ASPECT_COLOR_BIT:
+      break;
+   case VK_IMAGE_ASPECT_DEPTH_BIT:
+      break;
+   case VK_IMAGE_ASPECT_STENCIL_BIT:
+      break;
+   default:
+      assert(!"Invalid image aspect");
+   }
+}
+
+VkResult val_CreateBuffer(
+    VkDevice                                    _device,
+    const VkBufferCreateInfo*                   pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkBuffer*                                   pBuffer)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   struct val_buffer *buffer;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
+
+   buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
+                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (buffer == NULL)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vk_object_base_init(&device->vk, &buffer->base, VK_OBJECT_TYPE_BUFFER);
+   buffer->size = pCreateInfo->size;
+   buffer->usage = pCreateInfo->usage;
+   buffer->offset = 0;
+
+   {
+      struct pipe_resource template;
+      memset(&template, 0, sizeof(struct pipe_resource));
+      template.screen = device->pscreen;
+      template.target = PIPE_BUFFER;
+      template.format = PIPE_FORMAT_R8_UNORM;
+      template.width0 = buffer->size;
+      template.height0 = 1;
+      template.depth0 = 1;
+      template.array_size = 1;
+      template.flags = PIPE_RESOURCE_FLAG_DONT_OVER_ALLOCATE;
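+      /* Like images, buffers are created unbacked and sized exactly
+       * (PIPE_RESOURCE_FLAG_DONT_OVER_ALLOCATE); device memory is expected
+       * to be attached when the buffer is bound.
+       */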
+      buffer->bo = device->pscreen->resource_create_unbacked(device->pscreen,
+                                                             &template,
+                                                             &buffer->total_size);
+      if (!buffer->bo) {
+         vk_free2(&device->alloc, pAllocator, buffer);
+         return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+      }
+   }
+   *pBuffer = val_buffer_to_handle(buffer);
+
+   return VK_SUCCESS;
+}
+
+void val_DestroyBuffer(
+    VkDevice                                    _device,
+    VkBuffer                                    _buffer,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_buffer, buffer, _buffer);
+
+   if (!_buffer)
+     return;
+
+   pipe_resource_reference(&buffer->bo, NULL);
+   vk_object_base_finish(&buffer->base);
+   vk_free2(&device->alloc, pAllocator, buffer);
+}
+
+VkResult
+val_CreateBufferView(VkDevice _device,
+                     const VkBufferViewCreateInfo *pCreateInfo,
+                     const VkAllocationCallbacks *pAllocator,
+                     VkBufferView *pView)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_buffer, buffer, pCreateInfo->buffer);
+   struct val_buffer_view *view;
+   view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
+                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (!view)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vk_object_base_init(&device->vk, &view->base,
+                       VK_OBJECT_TYPE_BUFFER_VIEW);
+   view->buffer = buffer;
+   view->format = pCreateInfo->format;
+   view->pformat = vk_format_to_pipe(pCreateInfo->format);
+   view->offset = pCreateInfo->offset;
+   view->range = pCreateInfo->range;
+   *pView = val_buffer_view_to_handle(view);
+
+   return VK_SUCCESS;
+}
+
+void
+val_DestroyBufferView(VkDevice _device, VkBufferView bufferView,
+                      const VkAllocationCallbacks *pAllocator)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_buffer_view, view, bufferView);
+
+   if (!bufferView)
+     return;
+   vk_object_base_finish(&view->base);
+   vk_free2(&device->alloc, pAllocator, view);
+}
diff --git a/src/gallium/frontends/vallium/val_lower_input_attachments.c b/src/gallium/frontends/vallium/val_lower_input_attachments.c
new file mode 100644 (file)
index 0000000..a8b8af7
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "nir.h"
+#include "nir_builder.h"
+#include "val_lower_vulkan_resource.h"
+
+static nir_ssa_def *
+load_frag_coord(nir_builder *b)
+{
+   nir_variable *pos =
+      nir_find_variable_with_location(b->shader, nir_var_shader_in,
+                                      VARYING_SLOT_POS);
+   if (pos == NULL) {
+      pos = nir_variable_create(b->shader, nir_var_shader_in,
+                                glsl_vec4_type(), NULL);
+      pos->data.location = VARYING_SLOT_POS;
+   }
+   /**
+    * From Vulkan spec:
+    *   "The OriginLowerLeft execution mode must not be used; fragment entry
+    *    points must declare OriginUpperLeft."
+    *
+    * So at this point origin_upper_left should be true
+    */
+   assert(b->shader->info.fs.origin_upper_left == true);
+
+   return nir_load_var(b, pos);
+}
+
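+/* Rewrites a subpass input-attachment load: the implicit "current fragment"
+ * position is made explicit by adding the attachment offset to the integer
+ * fragment coordinate and packing it, together with a zero layer, into the
+ * coordinate source of the image_deref_load.
+ */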
+static bool
+try_lower_input_load(nir_function_impl *impl, nir_intrinsic_instr *load,
+                     bool use_fragcoord_sysval)
+{
+   nir_deref_instr *deref = nir_src_as_deref(load->src[0]);
+   assert(glsl_type_is_image(deref->type));
+
+   enum glsl_sampler_dim image_dim = glsl_get_sampler_dim(deref->type);
+   if (image_dim != GLSL_SAMPLER_DIM_SUBPASS &&
+       image_dim != GLSL_SAMPLER_DIM_SUBPASS_MS)
+      return false;
+
+   nir_builder b;
+   nir_builder_init(&b, impl);
+   b.cursor = nir_before_instr(&load->instr);
+
+   nir_ssa_def *frag_coord = use_fragcoord_sysval ? nir_load_frag_coord(&b)
+                                                  : load_frag_coord(&b);
+   frag_coord = nir_f2i32(&b, frag_coord);
+   nir_ssa_def *offset = nir_ssa_for_src(&b, load->src[1], 2);
+   nir_ssa_def *pos = nir_iadd(&b, frag_coord, offset);
+
+   nir_ssa_def *layer = nir_imm_int(&b, 0);
+   nir_ssa_def *coord =
+      nir_vec4(&b, nir_channel(&b, pos, 0), nir_channel(&b, pos, 1), layer, layer);
+
+   nir_instr_rewrite_src(&load->instr, &load->src[1], nir_src_for_ssa(coord));
+
+   return true;
+}
+
+bool
+val_lower_input_attachments(nir_shader *shader, bool use_fragcoord_sysval)
+{
+   assert(shader->info.stage == MESA_SHADER_FRAGMENT);
+   bool progress = false;
+
+   nir_foreach_function(function, shader) {
+      if (!function->impl)
+         continue;
+
+      nir_foreach_block(block, function->impl) {
+         nir_foreach_instr_safe(instr, block) {
+            if (instr->type != nir_instr_type_intrinsic)
+               continue;
+
+            nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
+
+            if (load->intrinsic != nir_intrinsic_image_deref_load)
+               continue;
+
+            progress |= try_lower_input_load(function->impl, load,
+                                             use_fragcoord_sysval);
+         }
+      }
+   }
+
+   return progress;
+}
diff --git a/src/gallium/frontends/vallium/val_lower_vulkan_resource.c b/src/gallium/frontends/vallium/val_lower_vulkan_resource.c
new file mode 100644 (file)
index 0000000..801fd64
--- /dev/null
@@ -0,0 +1,176 @@
+/*
+ * Copyright © 2019 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "val_private.h"
+#include "nir.h"
+#include "nir_builder.h"
+#include "val_lower_vulkan_resource.h"
+
+static bool
+lower_vulkan_resource_index(const nir_instr *instr, const void *data_cb)
+{
+   if (instr->type == nir_instr_type_intrinsic) {
+      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+      if (intrin->intrinsic == nir_intrinsic_vulkan_resource_index)
+         return true;
+   }
+   if (instr->type == nir_instr_type_tex) {
+      return true;
+   }
+   return false;
+}
+
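+/* Flattens a (descriptor set, binding) pair into the flat per-stage index
+ * space used on the gallium side: constant buffers for UBOs, shader buffers
+ * for SSBOs.  Earlier sets contribute their per-stage counts, then the
+ * binding's own index is added (UBOs get an extra +1, which presumably keeps
+ * constant buffer 0 free for other uses such as push constants).
+ */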
+static nir_ssa_def *lower_vri_intrin_vri(struct nir_builder *b,
+                                           nir_instr *instr, void *data_cb)
+{
+   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+   unsigned desc_set_idx = nir_intrinsic_desc_set(intrin);
+   unsigned binding_idx = nir_intrinsic_binding(intrin);
+   struct val_pipeline_layout *layout = data_cb;
+   struct val_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx];
+   int value = 0;
+   bool is_ubo = (binding->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
+                  binding->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
+
+   for (unsigned s = 0; s < desc_set_idx; s++) {
+     if (is_ubo)
+       value += layout->set[s].layout->stage[b->shader->info.stage].const_buffer_count;
+     else
+       value += layout->set[s].layout->stage[b->shader->info.stage].shader_buffer_count;
+   }
+   if (is_ubo)
+     value += binding->stage[b->shader->info.stage].const_buffer_index + 1;
+   else
+     value += binding->stage[b->shader->info.stage].shader_buffer_index;
+   if (nir_src_is_const(intrin->src[0])) {
+      value += nir_src_comp_as_int(intrin->src[0], 0);
+      return nir_imm_int(b, value);
+   } else
+      return nir_iadd_imm(b, intrin->src[0].ssa, value);
+}
+
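+/* Same flattening for texture/sampler derefs: the deref source is removed
+ * and replaced by a flat sampler_index/texture_index computed from the
+ * pipeline layout; a non-constant array index is turned into a *_offset
+ * source instead of being folded into the base index.
+ */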
+static int lower_vri_instr_tex_deref(nir_tex_instr *tex,
+                                     nir_tex_src_type deref_src_type,
+                                     gl_shader_stage stage,
+                                     struct val_pipeline_layout *layout)
+{
+   int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
+
+   if (deref_src_idx < 0)
+      return -1;
+
+   nir_deref_instr *deref_instr = nir_src_as_deref(tex->src[deref_src_idx].src);
+   nir_variable *var = nir_deref_instr_get_variable(deref_instr);
+   unsigned desc_set_idx = var->data.descriptor_set;
+   unsigned binding_idx = var->data.binding;
+   int value = 0;
+   struct val_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx];
+   nir_tex_instr_remove_src(tex, deref_src_idx);
+   for (unsigned s = 0; s < desc_set_idx; s++) {
+      if (deref_src_type == nir_tex_src_sampler_deref)
+         value += layout->set[s].layout->stage[stage].sampler_count;
+      else
+         value += layout->set[s].layout->stage[stage].sampler_view_count;
+   }
+   if (deref_src_type == nir_tex_src_sampler_deref)
+      value += binding->stage[stage].sampler_index;
+   else
+      value += binding->stage[stage].sampler_view_index;
+
+   if (deref_instr->deref_type == nir_deref_type_array) {
+      if (nir_src_is_const(deref_instr->arr.index))
+         value += nir_src_as_uint(deref_instr->arr.index);
+      else {
+         if (deref_src_type == nir_tex_src_sampler_deref)
+            nir_tex_instr_add_src(tex, nir_tex_src_sampler_offset, deref_instr->arr.index);
+         else
+            nir_tex_instr_add_src(tex, nir_tex_src_texture_offset, deref_instr->arr.index);
+      }
+   }
+   if (deref_src_type == nir_tex_src_sampler_deref)
+      tex->sampler_index = value;
+   else
+      tex->texture_index = value;
+   return value;
+}
+
+static void lower_vri_instr_tex(struct nir_builder *b,
+                                nir_tex_instr *tex, void *data_cb)
+{
+   struct val_pipeline_layout *layout = data_cb;
+   int tex_value = 0;
+
+   lower_vri_instr_tex_deref(tex, nir_tex_src_sampler_deref, b->shader->info.stage, layout);
+   tex_value = lower_vri_instr_tex_deref(tex, nir_tex_src_texture_deref, b->shader->info.stage, layout);
+   if (tex_value >= 0)
+      b->shader->info.textures_used |= (1 << tex_value);
+}
+
+static nir_ssa_def *lower_vri_instr(struct nir_builder *b,
+                                    nir_instr *instr, void *data_cb)
+{
+   if (instr->type == nir_instr_type_intrinsic) {
+      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+      if (intrin->intrinsic == nir_intrinsic_vulkan_resource_index)
+         return lower_vri_intrin_vri(b, instr, data_cb);
+   }
+   if (instr->type == nir_instr_type_tex)
+      lower_vri_instr_tex(b, nir_instr_as_tex(instr), data_cb);
+   return NULL;
+}
+
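+/* Entry point: lowers vulkan_resource_index intrinsics and texture derefs
+ * via the callbacks above, then rewrites the remaining sampler/image uniform
+ * variables so that data.descriptor_set is always 0 and data.binding holds
+ * the flat per-stage index.
+ */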
+void val_lower_pipeline_layout(const struct val_device *device,
+                               struct val_pipeline_layout *layout,
+                               nir_shader *shader)
+{
+   nir_shader_lower_instructions(shader, lower_vulkan_resource_index, lower_vri_instr, layout);
+   nir_foreach_uniform_variable(var, shader) {
+      const struct glsl_type *type = var->type;
+      enum glsl_base_type base_type =
+         glsl_get_base_type(glsl_without_array(type));
+      unsigned desc_set_idx = var->data.descriptor_set;
+      unsigned binding_idx = var->data.binding;
+      struct val_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx];
+      int value = 0;
+      var->data.descriptor_set = 0;
+      if (base_type == GLSL_TYPE_SAMPLER) {
+         if (binding->type == VK_DESCRIPTOR_TYPE_SAMPLER) {
+            for (unsigned s = 0; s < desc_set_idx; s++)
+               value += layout->set[s].layout->stage[shader->info.stage].sampler_count;
+            value += binding->stage[shader->info.stage].sampler_index;
+         } else {
+            for (unsigned s = 0; s < desc_set_idx; s++)
+               value += layout->set[s].layout->stage[shader->info.stage].sampler_view_count;
+            value += binding->stage[shader->info.stage].sampler_view_index;
+         }
+         var->data.binding = value;
+      }
+      if (base_type == GLSL_TYPE_IMAGE) {
+         var->data.descriptor_set = 0;
+         for (unsigned s = 0; s < desc_set_idx; s++)
+           value += layout->set[s].layout->stage[shader->info.stage].image_count;
+         value += binding->stage[shader->info.stage].image_index;
+         var->data.binding = value;
+      }
+   }
+}
diff --git a/src/gallium/frontends/vallium/val_lower_vulkan_resource.h b/src/gallium/frontends/vallium/val_lower_vulkan_resource.h
new file mode 100644 (file)
index 0000000..d325d7c
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright © 2019 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef VAL_LOWER_VULKAN_RESOURCE_H
+#define VAL_LOWER_VULKAN_RESOURCE_H
+
+struct val_pipeline_layout;
+struct val_device;
+void val_lower_pipeline_layout(const struct val_device *device,
+                               struct val_pipeline_layout *layout,
+                               nir_shader *shader);
+
+bool
+val_lower_input_attachments(nir_shader *shader, bool use_fragcoord_sysval);
+
+#endif
diff --git a/src/gallium/frontends/vallium/val_pass.c b/src/gallium/frontends/vallium/val_pass.c
new file mode 100644 (file)
index 0000000..a2f9cb4
--- /dev/null
@@ -0,0 +1,290 @@
+/*
+ * Copyright © 2019 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "val_private.h"
+
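+/* Pre-computes per-pass metadata at create time: the first/last subpass that
+ * touches each attachment, whether a subpass has color or resolve
+ * attachments, its maximum sample count, and whether an attachment is read
+ * as an input while also being written as color or depth/stencil in the same
+ * subpass (in_render_loop).
+ */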
+static void
+val_render_pass_compile(struct val_render_pass *pass)
+{
+   for (uint32_t i = 0; i < pass->subpass_count; i++) {
+      struct val_subpass *subpass = &pass->subpasses[i];
+
+      for (uint32_t j = 0; j < subpass->attachment_count; j++) {
+         struct val_subpass_attachment *subpass_att =
+            &subpass->attachments[j];
+         if (subpass_att->attachment == VK_ATTACHMENT_UNUSED)
+            continue;
+
+         struct val_render_pass_attachment *pass_att =
+            &pass->attachments[subpass_att->attachment];
+
+         pass_att->first_subpass_idx = UINT32_MAX;
+      }
+   }
+
+   for (uint32_t i = 0; i < pass->subpass_count; i++) {
+      struct val_subpass *subpass = &pass->subpasses[i];
+      uint32_t color_sample_count = 1, depth_sample_count = 1;
+
+      /* We don't allow depth_stencil_attachment to be non-NULL and
+       * be VK_ATTACHMENT_UNUSED.  This way something can just check
+       * for NULL and be guaranteed that they have a valid
+       * attachment.
+       */
+      if (subpass->depth_stencil_attachment &&
+          subpass->depth_stencil_attachment->attachment == VK_ATTACHMENT_UNUSED)
+         subpass->depth_stencil_attachment = NULL;
+
+      if (subpass->ds_resolve_attachment &&
+          subpass->ds_resolve_attachment->attachment == VK_ATTACHMENT_UNUSED)
+         subpass->ds_resolve_attachment = NULL;
+
+      for (uint32_t j = 0; j < subpass->attachment_count; j++) {
+         struct val_subpass_attachment *subpass_att =
+            &subpass->attachments[j];
+         if (subpass_att->attachment == VK_ATTACHMENT_UNUSED)
+            continue;
+
+         struct val_render_pass_attachment *pass_att =
+            &pass->attachments[subpass_att->attachment];
+
+         if (i < pass_att->first_subpass_idx)
+            pass_att->first_subpass_idx = i;
+         pass_att->last_subpass_idx = i;
+      }
+
+      subpass->has_color_att = false;
+      for (uint32_t j = 0; j < subpass->color_count; j++) {
+         struct val_subpass_attachment *subpass_att =
+            &subpass->color_attachments[j];
+         if (subpass_att->attachment == VK_ATTACHMENT_UNUSED)
+            continue;
+
+         subpass->has_color_att = true;
+
+         struct val_render_pass_attachment *pass_att =
+            &pass->attachments[subpass_att->attachment];
+
+         color_sample_count = pass_att->samples;
+      }
+
+      if (subpass->depth_stencil_attachment) {
+         const uint32_t a =
+            subpass->depth_stencil_attachment->attachment;
+         struct val_render_pass_attachment *pass_att =
+            &pass->attachments[a];
+         depth_sample_count = pass_att->samples;
+      }
+
+      subpass->max_sample_count = MAX2(color_sample_count,
+                                       depth_sample_count);
+
+      /* We have to handle resolve attachments specially */
+      subpass->has_color_resolve = false;
+      if (subpass->resolve_attachments) {
+         for (uint32_t j = 0; j < subpass->color_count; j++) {
+            struct val_subpass_attachment *resolve_att =
+               &subpass->resolve_attachments[j];
+
+            if (resolve_att->attachment == VK_ATTACHMENT_UNUSED)
+               continue;
+
+            subpass->has_color_resolve = true;
+         }
+      }
+
+      for (uint32_t j = 0; j < subpass->input_count; ++j) {
+         if (subpass->input_attachments[j].attachment == VK_ATTACHMENT_UNUSED)
+            continue;
+
+         for (uint32_t k = 0; k < subpass->color_count; ++k) {
+            if (subpass->color_attachments[k].attachment == subpass->input_attachments[j].attachment) {
+               subpass->input_attachments[j].in_render_loop = true;
+               subpass->color_attachments[k].in_render_loop = true;
+            }
+         }
+
+         if (subpass->depth_stencil_attachment &&
+             subpass->depth_stencil_attachment->attachment == subpass->input_attachments[j].attachment) {
+            subpass->input_attachments[j].in_render_loop = true;
+            subpass->depth_stencil_attachment->in_render_loop = true;
+         }
+      }
+   }
+}
+
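+/* Number of entries a subpass consumes in the shared attachment array built
+ * in val_CreateRenderPass below, e.g. 1 input + 2 color + 2 resolve +
+ * 1 depth/stencil attachment = 6 consecutive val_subpass_attachment slots.
+ */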
+static unsigned
+val_num_subpass_attachments(const VkSubpassDescription *desc)
+{
+   return desc->inputAttachmentCount +
+      desc->colorAttachmentCount +
+      (desc->pResolveAttachments ? desc->colorAttachmentCount : 0) +
+      (desc->pDepthStencilAttachment != NULL);
+}
+
+VkResult val_CreateRenderPass(
+   VkDevice                                    _device,
+   const VkRenderPassCreateInfo*               pCreateInfo,
+   const VkAllocationCallbacks*                pAllocator,
+   VkRenderPass*                               pRenderPass)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   struct val_render_pass *pass;
+   size_t size;
+   size_t attachments_offset;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);
+
+   size = sizeof(*pass);
+   size += pCreateInfo->subpassCount * sizeof(pass->subpasses[0]);
+   attachments_offset = size;
+   size += pCreateInfo->attachmentCount * sizeof(pass->attachments[0]);
+
+   pass = vk_alloc2(&device->alloc, pAllocator, size, 8,
+                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (pass == NULL)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   /* Clear the subpasses along with the parent pass. This is required because
+    * each array member of val_subpass must be a valid pointer if not NULL.
+    */
+   memset(pass, 0, size);
+
+   vk_object_base_init(&device->vk, &pass->base,
+                       VK_OBJECT_TYPE_RENDER_PASS);
+   pass->attachment_count = pCreateInfo->attachmentCount;
+   pass->subpass_count = pCreateInfo->subpassCount;
+   pass->attachments = (void *) pass + attachments_offset;
+
+   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
+      struct val_render_pass_attachment *att = &pass->attachments[i];
+
+      att->format = pCreateInfo->pAttachments[i].format;
+      att->samples = pCreateInfo->pAttachments[i].samples;
+      att->load_op = pCreateInfo->pAttachments[i].loadOp;
+      att->stencil_load_op = pCreateInfo->pAttachments[i].stencilLoadOp;
+      att->final_layout = pCreateInfo->pAttachments[i].finalLayout;
+      att->first_subpass_idx = UINT32_MAX;
+   }
+
+   uint32_t subpass_attachment_count = 0;
+   for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
+      subpass_attachment_count += val_num_subpass_attachments(&pCreateInfo->pSubpasses[i]);
+   }
+
+   if (subpass_attachment_count) {
+      pass->subpass_attachments =
+         vk_alloc2(&device->alloc, pAllocator,
+                   subpass_attachment_count * sizeof(struct val_subpass_attachment), 8,
+                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+      if (pass->subpass_attachments == NULL) {
+         vk_free2(&device->alloc, pAllocator, pass);
+         return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+      }
+   } else
+      pass->subpass_attachments = NULL;
+
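+   /* 'p' walks the flat subpass_attachments array, handing each subpass a
+    * contiguous slice in the order inputs, colors, resolves, depth/stencil,
+    * matching the count returned by val_num_subpass_attachments().
+    */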
+   struct val_subpass_attachment *p = pass->subpass_attachments;
+   for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
+      const VkSubpassDescription *desc = &pCreateInfo->pSubpasses[i];
+      struct val_subpass *subpass = &pass->subpasses[i];
+
+      subpass->input_count = desc->inputAttachmentCount;
+      subpass->color_count = desc->colorAttachmentCount;
+      subpass->attachment_count = val_num_subpass_attachments(desc);
+      subpass->attachments = p;
+
+      if (desc->inputAttachmentCount > 0) {
+         subpass->input_attachments = p;
+         p += desc->inputAttachmentCount;
+
+         for (uint32_t j = 0; j < desc->inputAttachmentCount; j++) {
+            subpass->input_attachments[j] = (struct val_subpass_attachment) {
+               .attachment = desc->pInputAttachments[j].attachment,
+               .layout = desc->pInputAttachments[j].layout,
+            };
+         }
+      }
+
+      if (desc->colorAttachmentCount > 0) {
+         subpass->color_attachments = p;
+         p += desc->colorAttachmentCount;
+
+         for (uint32_t j = 0; j < desc->colorAttachmentCount; j++) {
+            subpass->color_attachments[j] = (struct val_subpass_attachment) {
+               .attachment = desc->pColorAttachments[j].attachment,
+               .layout = desc->pColorAttachments[j].layout,
+            };
+         }
+      }
+
+      if (desc->pResolveAttachments) {
+         subpass->resolve_attachments = p;
+         p += desc->colorAttachmentCount;
+
+         for (uint32_t j = 0; j < desc->colorAttachmentCount; j++) {
+            subpass->resolve_attachments[j] = (struct val_subpass_attachment) {
+               .attachment = desc->pResolveAttachments[j].attachment,
+               .layout = desc->pResolveAttachments[j].layout,
+            };
+         }
+      }
+
+      if (desc->pDepthStencilAttachment) {
+         subpass->depth_stencil_attachment = p++;
+
+         *subpass->depth_stencil_attachment = (struct val_subpass_attachment) {
+            .attachment = desc->pDepthStencilAttachment->attachment,
+            .layout = desc->pDepthStencilAttachment->layout,
+         };
+      }
+   }
+
+   val_render_pass_compile(pass);
+   *pRenderPass = val_render_pass_to_handle(pass);
+
+   return VK_SUCCESS;
+}
+
+void val_DestroyRenderPass(
+   VkDevice                                    _device,
+   VkRenderPass                                _pass,
+   const VkAllocationCallbacks*                pAllocator)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_render_pass, pass, _pass);
+
+   if (!_pass)
+      return;
+   vk_object_base_finish(&pass->base);
+   vk_free2(&device->alloc, pAllocator, pass->subpass_attachments);
+   vk_free2(&device->alloc, pAllocator, pass);
+}
+
+void val_GetRenderAreaGranularity(
+   VkDevice                                    device,
+   VkRenderPass                                renderPass,
+   VkExtent2D*                                 pGranularity)
+{
+   *pGranularity = (VkExtent2D) { 1, 1 };
+}
diff --git a/src/gallium/frontends/vallium/val_pipeline.c b/src/gallium/frontends/vallium/val_pipeline.c
new file mode 100644 (file)
index 0000000..136ac93
--- /dev/null
@@ -0,0 +1,943 @@
+/*
+ * Copyright © 2019 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "val_private.h"
+
+#include "glsl_types.h"
+#include "spirv/nir_spirv.h"
+#include "nir/nir_builder.h"
+#include "val_lower_vulkan_resource.h"
+#include "pipe/p_state.h"
+#include "pipe/p_context.h"
+
+#define SPIR_V_MAGIC_NUMBER 0x07230203
+
+VkResult val_CreateShaderModule(
+   VkDevice                                    _device,
+   const VkShaderModuleCreateInfo*             pCreateInfo,
+   const VkAllocationCallbacks*                pAllocator,
+   VkShaderModule*                             pShaderModule)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   struct val_shader_module *module;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
+   assert(pCreateInfo->flags == 0);
+
+   module = vk_alloc2(&device->alloc, pAllocator,
+                      sizeof(*module) + pCreateInfo->codeSize, 8,
+                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (module == NULL)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vk_object_base_init(&device->vk, &module->base,
+                       VK_OBJECT_TYPE_SHADER_MODULE);
+   module->size = pCreateInfo->codeSize;
+   memcpy(module->data, pCreateInfo->pCode, module->size);
+
+   *pShaderModule = val_shader_module_to_handle(module);
+
+   return VK_SUCCESS;
+}
+
+void val_DestroyShaderModule(
+   VkDevice                                    _device,
+   VkShaderModule                              _module,
+   const VkAllocationCallbacks*                pAllocator)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_shader_module, module, _module);
+
+   if (!_module)
+      return;
+   vk_object_base_finish(&module->base);
+   vk_free2(&device->alloc, pAllocator, module);
+}
+
+void val_DestroyPipeline(
+   VkDevice                                    _device,
+   VkPipeline                                  _pipeline,
+   const VkAllocationCallbacks*                pAllocator)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_pipeline, pipeline, _pipeline);
+
+   if (!_pipeline)
+      return;
+
+   if (pipeline->shader_cso[PIPE_SHADER_VERTEX])
+      device->queue.ctx->delete_vs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_VERTEX]);
+   if (pipeline->shader_cso[PIPE_SHADER_FRAGMENT])
+      device->queue.ctx->delete_fs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_FRAGMENT]);
+   if (pipeline->shader_cso[PIPE_SHADER_GEOMETRY])
+      device->queue.ctx->delete_gs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_GEOMETRY]);
+   if (pipeline->shader_cso[PIPE_SHADER_TESS_CTRL])
+      device->queue.ctx->delete_tcs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_TESS_CTRL]);
+   if (pipeline->shader_cso[PIPE_SHADER_TESS_EVAL])
+      device->queue.ctx->delete_tes_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_TESS_EVAL]);
+   if (pipeline->shader_cso[PIPE_SHADER_COMPUTE])
+      device->queue.ctx->delete_compute_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_COMPUTE]);
+
+   if (!pipeline->is_compute_pipeline) {
+      for (unsigned i = 0; i < pipeline->graphics_create_info.stageCount; i++)
+         if (pipeline->graphics_create_info.pStages[i].pSpecializationInfo)
+            free((void *)pipeline->graphics_create_info.pStages[i].pSpecializationInfo);
+
+      free((void *)pipeline->graphics_create_info.pStages);
+      free((void *)pipeline->graphics_create_info.pVertexInputState->pVertexBindingDescriptions);
+      free((void *)pipeline->graphics_create_info.pVertexInputState->pVertexAttributeDescriptions);
+      free((void *)pipeline->graphics_create_info.pVertexInputState);
+      free((void *)pipeline->graphics_create_info.pInputAssemblyState);
+      if (pipeline->graphics_create_info.pViewportState) {
+         free((void *)pipeline->graphics_create_info.pViewportState->pViewports);
+         free((void *)pipeline->graphics_create_info.pViewportState->pScissors);
+      }
+      free((void *)pipeline->graphics_create_info.pViewportState);
+
+      if (pipeline->graphics_create_info.pTessellationState)
+         free((void *)pipeline->graphics_create_info.pTessellationState);
+      free((void *)pipeline->graphics_create_info.pRasterizationState);
+      free((void *)pipeline->graphics_create_info.pMultisampleState);
+      free((void *)pipeline->graphics_create_info.pDepthStencilState);
+      if (pipeline->graphics_create_info.pColorBlendState)
+         free((void *)pipeline->graphics_create_info.pColorBlendState->pAttachments);
+      free((void *)pipeline->graphics_create_info.pColorBlendState);
+      if (pipeline->graphics_create_info.pDynamicState)
+         free((void *)pipeline->graphics_create_info.pDynamicState->pDynamicStates);
+      free((void *)pipeline->graphics_create_info.pDynamicState);
+   } else {
+      if (pipeline->compute_create_info.stage.pSpecializationInfo)
+         free((void *)pipeline->compute_create_info.stage.pSpecializationInfo);
+   }
+   vk_object_base_finish(&pipeline->base);
+   vk_free2(&device->alloc, pAllocator, pipeline);
+}
+
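+/* Deep-copy helpers for the pipeline create info.  The structs handed to
+ * vkCreate*Pipelines are only guaranteed to stay valid for the duration of
+ * the call, so every pointed-to array is cloned into malloc'd memory here;
+ * the matching frees live in val_DestroyPipeline above.
+ */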
+static VkResult
+deep_copy_shader_stage(struct VkPipelineShaderStageCreateInfo *dst,
+                       const struct VkPipelineShaderStageCreateInfo *src)
+{
+   dst->sType = src->sType;
+   dst->pNext = NULL;
+   dst->flags = src->flags;
+   dst->stage = src->stage;
+   dst->module = src->module;
+   dst->pName = src->pName;
+   dst->pSpecializationInfo = NULL;
+   if (src->pSpecializationInfo) {
+      const VkSpecializationInfo *src_spec = src->pSpecializationInfo;
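+      /* One allocation holds the VkSpecializationInfo header, then the map
+       * entries, then the raw data blob, so a single free() releases it all. */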
+      VkSpecializationInfo *dst_spec = malloc(sizeof(VkSpecializationInfo) +
+                                              src_spec->mapEntryCount * sizeof(VkSpecializationMapEntry) +
+                                              src_spec->dataSize);
+      if (!dst_spec)
+         return VK_ERROR_OUT_OF_HOST_MEMORY;
+      VkSpecializationMapEntry *maps = (VkSpecializationMapEntry *)(dst_spec + 1);
+      dst_spec->pMapEntries = maps;
+      void *pdata = (void *)(dst_spec->pMapEntries + src_spec->mapEntryCount);
+      dst_spec->pData = pdata;
+
+      dst_spec->mapEntryCount = src_spec->mapEntryCount;
+      dst_spec->dataSize = src_spec->dataSize;
+      memcpy(pdata, src_spec->pData, src->pSpecializationInfo->dataSize);
+      memcpy(maps, src_spec->pMapEntries, src_spec->mapEntryCount * sizeof(VkSpecializationMapEntry));
+      dst->pSpecializationInfo = dst_spec;
+   }
+   return VK_SUCCESS;
+}
+
+static VkResult
+deep_copy_vertex_input_state(struct VkPipelineVertexInputStateCreateInfo *dst,
+                             const struct VkPipelineVertexInputStateCreateInfo *src)
+{
+   int i;
+   VkVertexInputBindingDescription *dst_binding_descriptions;
+   VkVertexInputAttributeDescription *dst_attrib_descriptions;
+   dst->sType = src->sType;
+   dst->pNext = NULL;
+   dst->flags = src->flags;
+   dst->vertexBindingDescriptionCount = src->vertexBindingDescriptionCount;
+
+   dst_binding_descriptions = malloc(src->vertexBindingDescriptionCount * sizeof(VkVertexInputBindingDescription));
+   if (!dst_binding_descriptions)
+      return VK_ERROR_OUT_OF_HOST_MEMORY;
+   for (i = 0; i < dst->vertexBindingDescriptionCount; i++) {
+      memcpy(&dst_binding_descriptions[i], &src->pVertexBindingDescriptions[i], sizeof(VkVertexInputBindingDescription));
+   }
+   dst->pVertexBindingDescriptions = dst_binding_descriptions;
+
+   dst->vertexAttributeDescriptionCount = src->vertexAttributeDescriptionCount;
+
+   dst_attrib_descriptions = malloc(src->vertexAttributeDescriptionCount * sizeof(VkVertexInputAttributeDescription));
+   if (!dst_attrib_descriptions)
+      return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+   for (i = 0; i < dst->vertexAttributeDescriptionCount; i++) {
+      memcpy(&dst_attrib_descriptions[i], &src->pVertexAttributeDescriptions[i], sizeof(VkVertexInputAttributeDescription));
+   }
+   dst->pVertexAttributeDescriptions = dst_attrib_descriptions;
+   return VK_SUCCESS;
+}
+
+static VkResult
+deep_copy_viewport_state(VkPipelineViewportStateCreateInfo *dst,
+                         const VkPipelineViewportStateCreateInfo *src)
+{
+   int i;
+   VkViewport *viewports;
+   VkRect2D *scissors;
+   dst->sType = src->sType;
+   dst->pNext = src->pNext;
+
+   dst->flags = src->flags;
+
+   if (src->pViewports) {
+      viewports = malloc(src->viewportCount * sizeof(VkViewport));
+      if (!viewports)
+         return VK_ERROR_OUT_OF_HOST_MEMORY;
+      for (i = 0; i < src->viewportCount; i++)
+         memcpy(&viewports[i], &src->pViewports[i], sizeof(VkViewport));
+      dst->pViewports = viewports;
+   } else
+      dst->pViewports = NULL;
+   dst->viewportCount = src->viewportCount;
+
+   if (src->pScissors) {
+      scissors = malloc(src->scissorCount * sizeof(VkRect2D));
+      if (!scissors)
+         return VK_ERROR_OUT_OF_HOST_MEMORY;
+      for (i = 0; i < src->scissorCount; i++)
+         memcpy(&scissors[i], &src->pScissors[i], sizeof(VkRect2D));
+      dst->pScissors = scissors;
+   } else
+      dst->pScissors = NULL;
+   dst->scissorCount = src->scissorCount;
+
+   return VK_SUCCESS;
+}
+
+static VkResult
+deep_copy_color_blend_state(VkPipelineColorBlendStateCreateInfo *dst,
+                            const VkPipelineColorBlendStateCreateInfo *src)
+{
+   VkPipelineColorBlendAttachmentState *attachments;
+   dst->sType = src->sType;
+   dst->pNext = src->pNext;
+   dst->flags = src->flags;
+   dst->logicOpEnable = src->logicOpEnable;
+   dst->logicOp = src->logicOp;
+
+   attachments = malloc(src->attachmentCount * sizeof(VkPipelineColorBlendAttachmentState));
+   if (!attachments)
+      return VK_ERROR_OUT_OF_HOST_MEMORY;
+   memcpy(attachments, src->pAttachments, src->attachmentCount * sizeof(VkPipelineColorBlendAttachmentState));
+   dst->attachmentCount = src->attachmentCount;
+   dst->pAttachments = attachments;
+
+   memcpy(&dst->blendConstants, &src->blendConstants, sizeof(float) * 4);
+
+   return VK_SUCCESS;
+}
+
+static VkResult
+deep_copy_dynamic_state(VkPipelineDynamicStateCreateInfo *dst,
+                        const VkPipelineDynamicStateCreateInfo *src)
+{
+   VkDynamicState *dynamic_states;
+   dst->sType = src->sType;
+   dst->pNext = src->pNext;
+   dst->flags = src->flags;
+
+   dynamic_states = malloc(src->dynamicStateCount * sizeof(VkDynamicState));
+   if (!dynamic_states)
+      return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+   memcpy(dynamic_states, src->pDynamicStates, src->dynamicStateCount * sizeof(VkDynamicState));
+   dst->dynamicStateCount = src->dynamicStateCount;
+   dst->pDynamicStates = dynamic_states;
+   return VK_SUCCESS;
+}
+
+static VkResult
+deep_copy_graphics_create_info(VkGraphicsPipelineCreateInfo *dst,
+                               const VkGraphicsPipelineCreateInfo *src)
+{
+   int i;
+   VkResult result;
+   VkPipelineShaderStageCreateInfo *stages;
+   VkPipelineVertexInputStateCreateInfo *vertex_input;
+   VkPipelineInputAssemblyStateCreateInfo *input_assembly;
+   VkPipelineRasterizationStateCreateInfo* raster_state;
+
+   dst->sType = src->sType;
+   dst->pNext = NULL;
+   dst->flags = src->flags;
+   dst->layout = src->layout;
+   dst->renderPass = src->renderPass;
+   dst->subpass = src->subpass;
+   dst->basePipelineHandle = src->basePipelineHandle;
+   dst->basePipelineIndex = src->basePipelineIndex;
+
+   /* pStages */
+   dst->stageCount = src->stageCount;
+   stages = malloc(dst->stageCount * sizeof(VkPipelineShaderStageCreateInfo));
+   if (!stages)
+      return VK_ERROR_OUT_OF_HOST_MEMORY;
+   for (i = 0; i < dst->stageCount; i++) {
+      result = deep_copy_shader_stage(&stages[i], &src->pStages[i]);
+      if (result != VK_SUCCESS)
+         return result;
+   }
+   dst->pStages = stages;
+
+   /* pVertexInputState */
+   vertex_input = malloc(sizeof(VkPipelineVertexInputStateCreateInfo));
+   if (!vertex_input)
+      return VK_ERROR_OUT_OF_HOST_MEMORY;
+   result = deep_copy_vertex_input_state(vertex_input,
+                                         src->pVertexInputState);
+   if (result != VK_SUCCESS)
+      return result;
+   dst->pVertexInputState = vertex_input;
+
+   /* pInputAssemblyState */
+   input_assembly = malloc(sizeof(VkPipelineInputAssemblyStateCreateInfo));
+   if (!input_assembly)
+      return VK_ERROR_OUT_OF_HOST_MEMORY;
+   memcpy(input_assembly, src->pInputAssemblyState, sizeof(VkPipelineInputAssemblyStateCreateInfo));
+   dst->pInputAssemblyState = input_assembly;
+
+   /* pTessellationState */
+   if (src->pTessellationState) {
+      VkPipelineTessellationStateCreateInfo *tess_state;
+      tess_state = malloc(sizeof(VkPipelineTessellationStateCreateInfo));
+      if (!tess_state)
+         return VK_ERROR_OUT_OF_HOST_MEMORY;
+      memcpy(tess_state, src->pTessellationState, sizeof(VkPipelineTessellationStateCreateInfo));
+      dst->pTessellationState = tess_state;
+   }
+
+   /* pViewportState */
+   if (src->pViewportState) {
+      VkPipelineViewportStateCreateInfo *viewport_state;
+      viewport_state = malloc(sizeof(VkPipelineViewportStateCreateInfo));
+      if (!viewport_state)
+         return VK_ERROR_OUT_OF_HOST_MEMORY;
+      deep_copy_viewport_state(viewport_state, src->pViewportState);
+      dst->pViewportState = viewport_state;
+   } else
+      dst->pViewportState = NULL;
+
+   /* pRasterizationState */
+   raster_state = malloc(sizeof(VkPipelineRasterizationStateCreateInfo));
+   if (!raster_state)
+      return VK_ERROR_OUT_OF_HOST_MEMORY;
+   memcpy(raster_state, src->pRasterizationState, sizeof(VkPipelineRasterizationStateCreateInfo));
+   dst->pRasterizationState = raster_state;
+
+   /* pMultisampleState */
+   if (src->pMultisampleState) {
+      VkPipelineMultisampleStateCreateInfo*   ms_state;
+      ms_state = malloc(sizeof(VkPipelineMultisampleStateCreateInfo) + sizeof(VkSampleMask));
+      if (!ms_state)
+         return VK_ERROR_OUT_OF_HOST_MEMORY;
+      /* does samplemask need deep copy? */
+      memcpy(ms_state, src->pMultisampleState, sizeof(VkPipelineMultisampleStateCreateInfo));
+      if (src->pMultisampleState->pSampleMask) {
+         VkSampleMask *sample_mask = (VkSampleMask *)(ms_state + 1);
+         sample_mask[0] = src->pMultisampleState->pSampleMask[0];
+         ms_state->pSampleMask = sample_mask;
+      }
+      dst->pMultisampleState = ms_state;
+   } else
+      dst->pMultisampleState = NULL;
+
+   /* pDepthStencilState */
+   if (src->pDepthStencilState) {
+      VkPipelineDepthStencilStateCreateInfo*  ds_state;
+
+      ds_state = malloc(sizeof(VkPipelineDepthStencilStateCreateInfo));
+      if (!ds_state)
+         return VK_ERROR_OUT_OF_HOST_MEMORY;
+      memcpy(ds_state, src->pDepthStencilState, sizeof(VkPipelineDepthStencilStateCreateInfo));
+      dst->pDepthStencilState = ds_state;
+   } else
+      dst->pDepthStencilState = NULL;
+
+   /* pColorBlendState */
+   if (src->pColorBlendState) {
+      VkPipelineColorBlendStateCreateInfo*    cb_state;
+
+      cb_state = malloc(sizeof(VkPipelineColorBlendStateCreateInfo));
+      if (!cb_state)
+         return VK_ERROR_OUT_OF_HOST_MEMORY;
+      deep_copy_color_blend_state(cb_state, src->pColorBlendState);
+      dst->pColorBlendState = cb_state;
+   } else
+      dst->pColorBlendState = NULL;
+
+   if (src->pDynamicState) {
+      VkPipelineDynamicStateCreateInfo*       dyn_state;
+
+      /* pDynamicState */
+      dyn_state = malloc(sizeof(VkPipelineDynamicStateCreateInfo));
+      if (!dyn_state)
+         return VK_ERROR_OUT_OF_HOST_MEMORY;
+      deep_copy_dynamic_state(dyn_state, src->pDynamicState);
+      dst->pDynamicState = dyn_state;
+   } else
+      dst->pDynamicState = NULL;
+
+   return VK_SUCCESS;
+}
+
+static VkResult
+deep_copy_compute_create_info(VkComputePipelineCreateInfo *dst,
+                              const VkComputePipelineCreateInfo *src)
+{
+   VkResult result;
+   dst->sType = src->sType;
+   dst->pNext = NULL;
+   dst->flags = src->flags;
+   dst->layout = src->layout;
+   dst->basePipelineHandle = src->basePipelineHandle;
+   dst->basePipelineIndex = src->basePipelineIndex;
+
+   result = deep_copy_shader_stage(&dst->stage, &src->stage);
+   if (result != VK_SUCCESS)
+      return result;
+   return VK_SUCCESS;
+}
+
+static inline unsigned
+st_shader_stage_to_ptarget(gl_shader_stage stage)
+{
+   switch (stage) {
+   case MESA_SHADER_VERTEX:
+      return PIPE_SHADER_VERTEX;
+   case MESA_SHADER_FRAGMENT:
+      return PIPE_SHADER_FRAGMENT;
+   case MESA_SHADER_GEOMETRY:
+      return PIPE_SHADER_GEOMETRY;
+   case MESA_SHADER_TESS_CTRL:
+      return PIPE_SHADER_TESS_CTRL;
+   case MESA_SHADER_TESS_EVAL:
+      return PIPE_SHADER_TESS_EVAL;
+   case MESA_SHADER_COMPUTE:
+      return PIPE_SHADER_COMPUTE;
+   default:
+      break;
+   }
+
+   assert(!"should not be reached");
+   return PIPE_SHADER_VERTEX;
+}
+
+static void
+shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
+{
+   assert(glsl_type_is_vector_or_scalar(type));
+
+   uint32_t comp_size = glsl_type_is_boolean(type)
+      ? 4 : glsl_get_bit_size(type) / 8;
+   unsigned length = glsl_get_vector_elements(type);
+   *size = comp_size * length;
+   *align = comp_size;
+}
+
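+/* Runs a NIR pass on 'nir', accumulates its result into the local 'progress'
+ * flag and evaluates to whether this particular pass made progress, so the
+ * optimization loop below can iterate to a fixed point.
+ */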
+#define OPT(pass, ...) ({                                       \
+         bool this_progress = false;                            \
+         NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);     \
+         if (this_progress)                                     \
+            progress = true;                                    \
+         this_progress;                                         \
+      })
+
+static void
+val_shader_compile_to_ir(struct val_pipeline *pipeline,
+                         struct val_shader_module *module,
+                         const char *entrypoint_name,
+                         gl_shader_stage stage,
+                         const VkSpecializationInfo *spec_info)
+{
+   nir_shader *nir;
+   const nir_shader_compiler_options *drv_options = pipeline->device->pscreen->get_compiler_options(pipeline->device->pscreen, PIPE_SHADER_IR_NIR, st_shader_stage_to_ptarget(stage));
+   bool progress;
+   uint32_t *spirv = (uint32_t *) module->data;
+   assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
+   assert(module->size % 4 == 0);
+
+   uint32_t num_spec_entries = 0;
+   struct nir_spirv_specialization *spec_entries = NULL;
+   if (spec_info && spec_info->mapEntryCount > 0) {
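+      /* Each VkSpecializationMapEntry selects entry.size bytes at entry.offset
+       * inside pData; the size picks which union member (u64/u32/u16/u8) of
+       * the nir_spirv_specialization receives the value, keyed by constantID.
+       */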
+      num_spec_entries = spec_info->mapEntryCount;
+      spec_entries = calloc(num_spec_entries, sizeof(*spec_entries));
+      for (uint32_t i = 0; i < num_spec_entries; i++) {
+         VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
+         const void *data =
+            spec_info->pData + entry.offset;
+         assert((const void *)(data + entry.size) <=
+                spec_info->pData + spec_info->dataSize);
+
+         spec_entries[i].id = entry.constantID;
+         switch (entry.size) {
+         case 8:
+            spec_entries[i].value.u64 = *(const uint64_t *)data;
+            break;
+         case 4:
+            spec_entries[i].value.u32 = *(const uint32_t *)data;
+            break;
+         case 2:
+            spec_entries[i].value.u16 = *(const uint16_t *)data;
+            break;
+         case 1:
+            spec_entries[i].value.u8 = *(const uint8_t *)data;
+            break;
+         default:
+            assert(!"Invalid spec constant size");
+            break;
+         }
+      }
+   }
+   struct val_device *pdevice = pipeline->device;
+   const struct spirv_to_nir_options spirv_options = {
+      .environment = NIR_SPIRV_VULKAN,
+      .lower_ubo_ssbo_access_to_offsets = true,
+      .caps = {
+         .float64 = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_DOUBLES) == 1),
+         .int16 = true,
+         .int64 = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_INT64) == 1),
+         .tessellation = true,
+         .image_ms_array = true,
+         .storage_image_ms = true,
+         .geometry_streams = true,
+         .storage_16bit = true,
+         .variable_pointers = true,
+      },
+      .ubo_addr_format = nir_address_format_32bit_index_offset,
+      .ssbo_addr_format = nir_address_format_32bit_index_offset,
+      .phys_ssbo_addr_format = nir_address_format_64bit_global,
+      .push_const_addr_format = nir_address_format_logical,
+      .shared_addr_format = nir_address_format_32bit_offset,
+      .frag_coord_is_sysval = false,
+   };
+
+   nir = spirv_to_nir(spirv, module->size / 4,
+                      spec_entries, num_spec_entries,
+                      stage, entrypoint_name, &spirv_options, drv_options);
+
+   nir_validate_shader(nir, NULL);
+
+   free(spec_entries);
+
+   NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
+   NIR_PASS_V(nir, nir_lower_returns);
+   NIR_PASS_V(nir, nir_inline_functions);
+   NIR_PASS_V(nir, nir_copy_prop);
+   NIR_PASS_V(nir, nir_opt_deref);
+
+   /* Pick off the single entrypoint that we want */
+   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
+      if (!func->is_entrypoint)
+         exec_node_remove(&func->node);
+   }
+   assert(exec_list_length(&nir->functions) == 1);
+
+   NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);
+   NIR_PASS_V(nir, nir_split_var_copies);
+   NIR_PASS_V(nir, nir_split_per_member_structs);
+
+   NIR_PASS_V(nir, nir_remove_dead_variables,
+              nir_var_shader_in | nir_var_shader_out | nir_var_system_value, NULL);
+
+   if (stage == MESA_SHADER_FRAGMENT)
+      val_lower_input_attachments(nir, false);
+   NIR_PASS_V(nir, nir_lower_system_values);
+
+   NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
+   nir_remove_dead_variables(nir, nir_var_uniform, NULL);
+
+   val_lower_pipeline_layout(pipeline->device, pipeline->layout, nir);
+
+   NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
+   NIR_PASS_V(nir, nir_split_var_copies);
+   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
+
+   if (nir->info.stage == MESA_SHADER_COMPUTE) {
+      NIR_PASS_V(nir, nir_lower_vars_to_explicit_types, nir_var_mem_shared, shared_var_info);
+      NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_shared, nir_address_format_32bit_offset);
+   }
+
+   NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_shader_temp, NULL);
+
+   if (nir->info.stage == MESA_SHADER_VERTEX ||
+       nir->info.stage == MESA_SHADER_GEOMETRY) {
+      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
+   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
+      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, true);
+   }
+
+   do {
+      progress = false;
+
+      progress |= OPT(nir_lower_flrp, 32|64, true, false);
+      progress |= OPT(nir_split_array_vars, nir_var_function_temp);
+      progress |= OPT(nir_shrink_vec_array_vars, nir_var_function_temp);
+      progress |= OPT(nir_opt_deref);
+      progress |= OPT(nir_lower_vars_to_ssa);
+
+      progress |= nir_copy_prop(nir);
+      progress |= nir_opt_dce(nir);
+      progress |= nir_opt_dead_cf(nir);
+      progress |= nir_opt_cse(nir);
+      progress |= nir_opt_algebraic(nir);
+      progress |= nir_opt_constant_folding(nir);
+      progress |= nir_opt_undef(nir);
+
+      progress |= nir_opt_deref(nir);
+      progress |= nir_lower_alu_to_scalar(nir, NULL, NULL);
+   } while (progress);
+
+   nir_lower_var_copies(nir);
+   nir_remove_dead_variables(nir, nir_var_function_temp, NULL);
+
+   nir_validate_shader(nir, NULL);
+   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
+
+   if (nir->info.stage != MESA_SHADER_VERTEX)
+      nir_assign_io_var_locations(nir, nir_var_shader_in, &nir->num_inputs, nir->info.stage);
+   else {
+      nir->num_inputs = util_last_bit64(nir->info.inputs_read);
+      nir_foreach_shader_in_variable(var, nir) {
+         var->data.driver_location = var->data.location - VERT_ATTRIB_GENERIC0;
+      }
+   }
+   nir_assign_io_var_locations(nir, nir_var_shader_out, &nir->num_outputs,
+                               nir->info.stage);
+   pipeline->pipeline_nir[stage] = nir;
+}
+
+static void fill_shader_prog(struct pipe_shader_state *state, gl_shader_stage stage, struct val_pipeline *pipeline)
+{
+   state->type = PIPE_SHADER_IR_NIR;
+   state->ir.nir = pipeline->pipeline_nir[stage];
+}
+
+static void
+merge_tess_info(struct shader_info *tes_info,
+                const struct shader_info *tcs_info)
+{
+   /* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
+    *
+    *    "PointMode. Controls generation of points rather than triangles
+    *     or lines. This functionality defaults to disabled, and is
+    *     enabled if either shader stage includes the execution mode.
+    *
+    * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
+    * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
+    * and OutputVertices, it says:
+    *
+    *    "One mode must be set in at least one of the tessellation
+    *     shader stages."
+    *
+    * So, the fields can be set in either the TCS or TES, but they must
+    * agree if set in both.  Our backend looks at TES, so bitwise-or in
+    * the values from the TCS.
+    */
+   assert(tcs_info->tess.tcs_vertices_out == 0 ||
+          tes_info->tess.tcs_vertices_out == 0 ||
+          tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
+   tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;
+
+   assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
+          tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
+          tcs_info->tess.spacing == tes_info->tess.spacing);
+   tes_info->tess.spacing |= tcs_info->tess.spacing;
+
+   assert(tcs_info->tess.primitive_mode == 0 ||
+          tes_info->tess.primitive_mode == 0 ||
+          tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
+   tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
+   tes_info->tess.ccw |= tcs_info->tess.ccw;
+   tes_info->tess.point_mode |= tcs_info->tess.point_mode;
+}
+
+static gl_shader_stage
+val_shader_stage(VkShaderStageFlagBits stage)
+{
+   switch (stage) {
+   case VK_SHADER_STAGE_VERTEX_BIT:
+      return MESA_SHADER_VERTEX;
+   case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
+      return MESA_SHADER_TESS_CTRL;
+   case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
+      return MESA_SHADER_TESS_EVAL;
+   case VK_SHADER_STAGE_GEOMETRY_BIT:
+      return MESA_SHADER_GEOMETRY;
+   case VK_SHADER_STAGE_FRAGMENT_BIT:
+      return MESA_SHADER_FRAGMENT;
+   case VK_SHADER_STAGE_COMPUTE_BIT:
+      return MESA_SHADER_COMPUTE;
+   default:
+      unreachable("invalid VkShaderStageFlagBits");
+      return MESA_SHADER_NONE;
+   }
+}
+
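+/* Finalizes the stage's NIR for the screen and immediately turns it into a
+ * gallium CSO on the queue's pipe_context; the handle is stashed in
+ * shader_cso[] and deleted again in val_DestroyPipeline.
+ */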
+static VkResult
+val_pipeline_compile(struct val_pipeline *pipeline,
+                     gl_shader_stage stage)
+{
+   struct val_device *device = pipeline->device;
+   device->physical_device->pscreen->finalize_nir(device->physical_device->pscreen, pipeline->pipeline_nir[stage], true);
+   if (stage == MESA_SHADER_COMPUTE) {
+      struct pipe_compute_state shstate = {};
+      shstate.prog = (void *)pipeline->pipeline_nir[MESA_SHADER_COMPUTE];
+      shstate.ir_type = PIPE_SHADER_IR_NIR;
+      shstate.req_local_mem = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.cs.shared_size;
+      pipeline->shader_cso[PIPE_SHADER_COMPUTE] = device->queue.ctx->create_compute_state(device->queue.ctx, &shstate);
+   } else {
+      struct pipe_shader_state shstate = {};
+      fill_shader_prog(&shstate, stage, pipeline);
+      switch (stage) {
+      case MESA_SHADER_FRAGMENT:
+         pipeline->shader_cso[PIPE_SHADER_FRAGMENT] = device->queue.ctx->create_fs_state(device->queue.ctx, &shstate);
+         break;
+      case MESA_SHADER_VERTEX:
+         pipeline->shader_cso[PIPE_SHADER_VERTEX] = device->queue.ctx->create_vs_state(device->queue.ctx, &shstate);
+         break;
+      case MESA_SHADER_GEOMETRY:
+         pipeline->shader_cso[PIPE_SHADER_GEOMETRY] = device->queue.ctx->create_gs_state(device->queue.ctx, &shstate);
+         break;
+      case MESA_SHADER_TESS_CTRL:
+         pipeline->shader_cso[PIPE_SHADER_TESS_CTRL] = device->queue.ctx->create_tcs_state(device->queue.ctx, &shstate);
+         break;
+      case MESA_SHADER_TESS_EVAL:
+         pipeline->shader_cso[PIPE_SHADER_TESS_EVAL] = device->queue.ctx->create_tes_state(device->queue.ctx, &shstate);
+         break;
+      default:
+         unreachable("illegal shader");
+         break;
+      }
+   }
+   return VK_SUCCESS;
+}
+
+static VkResult
+val_graphics_pipeline_init(struct val_pipeline *pipeline,
+                           struct val_device *device,
+                           struct val_pipeline_cache *cache,
+                           const VkGraphicsPipelineCreateInfo *pCreateInfo,
+                           const VkAllocationCallbacks *alloc)
+{
+   if (alloc == NULL)
+      alloc = &device->alloc;
+   pipeline->device = device;
+   pipeline->layout = val_pipeline_layout_from_handle(pCreateInfo->layout);
+   pipeline->force_min_sample = false;
+
+   /* recreate createinfo */
+   deep_copy_graphics_create_info(&pipeline->graphics_create_info, pCreateInfo);
+   pipeline->is_compute_pipeline = false;
+
+   for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
+      VAL_FROM_HANDLE(val_shader_module, module,
+                      pCreateInfo->pStages[i].module);
+      gl_shader_stage stage = val_shader_stage(pCreateInfo->pStages[i].stage);
+      val_shader_compile_to_ir(pipeline, module,
+                               pCreateInfo->pStages[i].pName,
+                               stage,
+                               pCreateInfo->pStages[i].pSpecializationInfo);
+   }
+
+   if (pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]) {
+      if (pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]->info.fs.uses_sample_qualifier ||
+          pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]->info.system_values_read & (SYSTEM_BIT_SAMPLE_ID |
+                                                                                   SYSTEM_BIT_SAMPLE_POS))
+         pipeline->force_min_sample = true;
+   }
+   if (pipeline->pipeline_nir[MESA_SHADER_TESS_CTRL]) {
+      nir_lower_patch_vertices(pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL], pipeline->pipeline_nir[MESA_SHADER_TESS_CTRL]->info.tess.tcs_vertices_out, NULL);
+      merge_tess_info(&pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info, &pipeline->pipeline_nir[MESA_SHADER_TESS_CTRL]->info);
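+      /* Presumably flipped because the SPIR-V/Vulkan and gallium (GL-style)
+       * tessellation models disagree on the meaning of the winding bit. */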
+      pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info.tess.ccw = !pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info.tess.ccw;
+   }
+
+   bool has_fragment_shader = false;
+   for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
+      gl_shader_stage stage = val_shader_stage(pCreateInfo->pStages[i].stage);
+      val_pipeline_compile(pipeline, stage);
+      if (stage == MESA_SHADER_FRAGMENT)
+         has_fragment_shader = true;
+   }
+
+   if (has_fragment_shader == false) {
+      /* create a dummy fragment shader for this pipeline. */
+      nir_builder b;
+
+      nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
+      b.shader->info.name = ralloc_strdup(b.shader, "dummy_frag");
+
+      pipeline->pipeline_nir[MESA_SHADER_FRAGMENT] = b.shader;
+      struct pipe_shader_state shstate = {};
+      shstate.type = PIPE_SHADER_IR_NIR;
+      shstate.ir.nir = pipeline->pipeline_nir[MESA_SHADER_FRAGMENT];
+      pipeline->shader_cso[PIPE_SHADER_FRAGMENT] = device->queue.ctx->create_fs_state(device->queue.ctx, &shstate);
+   }
+   return VK_SUCCESS;
+}
+
+static VkResult
+val_graphics_pipeline_create(
+   VkDevice _device,
+   VkPipelineCache _cache,
+   const VkGraphicsPipelineCreateInfo *pCreateInfo,
+   const VkAllocationCallbacks *pAllocator,
+   VkPipeline *pPipeline)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_pipeline_cache, cache, _cache);
+   struct val_pipeline *pipeline;
+   VkResult result;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
+
+   pipeline = vk_zalloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
+                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (pipeline == NULL)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vk_object_base_init(&device->vk, &pipeline->base,
+                       VK_OBJECT_TYPE_PIPELINE);
+   result = val_graphics_pipeline_init(pipeline, device, cache, pCreateInfo,
+                                       pAllocator);
+   if (result != VK_SUCCESS) {
+      vk_free2(&device->alloc, pAllocator, pipeline);
+      return result;
+   }
+
+   *pPipeline = val_pipeline_to_handle(pipeline);
+
+   return VK_SUCCESS;
+}
+
+VkResult val_CreateGraphicsPipelines(
+   VkDevice                                    _device,
+   VkPipelineCache                             pipelineCache,
+   uint32_t                                    count,
+   const VkGraphicsPipelineCreateInfo*         pCreateInfos,
+   const VkAllocationCallbacks*                pAllocator,
+   VkPipeline*                                 pPipelines)
+{
+   VkResult result = VK_SUCCESS;
+   unsigned i = 0;
+
+   for (; i < count; i++) {
+      VkResult r;
+      r = val_graphics_pipeline_create(_device,
+                                       pipelineCache,
+                                       &pCreateInfos[i],
+                                       pAllocator, &pPipelines[i]);
+      if (r != VK_SUCCESS) {
+         result = r;
+         pPipelines[i] = VK_NULL_HANDLE;
+      }
+   }
+
+   return result;
+}
+
+static VkResult
+val_compute_pipeline_init(struct val_pipeline *pipeline,
+                          struct val_device *device,
+                          struct val_pipeline_cache *cache,
+                          const VkComputePipelineCreateInfo *pCreateInfo,
+                          const VkAllocationCallbacks *alloc)
+{
+   VAL_FROM_HANDLE(val_shader_module, module,
+                   pCreateInfo->stage.module);
+   if (alloc == NULL)
+      alloc = &device->alloc;
+   pipeline->device = device;
+   pipeline->layout = val_pipeline_layout_from_handle(pCreateInfo->layout);
+   pipeline->force_min_sample = false;
+
+   deep_copy_compute_create_info(&pipeline->compute_create_info, pCreateInfo);
+   pipeline->is_compute_pipeline = true;
+
+   val_shader_compile_to_ir(pipeline, module,
+                            pCreateInfo->stage.pName,
+                            MESA_SHADER_COMPUTE,
+                            pCreateInfo->stage.pSpecializationInfo);
+   val_pipeline_compile(pipeline, MESA_SHADER_COMPUTE);
+   return VK_SUCCESS;
+}
+
+static VkResult
+val_compute_pipeline_create(
+   VkDevice _device,
+   VkPipelineCache _cache,
+   const VkComputePipelineCreateInfo *pCreateInfo,
+   const VkAllocationCallbacks *pAllocator,
+   VkPipeline *pPipeline)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_pipeline_cache, cache, _cache);
+   struct val_pipeline *pipeline;
+   VkResult result;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);
+
+   pipeline = vk_zalloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
+                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (pipeline == NULL)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vk_object_base_init(&device->vk, &pipeline->base,
+                       VK_OBJECT_TYPE_PIPELINE);
+   result = val_compute_pipeline_init(pipeline, device, cache, pCreateInfo,
+                                      pAllocator);
+   if (result != VK_SUCCESS) {
+      vk_free2(&device->alloc, pAllocator, pipeline);
+      return result;
+   }
+
+   *pPipeline = val_pipeline_to_handle(pipeline);
+
+   return VK_SUCCESS;
+}
+
+VkResult val_CreateComputePipelines(
+   VkDevice                                    _device,
+   VkPipelineCache                             pipelineCache,
+   uint32_t                                    count,
+   const VkComputePipelineCreateInfo*          pCreateInfos,
+   const VkAllocationCallbacks*                pAllocator,
+   VkPipeline*                                 pPipelines)
+{
+   VkResult result = VK_SUCCESS;
+   unsigned i = 0;
+
+   for (; i < count; i++) {
+      VkResult r;
+      r = val_compute_pipeline_create(_device,
+                                      pipelineCache,
+                                      &pCreateInfos[i],
+                                      pAllocator, &pPipelines[i]);
+      if (r != VK_SUCCESS) {
+         result = r;
+         pPipelines[i] = VK_NULL_HANDLE;
+      }
+   }
+
+   return result;
+}
diff --git a/src/gallium/frontends/vallium/val_pipeline_cache.c b/src/gallium/frontends/vallium/val_pipeline_cache.c
new file mode 100644 (file)
index 0000000..b0e519f
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+ * Copyright © 2019 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "val_private.h"
+
+VkResult val_CreatePipelineCache(
+    VkDevice                                    _device,
+    const VkPipelineCacheCreateInfo*            pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkPipelineCache*                            pPipelineCache)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   struct val_pipeline_cache *cache;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
+   assert(pCreateInfo->flags == 0);
+
+   cache = vk_alloc2(&device->alloc, pAllocator,
+                       sizeof(*cache), 8,
+                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (cache == NULL)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vk_object_base_init(&device->vk, &cache->base,
+                       VK_OBJECT_TYPE_PIPELINE_CACHE);
+   if (pAllocator)
+     cache->alloc = *pAllocator;
+   else
+     cache->alloc = device->alloc;
+
+   cache->device = device;
+   *pPipelineCache = val_pipeline_cache_to_handle(cache);
+
+   return VK_SUCCESS;
+}
+
+void val_DestroyPipelineCache(
+    VkDevice                                    _device,
+    VkPipelineCache                             _cache,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_pipeline_cache, cache, _cache);
+
+   if (!_cache)
+      return;
+//   val_pipeline_cache_finish(cache);
+   vk_object_base_finish(&cache->base);
+   vk_free2(&device->alloc, pAllocator, cache);
+}
+
+VkResult val_GetPipelineCacheData(
+        VkDevice                                    _device,
+        VkPipelineCache                             _cache,
+        size_t*                                     pDataSize,
+        void*                                       pData)
+{
+   VkResult result = VK_SUCCESS;
+   if (pData) {
+      if (*pDataSize < 32) {
+         *pDataSize = 0;
+         result = VK_INCOMPLETE;
+      } else {
+         uint32_t *hdr = (uint32_t *)pData;
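+         /* Minimal VkPipelineCacheHeaderVersionOne: header length, header
+          * version (ONE = 1), vendor ID, device ID, then the 16-byte UUID. */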
+         hdr[0] = 32;
+         hdr[1] = 1;
+         hdr[2] = VK_VENDOR_ID_MESA;
+         hdr[3] = 0;
+         val_device_get_cache_uuid(&hdr[4]);
+         *pDataSize = 32;
+      }
+   } else
+      *pDataSize = 32;
+   return result;
+}
+
+VkResult val_MergePipelineCaches(
+        VkDevice                                    _device,
+        VkPipelineCache                             destCache,
+        uint32_t                                    srcCacheCount,
+        const VkPipelineCache*                      pSrcCaches)
+{
+   return VK_SUCCESS;
+}
diff --git a/src/gallium/frontends/vallium/val_private.h b/src/gallium/frontends/vallium/val_private.h
new file mode 100644 (file)
index 0000000..7a41c9f
--- /dev/null
@@ -0,0 +1,989 @@
+/*
+ * Copyright © 2019 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <strings.h>
+#include <pthread.h>
+#include <assert.h>
+#include <stdint.h>
+
+#include "util/macros.h"
+#include "util/list.h"
+
+#include "compiler/shader_enums.h"
+#include "pipe/p_screen.h"
+#include "pipe/p_state.h"
+#include "nir.h"
+
+/* Pre-declarations needed for WSI entrypoints */
+struct wl_surface;
+struct wl_display;
+typedef struct xcb_connection_t xcb_connection_t;
+typedef uint32_t xcb_visualid_t;
+typedef uint32_t xcb_window_t;
+
+#define VK_PROTOTYPES
+#include <vulkan/vulkan.h>
+#include <vulkan/vk_icd.h>
+
+#include "val_extensions.h"
+#include "val_entrypoints.h"
+#include "vk_object.h"
+
+#include "wsi_common.h"
+
+#include <assert.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_SETS         8
+#define MAX_PUSH_CONSTANTS_SIZE 128
+
+#define val_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
+
+#define typed_memcpy(dest, src, count) ({ \
+   memcpy((dest), (src), (count) * sizeof(*(src))); \
+})
+
+int val_get_instance_entrypoint_index(const char *name);
+int val_get_device_entrypoint_index(const char *name);
+int val_get_physical_device_entrypoint_index(const char *name);
+
+const char *val_get_instance_entry_name(int index);
+const char *val_get_physical_device_entry_name(int index);
+const char *val_get_device_entry_name(int index);
+
+bool val_instance_entrypoint_is_enabled(int index, uint32_t core_version,
+                                         const struct val_instance_extension_table *instance);
+bool val_physical_device_entrypoint_is_enabled(int index, uint32_t core_version,
+                                                const struct val_instance_extension_table *instance);
+bool val_device_entrypoint_is_enabled(int index, uint32_t core_version,
+                                       const struct val_instance_extension_table *instance,
+                                       const struct val_device_extension_table *device);
+
+void *val_lookup_entrypoint(const char *name);
+
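+/* Dispatchable handles (instance, physical device, device, queue, command
+ * buffer) are plain pointers, while non-dispatchable handles are 64-bit
+ * values, hence the extra uintptr_t casts in the second macro. */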
+#define VAL_DEFINE_HANDLE_CASTS(__val_type, __VkType)                      \
+                                                                           \
+   static inline struct __val_type *                                       \
+   __val_type ## _from_handle(__VkType _handle)                            \
+   {                                                                       \
+      return (struct __val_type *) _handle;                                \
+   }                                                                       \
+                                                                           \
+   static inline __VkType                                                  \
+   __val_type ## _to_handle(struct __val_type *_obj)                       \
+   {                                                                       \
+      return (__VkType) _obj;                                              \
+   }
+
+#define VAL_DEFINE_NONDISP_HANDLE_CASTS(__val_type, __VkType)              \
+                                                                           \
+   static inline struct __val_type *                                       \
+   __val_type ## _from_handle(__VkType _handle)                            \
+   {                                                                       \
+      return (struct __val_type *)(uintptr_t) _handle;                     \
+   }                                                                       \
+                                                                           \
+   static inline __VkType                                                  \
+   __val_type ## _to_handle(struct __val_type *_obj)                       \
+   {                                                                       \
+      return (__VkType)(uintptr_t) _obj;                                   \
+   }
+
+#define VAL_FROM_HANDLE(__val_type, __name, __handle) \
+   struct __val_type *__name = __val_type ## _from_handle(__handle)
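+/* e.g. VAL_FROM_HANDLE(val_device, device, _device) declares a local
+ * "struct val_device *device" unwrapped from the _device handle. */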
+
+VAL_DEFINE_HANDLE_CASTS(val_cmd_buffer, VkCommandBuffer)
+VAL_DEFINE_HANDLE_CASTS(val_device, VkDevice)
+VAL_DEFINE_HANDLE_CASTS(val_instance, VkInstance)
+VAL_DEFINE_HANDLE_CASTS(val_physical_device, VkPhysicalDevice)
+VAL_DEFINE_HANDLE_CASTS(val_queue, VkQueue)
+
+VAL_DEFINE_NONDISP_HANDLE_CASTS(val_cmd_pool, VkCommandPool)
+VAL_DEFINE_NONDISP_HANDLE_CASTS(val_buffer, VkBuffer)
+VAL_DEFINE_NONDISP_HANDLE_CASTS(val_buffer_view, VkBufferView)
+VAL_DEFINE_NONDISP_HANDLE_CASTS(val_descriptor_pool, VkDescriptorPool)
+VAL_DEFINE_NONDISP_HANDLE_CASTS(val_descriptor_set, VkDescriptorSet)
+VAL_DEFINE_NONDISP_HANDLE_CASTS(val_descriptor_set_layout, VkDescriptorSetLayout)
+VAL_DEFINE_NONDISP_HANDLE_CASTS(val_device_memory, VkDeviceMemory)
+VAL_DEFINE_NONDISP_HANDLE_CASTS(val_event, VkEvent)
+VAL_DEFINE_NONDISP_HANDLE_CASTS(val_framebuffer, VkFramebuffer)
+VAL_DEFINE_NONDISP_HANDLE_CASTS(val_image, VkImage)
+VAL_DEFINE_NONDISP_HANDLE_CASTS(val_image_view, VkImageView);
+VAL_DEFINE_NONDISP_HANDLE_CASTS(val_pipeline_cache, VkPipelineCache)
+VAL_DEFINE_NONDISP_HANDLE_CASTS(val_pipeline, VkPipeline)
+VAL_DEFINE_NONDISP_HANDLE_CASTS(val_pipeline_layout, VkPipelineLayout)
+VAL_DEFINE_NONDISP_HANDLE_CASTS(val_query_pool, VkQueryPool)
+VAL_DEFINE_NONDISP_HANDLE_CASTS(val_render_pass, VkRenderPass)
+VAL_DEFINE_NONDISP_HANDLE_CASTS(val_sampler, VkSampler)
+VAL_DEFINE_NONDISP_HANDLE_CASTS(val_shader_module, VkShaderModule)
+VAL_DEFINE_NONDISP_HANDLE_CASTS(val_fence, VkFence);
+VAL_DEFINE_NONDISP_HANDLE_CASTS(val_semaphore, VkSemaphore);
+
+/* Whenever we generate an error, pass it through this function. Useful for
+ * debugging, where we can break on it. Only call at error site, not when
+ * propagating errors. Might be useful to plug in a stack trace here.
+ */
+
+VkResult __vk_errorf(struct val_instance *instance, VkResult error, const char *file, int line, const char *format, ...);
+
+#define VAL_DEBUG_ALL_ENTRYPOINTS (1 << 0)
+
+#define vk_error(instance, error) __vk_errorf(instance, error, __FILE__, __LINE__, NULL);
+#define vk_errorf(instance, error, format, ...) __vk_errorf(instance, error, __FILE__, __LINE__, format, ## __VA_ARGS__);
+
+void __val_finishme(const char *file, int line, const char *format, ...)
+   val_printflike(3, 4);
+
+#define val_finishme(format, ...) \
+   __val_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__);
+
+#define stub_return(v) \
+   do { \
+      val_finishme("stub %s", __func__); \
+      return (v); \
+   } while (0)
+
+#define stub() \
+   do { \
+      val_finishme("stub %s", __func__); \
+      return; \
+   } while (0)
+
+struct val_shader_module {
+   struct vk_object_base base;
+   uint32_t                                     size;
+   char                                         data[0];
+};
+
+static inline gl_shader_stage
+vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
+{
+   assert(__builtin_popcount(vk_stage) == 1);
+   return ffs(vk_stage) - 1;
+}
+
+static inline VkShaderStageFlagBits
+mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
+{
+   return (1 << mesa_stage);
+}
+
+#define VAL_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
+
+#define val_foreach_stage(stage, stage_bits)                         \
+   for (gl_shader_stage stage,                                       \
+        __tmp = (gl_shader_stage)((stage_bits) & VAL_STAGE_MASK);    \
+        stage = __builtin_ffs(__tmp) - 1, __tmp;                     \
+        __tmp &= ~(1 << (stage)))
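+/* Iterates 'stage' over every gl_shader_stage whose bit is set in
+ * 'stage_bits' (a mesa-stage bitmask), lowest stage first. */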
+
+struct val_physical_device {
+   VK_LOADER_DATA                              _loader_data;
+   struct val_instance *                       instance;
+
+   struct pipe_loader_device *pld;
+   struct pipe_screen *pscreen;
+   uint32_t max_images;
+
+   struct wsi_device                       wsi_device;
+   struct val_device_extension_table supported_extensions;
+};
+
+struct val_instance {
+   struct vk_object_base base;
+
+   VkAllocationCallbacks alloc;
+
+   uint32_t apiVersion;
+   int physicalDeviceCount;
+   struct val_physical_device physicalDevice;
+
+   uint64_t debug_flags;
+
+   struct pipe_loader_device *devs;
+   int num_devices;
+
+   struct val_instance_extension_table enabled_extensions;
+   struct val_instance_dispatch_table dispatch;
+   struct val_physical_device_dispatch_table physical_device_dispatch;
+   struct val_device_dispatch_table device_dispatch;
+};
+
+VkResult val_init_wsi(struct val_physical_device *physical_device);
+void val_finish_wsi(struct val_physical_device *physical_device);
+
+bool val_instance_extension_supported(const char *name);
+uint32_t val_physical_device_api_version(struct val_physical_device *dev);
+bool val_physical_device_extension_supported(struct val_physical_device *dev,
+                                              const char *name);
+
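+/* Work submitted to the queue lands on 'workqueue' as val_queue_work items;
+ * 'm' and 'new_work' coordinate with the exec_thread that drains them. */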
+struct val_queue {
+   VK_LOADER_DATA                              _loader_data;
+   VkDeviceQueueCreateFlags flags;
+   struct val_device *                         device;
+   struct pipe_context *ctx;
+   bool shutdown;
+   thrd_t exec_thread;
+   mtx_t m;
+   cnd_t new_work;
+   struct list_head workqueue;
+   uint32_t count;
+};
+
+struct val_queue_work {
+   struct list_head list;
+   uint32_t cmd_buffer_count;
+   struct val_cmd_buffer **cmd_buffers;
+   struct val_fence *fence;
+};
+
+struct val_pipeline_cache {
+   struct vk_object_base                        base;
+   struct val_device *                          device;
+   VkAllocationCallbacks                        alloc;
+};
+
+struct val_device {
+   struct vk_device vk;
+
+   VkAllocationCallbacks                       alloc;
+
+   struct val_queue queue;
+   struct val_instance *                       instance;
+   struct val_physical_device *physical_device;
+   struct pipe_screen *pscreen;
+
+   mtx_t fence_lock;
+   struct val_device_extension_table enabled_extensions;
+   struct val_device_dispatch_table dispatch;
+};
+
+void val_device_get_cache_uuid(void *uuid);
+
+struct val_device_memory {
+   struct vk_object_base base;
+   struct pipe_memory_allocation *pmem;
+   uint32_t                                     type_index;
+   VkDeviceSize                                 map_size;
+   void *                                       map;
+};
+
+struct val_image {
+   struct vk_object_base base;
+   VkImageType type;
+   VkFormat vk_format;
+   VkDeviceSize size;
+   uint32_t alignment;
+   struct pipe_resource *bo;
+};
+
+static inline uint32_t
+val_get_layerCount(const struct val_image *image,
+                   const VkImageSubresourceRange *range)
+{
+   return range->layerCount == VK_REMAINING_ARRAY_LAYERS ?
+      image->bo->array_size - range->baseArrayLayer : range->layerCount;
+}
+
+static inline uint32_t
+val_get_levelCount(const struct val_image *image,
+                   const VkImageSubresourceRange *range)
+{
+   return range->levelCount == VK_REMAINING_MIP_LEVELS ?
+      (image->bo->last_level + 1) - range->baseMipLevel : range->levelCount;
+}
+
+struct val_image_create_info {
+   const VkImageCreateInfo *vk_info;
+   uint32_t bind_flags;
+   uint32_t stride;
+};
+
+VkResult
+val_image_create(VkDevice _device,
+                 const struct val_image_create_info *create_info,
+                 const VkAllocationCallbacks* alloc,
+                 VkImage *pImage);
+
+struct val_image_view {
+   struct vk_object_base base;
+   const struct val_image *image; /**< VkImageViewCreateInfo::image */
+
+   VkImageViewType view_type;
+   VkFormat format;
+   enum pipe_format pformat;
+   VkComponentMapping components;
+   VkImageSubresourceRange subresourceRange;
+
+   struct pipe_surface *surface; /* have we created a pipe surface for this? */
+};
+
+struct val_subpass_attachment {
+   uint32_t         attachment;
+   VkImageLayout    layout;
+   bool             in_render_loop;
+};
+
+struct val_subpass {
+   uint32_t                                     attachment_count;
+   struct val_subpass_attachment *             attachments;
+
+   uint32_t                                     input_count;
+   uint32_t                                     color_count;
+   struct val_subpass_attachment *              input_attachments;
+   struct val_subpass_attachment *              color_attachments;
+   struct val_subpass_attachment *              resolve_attachments;
+   struct val_subpass_attachment *              depth_stencil_attachment;
+   struct val_subpass_attachment *              ds_resolve_attachment;
+
+   /** Subpass has at least one color resolve attachment */
+   bool                                         has_color_resolve;
+
+   /** Subpass has at least one color attachment */
+   bool                                         has_color_att;
+
+   VkSampleCountFlagBits                        max_sample_count;
+};
+
+struct val_render_pass_attachment {
+   VkFormat                                     format;
+   uint32_t                                     samples;
+   VkAttachmentLoadOp                           load_op;
+   VkAttachmentLoadOp                           stencil_load_op;
+   VkImageLayout                                initial_layout;
+   VkImageLayout                                final_layout;
+
+   /* The subpass id in which the attachment will be used first/last. */
+   uint32_t                                     first_subpass_idx;
+   uint32_t                                     last_subpass_idx;
+};
+
+struct val_render_pass {
+   struct vk_object_base                        base;
+   uint32_t                                     attachment_count;
+   uint32_t                                     subpass_count;
+   struct val_subpass_attachment *              subpass_attachments;
+   struct val_render_pass_attachment *          attachments;
+   struct val_subpass                           subpasses[0];
+};
+
+struct val_sampler {
+   struct vk_object_base base;
+   VkSamplerCreateInfo create_info;
+   uint32_t state[4];
+};
+
+struct val_framebuffer {
+   struct vk_object_base                        base;
+   uint32_t                                     width;
+   uint32_t                                     height;
+   uint32_t                                     layers;
+
+   uint32_t                                     attachment_count;
+   struct val_image_view *                      attachments[0];
+};
+
+struct val_descriptor_set_binding_layout {
+   uint16_t descriptor_index;
+   VkDescriptorType type;
+   /* Number of array elements in this binding */
+   uint16_t array_size;
+   bool valid;
+
+   int16_t dynamic_index;
+   struct {
+      int16_t const_buffer_index;
+      int16_t shader_buffer_index;
+      int16_t sampler_index;
+      int16_t sampler_view_index;
+      int16_t image_index;
+   } stage[MESA_SHADER_STAGES];
+
+   /* Immutable samplers (or NULL if no immutable samplers) */
+   struct val_sampler **immutable_samplers;
+};
+
+struct val_descriptor_set_layout {
+   struct vk_object_base base;
+   /* Number of bindings in this descriptor set */
+   uint16_t binding_count;
+
+   /* Total size of the descriptor set with room for all array entries */
+   uint16_t size;
+
+   /* Shader stages affected by this descriptor set */
+   uint16_t shader_stages;
+
+   struct {
+      uint16_t const_buffer_count;
+      uint16_t shader_buffer_count;
+      uint16_t sampler_count;
+      uint16_t sampler_view_count;
+      uint16_t image_count;
+   } stage[MESA_SHADER_STAGES];
+
+   /* Number of dynamic offsets used by this descriptor set */
+   uint16_t dynamic_offset_count;
+
+   /* Bindings in this descriptor set */
+   struct val_descriptor_set_binding_layout binding[0];
+};
+
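+/* CPU-side descriptor contents; which union member is valid depends on type. */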
+struct val_descriptor {
+   VkDescriptorType type;
+
+   union {
+      struct {
+         struct val_image_view *image_view;
+         struct val_sampler *sampler;
+      };
+      struct {
+         uint64_t offset;
+         uint64_t range;
+         struct val_buffer *buffer;
+      } buf;
+      struct val_buffer_view *buffer_view;
+   };
+};
+
+struct val_descriptor_set {
+   struct vk_object_base base;
+   const struct val_descriptor_set_layout *layout;
+   struct list_head link;
+   struct val_descriptor descriptors[0];
+};
+
+struct val_descriptor_pool {
+   struct vk_object_base base;
+   VkDescriptorPoolCreateFlags flags;
+   uint32_t max_sets;
+
+   struct list_head sets;
+};
+
+VkResult
+val_descriptor_set_create(struct val_device *device,
+                          const struct val_descriptor_set_layout *layout,
+                          struct val_descriptor_set **out_set);
+
+void
+val_descriptor_set_destroy(struct val_device *device,
+                           struct val_descriptor_set *set);
+
+struct val_pipeline_layout {
+   struct vk_object_base base;
+   struct {
+      struct val_descriptor_set_layout *layout;
+      uint32_t dynamic_offset_start;
+   } set[MAX_SETS];
+
+   uint32_t num_sets;
+   uint32_t push_constant_size;
+   struct {
+      bool has_dynamic_offsets;
+   } stage[MESA_SHADER_STAGES];
+};
+
+struct val_pipeline {
+   struct vk_object_base base;
+   struct val_device *                          device;
+   struct val_pipeline_layout *                 layout;
+
+   bool is_compute_pipeline;
+   bool force_min_sample;
+   nir_shader *pipeline_nir[MESA_SHADER_STAGES];
+   void *shader_cso[PIPE_SHADER_TYPES];
+   VkGraphicsPipelineCreateInfo graphics_create_info;
+   VkComputePipelineCreateInfo compute_create_info;
+};
+
+struct val_event {
+   struct vk_object_base base;
+   uint64_t event_storage;
+};
+
+struct val_fence {
+   struct vk_object_base base;
+   bool signaled;
+   struct pipe_fence_handle *handle;
+};
+
+struct val_semaphore {
+   struct vk_object_base base;
+   bool dummy;
+};
+
+struct val_buffer {
+   struct vk_object_base base;
+   struct val_device *                          device;
+   VkDeviceSize                                 size;
+
+   VkBufferUsageFlags                           usage;
+   VkDeviceSize                                 offset;
+
+   struct pipe_resource *bo;
+   uint64_t total_size;
+};
+
+struct val_buffer_view {
+   struct vk_object_base base;
+   VkFormat format;
+   enum pipe_format pformat;
+   struct val_buffer *buffer;
+   uint32_t offset;
+   uint64_t range;
+};
+
+struct val_query_pool {
+   struct vk_object_base base;
+   VkQueryType type;
+   uint32_t count;
+   enum pipe_query_type base_type;
+   struct pipe_query *queries[0];
+};
+
+struct val_cmd_pool {
+   struct vk_object_base                        base;
+   VkAllocationCallbacks                        alloc;
+   struct list_head                             cmd_buffers;
+   struct list_head                             free_cmd_buffers;
+};
+
+
+enum val_cmd_buffer_status {
+   VAL_CMD_BUFFER_STATUS_INVALID,
+   VAL_CMD_BUFFER_STATUS_INITIAL,
+   VAL_CMD_BUFFER_STATUS_RECORDING,
+   VAL_CMD_BUFFER_STATUS_EXECUTABLE,
+   VAL_CMD_BUFFER_STATUS_PENDING,
+};
+
+struct val_cmd_buffer {
+   struct vk_object_base base;
+
+   struct val_device *                          device;
+
+   VkCommandBufferLevel                         level;
+   enum val_cmd_buffer_status status;
+   struct val_cmd_pool *                        pool;
+   struct list_head                             pool_link;
+
+   struct list_head                             cmds;
+
+   uint8_t push_constants[MAX_PUSH_CONSTANTS_SIZE];
+};
+
+/* In the same order as the command buffer building commands in the spec. */
+enum val_cmds {
+   VAL_CMD_BIND_PIPELINE,
+   VAL_CMD_SET_VIEWPORT,
+   VAL_CMD_SET_SCISSOR,
+   VAL_CMD_SET_LINE_WIDTH,
+   VAL_CMD_SET_DEPTH_BIAS,
+   VAL_CMD_SET_BLEND_CONSTANTS,
+   VAL_CMD_SET_DEPTH_BOUNDS,
+   VAL_CMD_SET_STENCIL_COMPARE_MASK,
+   VAL_CMD_SET_STENCIL_WRITE_MASK,
+   VAL_CMD_SET_STENCIL_REFERENCE,
+   VAL_CMD_BIND_DESCRIPTOR_SETS,
+   VAL_CMD_BIND_INDEX_BUFFER,
+   VAL_CMD_BIND_VERTEX_BUFFERS,
+   VAL_CMD_DRAW,
+   VAL_CMD_DRAW_INDEXED,
+   VAL_CMD_DRAW_INDIRECT,
+   VAL_CMD_DRAW_INDEXED_INDIRECT,
+   VAL_CMD_DISPATCH,
+   VAL_CMD_DISPATCH_INDIRECT,
+   VAL_CMD_COPY_BUFFER,
+   VAL_CMD_COPY_IMAGE,
+   VAL_CMD_BLIT_IMAGE,
+   VAL_CMD_COPY_BUFFER_TO_IMAGE,
+   VAL_CMD_COPY_IMAGE_TO_BUFFER,
+   VAL_CMD_UPDATE_BUFFER,
+   VAL_CMD_FILL_BUFFER,
+   VAL_CMD_CLEAR_COLOR_IMAGE,
+   VAL_CMD_CLEAR_DEPTH_STENCIL_IMAGE,
+   VAL_CMD_CLEAR_ATTACHMENTS,
+   VAL_CMD_RESOLVE_IMAGE,
+   VAL_CMD_SET_EVENT,
+   VAL_CMD_RESET_EVENT,
+   VAL_CMD_WAIT_EVENTS,
+   VAL_CMD_PIPELINE_BARRIER,
+   VAL_CMD_BEGIN_QUERY,
+   VAL_CMD_END_QUERY,
+   VAL_CMD_RESET_QUERY_POOL,
+   VAL_CMD_WRITE_TIMESTAMP,
+   VAL_CMD_COPY_QUERY_POOL_RESULTS,
+   VAL_CMD_PUSH_CONSTANTS,
+   VAL_CMD_BEGIN_RENDER_PASS,
+   VAL_CMD_NEXT_SUBPASS,
+   VAL_CMD_END_RENDER_PASS,
+   VAL_CMD_EXECUTE_COMMANDS,
+};
+
+struct val_cmd_bind_pipeline {
+   VkPipelineBindPoint bind_point;
+   struct val_pipeline *pipeline;
+};
+
+struct val_cmd_set_viewport {
+   uint32_t first_viewport;
+   uint32_t viewport_count;
+   VkViewport viewports[16];
+};
+
+struct val_cmd_set_scissor {
+   uint32_t first_scissor;
+   uint32_t scissor_count;
+   VkRect2D scissors[16];
+};
+
+struct val_cmd_set_line_width {
+   float line_width;
+};
+
+struct val_cmd_set_depth_bias {
+   float constant_factor;
+   float clamp;
+   float slope_factor;
+};
+
+struct val_cmd_set_blend_constants {
+   float blend_constants[4];
+};
+
+struct val_cmd_set_depth_bounds {
+   float min_depth;
+   float max_depth;
+};
+
+struct val_cmd_set_stencil_vals {
+   VkStencilFaceFlags face_mask;
+   uint32_t value;
+};
+
+struct val_cmd_bind_descriptor_sets {
+   VkPipelineBindPoint bind_point;
+   struct val_pipeline_layout *layout;
+   uint32_t first;
+   uint32_t count;
+   struct val_descriptor_set **sets;
+   uint32_t dynamic_offset_count;
+   const uint32_t *dynamic_offsets;
+};
+
+struct val_cmd_bind_index_buffer {
+   const struct val_buffer *buffer;
+   VkDeviceSize offset;
+   VkIndexType index_type;
+};
+
+struct val_cmd_bind_vertex_buffers {
+   uint32_t first;
+   uint32_t binding_count;
+   struct val_buffer **buffers;
+   const VkDeviceSize *offsets;
+};
+
+struct val_cmd_draw {
+   uint32_t vertex_count;
+   uint32_t instance_count;
+   uint32_t first_vertex;
+   uint32_t first_instance;
+};
+
+struct val_cmd_draw_indexed {
+   uint32_t index_count;
+   uint32_t instance_count;
+   uint32_t first_index;
+   uint32_t vertex_offset;
+   uint32_t first_instance;
+};
+
+struct val_cmd_draw_indirect {
+   VkDeviceSize offset;
+   struct val_buffer *buffer;
+   uint32_t draw_count;
+   uint32_t stride;
+};
+
+struct val_cmd_dispatch {
+   uint32_t x;
+   uint32_t y;
+   uint32_t z;
+};
+
+struct val_cmd_dispatch_indirect {
+   const struct val_buffer *buffer;
+   VkDeviceSize offset;
+};
+
+struct val_cmd_copy_buffer {
+   struct val_buffer *src;
+   struct val_buffer *dst;
+   uint32_t region_count;
+   const VkBufferCopy *regions;
+};
+
+struct val_cmd_copy_image {
+   struct val_image *src;
+   struct val_image *dst;
+   VkImageLayout src_layout;
+   VkImageLayout dst_layout;
+   uint32_t region_count;
+   const VkImageCopy *regions;
+};
+
+struct val_cmd_blit_image {
+  struct val_image *src;
+  struct val_image *dst;
+  VkImageLayout src_layout;
+  VkImageLayout dst_layout;
+  uint32_t region_count;
+  const VkImageBlit *regions;
+  VkFilter filter;
+};
+
+struct val_cmd_copy_buffer_to_image {
+   struct val_buffer *src;
+   struct val_image *dst;
+   VkImageLayout dst_layout;
+   uint32_t region_count;
+   const VkBufferImageCopy *regions;
+};
+
+struct val_cmd_copy_image_to_buffer {
+   struct val_image *src;
+   struct val_buffer *dst;
+   VkImageLayout src_layout;
+   uint32_t region_count;
+   const VkBufferImageCopy *regions;
+};
+
+struct val_cmd_update_buffer {
+   struct val_buffer *buffer;
+   VkDeviceSize offset;
+   VkDeviceSize data_size;
+   char data[0];
+};
+
+struct val_cmd_fill_buffer {
+   struct val_buffer *buffer;
+   VkDeviceSize offset;
+   VkDeviceSize fill_size;
+   uint32_t data;
+};
+
+struct val_cmd_clear_color_image {
+   struct val_image *image;
+   VkImageLayout layout;
+   VkClearColorValue clear_val;
+   uint32_t range_count;
+   VkImageSubresourceRange *ranges;
+};
+
+struct val_cmd_clear_ds_image {
+   struct val_image *image;
+   VkImageLayout layout;
+   VkClearDepthStencilValue clear_val;
+   uint32_t range_count;
+   VkImageSubresourceRange *ranges;
+};
+
+struct val_cmd_clear_attachments {
+   uint32_t attachment_count;
+   VkClearAttachment *attachments;
+   uint32_t rect_count;
+   VkClearRect *rects;
+};
+
+struct val_cmd_resolve_image {
+   struct val_image *src;
+   struct val_image *dst;
+   VkImageLayout src_layout;
+   VkImageLayout dst_layout;
+   uint32_t region_count;
+   VkImageResolve *regions;
+};
+
+struct val_cmd_event_set {
+   struct val_event *event;
+   bool value;
+   bool flush;
+};
+
+struct val_cmd_wait_events {
+   uint32_t event_count;
+   struct val_event **events;
+   VkPipelineStageFlags src_stage_mask;
+   VkPipelineStageFlags dst_stage_mask;
+   uint32_t memory_barrier_count;
+   VkMemoryBarrier *memory_barriers;
+   uint32_t buffer_memory_barrier_count;
+   VkBufferMemoryBarrier *buffer_memory_barriers;
+   uint32_t image_memory_barrier_count;
+   VkImageMemoryBarrier *image_memory_barriers;
+};
+
+struct val_cmd_pipeline_barrier {
+   VkPipelineStageFlags src_stage_mask;
+   VkPipelineStageFlags dst_stage_mask;
+   bool by_region;
+   uint32_t memory_barrier_count;
+   VkMemoryBarrier *memory_barriers;
+   uint32_t buffer_memory_barrier_count;
+   VkBufferMemoryBarrier *buffer_memory_barriers;
+   uint32_t image_memory_barrier_count;
+   VkImageMemoryBarrier *image_memory_barriers;
+};
+
+struct val_cmd_query_cmd {
+   struct val_query_pool *pool;
+   uint32_t query;
+   uint32_t index;
+   bool precise;
+   bool flush;
+};
+
+struct val_cmd_copy_query_pool_results {
+   struct val_query_pool *pool;
+   uint32_t first_query;
+   uint32_t query_count;
+   struct val_buffer *dst;
+   VkDeviceSize dst_offset;
+   VkDeviceSize stride;
+   VkQueryResultFlags flags;
+};
+
+struct val_cmd_push_constants {
+   VkShaderStageFlags stage;
+   uint32_t offset;
+   uint32_t size;
+   uint32_t val[1];
+};
+
+struct val_attachment_state {
+   VkImageAspectFlags pending_clear_aspects;
+   VkClearValue clear_value;
+};
+
+struct val_cmd_begin_render_pass {
+   struct val_framebuffer *framebuffer;
+   struct val_render_pass *render_pass;
+   VkRect2D render_area;
+   struct val_attachment_state *attachments;
+};
+
+struct val_cmd_next_subpass {
+   VkSubpassContents contents;
+};
+
+struct val_cmd_execute_commands {
+   uint32_t command_buffer_count;
+   struct val_cmd_buffer *cmd_buffers[0];
+};
+
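+/* A single recorded command; cmd_type (enum val_cmds) selects the active union member. */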
+struct val_cmd_buffer_entry {
+   struct list_head cmd_link;
+   uint32_t cmd_type;
+   union {
+      struct val_cmd_bind_pipeline pipeline;
+      struct val_cmd_set_viewport set_viewport;
+      struct val_cmd_set_scissor set_scissor;
+      struct val_cmd_set_line_width set_line_width;
+      struct val_cmd_set_depth_bias set_depth_bias;
+      struct val_cmd_set_blend_constants set_blend_constants;
+      struct val_cmd_set_depth_bounds set_depth_bounds;
+      struct val_cmd_set_stencil_vals stencil_vals;
+      struct val_cmd_bind_descriptor_sets descriptor_sets;
+      struct val_cmd_bind_vertex_buffers vertex_buffers;
+      struct val_cmd_bind_index_buffer index_buffer;
+      struct val_cmd_draw draw;
+      struct val_cmd_draw_indexed draw_indexed;
+      struct val_cmd_draw_indirect draw_indirect;
+      struct val_cmd_dispatch dispatch;
+      struct val_cmd_dispatch_indirect dispatch_indirect;
+      struct val_cmd_copy_buffer copy_buffer;
+      struct val_cmd_copy_image copy_image;
+      struct val_cmd_blit_image blit_image;
+      struct val_cmd_copy_buffer_to_image buffer_to_img;
+      struct val_cmd_copy_image_to_buffer img_to_buffer;
+      struct val_cmd_update_buffer update_buffer;
+      struct val_cmd_fill_buffer fill_buffer;
+      struct val_cmd_clear_color_image clear_color_image;
+      struct val_cmd_clear_ds_image clear_ds_image;
+      struct val_cmd_clear_attachments clear_attachments;
+      struct val_cmd_resolve_image resolve_image;
+      struct val_cmd_event_set event_set;
+      struct val_cmd_wait_events wait_events;
+      struct val_cmd_pipeline_barrier pipeline_barrier;
+      struct val_cmd_query_cmd query;
+      struct val_cmd_copy_query_pool_results copy_query_pool_results;
+      struct val_cmd_push_constants push_constants;
+      struct val_cmd_begin_render_pass begin_render_pass;
+      struct val_cmd_next_subpass next_subpass;
+      struct val_cmd_execute_commands execute_commands;
+   } u;
+};
+
+VkResult val_execute_cmds(struct val_device *device,
+                          struct val_queue *queue,
+                          struct val_fence *fence,
+                          struct val_cmd_buffer *cmd_buffer);
+
+enum pipe_format vk_format_to_pipe(VkFormat format);
+
+static inline VkImageAspectFlags
+vk_format_aspects(VkFormat format)
+{
+   switch (format) {
+   case VK_FORMAT_UNDEFINED:
+      return 0;
+
+   case VK_FORMAT_S8_UINT:
+      return VK_IMAGE_ASPECT_STENCIL_BIT;
+
+   case VK_FORMAT_D16_UNORM_S8_UINT:
+   case VK_FORMAT_D24_UNORM_S8_UINT:
+   case VK_FORMAT_D32_SFLOAT_S8_UINT:
+      return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
+
+   case VK_FORMAT_D16_UNORM:
+   case VK_FORMAT_X8_D24_UNORM_PACK32:
+   case VK_FORMAT_D32_SFLOAT:
+      return VK_IMAGE_ASPECT_DEPTH_BIT;
+
+   default:
+      return VK_IMAGE_ASPECT_COLOR_BIT;
+   }
+}
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/src/gallium/frontends/vallium/val_query.c b/src/gallium/frontends/vallium/val_query.c
new file mode 100644 (file)
index 0000000..d0a4859
--- /dev/null
@@ -0,0 +1,136 @@
+/*
+ * Copyright © 2019 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "val_private.h"
+#include "pipe/p_context.h"
+
+VkResult val_CreateQueryPool(
+    VkDevice                                    _device,
+    const VkQueryPoolCreateInfo*                pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkQueryPool*                                pQueryPool)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+
+   enum pipe_query_type pipeq;
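+   /* Only occlusion and timestamp queries are supported. */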
+   switch (pCreateInfo->queryType) {
+   case VK_QUERY_TYPE_OCCLUSION:
+      pipeq = PIPE_QUERY_OCCLUSION_COUNTER;
+      break;
+   case VK_QUERY_TYPE_TIMESTAMP:
+      pipeq = PIPE_QUERY_TIMESTAMP;
+      break;
+   default:
+      return VK_ERROR_FEATURE_NOT_PRESENT;
+   }
+   struct val_query_pool *pool;
+   uint32_t pool_size = sizeof(*pool) + pCreateInfo->queryCount * sizeof(struct pipe_query *);
+
+   pool = vk_zalloc2(&device->alloc, pAllocator,
+                    pool_size, 8,
+                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (!pool)
+      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vk_object_base_init(&device->vk, &pool->base,
+                       VK_OBJECT_TYPE_QUERY_POOL);
+   pool->type = pCreateInfo->queryType;
+   pool->count = pCreateInfo->queryCount;
+   pool->base_type = pipeq;
+
+   *pQueryPool = val_query_pool_to_handle(pool);
+   return VK_SUCCESS;
+}
+
+void val_DestroyQueryPool(
+    VkDevice                                    _device,
+    VkQueryPool                                 _pool,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_query_pool, pool, _pool);
+
+   if (!pool)
+      return;
+
+   for (unsigned i = 0; i < pool->count; i++)
+      if (pool->queries[i])
+         device->queue.ctx->destroy_query(device->queue.ctx, pool->queries[i]);
+   vk_object_base_finish(&pool->base);
+   vk_free2(&device->alloc, pAllocator, pool);
+}
+
+VkResult val_GetQueryPoolResults(
+   VkDevice                                    _device,
+   VkQueryPool                                 queryPool,
+   uint32_t                                    firstQuery,
+   uint32_t                                    queryCount,
+   size_t                                      dataSize,
+   void*                                       pData,
+   VkDeviceSize                                stride,
+   VkQueryResultFlags                          flags)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   VAL_FROM_HANDLE(val_query_pool, pool, queryPool);
+   VkResult vk_result = VK_SUCCESS;
+
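+   /* Wait for all queued work to complete before reading results back. */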
+   val_DeviceWaitIdle(_device);
+
+   for (unsigned i = firstQuery; i < firstQuery + queryCount; i++) {
+      uint8_t *dptr = (uint8_t *)pData + stride * (i - firstQuery);
+      union pipe_query_result result;
+      bool ready = false;
+      if (pool->queries[i]) {
+        ready = device->queue.ctx->get_query_result(device->queue.ctx,
+                                                    pool->queries[i],
+                                                    (flags & VK_QUERY_RESULT_WAIT_BIT),
+                                                    &result);
+      } else {
+        result.u64 = 0;
+      }
+
+      if (!ready && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
+          vk_result = VK_NOT_READY;
+      if (flags & VK_QUERY_RESULT_64_BIT) {
+         if (ready || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
+            *(uint64_t *)dptr = result.u64;
+         dptr += 8;
+      } else {
+         if (ready || (flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
+            if (result.u64 > UINT32_MAX)
+               *(uint32_t *)dptr = UINT32_MAX;
+            else
+               *(uint32_t *)dptr = result.u32;
+         }
+         dptr += 4;
+      }
+
+      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
+        if (flags & VK_QUERY_RESULT_64_BIT)
+           *(uint64_t *)dptr = ready;
+        else
+           *(uint32_t *)dptr = ready;
+      }
+   }
+   return vk_result;
+}
diff --git a/src/gallium/frontends/vallium/val_util.c b/src/gallium/frontends/vallium/val_util.c
new file mode 100644 (file)
index 0000000..9e92088
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright © 2019 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "val_private.h"
+#include "vk_enum_to_str.h"
+
+void val_printflike(3, 4)
+__val_finishme(const char *file, int line, const char *format, ...)
+{
+   va_list ap;
+   char buffer[256];
+
+   va_start(ap, format);
+   vsnprintf(buffer, sizeof(buffer), format, ap);
+   va_end(ap);
+
+   fprintf(stderr, "%s:%d: FINISHME: %s\n", file, line, buffer);
+}
+
+VkResult
+__vk_errorf(struct val_instance *instance, VkResult error, const char *file, int line, const char *format, ...)
+{
+   va_list ap;
+   char buffer[256];
+
+   const char *error_str = vk_Result_to_str(error);
+
+   if (format) {
+      va_start(ap, format);
+      vsnprintf(buffer, sizeof(buffer), format, ap);
+      va_end(ap);
+
+      fprintf(stderr, "%s:%d: %s (%s)\n", file, line, buffer, error_str);
+   } else {
+      fprintf(stderr, "%s:%d: %s\n", file, line, error_str);
+   }
+
+   return error;
+}
diff --git a/src/gallium/frontends/vallium/val_wsi.c b/src/gallium/frontends/vallium/val_wsi.c
new file mode 100644 (file)
index 0000000..aa56654
--- /dev/null
@@ -0,0 +1,277 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "val_wsi.h"
+
+static PFN_vkVoidFunction
+val_wsi_proc_addr(VkPhysicalDevice physicalDevice, const char *pName)
+{
+   return val_lookup_entrypoint(pName);
+}
+
+VkResult
+val_init_wsi(struct val_physical_device *physical_device)
+{
+   return wsi_device_init(&physical_device->wsi_device,
+                          val_physical_device_to_handle(physical_device),
+                          val_wsi_proc_addr,
+                          &physical_device->instance->alloc,
+                          -1, NULL, true);
+}
+
+void
+val_finish_wsi(struct val_physical_device *physical_device)
+{
+   wsi_device_finish(&physical_device->wsi_device,
+                     &physical_device->instance->alloc);
+}
+
+void val_DestroySurfaceKHR(
+   VkInstance                                   _instance,
+   VkSurfaceKHR                                 _surface,
+   const VkAllocationCallbacks*                 pAllocator)
+{
+   VAL_FROM_HANDLE(val_instance, instance, _instance);
+   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
+
+   vk_free2(&instance->alloc, pAllocator, surface);
+}
+
+VkResult val_GetPhysicalDeviceSurfaceSupportKHR(
+   VkPhysicalDevice                            physicalDevice,
+   uint32_t                                    queueFamilyIndex,
+   VkSurfaceKHR                                surface,
+   VkBool32*                                   pSupported)
+{
+   VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+
+   return wsi_common_get_surface_support(&device->wsi_device,
+                                         queueFamilyIndex,
+                                         surface,
+                                         pSupported);
+}
+
+VkResult val_GetPhysicalDeviceSurfaceCapabilitiesKHR(
+   VkPhysicalDevice                            physicalDevice,
+   VkSurfaceKHR                                surface,
+   VkSurfaceCapabilitiesKHR*                   pSurfaceCapabilities)
+{
+   VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+
+   return wsi_common_get_surface_capabilities(&device->wsi_device,
+                                              surface,
+                                              pSurfaceCapabilities);
+}
+
+VkResult val_GetPhysicalDeviceSurfaceCapabilities2KHR(
+   VkPhysicalDevice                            physicalDevice,
+   const VkPhysicalDeviceSurfaceInfo2KHR*      pSurfaceInfo,
+   VkSurfaceCapabilities2KHR*                  pSurfaceCapabilities)
+{
+   VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+
+   return wsi_common_get_surface_capabilities2(&device->wsi_device,
+                                               pSurfaceInfo,
+                                               pSurfaceCapabilities);
+}
+
+VkResult val_GetPhysicalDeviceSurfaceCapabilities2EXT(
+   VkPhysicalDevice                            physicalDevice,
+   VkSurfaceKHR                                surface,
+   VkSurfaceCapabilities2EXT*                  pSurfaceCapabilities)
+{
+   VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+
+   return wsi_common_get_surface_capabilities2ext(&device->wsi_device,
+                                                  surface,
+                                                  pSurfaceCapabilities);
+}
+
+VkResult val_GetPhysicalDeviceSurfaceFormatsKHR(
+   VkPhysicalDevice                            physicalDevice,
+   VkSurfaceKHR                                surface,
+   uint32_t*                                   pSurfaceFormatCount,
+   VkSurfaceFormatKHR*                         pSurfaceFormats)
+{
+   VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+   return wsi_common_get_surface_formats(&device->wsi_device,
+                                         surface,
+                                         pSurfaceFormatCount,
+                                         pSurfaceFormats);
+}
+
+VkResult val_GetPhysicalDeviceSurfacePresentModesKHR(
+   VkPhysicalDevice                            physicalDevice,
+   VkSurfaceKHR                                surface,
+   uint32_t*                                   pPresentModeCount,
+   VkPresentModeKHR*                           pPresentModes)
+{
+   VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+
+   return wsi_common_get_surface_present_modes(&device->wsi_device,
+                                               surface,
+                                               pPresentModeCount,
+                                               pPresentModes);
+}
+
+VkResult val_CreateSwapchainKHR(
+   VkDevice                                     _device,
+   const VkSwapchainCreateInfoKHR*              pCreateInfo,
+   const VkAllocationCallbacks*                 pAllocator,
+   VkSwapchainKHR*                              pSwapchain)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   const VkAllocationCallbacks *alloc;
+   if (pAllocator)
+      alloc = pAllocator;
+   else
+      alloc = &device->alloc;
+
+   return wsi_common_create_swapchain(&device->physical_device->wsi_device,
+                                      val_device_to_handle(device),
+                                      pCreateInfo,
+                                      alloc,
+                                      pSwapchain);
+}
+
+void val_DestroySwapchainKHR(
+   VkDevice                                     _device,
+   VkSwapchainKHR                               swapchain,
+   const VkAllocationCallbacks*                 pAllocator)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   const VkAllocationCallbacks *alloc;
+
+   if (pAllocator)
+      alloc = pAllocator;
+   else
+      alloc = &device->alloc;
+
+   wsi_common_destroy_swapchain(_device, swapchain, alloc);
+}
+
+VkResult val_GetSwapchainImagesKHR(
+   VkDevice                                     device,
+   VkSwapchainKHR                               swapchain,
+   uint32_t*                                    pSwapchainImageCount,
+   VkImage*                                     pSwapchainImages)
+{
+   return wsi_common_get_images(swapchain,
+                                pSwapchainImageCount,
+                                pSwapchainImages);
+}
+
+VkResult val_AcquireNextImageKHR(
+   VkDevice                                     device,
+   VkSwapchainKHR                               swapchain,
+   uint64_t                                     timeout,
+   VkSemaphore                                  semaphore,
+   VkFence                                      fence,
+   uint32_t*                                    pImageIndex)
+{
+   VkAcquireNextImageInfoKHR acquire_info = {
+      .sType = VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR,
+      .swapchain = swapchain,
+      .timeout = timeout,
+      .semaphore = semaphore,
+      .fence = fence,
+      .deviceMask = 0,
+   };
+
+   return val_AcquireNextImage2KHR(device, &acquire_info, pImageIndex);
+}
+
+VkResult val_AcquireNextImage2KHR(
+   VkDevice                                     _device,
+   const VkAcquireNextImageInfoKHR*             pAcquireInfo,
+   uint32_t*                                    pImageIndex)
+{
+   VAL_FROM_HANDLE(val_device, device, _device);
+   struct val_physical_device *pdevice = device->physical_device;
+
+   VkResult result = wsi_common_acquire_next_image2(&pdevice->wsi_device,
+                                                    _device,
+                                                    pAcquireInfo,
+                                                    pImageIndex);
+#if 0
+   VAL_FROM_HANDLE(val_fence, fence, pAcquireInfo->fence);
+
+   if (fence && (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR)) {
+      if (fence->fence)
+         device->ws->signal_fence(fence->fence);
+      if (fence->temp_syncobj) {
+         device->ws->signal_syncobj(device->ws, fence->temp_syncobj);
+      } else if (fence->syncobj) {
+         device->ws->signal_syncobj(device->ws, fence->syncobj);
+      }
+   }
+#endif
+   return result;
+}
+
+VkResult val_QueuePresentKHR(
+   VkQueue                                  _queue,
+   const VkPresentInfoKHR*                  pPresentInfo)
+{
+   VAL_FROM_HANDLE(val_queue, queue, _queue);
+   return wsi_common_queue_present(&queue->device->physical_device->wsi_device,
+                                   val_device_to_handle(queue->device),
+                                   _queue, 0,
+                                   pPresentInfo);
+}
+
+VkResult val_GetDeviceGroupPresentCapabilitiesKHR(
+   VkDevice                                    device,
+   VkDeviceGroupPresentCapabilitiesKHR*        pCapabilities)
+{
+   memset(pCapabilities->presentMask, 0,
+          sizeof(pCapabilities->presentMask));
+   pCapabilities->presentMask[0] = 0x1;
+   pCapabilities->modes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;
+
+   return VK_SUCCESS;
+}
+
+VkResult val_GetDeviceGroupSurfacePresentModesKHR(
+   VkDevice                                    device,
+   VkSurfaceKHR                                surface,
+   VkDeviceGroupPresentModeFlagsKHR*           pModes)
+{
+   *pModes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;
+
+   return VK_SUCCESS;
+}
+
+VkResult val_GetPhysicalDevicePresentRectanglesKHR(
+   VkPhysicalDevice                            physicalDevice,
+   VkSurfaceKHR                                surface,
+   uint32_t*                                   pRectCount,
+   VkRect2D*                                   pRects)
+{
+   VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+
+   return wsi_common_get_present_rectangles(&device->wsi_device,
+                                            surface,
+                                            pRectCount, pRects);
+}
diff --git a/src/gallium/frontends/vallium/val_wsi.h b/src/gallium/frontends/vallium/val_wsi.h
new file mode 100644 (file)
index 0000000..26fceb0
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include "val_private.h"
+
+struct val_swapchain;
+
+struct val_wsi_interface {
+   VkResult (*get_support)(VkIcdSurfaceBase *surface,
+                           struct val_physical_device *device,
+                           uint32_t queueFamilyIndex,
+                           VkBool32* pSupported);
+   VkResult (*get_capabilities)(VkIcdSurfaceBase *surface,
+                                struct val_physical_device *device,
+                                VkSurfaceCapabilitiesKHR* pSurfaceCapabilities);
+   VkResult (*get_formats)(VkIcdSurfaceBase *surface,
+                           struct val_physical_device *device,
+                           uint32_t* pSurfaceFormatCount,
+                           VkSurfaceFormatKHR* pSurfaceFormats);
+   VkResult (*get_present_modes)(VkIcdSurfaceBase *surface,
+                                 struct val_physical_device *device,
+                                 uint32_t* pPresentModeCount,
+                                 VkPresentModeKHR* pPresentModes);
+   VkResult (*create_swapchain)(VkIcdSurfaceBase *surface,
+                                struct val_device *device,
+                                const VkSwapchainCreateInfoKHR* pCreateInfo,
+                                const VkAllocationCallbacks* pAllocator,
+                                struct val_swapchain **swapchain);
+};
+
+struct val_swapchain {
+   struct val_device *device;
+
+   VkResult (*destroy)(struct val_swapchain *swapchain,
+                       const VkAllocationCallbacks *pAllocator);
+   VkResult (*get_images)(struct val_swapchain *swapchain,
+                          uint32_t *pCount, VkImage *pSwapchainImages);
+   VkResult (*acquire_next_image)(struct val_swapchain *swap_chain,
+                                  uint64_t timeout, VkSemaphore semaphore,
+                                  uint32_t *image_index);
+   VkResult (*queue_present)(struct val_swapchain *swap_chain,
+                             struct val_queue *queue,
+                             uint32_t image_index);
+};
+
+VAL_DEFINE_NONDISP_HANDLE_CASTS(_VkIcdSurfaceBase, VkSurfaceKHR)
+VAL_DEFINE_NONDISP_HANDLE_CASTS(val_swapchain, VkSwapchainKHR)
+
+VkResult val_x11_init_wsi(struct val_instance *instance);
+void val_x11_finish_wsi(struct val_instance *instance);
+VkResult val_wl_init_wsi(struct val_instance *instance);
+void val_wl_finish_wsi(struct val_instance *instance);
diff --git a/src/gallium/frontends/vallium/val_wsi_wayland.c b/src/gallium/frontends/vallium/val_wsi_wayland.c
new file mode 100644 (file)
index 0000000..2fb14a6
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright © 2016 Red Hat
+ * based on intel anv code:
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "wsi_common_wayland.h"
+#include "val_private.h"
+
+VkBool32 val_GetPhysicalDeviceWaylandPresentationSupportKHR(
+    VkPhysicalDevice                            physicalDevice,
+    uint32_t                                    queueFamilyIndex,
+    struct wl_display*                          display)
+{
+   VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
+
+   return wsi_wl_get_presentation_support(&physical_device->wsi_device, display);
+}
+
+VkResult val_CreateWaylandSurfaceKHR(
+    VkInstance                                  _instance,
+    const VkWaylandSurfaceCreateInfoKHR*        pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkSurfaceKHR*                               pSurface)
+{
+   VAL_FROM_HANDLE(val_instance, instance, _instance);
+   const VkAllocationCallbacks *alloc;
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR);
+
+   if (pAllocator)
+      alloc = pAllocator;
+   else
+      alloc = &instance->alloc;
+
+   return wsi_create_wl_surface(alloc, pCreateInfo, pSurface);
+}
diff --git a/src/gallium/frontends/vallium/val_wsi_x11.c b/src/gallium/frontends/vallium/val_wsi_x11.c
new file mode 100644 (file)
index 0000000..26a3203
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <X11/Xlib-xcb.h>
+#include <xcb/xcb.h>
+
+#include "wsi_common_x11.h"
+#include "val_private.h"
+
+VkBool32 val_GetPhysicalDeviceXcbPresentationSupportKHR(
+    VkPhysicalDevice                            physicalDevice,
+    uint32_t                                    queueFamilyIndex,
+    xcb_connection_t*                           connection,
+    xcb_visualid_t                              visual_id)
+{
+   VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+
+   return wsi_get_physical_device_xcb_presentation_support(
+      &device->wsi_device,
+      queueFamilyIndex,
+      connection, visual_id);
+}
+
+VkBool32 val_GetPhysicalDeviceXlibPresentationSupportKHR(
+    VkPhysicalDevice                            physicalDevice,
+    uint32_t                                    queueFamilyIndex,
+    Display*                                    dpy,
+    VisualID                                    visualID)
+{
+   VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+
+   return wsi_get_physical_device_xcb_presentation_support(
+      &device->wsi_device,
+      queueFamilyIndex,
+      XGetXCBConnection(dpy), visualID);
+}
+
+VkResult val_CreateXcbSurfaceKHR(
+    VkInstance                                  _instance,
+    const VkXcbSurfaceCreateInfoKHR*            pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkSurfaceKHR*                               pSurface)
+{
+   VAL_FROM_HANDLE(val_instance, instance, _instance);
+   const VkAllocationCallbacks *alloc;
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR);
+
+   if (pAllocator)
+     alloc = pAllocator;
+   else
+     alloc = &instance->alloc;
+
+   return wsi_create_xcb_surface(alloc, pCreateInfo, pSurface);
+}
+
+VkResult val_CreateXlibSurfaceKHR(
+    VkInstance                                  _instance,
+    const VkXlibSurfaceCreateInfoKHR*           pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkSurfaceKHR*                               pSurface)
+{
+   VAL_FROM_HANDLE(val_instance, instance, _instance);
+   const VkAllocationCallbacks *alloc;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR);
+
+   if (pAllocator)
+     alloc = pAllocator;
+   else
+     alloc = &instance->alloc;
+
+   return wsi_create_xlib_surface(alloc, pCreateInfo, pSurface);
+}
index ba26a50bcedb557720569dece4193662b41a16be..1681fcd07343a488b6901392c41850ff82f17ea7 100644 (file)
@@ -225,3 +225,7 @@ if with_tests
   endif
   subdir('tests')
 endif
+if with_swrast_vk
+  subdir('frontends/vallium')
+  subdir('targets/vallium')
+endif
diff --git a/src/gallium/targets/vallium/meson.build b/src/gallium/targets/vallium/meson.build
new file mode 100644 (file)
index 0000000..38efd26
--- /dev/null
@@ -0,0 +1,27 @@
+libvulkan_val = shared_library(
+  'vulkan_val',
+  [ 'target.c' ],
+  include_directories : [ inc_src, inc_util, inc_include, inc_gallium, inc_gallium_aux, inc_gallium_winsys, inc_gallium_drivers ],
+  link_whole : [ libvallium_st ],
+  link_with : [ libpipe_loader_static, libmegadriver_stub, libdri, libdricommon, libgallium, libwsw, libswdri, libws_null, libswkmsdri ],
+  gnu_symbol_visibility : 'hidden',
+  link_args : [ld_args_bsymbolic, ld_args_gc_sections],
+  dependencies : driver_swrast,
+  install : true,
+  name_suffix : 'so',
+)
+
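+# Generate the ICD manifest JSON the Vulkan loader uses to find this driver;
+# it is installed into the Vulkan ICD directory.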
+val_icd = custom_target(
+  'val_icd',
+  input : 'val_icd.py',
+  output : 'val_icd.@0@.json'.format(host_machine.cpu()),
+  command : [
+    prog_python, '@INPUT@',
+    '--lib-path', join_paths(get_option('prefix'), get_option('libdir')),
+    '--out', '@OUTPUT@',
+  ],
+  depend_files : files('../../frontends/vallium/val_extensions.py'),
+  build_by_default : true,
+  install_dir : with_vulkan_icd_dir,
+  install : true,
+)
diff --git a/src/gallium/targets/vallium/target.c b/src/gallium/targets/vallium/target.c
new file mode 100644 (file)
index 0000000..7ca1185
--- /dev/null
@@ -0,0 +1,3 @@
+#include "target-helpers/drm_helper.h"
+#include "target-helpers/sw_helper.h"
+
diff --git a/src/gallium/targets/vallium/val_icd.py b/src/gallium/targets/vallium/val_icd.py
new file mode 100644 (file)
index 0000000..80c7d33
--- /dev/null
@@ -0,0 +1,47 @@
+# Copyright 2017 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sub license, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice (including the
+# next paragraph) shall be included in all copies or substantial portions
+# of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+# IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import json
+import os.path
+
+import argparse
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--out', help='Output json file.', required=True)
+    parser.add_argument('--lib-path', help='Path to libvulkan_val.so')
+    args = parser.parse_args()
+
+    path = 'libvulkan_val.so'
+    if args.lib_path:
+        path = os.path.join(args.lib_path, path)
+
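+    # Minimal ICD manifest: tells the Vulkan loader where the driver library
+    # lives and which API version it exposes.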
+    json_data = {
+        'file_format_version': '1.0.0',
+        'ICD': {
+            'library_path': path,
+            'api_version': '1.1.107',
+        },
+    }
+
+    with open(args.out, 'w') as f:
+        json.dump(json_data, f, indent=4, sort_keys=True, separators=(',', ': '))