anv: add new gem/drm helpers
diff --git a/src/intel/vulkan/gen7_cmd_buffer.c b/src/intel/vulkan/gen7_cmd_buffer.c
index 5130a40d2777491078267ad45fb7701a75f26f3f..bfe9a3930594ba0647da86f3b9ff3e81078c5262 100644
--- a/src/intel/vulkan/gen7_cmd_buffer.c
+++ b/src/intel/vulkan/gen7_cmd_buffer.c
 #include <fcntl.h>
 
 #include "anv_private.h"
+#include "vk_format_info.h"
 
 #include "genxml/gen_macros.h"
 #include "genxml/genX_pack.h"
 
 #if GEN_GEN == 7 && !GEN_IS_HASWELL
-void
-gen7_cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,
-                                         uint32_t stages)
-{
-   static const uint32_t sampler_state_opcodes[] = {
-      [MESA_SHADER_VERTEX]                      = 43,
-      [MESA_SHADER_TESS_CTRL]                   = 44, /* HS */
-      [MESA_SHADER_TESS_EVAL]                   = 45, /* DS */
-      [MESA_SHADER_GEOMETRY]                    = 46,
-      [MESA_SHADER_FRAGMENT]                    = 47,
-      [MESA_SHADER_COMPUTE]                     = 0,
-   };
-
-   static const uint32_t binding_table_opcodes[] = {
-      [MESA_SHADER_VERTEX]                      = 38,
-      [MESA_SHADER_TESS_CTRL]                   = 39,
-      [MESA_SHADER_TESS_EVAL]                   = 40,
-      [MESA_SHADER_GEOMETRY]                    = 41,
-      [MESA_SHADER_FRAGMENT]                    = 42,
-      [MESA_SHADER_COMPUTE]                     = 0,
-   };
-
-   anv_foreach_stage(s, stages) {
-      if (cmd_buffer->state.samplers[s].alloc_size > 0) {
-         anv_batch_emit(&cmd_buffer->batch,
-                        GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS),
-                        ._3DCommandSubOpcode  = sampler_state_opcodes[s],
-                        .PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset);
-      }
-
-      /* Always emit binding table pointers if we're asked to, since on SKL
-       * this is what flushes push constants. */
-      anv_batch_emit(&cmd_buffer->batch,
-                     GENX(3DSTATE_BINDING_TABLE_POINTERS_VS),
-                     ._3DCommandSubOpcode  = binding_table_opcodes[s],
-                     .PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset);
-   }
-}
-
-uint32_t
-gen7_cmd_buffer_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
-{
-   VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
-                              cmd_buffer->state.pipeline->active_stages;
-
-   VkResult result = VK_SUCCESS;
-   anv_foreach_stage(s, dirty) {
-      result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
-                                            &cmd_buffer->state.samplers[s]);
-      if (result != VK_SUCCESS)
-         break;
-      result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
-                                                 &cmd_buffer->state.binding_tables[s]);
-      if (result != VK_SUCCESS)
-         break;
-   }
-
-   if (result != VK_SUCCESS) {
-      assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);
-
-      result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
-      assert(result == VK_SUCCESS);
-
-      /* Re-emit state base addresses so we get the new surface state base
-       * address before we start emitting binding tables etc.
-       */
-      anv_cmd_buffer_emit_state_base_address(cmd_buffer);
-
-      /* Re-emit all active binding tables */
-      dirty |= cmd_buffer->state.pipeline->active_stages;
-      anv_foreach_stage(s, dirty) {
-         result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
-                                               &cmd_buffer->state.samplers[s]);
-         if (result != VK_SUCCESS)
-            return result;
-         result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
-                                                    &cmd_buffer->state.binding_tables[s]);
-         if (result != VK_SUCCESS)
-            return result;
-      }
-   }
-
-   cmd_buffer->state.descriptors_dirty &= ~dirty;
-
-   return dirty;
-}
-#endif /* GEN_GEN == 7 && !GEN_IS_HASWELL */
-
-static inline int64_t
+static int64_t
 clamp_int64(int64_t x, int64_t min, int64_t max)
 {
    if (x < min)
@@ -132,12 +45,12 @@ clamp_int64(int64_t x, int64_t min, int64_t max)
       return max;
 }
 
-#if GEN_GEN == 7 && !GEN_IS_HASWELL
 void
 gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer)
 {
-   uint32_t count = cmd_buffer->state.dynamic.scissor.count;
-   const VkRect2D *scissors =  cmd_buffer->state.dynamic.scissor.scissors;
+   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
+   uint32_t count = cmd_buffer->state.gfx.dynamic.scissor.count;
+   const VkRect2D *scissors = cmd_buffer->state.gfx.dynamic.scissor.scissors;
    struct anv_state scissor_state =
       anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 8, 32);
 
@@ -157,12 +70,36 @@ gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer)
       };
 
       const int max = 0xffff;
+
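+      /* SCISSOR_RECT min/max coordinates are inclusive, so the bottom-right
+       * corner is offset + extent - 1.
+       */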
+      uint32_t y_min = s->offset.y;
+      uint32_t x_min = s->offset.x;
+      uint32_t y_max = s->offset.y + s->extent.height - 1;
+      uint32_t x_max = s->offset.x + s->extent.width - 1;
+
+      /* Do this math using int64_t so overflow gets clamped correctly. */
+      if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
+         y_min = clamp_int64((uint64_t) y_min,
+                             cmd_buffer->state.render_area.offset.y, max);
+         x_min = clamp_int64((uint64_t) x_min,
+                             cmd_buffer->state.render_area.offset.x, max);
+         y_max = clamp_int64((uint64_t) y_max, 0,
+                             cmd_buffer->state.render_area.offset.y +
+                             cmd_buffer->state.render_area.extent.height - 1);
+         x_max = clamp_int64((uint64_t) x_max, 0,
+                             cmd_buffer->state.render_area.offset.x +
+                             cmd_buffer->state.render_area.extent.width - 1);
+      } else if (fb) {
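+         /* A secondary command buffer may not know the render area when it
+          * is recorded, so clamp to the framebuffer dimensions instead when
+          * one is available.
+          */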
+         y_min = clamp_int64((uint64_t) y_min, 0, max);
+         x_min = clamp_int64((uint64_t) x_min, 0, max);
+         y_max = clamp_int64((uint64_t) y_max, 0, fb->height - 1);
+         x_max = clamp_int64((uint64_t) x_max, 0, fb->width - 1);
+      }
+
       struct GEN7_SCISSOR_RECT scissor = {
-         /* Do this math using int64_t so overflow gets clamped correctly. */
-         .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
-         .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
-         .ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
-         .ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
+         .ScissorRectangleYMin = y_min,
+         .ScissorRectangleXMin = x_min,
+         .ScissorRectangleYMax = y_max,
+         .ScissorRectangleXMax = x_max
       };
 
       if (s->extent.width <= 0 || s->extent.height <= 0) {
@@ -173,23 +110,40 @@ gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer)
       }
    }
 
-   anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_SCISSOR_STATE_POINTERS,
-                  .ScissorRectPointer = scissor_state.offset);
-
-   if (!cmd_buffer->device->info.has_llc)
-      anv_state_clflush(scissor_state);
+   anv_batch_emit(&cmd_buffer->batch,
+                  GEN7_3DSTATE_SCISSOR_STATE_POINTERS, ssp) {
+      ssp.ScissorRectPointer = scissor_state.offset;
+   }
 }
 #endif
 
-static const uint32_t vk_to_gen_index_type[] = {
-   [VK_INDEX_TYPE_UINT16]                       = INDEX_WORD,
-   [VK_INDEX_TYPE_UINT32]                       = INDEX_DWORD,
-};
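+/* INDEX_BYTE is only reachable via VK_EXT_index_type_uint8's
+ * VK_INDEX_TYPE_UINT8_EXT; the 16- and 32-bit types are core Vulkan.
+ */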
+static uint32_t vk_to_gen_index_type(VkIndexType type)
+{
+   switch (type) {
+   case VK_INDEX_TYPE_UINT8_EXT:
+      return INDEX_BYTE;
+   case VK_INDEX_TYPE_UINT16:
+      return INDEX_WORD;
+   case VK_INDEX_TYPE_UINT32:
+      return INDEX_DWORD;
+   default:
+      unreachable("invalid index type");
+   }
+}
 
-static const uint32_t restart_index_for_type[] = {
-   [VK_INDEX_TYPE_UINT16]                    = UINT16_MAX,
-   [VK_INDEX_TYPE_UINT32]                    = UINT32_MAX,
-};
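+/* The cut index that triggers primitive restart is the all-ones value for
+ * the bound index type.
+ */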
+static uint32_t restart_index_for_type(VkIndexType type)
+{
+   switch (type) {
+   case VK_INDEX_TYPE_UINT8_EXT:
+      return UINT8_MAX;
+   case VK_INDEX_TYPE_UINT16:
+      return UINT16_MAX;
+   case VK_INDEX_TYPE_UINT32:
+      return UINT32_MAX;
+   default:
+      unreachable("invalid index type");
+   }
+}
 
 void genX(CmdBindIndexBuffer)(
     VkCommandBuffer                             commandBuffer,
@@ -200,261 +154,112 @@ void genX(CmdBindIndexBuffer)(
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
 
-   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
+   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
    if (GEN_IS_HASWELL)
-      cmd_buffer->state.restart_index = restart_index_for_type[indexType];
-   cmd_buffer->state.gen7.index_buffer = buffer;
-   cmd_buffer->state.gen7.index_type = vk_to_gen_index_type[indexType];
-   cmd_buffer->state.gen7.index_offset = offset;
-}
-
-static VkResult
-flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
-{
-   struct anv_device *device = cmd_buffer->device;
-   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
-   struct anv_state surfaces = { 0, }, samplers = { 0, };
-   VkResult result;
-
-   result = anv_cmd_buffer_emit_samplers(cmd_buffer,
-                                         MESA_SHADER_COMPUTE, &samplers);
-   if (result != VK_SUCCESS)
-      return result;
-   result = anv_cmd_buffer_emit_binding_table(cmd_buffer,
-                                              MESA_SHADER_COMPUTE, &surfaces);
-   if (result != VK_SUCCESS)
-      return result;
-
-   struct anv_state push_state = anv_cmd_buffer_cs_push_constants(cmd_buffer);
-
-   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
-   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
-
-   unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
-   unsigned push_constant_data_size =
-      (prog_data->nr_params + local_id_dwords) * 4;
-   unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
-   unsigned push_constant_regs = reg_aligned_constant_size / 32;
-
-   if (push_state.alloc_size) {
-      anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD),
-                     .CURBETotalDataLength = push_state.alloc_size,
-                     .CURBEDataStartAddress = push_state.offset);
-   }
-
-   assert(prog_data->total_shared <= 64 * 1024);
-   uint32_t slm_size = 0;
-   if (prog_data->total_shared > 0) {
-      /* slm_size is in 4k increments, but must be a power of 2. */
-      slm_size = 4 * 1024;
-      while (slm_size < prog_data->total_shared)
-         slm_size <<= 1;
-      slm_size /= 4 * 1024;
-   }
-
-   struct anv_state state =
-      anv_state_pool_emit(&device->dynamic_state_pool,
-                          GENX(INTERFACE_DESCRIPTOR_DATA), 64,
-                          .KernelStartPointer = pipeline->cs_simd,
-                          .BindingTablePointer = surfaces.offset,
-                          .SamplerStatePointer = samplers.offset,
-                          .ConstantURBEntryReadLength =
-                             push_constant_regs,
-#if !GEN_IS_HASWELL
-                          .ConstantURBEntryReadOffset = 0,
-#endif
-                          .BarrierEnable = cs_prog_data->uses_barrier,
-                          .SharedLocalMemorySize = slm_size,
-                          .NumberofThreadsinGPGPUThreadGroup =
-                             pipeline->cs_thread_width_max);
-
-   const uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
-   anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD),
-                  .InterfaceDescriptorTotalLength = size,
-                  .InterfaceDescriptorDataStartAddress = state.offset);
-
-   return VK_SUCCESS;
+      cmd_buffer->state.restart_index = restart_index_for_type(indexType);
+   cmd_buffer->state.gfx.gen7.index_buffer = buffer;
+   cmd_buffer->state.gfx.gen7.index_type = vk_to_gen_index_type(indexType);
+   cmd_buffer->state.gfx.gen7.index_offset = offset;
 }
 
-#define emit_lri(batch, reg, imm)                       \
-   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM),    \
-                  .RegisterOffset = __anv_reg_num(reg), \
-                  .DataDWord = imm)
-
-void
-genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
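+/* 3DSTATE_SF.DepthBufferSurfaceFormat needs to match the bound depth
+ * buffer's format so the hardware can scale the global depth offset
+ * (depth bias) to the buffer's precision.
+ */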
+static uint32_t
+get_depth_format(struct anv_cmd_buffer *cmd_buffer)
 {
-   /* References for GL state:
-    *
-    * - commits e307cfa..228d5a3
-    * - src/mesa/drivers/dri/i965/gen7_l3_state.c
-    */
-
-   uint32_t l3cr2_slm, l3cr2_noslm;
-   anv_pack_struct(&l3cr2_noslm, GENX(L3CNTLREG2),
-                   .URBAllocation = 24,
-                   .ROAllocation = 0,
-                   .DCAllocation = 16);
-   anv_pack_struct(&l3cr2_slm, GENX(L3CNTLREG2),
-                   .SLMEnable = 1,
-                   .URBAllocation = 16,
-                   .URBLowBandwidth = 1,
-                   .ROAllocation = 0,
-                   .DCAllocation = 8);
-   const uint32_t l3cr2_val = enable_slm ? l3cr2_slm : l3cr2_noslm;
-   bool changed = cmd_buffer->state.current_l3_config != l3cr2_val;
-
-   if (changed) {
-      /* According to the hardware docs, the L3 partitioning can only be
-       * changed while the pipeline is completely drained and the caches are
-       * flushed, which involves a first PIPE_CONTROL flush which stalls the
-       * pipeline...
-       */
-      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
-                     .DCFlushEnable = true,
-                     .PostSyncOperation = NoWrite,
-                     .CommandStreamerStallEnable = true);
-
-      /* ...followed by a second pipelined PIPE_CONTROL that initiates
-       * invalidation of the relevant caches. Note that because RO
-       * invalidation happens at the top of the pipeline (i.e. right away as
-       * the PIPE_CONTROL command is processed by the CS) we cannot combine it
-       * with the previous stalling flush as the hardware documentation
-       * suggests, because that would cause the CS to stall on previous
-       * rendering *after* RO invalidation and wouldn't prevent the RO caches
-       * from being polluted by concurrent rendering before the stall
-       * completes. This intentionally doesn't implement the SKL+ hardware
-       * workaround suggesting to enable CS stall on PIPE_CONTROLs with the
-       * texture cache invalidation bit set for GPGPU workloads because the
-       * previous and subsequent PIPE_CONTROLs already guarantee that there is
-       * no concurrent GPGPU kernel execution (see SKL HSD 2132585).
-       */
-      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
-                     .TextureCacheInvalidationEnable = true,
-                     .ConstantCacheInvalidationEnable = true,
-                     .InstructionCacheInvalidateEnable = true,
-                     .StateCacheInvalidationEnable = true,
-                     .PostSyncOperation = NoWrite);
-
-      /* Now send a third stalling flush to make sure that invalidation is
-       * complete when the L3 configuration registers are modified.
-       */
-      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
-                     .DCFlushEnable = true,
-                     .PostSyncOperation = NoWrite,
-                     .CommandStreamerStallEnable = true);
-
-      anv_finishme("write GEN7_L3SQCREG1");
-      emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG2), l3cr2_val);
-
-      uint32_t l3cr3_slm, l3cr3_noslm;
-      anv_pack_struct(&l3cr3_noslm, GENX(L3CNTLREG3),
-                      .ISAllocation = 8,
-                      .CAllocation = 4,
-                      .TAllocation = 8);
-      anv_pack_struct(&l3cr3_slm, GENX(L3CNTLREG3),
-                      .ISAllocation = 8,
-                      .CAllocation = 8,
-                      .TAllocation = 8);
-      const uint32_t l3cr3_val = enable_slm ? l3cr3_slm : l3cr3_noslm;
-      emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG3), l3cr3_val);
-
-      cmd_buffer->state.current_l3_config = l3cr2_val;
-   }
-}
+   const struct anv_render_pass *pass = cmd_buffer->state.pass;
+   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
 
-void
-genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
-{
-   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
-   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
-   VkResult result;
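+   /* Without a depth attachment the format is ignored, so any valid value
+    * will do.
+    */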
+   if (!subpass->depth_stencil_attachment)
+      return D16_UNORM;
 
-   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);
+   struct anv_render_pass_attachment *att =
+      &pass->attachments[subpass->depth_stencil_attachment->attachment];
 
-   bool needs_slm = cs_prog_data->base.total_shared > 0;
-   genX(cmd_buffer_config_l3)(cmd_buffer, needs_slm);
+   switch (att->format) {
+   case VK_FORMAT_D16_UNORM:
+   case VK_FORMAT_D16_UNORM_S8_UINT:
+      return D16_UNORM;
 
-   genX(flush_pipeline_select_gpgpu)(cmd_buffer);
+   case VK_FORMAT_X8_D24_UNORM_PACK32:
+   case VK_FORMAT_D24_UNORM_S8_UINT:
+      return D24_UNORM_X8_UINT;
 
-   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)
-      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
+   case VK_FORMAT_D32_SFLOAT:
+   case VK_FORMAT_D32_SFLOAT_S8_UINT:
+      return D32_FLOAT;
 
-   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
-       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
-      /* FIXME: figure out descriptors for gen7 */
-      result = flush_compute_descriptor_set(cmd_buffer);
-      assert(result == VK_SUCCESS);
-      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
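+   /* Attachments with no depth bits, such as VK_FORMAT_S8_UINT, end up
+    * here; fall back to D16_UNORM.
+    */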
+   default:
+      return D16_UNORM;
    }
-
-   cmd_buffer->state.compute_dirty = 0;
 }
 
 void
 genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
 {
-   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
-
-   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
-                                  ANV_CMD_DIRTY_RENDER_TARGETS |
-                                  ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH |
-                                  ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
-
-      const struct anv_image_view *iview =
-         anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
-      const struct anv_image *image = iview ? iview->image : NULL;
-      const struct anv_format *anv_format =
-         iview ? anv_format_for_vk_format(iview->vk_format) : NULL;
-      const bool has_depth = iview && anv_format->has_depth;
-      const uint32_t depth_format = has_depth ?
-         isl_surf_get_depth_format(&cmd_buffer->device->isl_dev,
-                                   &image->depth_surface.isl) : D16_UNORM;
-
+   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+   struct anv_dynamic_state *d = &cmd_buffer->state.gfx.dynamic;
+
+   if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
+                                      ANV_CMD_DIRTY_RENDER_TARGETS |
+                                      ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH |
+                                      ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS |
+                                      ANV_CMD_DIRTY_DYNAMIC_CULL_MODE |
+                                      ANV_CMD_DIRTY_DYNAMIC_FRONT_FACE)) {
       uint32_t sf_dw[GENX(3DSTATE_SF_length)];
       struct GENX(3DSTATE_SF) sf = {
          GENX(3DSTATE_SF_header),
-         .DepthBufferSurfaceFormat = depth_format,
-         .LineWidth = cmd_buffer->state.dynamic.line_width,
-         .GlobalDepthOffsetConstant = cmd_buffer->state.dynamic.depth_bias.bias,
-         .GlobalDepthOffsetScale = cmd_buffer->state.dynamic.depth_bias.slope,
-         .GlobalDepthOffsetClamp = cmd_buffer->state.dynamic.depth_bias.clamp
+         .DepthBufferSurfaceFormat = get_depth_format(cmd_buffer),
+         .LineWidth = d->line_width,
+         .GlobalDepthOffsetConstant = d->depth_bias.bias,
+         .GlobalDepthOffsetScale = d->depth_bias.slope,
+         .GlobalDepthOffsetClamp = d->depth_bias.clamp,
+         .FrontWinding            = genX(vk_to_gen_front_face)[d->front_face],
+         .CullMode                = genX(vk_to_gen_cullmode)[d->cull_mode],
       };
       GENX(3DSTATE_SF_pack)(NULL, sf_dw, &sf);
 
       anv_batch_emit_merge(&cmd_buffer->batch, sf_dw, pipeline->gen7.sf);
    }
 
-   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
-                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
-      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
+   if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
+                                      ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
       struct anv_state cc_state =
          anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                             GENX(COLOR_CALC_STATE_length) * 4,
                                             64);
       struct GENX(COLOR_CALC_STATE) cc = {
-         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
-         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
-         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
-         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
+         .BlendConstantColorRed = d->blend_constants[0],
+         .BlendConstantColorGreen = d->blend_constants[1],
+         .BlendConstantColorBlue = d->blend_constants[2],
+         .BlendConstantColorAlpha = d->blend_constants[3],
          .StencilReferenceValue = d->stencil_reference.front & 0xff,
-         .BackFaceStencilReferenceValue = d->stencil_reference.back & 0xff,
+         .BackfaceStencilReferenceValue = d->stencil_reference.back & 0xff,
       };
       GENX(COLOR_CALC_STATE_pack)(NULL, cc_state.map, &cc);
-      if (!cmd_buffer->device->info.has_llc)
-         anv_state_clflush(cc_state);
 
-      anv_batch_emit(&cmd_buffer->batch,
-                     GENX(3DSTATE_CC_STATE_POINTERS),
-                     .ColorCalcStatePointer = cc_state.offset);
+      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), ccp) {
+         ccp.ColorCalcStatePointer = cc_state.offset;
+      }
    }
 
-   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
-                                  ANV_CMD_DIRTY_RENDER_TARGETS |
-                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
-                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
+   if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE) {
+      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_LINE_STIPPLE), ls) {
+         ls.LineStipplePattern = d->line_stipple.pattern;
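+         /* The hardware wants the reciprocal of the repeat count as well;
+          * MAX2 guards against dividing by a zero factor.
+          */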
+         ls.LineStippleInverseRepeatCount =
+            1.0f / MAX2(1, d->line_stipple.factor);
+         ls.LineStippleRepeatCount = d->line_stipple.factor;
+      }
+   }
+
+   if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
+                                      ANV_CMD_DIRTY_RENDER_TARGETS |
+                                      ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
+                                      ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
+                                      ANV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE |
+                                      ANV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE |
+                                      ANV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP |
+                                      ANV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE |
+                                      ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP)) {
       uint32_t depth_stencil_dw[GENX(DEPTH_STENCIL_STATE_length)];
-      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
 
       struct GENX(DEPTH_STENCIL_STATE) depth_stencil = {
          .StencilTestMask = d->stencil_compare_mask.front & 0xff,
@@ -462,6 +267,23 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
 
          .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
          .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,
+
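+         /* Only enable stencil writes when the test is enabled and at
+          * least one write mask is non-zero.
+          */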
+         .StencilBufferWriteEnable =
+            (d->stencil_write_mask.front || d->stencil_write_mask.back) &&
+            d->stencil_test_enable,
+
+         .DepthTestEnable = d->depth_test_enable,
+         .DepthBufferWriteEnable = d->depth_test_enable && d->depth_write_enable,
+         .DepthTestFunction = genX(vk_to_gen_compare_op)[d->depth_compare_op],
+         .StencilTestEnable = d->stencil_test_enable,
+         .StencilFailOp = genX(vk_to_gen_stencil_op)[d->stencil_op.front.fail_op],
+         .StencilPassDepthPassOp = genX(vk_to_gen_stencil_op)[d->stencil_op.front.pass_op],
+         .StencilPassDepthFailOp = genX(vk_to_gen_stencil_op)[d->stencil_op.front.depth_fail_op],
+         .StencilTestFunction = genX(vk_to_gen_compare_op)[d->stencil_op.front.compare_op],
+         .BackfaceStencilFailOp = genX(vk_to_gen_stencil_op)[d->stencil_op.back.fail_op],
+         .BackfaceStencilPassDepthPassOp = genX(vk_to_gen_stencil_op)[d->stencil_op.back.pass_op],
+         .BackfaceStencilPassDepthFailOp = genX(vk_to_gen_stencil_op)[d->stencil_op.back.depth_fail_op],
+         .BackfaceStencilTestFunction = genX(vk_to_gen_compare_op)[d->stencil_op.back.compare_op],
       };
       GENX(DEPTH_STENCIL_STATE_pack)(NULL, depth_stencil_dw, &depth_stencil);
 
@@ -471,63 +293,56 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
                                       GENX(DEPTH_STENCIL_STATE_length), 64);
 
       anv_batch_emit(&cmd_buffer->batch,
-                     GENX(3DSTATE_DEPTH_STENCIL_STATE_POINTERS),
-                     .PointertoDEPTH_STENCIL_STATE = ds_state.offset);
+                     GENX(3DSTATE_DEPTH_STENCIL_STATE_POINTERS), dsp) {
+         dsp.PointertoDEPTH_STENCIL_STATE = ds_state.offset;
+      }
    }
 
-   if (cmd_buffer->state.gen7.index_buffer &&
-       cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
-                                  ANV_CMD_DIRTY_INDEX_BUFFER)) {
-      struct anv_buffer *buffer = cmd_buffer->state.gen7.index_buffer;
-      uint32_t offset = cmd_buffer->state.gen7.index_offset;
+   if (cmd_buffer->state.gfx.gen7.index_buffer &&
+       cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
+                                      ANV_CMD_DIRTY_INDEX_BUFFER)) {
+      struct anv_buffer *buffer = cmd_buffer->state.gfx.gen7.index_buffer;
+      uint32_t offset = cmd_buffer->state.gfx.gen7.index_offset;
 
 #if GEN_IS_HASWELL
-      anv_batch_emit(&cmd_buffer->batch, GEN75_3DSTATE_VF,
-                     .IndexedDrawCutIndexEnable = pipeline->primitive_restart,
-                     .CutIndex = cmd_buffer->state.restart_index);
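+      /* Haswell moved the programmable cut index out of
+       * 3DSTATE_INDEX_BUFFER and into the new 3DSTATE_VF packet.
+       */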
+      anv_batch_emit(&cmd_buffer->batch, GEN75_3DSTATE_VF, vf) {
+         vf.IndexedDrawCutIndexEnable  = pipeline->primitive_restart;
+         vf.CutIndex                   = cmd_buffer->state.restart_index;
+      }
 #endif
 
-      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER),
+      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER), ib) {
 #if !GEN_IS_HASWELL
-                     .CutIndexEnable = pipeline->primitive_restart,
+         ib.CutIndexEnable             = pipeline->primitive_restart;
 #endif
-                     .IndexFormat = cmd_buffer->state.gen7.index_type,
-                     .MemoryObjectControlState = GENX(MOCS),
-                     .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
-                     .BufferEndingAddress = { buffer->bo, buffer->offset + buffer->size });
+         ib.IndexFormat                = cmd_buffer->state.gfx.gen7.index_type;
+         ib.MOCS                       = anv_mocs_for_bo(cmd_buffer->device,
+                                                         buffer->address.bo);
+
+         ib.BufferStartingAddress      = anv_address_add(buffer->address,
+                                                         offset);
+         ib.BufferEndingAddress        = anv_address_add(buffer->address,
+                                                         buffer->size);
+      }
    }
 
-   cmd_buffer->state.dirty = 0;
-}
+   if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
+                                      ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY)) {
+      uint32_t topology;
+      if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
+         topology = d->primitive_topology;
+      else
+         topology = genX(vk_to_gen_primitive_type)[d->primitive_topology];
 
-void genX(CmdSetEvent)(
-    VkCommandBuffer                             commandBuffer,
-    VkEvent                                     event,
-    VkPipelineStageFlags                        stageMask)
-{
-   stub();
-}
+      cmd_buffer->state.gfx.primitive_topology = topology;
+   }
 
-void genX(CmdResetEvent)(
-    VkCommandBuffer                             commandBuffer,
-    VkEvent                                     event,
-    VkPipelineStageFlags                        stageMask)
-{
-   stub();
+   cmd_buffer->state.gfx.dirty = 0;
 }
 
-void genX(CmdWaitEvents)(
-    VkCommandBuffer                             commandBuffer,
-    uint32_t                                    eventCount,
-    const VkEvent*                              pEvents,
-    VkPipelineStageFlags                        srcStageMask,
-    VkPipelineStageFlags                        destStageMask,
-    uint32_t                                    memoryBarrierCount,
-    const VkMemoryBarrier*                      pMemoryBarriers,
-    uint32_t                                    bufferMemoryBarrierCount,
-    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
-    uint32_t                                    imageMemoryBarrierCount,
-    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
+void
+genX(cmd_buffer_enable_pma_fix)(struct anv_cmd_buffer *cmd_buffer,
+                                bool enable)
 {
-   stub();
+   /* The NP PMA fix doesn't exist on gen7 */
 }