winsys/amdgpu: avoid ioctl call when fence_wait is called without timeout
[mesa.git] / src / intel / vulkan / genX_cmd_buffer.c
index abf096133a58f8f443b97b0ccb684cf53e5f8c55..ee47c2926e0579c00346a16d7eecc6b1b84c4081 100644 (file)
@@ -49,12 +49,12 @@ genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
     * this, we get GPU hangs when using multi-level command buffers which
     * clear depth, reset state base address, and then go render stuff.
     */
-   anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
       pc.RenderTargetCacheFlushEnable = true;
    }
 #endif
 
-   anv_batch_emit_blk(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) {
+   anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) {
       sba.GeneralStateBaseAddress = (struct anv_address) { scratch_bo, 0 };
       sba.GeneralStateMemoryObjectControlState = GENX(MOCS);
       sba.GeneralStateBaseAddressModifyEnable = true;
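Both the old anv_batch_emit_blk and the new anv_batch_emit spelling are block-style emit macros: the braces open a scope in which the command's fields are filled in on a stack struct, and the packed dwords are written into the batch when that scope ends. A minimal sketch of the pattern, assuming hypothetical helpers cmd_header(), cmd_length(), emit_dwords() and pack_cmd() in place of the real anv plumbing:

/* Simplified sketch of the block-emit pattern (illustrative only; the real
 * macro lives in anv_private.h and does more bookkeeping).  The struct starts
 * out holding just the opcode/length header, the block body fills in the
 * fields, and the for-loop "increment" packs the struct into the batch when
 * the scope exits. */
#define batch_emit_sketch(batch, cmd, name)                                   \
   for (struct cmd name = { cmd_header(cmd) },                                \
        *_dst = emit_dwords(batch, cmd_length(cmd));                          \
        _dst != NULL;                                                         \
        pack_cmd(batch, _dst, &name), _dst = NULL)

A use with no braces, such as anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS), cp);, therefore emits the packet with every field left at its default, which is how the stencil, hierarchical-depth and clear-params packets are disabled further down in this file.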
@@ -131,7 +131,7 @@ genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
     * units cache the binding table in the texture cache.  However, we have
     * yet to be able to actually confirm this.
     */
-   anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
       pc.TextureCacheInvalidationEnable = true;
    }
 }
@@ -295,10 +295,10 @@ cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
       struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);
 
       if (state.offset == 0) {
-         anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c)
+         anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c)
             c._3DCommandSubOpcode = push_constant_opcodes[stage];
       } else {
-         anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c) {
+         anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c) {
             c._3DCommandSubOpcode = push_constant_opcodes[stage],
             c.ConstantBody = (struct GENX(3DSTATE_CONSTANT_BODY)) {
 #if GEN_GEN >= 9
@@ -330,18 +330,7 @@ genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
 
    assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);
 
-#if GEN_GEN >= 8
-   /* FIXME (jason): Currently, the config_l3 function causes problems on
-    * Haswell and prior if you have a kernel older than 4.4.  In order to
-    * work, it requires a couple of registers be white-listed in the
-    * command parser and they weren't added until 4.4.  What we should do
-    * is check the command parser version and make it a no-op if your
-    * command parser is either off or too old.  Compute won't work 100%,
-    * but at least 3-D will.  In the mean time, I'm going to make this
-    * gen8+ only so that we can get Haswell working again.
-    */
-   genX(cmd_buffer_config_l3)(cmd_buffer, false);
-#endif
+   genX(cmd_buffer_config_l3)(cmd_buffer, pipeline);
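The FIXME removed here suggests keying this off the kernel's command parser rather than off GEN_GEN. For reference, a rough sketch of such a check, assuming the parser version is queried once at device-creation time through I915_PARAM_CMD_PARSER_VERSION and cached; the minimum version needed for the L3 config registers is left as a placeholder since the comment does not state it:

/* Hypothetical sketch: ask i915 for its command parser version so the L3
 * configuration can be turned into a no-op on kernels that would reject the
 * register writes.  Include paths and MIN_CMD_PARSER_FOR_L3 are assumptions. */
#include <xf86drm.h>
#include <i915_drm.h>

static int
get_cmd_parser_version(int fd)
{
   int version = -1;
   struct drm_i915_getparam gp = {
      .param = I915_PARAM_CMD_PARSER_VERSION,
      .value = &version,
   };

   if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
      return -1;   /* query failed; treat the parser as too old */

   return version;
}

The cached result could then gate the call above, skipping genX(cmd_buffer_config_l3)() when the version is below the required threshold.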
 
    genX(flush_pipeline_select_3d)(cmd_buffer);
 
@@ -420,7 +409,7 @@ genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
        *    PIPE_CONTROL needs to be sent before any combination of VS
        *    associated 3DSTATE."
        */
-      anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
          pc.DepthStallEnable  = true;
          pc.PostSyncOperation = WriteImmediateData;
          pc.Address           =
@@ -521,7 +510,7 @@ void genX(CmdDraw)(
    if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
       emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);
 
-   anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
+   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
       prim.VertexAccessType         = SEQUENTIAL;
       prim.PrimitiveTopologyType    = pipeline->topology;
       prim.VertexCountPerInstance   = vertexCount;
@@ -549,7 +538,7 @@ void genX(CmdDrawIndexed)(
    if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
       emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance);
 
-   anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
+   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
       prim.VertexAccessType         = RANDOM;
       prim.PrimitiveTopologyType    = pipeline->topology;
       prim.VertexCountPerInstance   = indexCount;
@@ -572,7 +561,7 @@ static void
 emit_lrm(struct anv_batch *batch,
          uint32_t reg, struct anv_bo *bo, uint32_t offset)
 {
-   anv_batch_emit_blk(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
+   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
       lrm.RegisterAddress  = reg;
       lrm.MemoryAddress    = (struct anv_address) { bo, offset };
    }
@@ -581,7 +570,7 @@ emit_lrm(struct anv_batch *batch,
 static void
 emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
 {
-   anv_batch_emit_blk(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
+   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
       lri.RegisterOffset   = reg;
       lri.DataDWord        = imm;
    }
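These two helpers are what the indirect draw paths below use to feed the 3DPRIM registers directly from the application's indirect buffer. The Vulkan command layouts (repeated here from the Vulkan headers for reference) line up with the offsets visible in the next two hunks; the loads of the first three fields sit just above the lines shown, so their register targets are inferred rather than visible:

/* VkDrawIndirectCommand / VkDrawIndexedIndirectCommand: each field is loaded
 * with emit_lrm() into a GEN7_3DPRIM_* register; registers with no source
 * field are zeroed with emit_lri(). */
typedef struct VkDrawIndirectCommand {
   uint32_t vertexCount;     /* offset  0 -> GEN7_3DPRIM_VERTEX_COUNT   (inferred) */
   uint32_t instanceCount;   /* offset  4 -> GEN7_3DPRIM_INSTANCE_COUNT (inferred) */
   uint32_t firstVertex;     /* offset  8 -> GEN7_3DPRIM_START_VERTEX   (inferred) */
   uint32_t firstInstance;   /* offset 12 -> GEN7_3DPRIM_START_INSTANCE            */
} VkDrawIndirectCommand;

typedef struct VkDrawIndexedIndirectCommand {
   uint32_t indexCount;      /* offset  0 -> GEN7_3DPRIM_VERTEX_COUNT   (inferred) */
   uint32_t instanceCount;   /* offset  4 -> GEN7_3DPRIM_INSTANCE_COUNT (inferred) */
   uint32_t firstIndex;      /* offset  8 -> GEN7_3DPRIM_START_VERTEX   (inferred) */
   int32_t  vertexOffset;    /* offset 12 -> GEN7_3DPRIM_BASE_VERTEX               */
   uint32_t firstInstance;   /* offset 16 -> GEN7_3DPRIM_START_INSTANCE            */
} VkDrawIndexedIndirectCommand;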
@@ -612,7 +601,7 @@ void genX(CmdDrawIndirect)(
    emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
    emit_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);
 
-   anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
+   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
       prim.IndirectParameterEnable  = true;
       prim.VertexAccessType         = SEQUENTIAL;
       prim.PrimitiveTopologyType    = pipeline->topology;
@@ -645,7 +634,7 @@ void genX(CmdDrawIndexedIndirect)(
    emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
    emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
 
-   anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
+   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
       prim.IndirectParameterEnable  = true;
       prim.VertexAccessType         = RANDOM;
       prim.PrimitiveTopologyType    = pipeline->topology;
@@ -697,7 +686,7 @@ void genX(CmdDispatch)(
 
    genX(cmd_buffer_flush_compute_state)(cmd_buffer);
 
-   anv_batch_emit_blk(&cmd_buffer->batch, GENX(GPGPU_WALKER), ggw) {
+   anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER), ggw) {
       ggw.SIMDSize                     = prog_data->simd_size / 16;
       ggw.ThreadDepthCounterMaximum    = 0;
       ggw.ThreadHeightCounterMaximum   = 0;
@@ -709,7 +698,7 @@ void genX(CmdDispatch)(
       ggw.BottomExecutionMask          = 0xffffffff;
    }
 
-   anv_batch_emit_blk(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH), msf);
+   anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH), msf);
 }
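One note on the SIMDSize assignment above: the GPGPU_WALKER field is an encoding, not a lane count, and the division by 16 is what converts between the two. A trivial sketch of the assumed mapping:

/* Assumed SIMDSize encodings: SIMD8 = 0, SIMD16 = 1, SIMD32 = 2.  With
 * prog_data->simd_size holding the lane count, integer division by 16 maps
 * 8 -> 0, 16 -> 1 and 32 -> 2. */
#include <stdint.h>

static inline uint32_t
gpgpu_simd_size_encoding(uint32_t lanes)
{
   return lanes / 16;
}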
 
 #define GPGPU_DISPATCHDIMX 0x2500
@@ -761,7 +750,7 @@ void genX(CmdDispatchIndirect)(
    emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 0);
 
    /* predicate = (compute_dispatch_indirect_x_size == 0); */
-   anv_batch_emit_blk(batch, GENX(MI_PREDICATE), mip) {
+   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
       mip.LoadOperation    = LOAD_LOAD;
       mip.CombineOperation = COMBINE_SET;
       mip.CompareOperation = COMPARE_SRCS_EQUAL;
@@ -771,7 +760,7 @@ void genX(CmdDispatchIndirect)(
    emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 4);
 
    /* predicate |= (compute_dispatch_indirect_y_size == 0); */
-   anv_batch_emit_blk(batch, GENX(MI_PREDICATE), mip) {
+   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
       mip.LoadOperation    = LOAD_LOAD;
       mip.CombineOperation = COMBINE_OR;
       mip.CompareOperation = COMPARE_SRCS_EQUAL;
@@ -781,7 +770,7 @@ void genX(CmdDispatchIndirect)(
    emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 8);
 
    /* predicate |= (compute_dispatch_indirect_z_size == 0); */
-   anv_batch_emit_blk(batch, GENX(MI_PREDICATE), mip) {
+   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
       mip.LoadOperation    = LOAD_LOAD;
       mip.CombineOperation = COMBINE_OR;
       mip.CompareOperation = COMPARE_SRCS_EQUAL;
@@ -789,14 +778,14 @@ void genX(CmdDispatchIndirect)(
 
    /* predicate = !predicate; */
 #define COMPARE_FALSE                           1
-   anv_batch_emit_blk(batch, GENX(MI_PREDICATE), mip) {
+   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
       mip.LoadOperation    = LOAD_LOADINV;
       mip.CombineOperation = COMBINE_OR;
       mip.CompareOperation = COMPARE_FALSE;
    }
 #endif
 
-   anv_batch_emit_blk(batch, GENX(GPGPU_WALKER), ggw) {
+   anv_batch_emit(batch, GENX(GPGPU_WALKER), ggw) {
       ggw.IndirectParameterEnable      = true;
       ggw.PredicateEnable              = GEN_GEN <= 7;
       ggw.SIMDSize                     = prog_data->simd_size / 16;
@@ -807,7 +796,7 @@ void genX(CmdDispatchIndirect)(
       ggw.BottomExecutionMask          = 0xffffffff;
    }
 
-   anv_batch_emit_blk(batch, GENX(MEDIA_STATE_FLUSH), msf);
+   anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH), msf);
 }
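The MI_PREDICATE sequence above (which only takes effect when ggw.PredicateEnable is set, i.e. on gen7 and earlier) builds the condition one register comparison at a time from the VkDispatchIndirectCommand fields x, y and z at offsets 0, 4 and 8 of the indirect buffer. Written as ordinary host code for clarity (illustrative only, not part of the patch):

/* Host-side equivalent of the predicate computed above: the GPGPU_WALKER is
 * skipped whenever any indirect dispatch dimension is zero. */
#include <stdbool.h>
#include <stdint.h>

static inline bool
gpgpu_walker_should_run(uint32_t x, uint32_t y, uint32_t z)
{
   bool skip = (x == 0);   /* predicate  = (x == 0) */
   skip |= (y == 0);       /* predicate |= (y == 0) */
   skip |= (z == 0);       /* predicate |= (z == 0) */
   return !skip;           /* predicate  = !predicate, i.e. x && y && z */
}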
 
 static void
@@ -825,7 +814,7 @@ flush_pipeline_before_pipeline_select(struct anv_cmd_buffer *cmd_buffer,
     * hardware too.
     */
    if (pipeline == GPGPU)
-      anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
+      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
 #elif GEN_GEN <= 7
       /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
        * PIPELINE_SELECT [DevBWR+]":
@@ -837,7 +826,7 @@ flush_pipeline_before_pipeline_select(struct anv_cmd_buffer *cmd_buffer,
        *   command to invalidate read only caches prior to programming
        *   MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
        */
-      anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
          pc.RenderTargetCacheFlushEnable  = true;
          pc.DepthCacheFlushEnable         = true;
          pc.DCFlushEnable                 = true;
@@ -845,7 +834,7 @@ flush_pipeline_before_pipeline_select(struct anv_cmd_buffer *cmd_buffer,
          pc.CommandStreamerStallEnable    = true;
       }
 
-      anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
          pc.TextureCacheInvalidationEnable   = true;
          pc.ConstantCacheInvalidationEnable  = true;
          pc.StateCacheInvalidationEnable     = true;
@@ -861,7 +850,7 @@ genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
    if (cmd_buffer->state.current_pipeline != _3D) {
       flush_pipeline_before_pipeline_select(cmd_buffer, _3D);
 
-      anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
 #if GEN_GEN >= 9
          ps.MaskBits = 3;
 #endif
@@ -878,7 +867,7 @@ genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
    if (cmd_buffer->state.current_pipeline != GPGPU) {
       flush_pipeline_before_pipeline_select(cmd_buffer, GPGPU);
 
-      anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
 #if GEN_GEN >= 9
          ps.MaskBits = 3;
 #endif
@@ -927,17 +916,16 @@ cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
    const struct anv_image_view *iview =
       anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
    const struct anv_image *image = iview ? iview->image : NULL;
-   const struct anv_format *anv_format =
-      iview ? anv_format_for_vk_format(iview->vk_format) : NULL;
-   const bool has_depth = iview && anv_format->has_depth;
-   const bool has_stencil = iview && anv_format->has_stencil;
+   const bool has_depth = image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT);
+   const bool has_stencil =
+      image && (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT);
 
    /* FIXME: Implement the PMA stall W/A */
    /* FIXME: Width and Height are wrong */
 
    /* Emit 3DSTATE_DEPTH_BUFFER */
    if (has_depth) {
-      anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
+      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
          db.SurfaceType                   = SURFTYPE_2D;
          db.DepthWriteEnable              = true;
          db.StencilWriteEnable            = has_stencil;
@@ -984,7 +972,7 @@ cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
        * nor stencil buffer is present.  Also, D16_UNORM is not allowed to
        * be combined with a stencil buffer so we use D32_FLOAT instead.
        */
-      anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
+      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
          db.SurfaceType          = SURFTYPE_2D;
          db.SurfaceFormat        = D32_FLOAT;
          db.Width                = fb->width - 1;
@@ -995,7 +983,7 @@ cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
 
    /* Emit 3DSTATE_STENCIL_BUFFER */
    if (has_stencil) {
-      anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb) {
+      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb) {
 #if GEN_GEN >= 8 || GEN_IS_HASWELL
          sb.StencilBufferEnable = true,
 #endif
@@ -1017,14 +1005,14 @@ cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
          };
       }
    } else {
-      anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb);
+      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb);
    }
 
    /* Disable hierarchical depth buffers. */
-   anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hz);
+   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hz);
 
    /* Clear the clear params. */
-   anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS), cp);
+   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS), cp);
 }
 
 /**
@@ -1058,7 +1046,7 @@ void genX(CmdBeginRenderPass)(
 
    const VkRect2D *render_area = &pRenderPassBegin->renderArea;
 
-   anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_DRAWING_RECTANGLE), r) {
+   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DRAWING_RECTANGLE), r) {
       r.ClippedDrawingRectangleYMin = MAX2(render_area->offset.y, 0);
       r.ClippedDrawingRectangleXMin = MAX2(render_area->offset.x, 0);
       r.ClippedDrawingRectangleYMax =
@@ -1098,7 +1086,7 @@ static void
 emit_ps_depth_count(struct anv_batch *batch,
                     struct anv_bo *bo, uint32_t offset)
 {
-   anv_batch_emit_blk(batch, GENX(PIPE_CONTROL), pc) {
+   anv_batch_emit(batch, GENX(PIPE_CONTROL), pc) {
       pc.DestinationAddressType  = DAT_PPGTT;
       pc.PostSyncOperation       = WritePSDepthCount;
       pc.DepthStallEnable        = true;
@@ -1110,7 +1098,7 @@ static void
 emit_query_availability(struct anv_batch *batch,
                         struct anv_bo *bo, uint32_t offset)
 {
-   anv_batch_emit_blk(batch, GENX(PIPE_CONTROL), pc) {
+   anv_batch_emit(batch, GENX(PIPE_CONTROL), pc) {
       pc.DestinationAddressType  = DAT_PPGTT;
       pc.PostSyncOperation       = WriteImmediateData;
       pc.Address                 = (struct anv_address) { bo, offset };
@@ -1135,7 +1123,7 @@ void genX(CmdBeginQuery)(
     */
    if (cmd_buffer->state.need_query_wa) {
       cmd_buffer->state.need_query_wa = false;
-      anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
          pc.DepthCacheFlushEnable   = true;
          pc.DepthStallEnable        = true;
       }
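emit_ps_depth_count() and emit_query_availability() above are the building blocks the occlusion query entry points use. A rough sketch of how such helpers are typically paired around begin/end, with a hypothetical slot layout (the begin/end/availability offsets are assumptions, not taken from this diff):

/* Illustrative only: bracket an occlusion query with two PS_DEPTH_COUNT
 * snapshots and mark it available at the end; the reported result is the
 * difference of the two snapshots.  Offsets within the slot are assumed. */
static void
sketch_begin_occlusion_query(struct anv_batch *batch,
                             struct anv_bo *bo, uint32_t slot_offset)
{
   emit_ps_depth_count(batch, bo, slot_offset + 0);      /* begin counter */
}

static void
sketch_end_occlusion_query(struct anv_batch *batch,
                           struct anv_bo *bo, uint32_t slot_offset)
{
   emit_ps_depth_count(batch, bo, slot_offset + 8);      /* end counter */
   emit_query_availability(batch, bo, slot_offset + 16);
}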
@@ -1192,20 +1180,23 @@ void genX(CmdWriteTimestamp)(
 
    switch (pipelineStage) {
    case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
-      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
-                     .RegisterAddress = TIMESTAMP,
-                     .MemoryAddress = { &pool->bo, offset });
-      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
-                     .RegisterAddress = TIMESTAMP + 4,
-                     .MemoryAddress = { &pool->bo, offset + 4 });
+      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
+         srm.RegisterAddress  = TIMESTAMP;
+         srm.MemoryAddress    = (struct anv_address) { &pool->bo, offset };
+      }
+      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
+         srm.RegisterAddress  = TIMESTAMP + 4;
+         srm.MemoryAddress    = (struct anv_address) { &pool->bo, offset + 4 };
+      }
       break;
 
    default:
       /* Everything else is bottom-of-pipe */
-      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
-                     .DestinationAddressType = DAT_PPGTT,
-                     .PostSyncOperation = WriteTimestamp,
-                     .Address = { &pool->bo, offset });
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+         pc.DestinationAddressType  = DAT_PPGTT,
+         pc.PostSyncOperation       = WriteTimestamp,
+         pc.Address = (struct anv_address) { &pool->bo, offset };
+      }
       break;
    }
 
@@ -1250,26 +1241,31 @@ static void
 emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
                       struct anv_bo *bo, uint32_t offset)
 {
-   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
-                  .RegisterAddress = reg,
-                  .MemoryAddress = { bo, offset });
-   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
-                  .RegisterAddress = reg + 4,
-                  .MemoryAddress = { bo, offset + 4 });
+   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
+      lrm.RegisterAddress  = reg,
+      lrm.MemoryAddress    = (struct anv_address) { bo, offset };
+   }
+   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
+      lrm.RegisterAddress  = reg + 4;
+      lrm.MemoryAddress    = (struct anv_address) { bo, offset + 4 };
+   }
 }
 
 static void
 store_query_result(struct anv_batch *batch, uint32_t reg,
                    struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
 {
-      anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
-                     .RegisterAddress = reg,
-                     .MemoryAddress = { bo, offset });
-
-      if (flags & VK_QUERY_RESULT_64_BIT)
-         anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
-                        .RegisterAddress = reg + 4,
-                        .MemoryAddress = { bo, offset + 4 });
+   anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
+      srm.RegisterAddress  = reg;
+      srm.MemoryAddress    = (struct anv_address) { bo, offset };
+   }
+
+   if (flags & VK_QUERY_RESULT_64_BIT) {
+      anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
+         srm.RegisterAddress  = reg + 4;
+         srm.MemoryAddress    = (struct anv_address) { bo, offset + 4 };
+      }
+   }
 }
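As a usage illustration (not from this patch), the two helpers compose naturally for moving a 64-bit query value through a command-streamer general-purpose register; both the CS_GPR0 offset and the wrapper name are assumptions here:

/* Hypothetical example: stage a 64-bit value from the query pool in a CS GPR
 * and write it to the destination buffer, honoring VK_QUERY_RESULT_64_BIT. */
#define CS_GPR0 0x2600   /* assumed MMIO offset of the first CS GPR */

static void
sketch_copy_query_value(struct anv_batch *batch,
                        struct anv_bo *src, uint32_t src_offset,
                        struct anv_bo *dst, uint32_t dst_offset,
                        VkQueryResultFlags flags)
{
   emit_load_alu_reg_u64(batch, CS_GPR0, src, src_offset);
   store_query_result(batch, CS_GPR0, dst, dst_offset, flags);
}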
 
 void genX(CmdCopyQueryPoolResults)(
@@ -1288,7 +1284,7 @@ void genX(CmdCopyQueryPoolResults)(
    uint32_t slot_offset, dst_offset;
 
    if (flags & VK_QUERY_RESULT_WAIT_BIT) {
-      anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
          pc.CommandStreamerStallEnable = true;
          pc.StallAtPixelScoreboard     = true;
       }