/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

#if GEN_GEN == 7 && !GEN_IS_HASWELL
void
gen7_cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,
                                         VkShaderStageFlags stages)
{
   static const uint32_t sampler_state_opcodes[] = {
      [MESA_SHADER_VERTEX]    = 43,
      [MESA_SHADER_TESS_CTRL] = 44, /* HS */
      [MESA_SHADER_TESS_EVAL] = 45, /* DS */
      [MESA_SHADER_GEOMETRY]  = 46,
      [MESA_SHADER_FRAGMENT]  = 47,
      [MESA_SHADER_COMPUTE]   = 0,
   };

   static const uint32_t binding_table_opcodes[] = {
      [MESA_SHADER_VERTEX]    = 38,
      [MESA_SHADER_TESS_CTRL] = 39,
      [MESA_SHADER_TESS_EVAL] = 40,
      [MESA_SHADER_GEOMETRY]  = 41,
      [MESA_SHADER_FRAGMENT]  = 42,
      [MESA_SHADER_COMPUTE]   = 0,
   };
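
   /* The per-stage 3DSTATE_*_POINTERS packets share one layout and differ
    * only in their 3D command sub-opcode, so we can emit the VS flavor of
    * each packet and simply override _3DCommandSubOpcode from the tables
    * above to retarget it at any stage.
    */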
   anv_foreach_stage(s, stages) {
      if (cmd_buffer->state.samplers[s].alloc_size > 0) {
         anv_batch_emit(&cmd_buffer->batch,
                        GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS),
                        ._3DCommandSubOpcode = sampler_state_opcodes[s],
                        .PointertoVSSamplerState =
                           cmd_buffer->state.samplers[s].offset);
      }

      /* Always emit binding table pointers if we're asked to, since on SKL
       * this is what flushes push constants. */
      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_BINDING_TABLE_POINTERS_VS),
                     ._3DCommandSubOpcode = binding_table_opcodes[s],
                     .PointertoVSBindingTable =
                        cmd_buffer->state.binding_tables[s].offset);
   }
}

uint32_t
gen7_cmd_buffer_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
{
   VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
                              cmd_buffer->state.pipeline->active_stages;

   VkResult result = VK_SUCCESS;
   anv_foreach_stage(s, dirty) {
      result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
                                            &cmd_buffer->state.samplers[s]);
      if (result != VK_SUCCESS)
         break;
      result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
                                                 &cmd_buffer->state.binding_tables[s]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);

      result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      assert(result == VK_SUCCESS);

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      /* Re-emit all active binding tables */
      dirty |= cmd_buffer->state.pipeline->active_stages;
      anv_foreach_stage(s, dirty) {
         result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
                                               &cmd_buffer->state.samplers[s]);
         if (result != VK_SUCCESS)
            return result;
         result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
                                                    &cmd_buffer->state.binding_tables[s]);
         if (result != VK_SUCCESS)
            return result;
      }
   }

   cmd_buffer->state.descriptors_dirty &= ~dirty;

   return dirty;
}
#endif /* GEN_GEN == 7 && !GEN_IS_HASWELL */

static inline int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
{
   if (x < min)
      return min;
   else if (x < max)
      return x;
   else
      return max;
}

#if GEN_GEN == 7 && !GEN_IS_HASWELL
static void
emit_scissor_state(struct anv_cmd_buffer *cmd_buffer,
                   uint32_t count, const VkRect2D *scissors)
{
   struct anv_state scissor_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 8, 32);
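   /* Each GEN7_SCISSOR_RECT packs into two dwords (8 bytes), hence the
    * count * 8 allocation size above.
    */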

   for (uint32_t i = 0; i < count; i++) {
      const VkRect2D *s = &scissors[i];

      /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
       * ymax < ymin for empty clips.  In case clip x, y, width height are all
       * 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which isn't
       * what we want. Just special case empty clips and produce a canonical
       * empty clip. */
      static const struct GEN7_SCISSOR_RECT empty_scissor = {
         .ScissorRectangleYMin = 1,
         .ScissorRectangleXMin = 1,
         .ScissorRectangleYMax = 0,
         .ScissorRectangleXMax = 0
      };

      const int max = 0xffff;
      struct GEN7_SCISSOR_RECT scissor = {
         /* Do this math using int64_t so overflow gets clamped correctly. */
         .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
         .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
         .ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y +
                                             s->extent.height - 1, 0, max),
         .ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x +
                                             s->extent.width - 1, 0, max)
      };
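
      /* For example, a 100x100 scissor at offset (0, 0) packs as Min = 0
       * and Max = 99 in each dimension, since the Max bounds are inclusive.
       */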

      if (s->extent.width <= 0 || s->extent.height <= 0) {
         GEN7_SCISSOR_RECT_pack(NULL, scissor_state.map + i * 8,
                                &empty_scissor);
      } else {
         GEN7_SCISSOR_RECT_pack(NULL, scissor_state.map + i * 8, &scissor);
      }
   }

   anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_SCISSOR_STATE_POINTERS,
                  .ScissorRectPointer = scissor_state.offset);

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(scissor_state);
}

void
gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.dynamic.scissor.count > 0) {
      emit_scissor_state(cmd_buffer, cmd_buffer->state.dynamic.scissor.count,
                         cmd_buffer->state.dynamic.scissor.scissors);
   } else {
      /* Emit a default scissor based on the currently bound framebuffer */
      emit_scissor_state(cmd_buffer, 1,
                         &(VkRect2D) {
                            .offset = { .x = 0, .y = 0, },
                            .extent = {
                               .width = cmd_buffer->state.framebuffer->width,
                               .height = cmd_buffer->state.framebuffer->height,
                            },
                         });
   }
}
#endif

static const uint32_t vk_to_gen_index_type[] = {
   [VK_INDEX_TYPE_UINT16] = INDEX_WORD,
   [VK_INDEX_TYPE_UINT32] = INDEX_DWORD,
};

static const uint32_t restart_index_for_type[] = {
   [VK_INDEX_TYPE_UINT16] = UINT16_MAX,
   [VK_INDEX_TYPE_UINT32] = UINT32_MAX,
};
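
/* Vulkan defines the primitive restart index as all ones for the width of
 * the bound index type, which is exactly what the table above encodes.
 */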

void genX(CmdBindIndexBuffer)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    VkIndexType                                 indexType)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
   if (GEN_IS_HASWELL)
      cmd_buffer->state.restart_index = restart_index_for_type[indexType];
   cmd_buffer->state.gen7.index_buffer = buffer;
   cmd_buffer->state.gen7.index_type = vk_to_gen_index_type[indexType];
   cmd_buffer->state.gen7.index_offset = offset;
}
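
/* Note that CmdBindIndexBuffer emits no packet itself: the state is latched
 * above and the actual 3DSTATE_INDEX_BUFFER is emitted from
 * genX(cmd_buffer_flush_dynamic_state) below, keyed off
 * ANV_CMD_DIRTY_INDEX_BUFFER.
 */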

static VkResult
flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = anv_cmd_buffer_emit_samplers(cmd_buffer,
                                         MESA_SHADER_COMPUTE, &samplers);
   if (result != VK_SUCCESS)
      return result;
   result = anv_cmd_buffer_emit_binding_table(cmd_buffer,
                                              MESA_SHADER_COMPUTE, &surfaces);
   if (result != VK_SUCCESS)
      return result;

   struct anv_state push_state = anv_cmd_buffer_cs_push_constants(cmd_buffer);

   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
   unsigned push_constant_data_size =
      (prog_data->nr_params + local_id_dwords) * 4;
   unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
   unsigned push_constant_regs = reg_aligned_constant_size / 32;
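   /* Worked example: with nr_params == 4 and a single local-invocation-ID
    * register (8 dwords), the data size is (4 + 8) * 4 = 48 bytes, which
    * rounds up to 64 bytes, i.e. push_constant_regs == 2.
    */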

   if (push_state.alloc_size) {
      anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD),
                     .CURBETotalDataLength = push_state.alloc_size,
                     .CURBEDataStartAddress = push_state.offset);
   }

   assert(prog_data->total_shared <= 64 * 1024);
   uint32_t slm_size = 0;
   if (prog_data->total_shared > 0) {
      /* slm_size is in 4k increments, but must be a power of 2. */
      slm_size = 4 * 1024;
      while (slm_size < prog_data->total_shared)
         slm_size <<= 1;
      slm_size /= 4 * 1024;
   }
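   /* For example, 5 KiB of shared memory rounds up to the next power of two
    * (8 KiB), which then encodes as 8192 / 4096 == 2.
    */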

   struct anv_state state =
      anv_state_pool_emit(&device->dynamic_state_pool,
                          GENX(INTERFACE_DESCRIPTOR_DATA), 64,
                          .KernelStartPointer = pipeline->cs_simd,
                          .BindingTablePointer = surfaces.offset,
                          .SamplerStatePointer = samplers.offset,
                          .ConstantURBEntryReadLength =
                             push_constant_regs,
#if !GEN_IS_HASWELL
                          .ConstantURBEntryReadOffset = 0,
#endif
                          .BarrierEnable = cs_prog_data->uses_barrier,
                          .SharedLocalMemorySize = slm_size,
                          .NumberofThreadsinGPGPUThreadGroup =
                             pipeline->cs_thread_width_max);

   const uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
   anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD),
                  .InterfaceDescriptorTotalLength = size,
                  .InterfaceDescriptorDataStartAddress = state.offset);

   return VK_SUCCESS;
}

#define emit_lri(batch, reg, imm)                       \
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM),    \
                  .RegisterOffset = __anv_reg_num(reg), \
                  .DataDWord = imm)
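
/* MI_LOAD_REGISTER_IMM writes an immediate dword into an MMIO register from
 * the command stream; the helper above is used below to program the L3
 * control registers.
 */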

void
genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
{
   /* References for GL state:
    *
    * - commits e307cfa..228d5a3
    * - src/mesa/drivers/dri/i965/gen7_l3_state.c
    */

   uint32_t l3cr2_slm, l3cr2_noslm;
   anv_pack_struct(&l3cr2_noslm, GENX(L3CNTLREG2),
                   .URBAllocation = 24,
                   .ROAllocation = 0,
                   .DCAllocation = 16);
   anv_pack_struct(&l3cr2_slm, GENX(L3CNTLREG2),
                   .SLMEnable = 1,
                   .URBAllocation = 16,
                   .URBLowBandwidth = 1,
                   .ROAllocation = 0,
                   .DCAllocation = 8);
   const uint32_t l3cr2_val = enable_slm ? l3cr2_slm : l3cr2_noslm;
   bool changed = cmd_buffer->state.current_l3_config != l3cr2_val;
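
   /* Comparing against the cached value lets us skip the reprogramming
    * sequence below, and in particular its three full pipeline stalls,
    * whenever the L3 configuration is already the one we need.
    */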
   if (changed) {
      /* According to the hardware docs, the L3 partitioning can only be
       * changed while the pipeline is completely drained and the caches are
       * flushed, which involves a first PIPE_CONTROL flush which stalls the
       * pipeline...
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DCFlushEnable = true,
                     .PostSyncOperation = NoWrite,
                     .CommandStreamerStallEnable = true);

      /* ...followed by a second pipelined PIPE_CONTROL that initiates
       * invalidation of the relevant caches.  Note that because RO
       * invalidation happens at the top of the pipeline (i.e. right away as
       * the PIPE_CONTROL command is processed by the CS) we cannot combine it
       * with the previous stalling flush as the hardware documentation
       * suggests, because that would cause the CS to stall on previous
       * rendering *after* RO invalidation and wouldn't prevent the RO caches
       * from being polluted by concurrent rendering before the stall
       * completes.  This intentionally doesn't implement the SKL+ hardware
       * workaround suggesting to enable CS stall on PIPE_CONTROLs with the
       * texture cache invalidation bit set for GPGPU workloads because the
       * previous and subsequent PIPE_CONTROLs already guarantee that there is
       * no concurrent GPGPU kernel execution (see SKL HSD 2132585).
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .TextureCacheInvalidationEnable = true,
                     .ConstantCacheInvalidationEnable = true,
                     .InstructionCacheInvalidateEnable = true,
                     .StateCacheInvalidationEnable = true,
                     .PostSyncOperation = NoWrite);

      /* Now send a third stalling flush to make sure that invalidation is
       * complete when the L3 configuration registers are modified.
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DCFlushEnable = true,
                     .PostSyncOperation = NoWrite,
                     .CommandStreamerStallEnable = true);

      anv_finishme("write GEN7_L3SQCREG1");
      emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG2), l3cr2_val);

      uint32_t l3cr3_slm, l3cr3_noslm;
      anv_pack_struct(&l3cr3_noslm, GENX(L3CNTLREG3),
                      .ISAllocation = 8,
                      .CAllocation = 4,
                      .TAllocation = 8);
      anv_pack_struct(&l3cr3_slm, GENX(L3CNTLREG3),
                      .ISAllocation = 8,
                      .CAllocation = 8,
                      .TAllocation = 8);
      const uint32_t l3cr3_val = enable_slm ? l3cr3_slm : l3cr3_noslm;
      emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG3), l3cr3_val);

      cmd_buffer->state.current_l3_config = l3cr2_val;
   }
}

void
genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   VkResult result;

   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);

   bool needs_slm = cs_prog_data->base.total_shared > 0;
   genX(cmd_buffer_config_l3)(cmd_buffer, needs_slm);

   genX(flush_pipeline_select_gpgpu)(cmd_buffer);

   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
      /* FIXME: figure out descriptors for gen7 */
      result = flush_compute_descriptor_set(cmd_buffer);
      assert(result == VK_SUCCESS);
      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
   }

   cmd_buffer->state.compute_dirty = 0;
}

void
genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_RENDER_TARGETS |
                                  ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH |
                                  ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {

      const struct anv_image_view *iview =
         anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
      const struct anv_image *image = iview ? iview->image : NULL;
      const struct anv_format *anv_format =
         iview ? anv_format_for_vk_format(iview->vk_format) : NULL;
      const bool has_depth = iview && anv_format->has_depth;
      const uint32_t depth_format = has_depth ?
         isl_surf_get_depth_format(&cmd_buffer->device->isl_dev,
                                   &image->depth_surface.isl) : D16_UNORM;

      uint32_t sf_dw[GENX(3DSTATE_SF_length)];
      struct GENX(3DSTATE_SF) sf = {
         GENX(3DSTATE_SF_header),
         .DepthBufferSurfaceFormat = depth_format,
         .LineWidth = cmd_buffer->state.dynamic.line_width,
         .GlobalDepthOffsetConstant = cmd_buffer->state.dynamic.depth_bias.bias,
         .GlobalDepthOffsetScale = cmd_buffer->state.dynamic.depth_bias.slope,
         .GlobalDepthOffsetClamp = cmd_buffer->state.dynamic.depth_bias.clamp
      };
      GENX(3DSTATE_SF_pack)(NULL, sf_dw, &sf);

      anv_batch_emit_merge(&cmd_buffer->batch, sf_dw, pipeline->gen7.sf);
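      /* anv_batch_emit_merge ORs the freshly packed dynamic dwords into the
       * partially packed 3DSTATE_SF saved in the pipeline, combining static
       * and dynamic fields without re-packing the whole packet.
       */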
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GENX(COLOR_CALC_STATE_length) * 4,
                                            64);
      struct GENX(COLOR_CALC_STATE) cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
         .StencilReferenceValue = d->stencil_reference.front & 0xff,
         .BackFaceStencilReferenceValue = d->stencil_reference.back & 0xff,
      };
      GENX(COLOR_CALC_STATE_pack)(NULL, cc_state.map, &cc);
      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_CC_STATE_POINTERS),
                     .ColorCalcStatePointer = cc_state.offset);
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_RENDER_TARGETS |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
      uint32_t depth_stencil_dw[GENX(DEPTH_STENCIL_STATE_length)];
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;

      struct GENX(DEPTH_STENCIL_STATE) depth_stencil = {
         .StencilTestMask = d->stencil_compare_mask.front & 0xff,
         .StencilWriteMask = d->stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,
      };
      GENX(DEPTH_STENCIL_STATE_pack)(NULL, depth_stencil_dw, &depth_stencil);

      struct anv_state ds_state =
         anv_cmd_buffer_merge_dynamic(cmd_buffer, depth_stencil_dw,
                                      pipeline->gen7.depth_stencil_state,
                                      GENX(DEPTH_STENCIL_STATE_length), 64);

      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_DEPTH_STENCIL_STATE_POINTERS),
                     .PointertoDEPTH_STENCIL_STATE = ds_state.offset);
   }

   if (cmd_buffer->state.gen7.index_buffer &&
       cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_INDEX_BUFFER)) {
      struct anv_buffer *buffer = cmd_buffer->state.gen7.index_buffer;
      uint32_t offset = cmd_buffer->state.gen7.index_offset;

#if GEN_IS_HASWELL
      anv_batch_emit(&cmd_buffer->batch, GEN75_3DSTATE_VF,
                     .IndexedDrawCutIndexEnable = pipeline->primitive_restart,
                     .CutIndex = cmd_buffer->state.restart_index);
#endif
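
      /* On Haswell the cut index is programmable through 3DSTATE_VF above;
       * Ivy Bridge only has the CutIndexEnable bit in 3DSTATE_INDEX_BUFFER,
       * with the cut index itself fixed to all ones, which is why
       * restart_index is only tracked on Haswell.
       */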
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER),
#if !GEN_IS_HASWELL
                     .CutIndexEnable = pipeline->primitive_restart,
#endif
                     .IndexFormat = cmd_buffer->state.gen7.index_type,
                     .MemoryObjectControlState = GENX(MOCS),
                     .BufferStartingAddress = { buffer->bo,
                                                buffer->offset + offset },
                     .BufferEndingAddress = { buffer->bo,
                                              buffer->offset + buffer->size });
   }

   cmd_buffer->state.dirty = 0;
}
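
/* The VkEvent entrypoints below are not implemented for gen7 yet; each one
 * simply calls anv's stub() helper.
 */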

void genX(CmdSetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     event,
    VkPipelineStageFlags                        stageMask)
{
   stub();
}

void genX(CmdResetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     event,
    VkPipelineStageFlags                        stageMask)
{
   stub();
}

void genX(CmdWaitEvents)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    eventCount,
    const VkEvent*                              pEvents,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    uint32_t                                    memoryBarrierCount,
    const VkMemoryBarrier*                      pMemoryBarriers,
    uint32_t                                    bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
    uint32_t                                    imageMemoryBarrierCount,
    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   stub();
}