/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

#if GEN_GEN == 7 && !GEN_IS_HASWELL
void
gen7_cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,
                                         VkShaderStageFlags stages)
{
   static const uint32_t sampler_state_opcodes[] = {
      [MESA_SHADER_VERTEX]                      = 43,
      [MESA_SHADER_TESS_CTRL]                   = 44, /* HS */
      [MESA_SHADER_TESS_EVAL]                   = 45, /* DS */
      [MESA_SHADER_GEOMETRY]                    = 46,
      [MESA_SHADER_FRAGMENT]                    = 47,
      [MESA_SHADER_COMPUTE]                     = 0,
   };

   static const uint32_t binding_table_opcodes[] = {
      [MESA_SHADER_VERTEX]                      = 38,
      [MESA_SHADER_TESS_CTRL]                   = 39,
      [MESA_SHADER_TESS_EVAL]                   = 40,
      [MESA_SHADER_GEOMETRY]                    = 41,
      [MESA_SHADER_FRAGMENT]                    = 42,
      [MESA_SHADER_COMPUTE]                     = 0,
   };
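
   /* Both packets below are emitted through the VS variant of the template;
    * overwriting _3DCommandSubOpcode with the per-stage opcode from the
    * tables above retargets the same packet layout at HS/DS/GS/PS.
    */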
   anv_foreach_stage(s, stages) {
      if (cmd_buffer->state.samplers[s].alloc_size > 0) {
         anv_batch_emit(&cmd_buffer->batch,
                        GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ssp) {
            ssp._3DCommandSubOpcode = sampler_state_opcodes[s];
            ssp.PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset;
         }
      }

      /* Always emit binding table pointers if we're asked to, since on SKL
       * this is what flushes push constants. */
      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), btp) {
         btp._3DCommandSubOpcode = binding_table_opcodes[s];
         btp.PointertoVSBindingTable =
            cmd_buffer->state.binding_tables[s].offset;
      }
   }
}

uint32_t
gen7_cmd_buffer_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
{
   VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
                              cmd_buffer->state.pipeline->active_stages;

   VkResult result = VK_SUCCESS;
   anv_foreach_stage(s, dirty) {
      result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
                                            &cmd_buffer->state.samplers[s]);
      if (result != VK_SUCCESS)
         break;
      result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
                                                 &cmd_buffer->state.binding_tables[s]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);

      result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      assert(result == VK_SUCCESS);

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      /* Re-emit all active binding tables */
      dirty |= cmd_buffer->state.pipeline->active_stages;
      anv_foreach_stage(s, dirty) {
         result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
                                               &cmd_buffer->state.samplers[s]);
         if (result != VK_SUCCESS)
            return result;
         result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
                                                    &cmd_buffer->state.binding_tables[s]);
         if (result != VK_SUCCESS)
            return result;
      }
   }

   cmd_buffer->state.descriptors_dirty &= ~dirty;

   return dirty;
}
#endif /* GEN_GEN == 7 && !GEN_IS_HASWELL */
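
/* The two helpers above are expected to be used together by the generic 3D
 * flush path (outside this file), roughly as in this illustrative sketch;
 * the exact call site is not shown here:
 *
 *    VkShaderStageFlags flushed =
 *       gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer);
 *    gen7_cmd_buffer_emit_descriptor_pointers(cmd_buffer, flushed);
 */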

static inline int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
{
   if (x < min)
      return min;
   else if (x < max)
      return x;
   else
      return max;
}

#if GEN_GEN == 7 && !GEN_IS_HASWELL
void
gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t count = cmd_buffer->state.dynamic.scissor.count;
   const VkRect2D *scissors = cmd_buffer->state.dynamic.scissor.scissors;
   struct anv_state scissor_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 8, 32);

   for (uint32_t i = 0; i < count; i++) {
      const VkRect2D *s = &scissors[i];

      /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
       * ymax < ymin for empty clips.  In case clip x, y, width height are all
       * 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which isn't
       * what we want. Just special case empty clips and produce a canonical
       * empty clip. */
      static const struct GEN7_SCISSOR_RECT empty_scissor = {
         .ScissorRectangleYMin = 1,
         .ScissorRectangleXMin = 1,
         .ScissorRectangleYMax = 0,
         .ScissorRectangleXMax = 0
      };

      const int max = 0xffff;
      struct GEN7_SCISSOR_RECT scissor = {
         /* Do this math using int64_t so overflow gets clamped correctly. */
         .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
         .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
         .ScissorRectangleYMax =
            clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
         .ScissorRectangleXMax =
            clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
      };

      if (s->extent.width <= 0 || s->extent.height <= 0) {
         GEN7_SCISSOR_RECT_pack(NULL, scissor_state.map + i * 8,
                                &empty_scissor);
      } else {
         GEN7_SCISSOR_RECT_pack(NULL, scissor_state.map + i * 8, &scissor);
      }
   }

   anv_batch_emit(&cmd_buffer->batch,
                  GEN7_3DSTATE_SCISSOR_STATE_POINTERS, ssp) {
      ssp.ScissorRectPointer = scissor_state.offset;
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(scissor_state);
}
#endif

static const uint32_t vk_to_gen_index_type[] = {
   [VK_INDEX_TYPE_UINT16]                       = INDEX_WORD,
   [VK_INDEX_TYPE_UINT32]                       = INDEX_DWORD,
};

static const uint32_t restart_index_for_type[] = {
   [VK_INDEX_TYPE_UINT16]                       = UINT16_MAX,
   [VK_INDEX_TYPE_UINT32]                       = UINT32_MAX,
};

void genX(CmdBindIndexBuffer)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    VkIndexType                                 indexType)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
   if (GEN_IS_HASWELL)
      cmd_buffer->state.restart_index = restart_index_for_type[indexType];
   cmd_buffer->state.gen7.index_buffer = buffer;
   cmd_buffer->state.gen7.index_type = vk_to_gen_index_type[indexType];
   cmd_buffer->state.gen7.index_offset = offset;
}
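
/* Only the index buffer state is recorded above; the actual
 * 3DSTATE_INDEX_BUFFER (and, on Haswell, the 3DSTATE_VF cut index) is
 * emitted later from genX(cmd_buffer_flush_dynamic_state) below.
 */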

static VkResult
flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = anv_cmd_buffer_emit_samplers(cmd_buffer,
                                         MESA_SHADER_COMPUTE, &samplers);
   if (result != VK_SUCCESS)
      return result;
   result = anv_cmd_buffer_emit_binding_table(cmd_buffer,
                                              MESA_SHADER_COMPUTE, &surfaces);
   if (result != VK_SUCCESS)
      return result;

   struct anv_state push_state = anv_cmd_buffer_cs_push_constants(cmd_buffer);

   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
   unsigned push_constant_data_size =
      (prog_data->nr_params + local_id_dwords) * 4;
   unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
   unsigned push_constant_regs = reg_aligned_constant_size / 32;
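
   /* Push constants are read by the EU in 32-byte (8-DWord) register units,
    * so the payload size (uniform params plus the per-thread local
    * invocation IDs) is aligned up to a whole register and converted to a
    * register count for ConstantURBEntryReadLength below.
    */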

   if (push_state.alloc_size) {
      anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) {
         curbe.CURBETotalDataLength = push_state.alloc_size;
         curbe.CURBEDataStartAddress = push_state.offset;
      }
   }

   assert(prog_data->total_shared <= 64 * 1024);
   uint32_t slm_size = 0;
   if (prog_data->total_shared > 0) {
      /* slm_size is in 4k increments, but must be a power of 2. */
      slm_size = 4 * 1024;
      while (slm_size < prog_data->total_shared)
         slm_size <<= 1;
      slm_size /= 4 * 1024;
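      /* Example: total_shared = 5 KiB rounds up to the next power of two,
       * 8 KiB, and is then programmed as 2 (i.e. in 4k units).
       */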
   }

   struct anv_state state =
      anv_state_pool_emit(&device->dynamic_state_pool,
                          GENX(INTERFACE_DESCRIPTOR_DATA), 64,
                          .KernelStartPointer = pipeline->cs_simd,
                          .BindingTablePointer = surfaces.offset,
                          .SamplerStatePointer = samplers.offset,
                          .ConstantURBEntryReadLength =
                             push_constant_regs,
                          .ConstantURBEntryReadOffset = 0,
                          .BarrierEnable = cs_prog_data->uses_barrier,
                          .SharedLocalMemorySize = slm_size,
                          .NumberofThreadsinGPGPUThreadGroup =
                             pipeline->cs_thread_width_max);

   const uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
   anv_batch_emit(&cmd_buffer->batch,
                  GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), idl) {
      idl.InterfaceDescriptorTotalLength = size;
      idl.InterfaceDescriptorDataStartAddress = state.offset;
   }

   return VK_SUCCESS;
}

void
genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
{
   /* References for GL state:
    *
    * - commits e307cfa..228d5a3
    * - src/mesa/drivers/dri/i965/gen7_l3_state.c
    */

   uint32_t l3cr2_slm, l3cr2_noslm;
   anv_pack_struct(&l3cr2_noslm, GENX(L3CNTLREG2),
                   .URBAllocation = 24,
                   .ROAllocation = 0,
                   .DCAllocation = 16);
   anv_pack_struct(&l3cr2_slm, GENX(L3CNTLREG2),
                   .SLMEnable = 1,
                   .URBAllocation = 16,
                   .URBLowBandwidth = 1,
                   .ROAllocation = 0,
                   .DCAllocation = 8);
   const uint32_t l3cr2_val = enable_slm ? l3cr2_slm : l3cr2_noslm;
   bool changed = cmd_buffer->state.current_l3_config != l3cr2_val;

   if (changed) {
      /* According to the hardware docs, the L3 partitioning can only be
       * changed while the pipeline is completely drained and the caches are
       * flushed, which involves a first PIPE_CONTROL flush which stalls the
       * pipeline...
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.DCFlushEnable = true;
         pc.CommandStreamerStallEnable = true;
         pc.PostSyncOperation = NoWrite;
      }

      /* ...followed by a second pipelined PIPE_CONTROL that initiates
       * invalidation of the relevant caches.  Note that because RO
       * invalidation happens at the top of the pipeline (i.e. right away as
       * the PIPE_CONTROL command is processed by the CS) we cannot combine it
       * with the previous stalling flush as the hardware documentation
       * suggests, because that would cause the CS to stall on previous
       * rendering *after* RO invalidation and wouldn't prevent the RO caches
       * from being polluted by concurrent rendering before the stall
       * completes.  This intentionally doesn't implement the SKL+ hardware
       * workaround suggesting to enable CS stall on PIPE_CONTROLs with the
       * texture cache invalidation bit set for GPGPU workloads because the
       * previous and subsequent PIPE_CONTROLs already guarantee that there is
       * no concurrent GPGPU kernel execution (see SKL HSD 2132585).
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.TextureCacheInvalidationEnable = true;
         pc.ConstantCacheInvalidationEnable = true;
         pc.InstructionCacheInvalidateEnable = true;
         pc.StateCacheInvalidationEnable = true;
         pc.PostSyncOperation = NoWrite;
      }

      /* Now send a third stalling flush to make sure that invalidation is
       * complete when the L3 configuration registers are modified.
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.DCFlushEnable = true;
         pc.CommandStreamerStallEnable = true;
         pc.PostSyncOperation = NoWrite;
      }

      anv_finishme("write GEN7_L3SQCREG1");
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
         lri.RegisterOffset = GENX(L3CNTLREG2_num);
         lri.DataDWord = l3cr2_val;
      }

      uint32_t l3cr3_slm, l3cr3_noslm;
      anv_pack_struct(&l3cr3_noslm, GENX(L3CNTLREG3),
                      .ISAllocation = 8,
                      .CAllocation = 4,
                      .TAllocation = 8);
      anv_pack_struct(&l3cr3_slm, GENX(L3CNTLREG3),
                      .ISAllocation = 8,
                      .CAllocation = 8,
                      .TAllocation = 8);
      const uint32_t l3cr3_val = enable_slm ? l3cr3_slm : l3cr3_noslm;
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
         lri.RegisterOffset = GENX(L3CNTLREG3_num);
         lri.DataDWord = l3cr3_val;
      }

      cmd_buffer->state.current_l3_config = l3cr2_val;
   }
}

void
genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   MAYBE_UNUSED VkResult result;

   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);

   bool needs_slm = cs_prog_data->base.total_shared > 0;
   genX(cmd_buffer_config_l3)(cmd_buffer, needs_slm);

   genX(flush_pipeline_select_gpgpu)(cmd_buffer);

   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
      /* FIXME: figure out descriptors for gen7 */
      result = flush_compute_descriptor_set(cmd_buffer);
      assert(result == VK_SUCCESS);
      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
   }

   cmd_buffer->state.compute_dirty = 0;
}

void
genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_RENDER_TARGETS |
                                  ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH |
                                  ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {

      const struct anv_image_view *iview =
         anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
      const struct anv_image *image = iview ? iview->image : NULL;
      const bool has_depth =
         image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT);
      const uint32_t depth_format = has_depth ?
         isl_surf_get_depth_format(&cmd_buffer->device->isl_dev,
                                   &image->depth_surface.isl) : D16_UNORM;

      uint32_t sf_dw[GENX(3DSTATE_SF_length)];
      struct GENX(3DSTATE_SF) sf = {
         GENX(3DSTATE_SF_header),
         .DepthBufferSurfaceFormat = depth_format,
         .LineWidth = cmd_buffer->state.dynamic.line_width,
         .GlobalDepthOffsetConstant = cmd_buffer->state.dynamic.depth_bias.bias,
         .GlobalDepthOffsetScale = cmd_buffer->state.dynamic.depth_bias.slope,
         .GlobalDepthOffsetClamp = cmd_buffer->state.dynamic.depth_bias.clamp
      };
      GENX(3DSTATE_SF_pack)(NULL, sf_dw, &sf);

      anv_batch_emit_merge(&cmd_buffer->batch, sf_dw, pipeline->gen7.sf);
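      /* anv_batch_emit_merge() ORs the dynamic DWords packed above with the
       * 3DSTATE_SF DWords precomputed at pipeline creation (pipeline->gen7.sf),
       * so static and dynamic state end up in a single packet.
       */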
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GENX(COLOR_CALC_STATE_length) * 4,
                                            64);
      struct GENX(COLOR_CALC_STATE) cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
         .StencilReferenceValue = d->stencil_reference.front & 0xff,
         .BackFaceStencilReferenceValue = d->stencil_reference.back & 0xff,
      };
      GENX(COLOR_CALC_STATE_pack)(NULL, cc_state.map, &cc);
      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), ccp) {
         ccp.ColorCalcStatePointer = cc_state.offset;
      }
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_RENDER_TARGETS |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
      uint32_t depth_stencil_dw[GENX(DEPTH_STENCIL_STATE_length)];
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;

      struct GENX(DEPTH_STENCIL_STATE) depth_stencil = {
         .StencilTestMask = d->stencil_compare_mask.front & 0xff,
         .StencilWriteMask = d->stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,
      };
      GENX(DEPTH_STENCIL_STATE_pack)(NULL, depth_stencil_dw, &depth_stencil);

      struct anv_state ds_state =
         anv_cmd_buffer_merge_dynamic(cmd_buffer, depth_stencil_dw,
                                      pipeline->gen7.depth_stencil_state,
                                      GENX(DEPTH_STENCIL_STATE_length), 64);

      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_DEPTH_STENCIL_STATE_POINTERS), dsp) {
         dsp.PointertoDEPTH_STENCIL_STATE = ds_state.offset;
      }
   }

   if (cmd_buffer->state.gen7.index_buffer &&
       cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_INDEX_BUFFER)) {
      struct anv_buffer *buffer = cmd_buffer->state.gen7.index_buffer;
      uint32_t offset = cmd_buffer->state.gen7.index_offset;

#if GEN_IS_HASWELL
      anv_batch_emit(&cmd_buffer->batch, GEN75_3DSTATE_VF, vf) {
         vf.IndexedDrawCutIndexEnable = pipeline->primitive_restart;
         vf.CutIndex = cmd_buffer->state.restart_index;
      }
#endif

      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER), ib) {
#if !GEN_IS_HASWELL
         ib.CutIndexEnable = pipeline->primitive_restart;
#endif
         ib.IndexFormat = cmd_buffer->state.gen7.index_type;
         ib.MemoryObjectControlState = GENX(MOCS);

         ib.BufferStartingAddress =
            (struct anv_address) { buffer->bo, buffer->offset + offset };
         ib.BufferEndingAddress =
            (struct anv_address) { buffer->bo, buffer->offset + buffer->size };
      }
   }

   cmd_buffer->state.dirty = 0;
}

void genX(CmdSetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     event,
    VkPipelineStageFlags                        stageMask)
{
   stub();
}

void genX(CmdResetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     event,
    VkPipelineStageFlags                        stageMask)
{
   stub();
}

void genX(CmdWaitEvents)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    eventCount,
    const VkEvent*                              pEvents,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    uint32_t                                    memoryBarrierCount,
    const VkMemoryBarrier*                      pMemoryBarriers,
    uint32_t                                    bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
    uint32_t                                    imageMemoryBarrierCount,
    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   stub();
}