/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

static void
gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t count = cmd_buffer->state.dynamic.viewport.count;
   const VkViewport *viewports = cmd_buffer->state.dynamic.viewport.viewports;
   struct anv_state sf_clip_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 64, 64);
   struct anv_state cc_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 8, 32);

   for (uint32_t i = 0; i < count; i++) {
      const VkViewport *vp = &viewports[i];

      /* The gen7 state struct has just the matrix and guardband fields, the
       * gen8 struct adds the min/max viewport fields. */
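      /* The matrix programs the usual viewport transform: scale NDC x/y by
       * half the viewport extent and translate to its center. z passes
       * through unscaled (m22 = 1, m32 = 0); the depth range only enters as
       * the CC_VIEWPORT clamp below.
       */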
      struct GENX(SF_CLIP_VIEWPORT) sf_clip_viewport = {
         .ViewportMatrixElementm00 = vp->width / 2,
         .ViewportMatrixElementm11 = vp->height / 2,
         .ViewportMatrixElementm22 = 1.0,
         .ViewportMatrixElementm30 = vp->x + vp->width / 2,
         .ViewportMatrixElementm31 = vp->y + vp->height / 2,
         .ViewportMatrixElementm32 = 0.0,
         .XMinClipGuardband = -1.0f,
         .XMaxClipGuardband = 1.0f,
         .YMinClipGuardband = -1.0f,
         .YMaxClipGuardband = 1.0f,
         .XMinViewPort = vp->x,
         .XMaxViewPort = vp->x + vp->width - 1,
         .YMinViewPort = vp->y,
         .YMaxViewPort = vp->y + vp->height - 1,
      };

      struct GENX(CC_VIEWPORT) cc_viewport = {
         .MinimumDepth = vp->minDepth,
         .MaximumDepth = vp->maxDepth,
      };

      GENX(SF_CLIP_VIEWPORT_pack)(NULL, sf_clip_state.map + i * 64,
                                  &sf_clip_viewport);
      GENX(CC_VIEWPORT_pack)(NULL, cc_state.map + i * 8, &cc_viewport);
   }

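   /* The state was written through a cached CPU mapping. Without LLC the
    * GPU does not snoop the CPU cache, so flush the cachelines manually or
    * the GPU may read stale state.
    */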
   if (!cmd_buffer->device->info.has_llc) {
      anv_state_clflush(sf_clip_state);
      anv_state_clflush(cc_state);
   }

   anv_batch_emit(&cmd_buffer->batch,
                  GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC),
                  .CCViewportPointer = cc_state.offset);
   anv_batch_emit(&cmd_buffer->batch,
                  GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP),
                  .SFClipViewportPointer = sf_clip_state.offset);
}

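/* Emit an MI_LOAD_REGISTER_IMM to write an immediate into an MMIO register
 * from the command streamer; used below to update L3CNTLREG once the
 * pipeline has been drained.
 */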
#define emit_lri(batch, reg, imm)                        \
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM),     \
                  .RegisterOffset = __anv_reg_num(reg),  \
                  .DataDWord = imm)

void
genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
{
   /* References for GL state:
    *
    * - commits e307cfa..228d5a3
    * - src/mesa/drivers/dri/i965/gen7_l3_state.c
    */
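   /* Two canned L3 configurations: enabling SLM carves part of L3 out as
    * shared local memory for compute, which takes its share out of the URB
    * allocation (48 down to 16 here); the no-SLM config gives everything to
    * the URB.
    */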
   uint32_t l3cr_slm, l3cr_noslm;
   anv_pack_struct(&l3cr_noslm, GENX(L3CNTLREG),
                   .URBAllocation = 48,
                   .AllAllocation = 48);
   anv_pack_struct(&l3cr_slm, GENX(L3CNTLREG),
                   .SLMEnable = 1,
                   .URBAllocation = 16,
                   .AllAllocation = 48);
   const uint32_t l3cr_val = enable_slm ? l3cr_slm : l3cr_noslm;
   bool changed = cmd_buffer->state.current_l3_config != l3cr_val;

   if (changed) {
      /* According to the hardware docs, the L3 partitioning can only be
       * changed while the pipeline is completely drained and the caches are
       * flushed, which involves a first PIPE_CONTROL flush which stalls the
       * pipeline...
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DCFlushEnable = true,
                     .PostSyncOperation = NoWrite,
                     .CommandStreamerStallEnable = true);

      /* ...followed by a second pipelined PIPE_CONTROL that initiates
       * invalidation of the relevant caches. Note that because RO
       * invalidation happens at the top of the pipeline (i.e. right away as
       * the PIPE_CONTROL command is processed by the CS) we cannot combine it
       * with the previous stalling flush as the hardware documentation
       * suggests, because that would cause the CS to stall on previous
       * rendering *after* RO invalidation and wouldn't prevent the RO caches
       * from being polluted by concurrent rendering before the stall
       * completes. This intentionally doesn't implement the SKL+ hardware
       * workaround suggesting to enable CS stall on PIPE_CONTROLs with the
       * texture cache invalidation bit set for GPGPU workloads because the
       * previous and subsequent PIPE_CONTROLs already guarantee that there is
       * no concurrent GPGPU kernel execution (see SKL HSD 2132585).
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .TextureCacheInvalidationEnable = true,
                     .ConstantCacheInvalidationEnable = true,
                     .InstructionCacheInvalidateEnable = true,
                     .StateCacheInvalidationEnable = true,
                     .PostSyncOperation = NoWrite);

      /* Now send a third stalling flush to make sure that invalidation is
       * complete when the L3 configuration registers are modified.
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DCFlushEnable = true,
                     .PostSyncOperation = NoWrite,
                     .CommandStreamerStallEnable = true);

      emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG), l3cr_val);
      cmd_buffer->state.current_l3_config = l3cr_val;
   }
}

static void
__emit_genx_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t sf_dw[GENX(3DSTATE_SF_length)];
   struct GENX(3DSTATE_SF) sf = {
      GENX(3DSTATE_SF_header),
      .LineWidth = cmd_buffer->state.dynamic.line_width,
   };
   GENX(3DSTATE_SF_pack)(NULL, sf_dw, &sf);
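   /* Merge (bitwise OR) these dwords with the partial 3DSTATE_SF packet the
    * pipeline packed at creation time, so only LineWidth comes from dynamic
    * state.
    */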
   anv_batch_emit_merge(&cmd_buffer->batch, sf_dw,
                        cmd_buffer->state.pipeline->gen8.sf);
}

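/* Cherryview uses the gen9 layout of 3DSTATE_SF (the line width field is
 * laid out differently), so the gen8 build pulls in the gen9 pack functions
 * for this one packet.
 */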
#include "genxml/gen9_pack.h"
static void
__emit_gen9_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t sf_dw[GENX(3DSTATE_SF_length)];
   struct GEN9_3DSTATE_SF sf = {
      GEN9_3DSTATE_SF_header,
      .LineWidth = cmd_buffer->state.dynamic.line_width,
   };
   GEN9_3DSTATE_SF_pack(NULL, sf_dw, &sf);
   anv_batch_emit_merge(&cmd_buffer->batch, sf_dw,
                        cmd_buffer->state.pipeline->gen8.sf);
}

static void
__emit_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->device->info.is_cherryview)
      __emit_gen9_sf_state(cmd_buffer);
   else
      __emit_genx_sf_state(cmd_buffer);
}

void
genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)) {
      __emit_sf_state(cmd_buffer);
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
      uint32_t raster_dw[GENX(3DSTATE_RASTER_length)];
      struct GENX(3DSTATE_RASTER) raster = {
         GENX(3DSTATE_RASTER_header),
         .GlobalDepthOffsetConstant = cmd_buffer->state.dynamic.depth_bias.bias,
         .GlobalDepthOffsetScale = cmd_buffer->state.dynamic.depth_bias.slope,
         .GlobalDepthOffsetClamp = cmd_buffer->state.dynamic.depth_bias.clamp,
      };
      GENX(3DSTATE_RASTER_pack)(NULL, raster_dw, &raster);
      anv_batch_emit_merge(&cmd_buffer->batch, raster_dw,
                           pipeline->gen8.raster);
   }

   /* Stencil reference values moved from COLOR_CALC_STATE in gen8 to
    * 3DSTATE_WM_DEPTH_STENCIL in gen9. That means the dirty bits get split
    * across different state packets for gen8 and gen9. We handle that by
    * using a big old #if switch here.
    */
#if GEN_GEN == 8
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GENX(COLOR_CALC_STATE_length) * 4,
                                            64);
      struct GENX(COLOR_CALC_STATE) cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
         .StencilReferenceValue = d->stencil_reference.front & 0xff,
         .BackFaceStencilReferenceValue = d->stencil_reference.back & 0xff,
      };
      GENX(COLOR_CALC_STATE_pack)(NULL, cc_state.map, &cc);

      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_CC_STATE_POINTERS),
                     .ColorCalcStatePointer = cc_state.offset,
                     .ColorCalcStatePointerValid = true);
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
      uint32_t wm_depth_stencil_dw[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;

      struct GENX(3DSTATE_WM_DEPTH_STENCIL) wm_depth_stencil = {
         GENX(3DSTATE_WM_DEPTH_STENCIL_header),

         .StencilTestMask = d->stencil_compare_mask.front & 0xff,
         .StencilWriteMask = d->stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,
      };
      GENX(3DSTATE_WM_DEPTH_STENCIL_pack)(NULL, wm_depth_stencil_dw,
                                          &wm_depth_stencil);

      anv_batch_emit_merge(&cmd_buffer->batch, wm_depth_stencil_dw,
                           pipeline->gen8.wm_depth_stencil);
   }
#else
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GEN9_COLOR_CALC_STATE_length * 4,
                                            64);
      struct GEN9_COLOR_CALC_STATE cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
      };
      GEN9_COLOR_CALC_STATE_pack(NULL, cc_state.map, &cc);

      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit(&cmd_buffer->batch,
                     GEN9_3DSTATE_CC_STATE_POINTERS,
                     .ColorCalcStatePointer = cc_state.offset,
                     .ColorCalcStatePointerValid = true);
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      uint32_t dwords[GEN9_3DSTATE_WM_DEPTH_STENCIL_length];
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
      struct GEN9_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil = {
         GEN9_3DSTATE_WM_DEPTH_STENCIL_header,

         .StencilTestMask = d->stencil_compare_mask.front & 0xff,
         .StencilWriteMask = d->stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,

         .StencilReferenceValue = d->stencil_reference.front & 0xff,
         .BackfaceStencilReferenceValue = d->stencil_reference.back & 0xff,
      };
      GEN9_3DSTATE_WM_DEPTH_STENCIL_pack(NULL, dwords, &wm_depth_stencil);

      anv_batch_emit_merge(&cmd_buffer->batch, dwords,
                           pipeline->gen9.wm_depth_stencil);
   }
#endif

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_INDEX_BUFFER)) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF),
                     .IndexedDrawCutIndexEnable = pipeline->primitive_restart,
                     .CutIndex = cmd_buffer->state.restart_index);
   }

   cmd_buffer->state.dirty = 0;
}

void genX(CmdBindIndexBuffer)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    VkIndexType                                 indexType)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

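   /* Map the Vulkan index type to the hardware index format, and pick the
    * all-ones cut index the hardware compares against when primitive
    * restart is enabled.
    */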
   static const uint32_t vk_to_gen_index_type[] = {
      [VK_INDEX_TYPE_UINT16] = INDEX_WORD,
      [VK_INDEX_TYPE_UINT32] = INDEX_DWORD,
   };

   static const uint32_t restart_index_for_type[] = {
      [VK_INDEX_TYPE_UINT16] = UINT16_MAX,
      [VK_INDEX_TYPE_UINT32] = UINT32_MAX,
   };

   cmd_buffer->state.restart_index = restart_index_for_type[indexType];

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER),
                  .IndexFormat = vk_to_gen_index_type[indexType],
                  .MemoryObjectControlState = GENX(MOCS),
                  .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
                  .BufferSize = buffer->size - offset);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
}

static VkResult
flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = anv_cmd_buffer_emit_samplers(cmd_buffer,
                                         MESA_SHADER_COMPUTE, &samplers);
   if (result != VK_SUCCESS)
      return result;
   result = anv_cmd_buffer_emit_binding_table(cmd_buffer,
                                              MESA_SHADER_COMPUTE, &surfaces);
   if (result != VK_SUCCESS)
      return result;

   struct anv_state push_state = anv_cmd_buffer_cs_push_constants(cmd_buffer);

   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
   unsigned push_constant_data_size =
      (prog_data->nr_params + local_id_dwords) * 4;
   unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
   unsigned push_constant_regs = reg_aligned_constant_size / 32;

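   /* Push constants (including any local-invocation-ID data) reach compute
    * kernels via CURBE; ConstantIndirectURBEntryReadLength in the interface
    * descriptor below tells each thread how many 32-byte registers of that
    * data to read.
    */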
   if (push_state.alloc_size) {
      anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD),
                     .CURBETotalDataLength = push_state.alloc_size,
                     .CURBEDataStartAddress = push_state.offset);
   }

   assert(prog_data->total_shared <= 64 * 1024);
   uint32_t slm_size = 0;
   if (prog_data->total_shared > 0) {
      /* slm_size is in 4k increments, but must be a power of 2. */
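      /* e.g. total_shared = 5 KiB rounds up to 8 KiB and is programmed as
       * 2, i.e. the rounded size expressed in 4 KiB units.
       */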
      slm_size = 4 * 1024;
      while (slm_size < prog_data->total_shared)
         slm_size <<= 1;
      slm_size /= 4 * 1024;
   }

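   /* The interface descriptor bundles everything a dispatch needs: kernel
    * entry point, binding/sampler tables, CURBE read length, SLM size and
    * threads per group; MEDIA_INTERFACE_DESCRIPTOR_LOAD below points the
    * hardware at it.
    */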
   struct anv_state state =
      anv_state_pool_emit(&device->dynamic_state_pool,
                          GENX(INTERFACE_DESCRIPTOR_DATA), 64,
                          .KernelStartPointer = pipeline->cs_simd,
                          .KernelStartPointerHigh = 0,
                          .BindingTablePointer = surfaces.offset,
                          .BindingTableEntryCount = 0,
                          .SamplerStatePointer = samplers.offset,
                          .ConstantIndirectURBEntryReadLength = push_constant_regs,
                          .ConstantURBEntryReadOffset = 0,
                          .BarrierEnable = cs_prog_data->uses_barrier,
                          .SharedLocalMemorySize = slm_size,
                          .NumberofThreadsinGPGPUThreadGroup =
                             pipeline->cs_thread_width_max);

   uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
   anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD),
                  .InterfaceDescriptorTotalLength = size,
                  .InterfaceDescriptorDataStartAddress = state.offset);

   return VK_SUCCESS;
}

void
genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   VkResult result;

   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);

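   /* The L3 partitioning has to match the kernel's SLM usage, so get the L3
    * configuration right before selecting the GPGPU pipeline and emitting
    * any dispatch state.
    */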
   bool needs_slm = cs_prog_data->base.total_shared > 0;
   genX(cmd_buffer_config_l3)(cmd_buffer, needs_slm);

   genX(flush_pipeline_select_gpgpu)(cmd_buffer);

   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
      result = flush_compute_descriptor_set(cmd_buffer);
      assert(result == VK_SUCCESS);
      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
   }

   cmd_buffer->state.compute_dirty = 0;
}

void genX(CmdSetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     _event,
    VkPipelineStageFlags                        stageMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_event, event, _event);

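   /* Signal the event from the GPU: a PIPE_CONTROL post-sync write stores
    * VK_EVENT_SET into the event's dword in the dynamic state pool. Note
    * that stageMask is not consulted here.
    */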
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WriteImmediateData,
                  .Address = {
                     &cmd_buffer->device->dynamic_state_block_pool.bo,
                     event->state.offset
                  },
                  .ImmediateData = VK_EVENT_SET);
}

void genX(CmdResetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     _event,
    VkPipelineStageFlags                        stageMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_event, event, _event);

   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WriteImmediateData,
                  .Address = {
                     &cmd_buffer->device->dynamic_state_block_pool.bo,
                     event->state.offset
                  },
                  .ImmediateData = VK_EVENT_RESET);
}

void genX(CmdWaitEvents)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    eventCount,
    const VkEvent*                              pEvents,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    uint32_t                                    memoryBarrierCount,
    const VkMemoryBarrier*                      pMemoryBarriers,
    uint32_t                                    bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
    uint32_t                                    imageMemoryBarrierCount,
    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   for (uint32_t i = 0; i < eventCount; i++) {
      ANV_FROM_HANDLE(anv_event, event, pEvents[i]);

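      /* Block the command streamer until the event's dword reads
       * VK_EVENT_SET: MI_SEMAPHORE_WAIT in polling mode keeps re-reading
       * the semaphore address until it equals SemaphoreDataDword.
       */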
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT),
                     .WaitMode = PollingMode,
                     .CompareOperation = COMPARE_SAD_EQUAL_SDD,
                     .SemaphoreDataDword = VK_EVENT_SET,
                     .SemaphoreAddress = {
                        &cmd_buffer->device->dynamic_state_block_pool.bo,
                        event->state.offset
                     });
   }

   genX(CmdPipelineBarrier)(commandBuffer, srcStageMask, destStageMask,
                            false, /* byRegion */
                            memoryBarrierCount, pMemoryBarriers,
                            bufferMemoryBarrierCount, pBufferMemoryBarriers,
                            imageMemoryBarrierCount, pImageMemoryBarriers);
}