/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

#if GEN_GEN == 8
void
gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t count = cmd_buffer->state.dynamic.viewport.count;
   const VkViewport *viewports = cmd_buffer->state.dynamic.viewport.viewports;
   struct anv_state sf_clip_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 64, 64);
   struct anv_state cc_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 8, 32);

   for (uint32_t i = 0; i < count; i++) {
      const VkViewport *vp = &viewports[i];
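
      /* The matrix below maps NDC [-1, 1] to window coordinates. For
       * example, with x = 0 and width = 1920: m00 = 960 and m30 = 960, so
       * NDC -1 lands on pixel 0 and NDC +1 lands on pixel 1920.
       */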
      /* The gen7 state struct has just the matrix and guardband fields, the
       * gen8 struct adds the min/max viewport fields. */
      struct GENX(SF_CLIP_VIEWPORT) sf_clip_viewport = {
         .ViewportMatrixElementm00 = vp->width / 2,
         .ViewportMatrixElementm11 = vp->height / 2,
         .ViewportMatrixElementm22 = 1.0,
         .ViewportMatrixElementm30 = vp->x + vp->width / 2,
         .ViewportMatrixElementm31 = vp->y + vp->height / 2,
         .ViewportMatrixElementm32 = 0.0,
         .XMinClipGuardband = -1.0f,
         .XMaxClipGuardband = 1.0f,
         .YMinClipGuardband = -1.0f,
         .YMaxClipGuardband = 1.0f,
         .XMinViewPort = vp->x,
         .XMaxViewPort = vp->x + vp->width - 1,
         .YMinViewPort = vp->y,
         .YMaxViewPort = vp->y + vp->height - 1,
      };

      struct GENX(CC_VIEWPORT) cc_viewport = {
         .MinimumDepth = vp->minDepth,
         .MaximumDepth = vp->maxDepth,
      };

      GENX(SF_CLIP_VIEWPORT_pack)(NULL, sf_clip_state.map + i * 64,
                                  &sf_clip_viewport);
      GENX(CC_VIEWPORT_pack)(NULL, cc_state.map + i * 8, &cc_viewport);
   }

   if (!cmd_buffer->device->info.has_llc) {
      anv_state_clflush(sf_clip_state);
      anv_state_clflush(cc_state);
   }

   anv_batch_emit_blk(&cmd_buffer->batch,
                      GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), cc) {
      cc.CCViewportPointer = cc_state.offset;
   }
   anv_batch_emit_blk(&cmd_buffer->batch,
                      GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), clip) {
      clip.SFClipViewportPointer = sf_clip_state.offset;
   }
}
#endif
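
/* Program the L3 cache partitioning. When a compute shader needs shared
 * local memory, part of the L3 allocation has to be handed over to SLM, so
 * the register value differs between the SLM and non-SLM cases.
 */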
void
genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
{
   /* References for GL state:
    *
    * - commits e307cfa..228d5a3
    * - src/mesa/drivers/dri/i965/gen7_l3_state.c
    */

   uint32_t l3cr_slm, l3cr_noslm;
   anv_pack_struct(&l3cr_noslm, GENX(L3CNTLREG),
                   .URBAllocation = 48,
                   .AllAllocation = 48);
   anv_pack_struct(&l3cr_slm, GENX(L3CNTLREG),
                   .SLMEnable = 1,
                   .URBAllocation = 16,
                   .AllAllocation = 48);
   const uint32_t l3cr_val = enable_slm ? l3cr_slm : l3cr_noslm;
   bool changed = cmd_buffer->state.current_l3_config != l3cr_val;

   if (changed) {
      /* According to the hardware docs, the L3 partitioning can only be
       * changed while the pipeline is completely drained and the caches are
       * flushed, which involves a first PIPE_CONTROL flush which stalls the
       * pipeline...
       */
      anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.DCFlushEnable = true;
         pc.PostSyncOperation = NoWrite;
         pc.CommandStreamerStallEnable = true;
      }

      /* ...followed by a second pipelined PIPE_CONTROL that initiates
       * invalidation of the relevant caches. Note that because RO
       * invalidation happens at the top of the pipeline (i.e. right away as
       * the PIPE_CONTROL command is processed by the CS) we cannot combine it
       * with the previous stalling flush as the hardware documentation
       * suggests, because that would cause the CS to stall on previous
       * rendering *after* RO invalidation and wouldn't prevent the RO caches
       * from being polluted by concurrent rendering before the stall
       * completes. This intentionally doesn't implement the SKL+ hardware
       * workaround suggesting to enable CS stall on PIPE_CONTROLs with the
       * texture cache invalidation bit set for GPGPU workloads because the
       * previous and subsequent PIPE_CONTROLs already guarantee that there is
       * no concurrent GPGPU kernel execution (see SKL HSD 2132585).
       */
      anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.TextureCacheInvalidationEnable = true;
         pc.ConstantCacheInvalidationEnable = true;
         pc.InstructionCacheInvalidateEnable = true;
         pc.StateCacheInvalidationEnable = true;
         pc.PostSyncOperation = NoWrite;
      }

      /* Now send a third stalling flush to make sure that invalidation is
       * complete when the L3 configuration registers are modified.
       */
      anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.DCFlushEnable = true;
         pc.PostSyncOperation = NoWrite;
         pc.CommandStreamerStallEnable = true;
      }

      anv_batch_emit_blk(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
         lri.RegisterOffset = GENX(L3CNTLREG_num);
         lri.DataDWord = l3cr_val;
      }
      cmd_buffer->state.current_l3_config = l3cr_val;
   }
}

static void
__emit_genx_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t sf_dw[GENX(3DSTATE_SF_length)];
   struct GENX(3DSTATE_SF) sf = {
      GENX(3DSTATE_SF_header),
      .LineWidth = cmd_buffer->state.dynamic.line_width,
   };
   GENX(3DSTATE_SF_pack)(NULL, sf_dw, &sf);

   anv_batch_emit_merge(&cmd_buffer->batch, sf_dw,
                        cmd_buffer->state.pipeline->gen8.sf);
}
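
/* Cherryview takes the gen9 3DSTATE_SF layout (the LineWidth encoding
 * differs), so the gen8 build pulls in the gen9 pack functions and selects
 * the right variant at runtime below.
 */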
#include "genxml/gen9_pack.h"
static void
__emit_gen9_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t sf_dw[GENX(3DSTATE_SF_length)];
   struct GEN9_3DSTATE_SF sf = {
      GEN9_3DSTATE_SF_header,
      .LineWidth = cmd_buffer->state.dynamic.line_width,
   };
   GEN9_3DSTATE_SF_pack(NULL, sf_dw, &sf);

   anv_batch_emit_merge(&cmd_buffer->batch, sf_dw,
                        cmd_buffer->state.pipeline->gen8.sf);
}

static void
__emit_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->device->info.is_cherryview)
      __emit_gen9_sf_state(cmd_buffer);
   else
      __emit_genx_sf_state(cmd_buffer);
}

void
genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)) {
      __emit_sf_state(cmd_buffer);
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
      uint32_t raster_dw[GENX(3DSTATE_RASTER_length)];
      struct GENX(3DSTATE_RASTER) raster = {
         GENX(3DSTATE_RASTER_header),
         .GlobalDepthOffsetConstant = cmd_buffer->state.dynamic.depth_bias.bias,
         .GlobalDepthOffsetScale = cmd_buffer->state.dynamic.depth_bias.slope,
         .GlobalDepthOffsetClamp = cmd_buffer->state.dynamic.depth_bias.clamp,
      };
      GENX(3DSTATE_RASTER_pack)(NULL, raster_dw, &raster);
      anv_batch_emit_merge(&cmd_buffer->batch, raster_dw,
                           pipeline->gen8.raster);
   }

   /* Stencil reference values moved from COLOR_CALC_STATE in gen8 to
    * 3DSTATE_WM_DEPTH_STENCIL in gen9. That means the dirty bits get split
    * across different state packets for gen8 and gen9. We handle that by
    * using a big old #if switch here.
    */
#if GEN_GEN == 8
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GENX(COLOR_CALC_STATE_length) * 4,
                                            64);
      struct GENX(COLOR_CALC_STATE) cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
         .StencilReferenceValue = d->stencil_reference.front & 0xff,
         .BackFaceStencilReferenceValue = d->stencil_reference.back & 0xff,
      };
      GENX(COLOR_CALC_STATE_pack)(NULL, cc_state.map, &cc);

      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit_blk(&cmd_buffer->batch,
                         GENX(3DSTATE_CC_STATE_POINTERS), ccp) {
         ccp.ColorCalcStatePointer = cc_state.offset;
         ccp.ColorCalcStatePointerValid = true;
      }
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
      uint32_t wm_depth_stencil_dw[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;

      struct GENX(3DSTATE_WM_DEPTH_STENCIL) wm_depth_stencil = {
         GENX(3DSTATE_WM_DEPTH_STENCIL_header),

         .StencilTestMask = d->stencil_compare_mask.front & 0xff,
         .StencilWriteMask = d->stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,
      };
      GENX(3DSTATE_WM_DEPTH_STENCIL_pack)(NULL, wm_depth_stencil_dw,
                                          &wm_depth_stencil);

      anv_batch_emit_merge(&cmd_buffer->batch, wm_depth_stencil_dw,
                           pipeline->gen8.wm_depth_stencil);
   }
#else
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GEN9_COLOR_CALC_STATE_length * 4,
                                            64);
      struct GEN9_COLOR_CALC_STATE cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
      };
      GEN9_COLOR_CALC_STATE_pack(NULL, cc_state.map, &cc);

      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit_blk(&cmd_buffer->batch,
                         GEN9_3DSTATE_CC_STATE_POINTERS, ccp) {
         ccp.ColorCalcStatePointer = cc_state.offset;
         ccp.ColorCalcStatePointerValid = true;
      }
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      uint32_t dwords[GEN9_3DSTATE_WM_DEPTH_STENCIL_length];
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
      struct GEN9_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil = {
         GEN9_3DSTATE_WM_DEPTH_STENCIL_header,

         .StencilTestMask = d->stencil_compare_mask.front & 0xff,
         .StencilWriteMask = d->stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,

         .StencilReferenceValue = d->stencil_reference.front & 0xff,
         .BackfaceStencilReferenceValue = d->stencil_reference.back & 0xff,
      };
      GEN9_3DSTATE_WM_DEPTH_STENCIL_pack(NULL, dwords, &wm_depth_stencil);

      anv_batch_emit_merge(&cmd_buffer->batch, dwords,
                           pipeline->gen9.wm_depth_stencil);
   }
#endif

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_INDEX_BUFFER)) {
      anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_VF), vf) {
         vf.IndexedDrawCutIndexEnable = pipeline->primitive_restart;
         vf.CutIndex = cmd_buffer->state.restart_index;
      }
   }

   cmd_buffer->state.dirty = 0;
}
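
/* Vulkan ties the primitive-restart index to the index type (all 0xffff for
 * uint16, all 0xffffffff for uint32), so it is derived at bind time below
 * and programmed through 3DSTATE_VF above on the next flush.
 */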
void genX(CmdBindIndexBuffer)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    VkIndexType                                 indexType)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   static const uint32_t vk_to_gen_index_type[] = {
      [VK_INDEX_TYPE_UINT16] = INDEX_WORD,
      [VK_INDEX_TYPE_UINT32] = INDEX_DWORD,
   };

   static const uint32_t restart_index_for_type[] = {
      [VK_INDEX_TYPE_UINT16] = UINT16_MAX,
      [VK_INDEX_TYPE_UINT32] = UINT32_MAX,
   };

   cmd_buffer->state.restart_index = restart_index_for_type[indexType];

   anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER), ib) {
      ib.IndexFormat = vk_to_gen_index_type[indexType];
      ib.MemoryObjectControlState = GENX(MOCS);
      ib.BufferStartingAddress =
         (struct anv_address) { buffer->bo, buffer->offset + offset };
      ib.BufferSize = buffer->size - offset;
   }

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
}

static VkResult
flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = anv_cmd_buffer_emit_samplers(cmd_buffer,
                                         MESA_SHADER_COMPUTE, &samplers);
   if (result != VK_SUCCESS)
      return result;
   result = anv_cmd_buffer_emit_binding_table(cmd_buffer,
                                              MESA_SHADER_COMPUTE, &surfaces);
   if (result != VK_SUCCESS)
      return result;

   struct anv_state push_state = anv_cmd_buffer_cs_push_constants(cmd_buffer);

   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
   unsigned push_constant_data_size =
      (prog_data->nr_params + local_id_dwords) * 4;
   unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
   unsigned push_constant_regs = reg_aligned_constant_size / 32;
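
   /* Each GRF register holds 32 bytes, so the push constant payload
    * (uniforms plus the per-thread local invocation IDs) is rounded up to a
    * whole number of registers for the CURBE load below.
    */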

   if (push_state.alloc_size) {
      anv_batch_emit_blk(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) {
         curbe.CURBETotalDataLength = push_state.alloc_size;
         curbe.CURBEDataStartAddress = push_state.offset;
      }
   }

   assert(prog_data->total_shared <= 64 * 1024);
   uint32_t slm_size = 0;
   if (prog_data->total_shared > 0) {
      /* slm_size is in 4k increments, but must be a power of 2. */
      slm_size = 4 * 1024;
      while (slm_size < prog_data->total_shared)
         slm_size <<= 1;
      slm_size /= 4 * 1024;
   }
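
   /* Example: total_shared = 9 KB rounds up to the next power of two,
    * 16 KB, which encodes as 16384 / 4096 = 4 in SharedLocalMemorySize.
    */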

   struct anv_state state =
      anv_state_pool_emit(&device->dynamic_state_pool,
                          GENX(INTERFACE_DESCRIPTOR_DATA), 64,
                          .KernelStartPointer = pipeline->cs_simd,
                          .KernelStartPointerHigh = 0,
                          .BindingTablePointer = surfaces.offset,
                          .BindingTableEntryCount = 0,
                          .SamplerStatePointer = samplers.offset,
                          .SamplerCount = 0,
                          .ConstantIndirectURBEntryReadLength = push_constant_regs,
                          .ConstantURBEntryReadOffset = 0,
                          .BarrierEnable = cs_prog_data->uses_barrier,
                          .SharedLocalMemorySize = slm_size,
                          .NumberofThreadsinGPGPUThreadGroup =
                             pipeline->cs_thread_width_max);

   uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
   anv_batch_emit_blk(&cmd_buffer->batch,
                      GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), mid) {
      mid.InterfaceDescriptorTotalLength = size;
      mid.InterfaceDescriptorDataStartAddress = state.offset;
   }

   return VK_SUCCESS;
}

void
genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   VkResult result;

   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);

   bool needs_slm = cs_prog_data->base.total_shared > 0;
   genX(cmd_buffer_config_l3)(cmd_buffer, needs_slm);

   genX(flush_pipeline_select_gpgpu)(cmd_buffer);

   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
      result = flush_compute_descriptor_set(cmd_buffer);
      assert(result == VK_SUCCESS);
      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
   }

   cmd_buffer->state.compute_dirty = 0;
}
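
/* VkEvent is backed by a dword in the dynamic state pool: setting and
 * resetting are post-sync immediate writes from PIPE_CONTROL, and waiting is
 * an MI_SEMAPHORE_WAIT that polls that dword from the command streamer.
 */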

void genX(CmdSetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     _event,
    VkPipelineStageFlags                        stageMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_event, event, _event);

   anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DestinationAddressType = DAT_PPGTT;
      pc.PostSyncOperation = WriteImmediateData;
      pc.Address = (struct anv_address) {
         &cmd_buffer->device->dynamic_state_block_pool.bo,
         event->state.offset
      };
      pc.ImmediateData = VK_EVENT_SET;
   }
}

void genX(CmdResetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     _event,
    VkPipelineStageFlags                        stageMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_event, event, _event);

   anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DestinationAddressType = DAT_PPGTT;
      pc.PostSyncOperation = WriteImmediateData;
      pc.Address = (struct anv_address) {
         &cmd_buffer->device->dynamic_state_block_pool.bo,
         event->state.offset
      };
      pc.ImmediateData = VK_EVENT_RESET;
   }
}

void genX(CmdWaitEvents)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    eventCount,
    const VkEvent*                              pEvents,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    uint32_t                                    memoryBarrierCount,
    const VkMemoryBarrier*                      pMemoryBarriers,
    uint32_t                                    bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
    uint32_t                                    imageMemoryBarrierCount,
    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   for (uint32_t i = 0; i < eventCount; i++) {
      ANV_FROM_HANDLE(anv_event, event, pEvents[i]);

      anv_batch_emit_blk(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT), sem) {
         sem.WaitMode = PollingMode;
         sem.CompareOperation = COMPARE_SAD_EQUAL_SDD;
         sem.SemaphoreDataDword = VK_EVENT_SET;
         sem.SemaphoreAddress = (struct anv_address) {
            &cmd_buffer->device->dynamic_state_block_pool.bo,
            event->state.offset
         };
      }
   }

   genX(CmdPipelineBarrier)(commandBuffer, srcStageMask, destStageMask,
                            false, /* byRegion */
                            memoryBarrierCount, pMemoryBarriers,
                            bufferMemoryBarrierCount, pBufferMemoryBarriers,
                            imageMemoryBarrierCount, pImageMemoryBarriers);
}