/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdbool.h>

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"
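/* Convention reminder: the GENX() macro from genxml/gen_macros.h prefixes its
 * argument with the hardware generation this file is compiled for, so when
 * building for gen8, GENX(3DSTATE_SF) expands to GEN8_3DSTATE_SF and
 * GENX(3DSTATE_SF_pack) to GEN8_3DSTATE_SF_pack.  The explicit GEN9_* names
 * used further down deliberately bypass that indirection.
 */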
static uint32_t
cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   static const uint32_t push_constant_opcodes[] = {
      [MESA_SHADER_VERTEX]    = 21,
      [MESA_SHADER_TESS_CTRL] = 25, /* HS */
      [MESA_SHADER_TESS_EVAL] = 26, /* DS */
      [MESA_SHADER_GEOMETRY]  = 22,
      [MESA_SHADER_FRAGMENT]  = 23,
      [MESA_SHADER_COMPUTE]   = 0,
   };

   VkShaderStageFlags flushed = 0;

   anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
      if (stage == MESA_SHADER_COMPUTE)
         continue;

      struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);

      if (state.offset == 0) {
         anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS),
                        ._3DCommandSubOpcode = push_constant_opcodes[stage]);
      } else {
         anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS),
                        ._3DCommandSubOpcode = push_constant_opcodes[stage],
                        .ConstantBody = {
                           .PointerToConstantBuffer2 = { &cmd_buffer->device->dynamic_state_block_pool.bo,
                                                         state.offset },
                           .ConstantBuffer2ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
                        });
      }

      flushed |= mesa_to_vk_shader_stage(stage);
   }

   cmd_buffer->state.push_constants_dirty &= ~flushed;

   return flushed;
}
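/* Sizing note: ConstantBuffer2ReadLength above is in 256-bit (32-byte)
 * register units, so e.g. a 96-byte push constant block packs to
 * DIV_ROUND_UP(96, 32) = 3 registers read from the dynamic state pool.
 */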
static void
emit_viewport_state(struct anv_cmd_buffer *cmd_buffer,
                    uint32_t count, const VkViewport *viewports)
{
   struct anv_state sf_clip_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 64, 64);
   struct anv_state cc_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 8, 32);

   for (uint32_t i = 0; i < count; i++) {
      const VkViewport *vp = &viewports[i];

      /* The gen7 state struct has just the matrix and guardband fields, the
       * gen8 struct adds the min/max viewport fields. */
      struct GENX(SF_CLIP_VIEWPORT) sf_clip_viewport = {
         .ViewportMatrixElementm00 = vp->width / 2,
         .ViewportMatrixElementm11 = vp->height / 2,
         .ViewportMatrixElementm22 = 1.0,
         .ViewportMatrixElementm30 = vp->x + vp->width / 2,
         .ViewportMatrixElementm31 = vp->y + vp->height / 2,
         .ViewportMatrixElementm32 = 0.0,
         .XMinClipGuardband = -1.0f,
         .XMaxClipGuardband = 1.0f,
         .YMinClipGuardband = -1.0f,
         .YMaxClipGuardband = 1.0f,
         .XMinViewPort = vp->x,
         .XMaxViewPort = vp->x + vp->width - 1,
         .YMinViewPort = vp->y,
         .YMaxViewPort = vp->y + vp->height - 1,
      };

      struct GENX(CC_VIEWPORT) cc_viewport = {
         .MinimumDepth = vp->minDepth,
         .MaximumDepth = vp->maxDepth
      };

      GENX(SF_CLIP_VIEWPORT_pack)(NULL, sf_clip_state.map + i * 64,
                                  &sf_clip_viewport);
      GENX(CC_VIEWPORT_pack)(NULL, cc_state.map + i * 8, &cc_viewport);
   }

   if (!cmd_buffer->device->info.has_llc) {
      anv_state_clflush(sf_clip_state);
      anv_state_clflush(cc_state);
   }

   anv_batch_emit(&cmd_buffer->batch,
                  GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC),
                  .CCViewportPointer = cc_state.offset);
   anv_batch_emit(&cmd_buffer->batch,
                  GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP),
                  .SFClipViewportPointer = sf_clip_state.offset);
}
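/* Worked example: a viewport of x = 0, y = 0, width = 1920, height = 1080
 * gives the scale/translate pair m00 = 960, m30 = 960 in X (mapping NDC
 * -1..1 to window 0..1920) and m11 = 540, m31 = 540 in Y, with the
 * screen-space extent clamped to 0..1919 x 0..1079 by the min/max fields.
 */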
void
gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.dynamic.viewport.count > 0) {
      emit_viewport_state(cmd_buffer, cmd_buffer->state.dynamic.viewport.count,
                          cmd_buffer->state.dynamic.viewport.viewports);
   } else {
      /* If viewport count is 0, this is taken to mean "use the default" */
      emit_viewport_state(cmd_buffer, 1,
                          &(VkViewport) {
                             .x = 0,
                             .y = 0,
                             .width = cmd_buffer->state.framebuffer->width,
                             .height = cmd_buffer->state.framebuffer->height,
                             .minDepth = 0,
                             .maxDepth = 1,
                          });
   }
}
static void
emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM),
                  .RegisterOffset = reg,
                  .DataDWord = imm);
}

#define GEN8_L3CNTLREG                  0x7034
static void
config_l3(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
{
   /* References for GL state:
    *
    * - commits e307cfa..228d5a3
    * - src/mesa/drivers/dri/i965/gen7_l3_state.c
    */

   uint32_t val = enable_slm ?
      /* All = 48 ways; URB = 16 ways; DC and RO = 0, SLM = 1 */
      0x60000021 :
      /* All = 48 ways; URB = 48 ways; DC, RO and SLM = 0 */
      0x60000060;
   bool changed = cmd_buffer->state.current_l3_config != val;

   if (changed) {
      /* According to the hardware docs, the L3 partitioning can only be
       * changed while the pipeline is completely drained and the caches are
       * flushed, which involves a first PIPE_CONTROL flush which stalls the
       * pipeline and initiates invalidation of the relevant caches...
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .TextureCacheInvalidationEnable = true,
                     .ConstantCacheInvalidationEnable = true,
                     .InstructionCacheInvalidateEnable = true,
                     .DCFlushEnable = true,
                     .PostSyncOperation = NoWrite,
                     .CommandStreamerStallEnable = true);

      /* ...followed by a second stalling flush which guarantees that
       * invalidation is complete when the L3 configuration registers are
       * modified.
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DCFlushEnable = true,
                     .PostSyncOperation = NoWrite,
                     .CommandStreamerStallEnable = true);

      emit_lri(&cmd_buffer->batch, GEN8_L3CNTLREG, val);
      cmd_buffer->state.current_l3_config = val;
   }
}
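/* Usage note: the 3D path below always calls config_l3(cmd_buffer, false);
 * only the compute path requests the SLM partitioning, based on whether the
 * bound compute shader actually uses shared local memory.
 */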
static void
__emit_genx_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t sf_dw[GENX(3DSTATE_SF_length)];
   struct GENX(3DSTATE_SF) sf = {
      GENX(3DSTATE_SF_header),
      .LineWidth = cmd_buffer->state.dynamic.line_width,
   };
   GENX(3DSTATE_SF_pack)(NULL, sf_dw, &sf);
   anv_batch_emit_merge(&cmd_buffer->batch, sf_dw,
                        cmd_buffer->state.pipeline->gen8.sf);
}
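/* Cherryview note (rationale inferred from the code below): CHV is a gen8
 * part, but its 3DSTATE_SF appears to take the gen9 LineWidth encoding, so
 * the gen9 pack functions are pulled in and used on CHV only.  The merged
 * pipeline dwords still come from the gen8 pipeline state.
 */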
#include "genxml/gen9_pack.h"
static void
__emit_gen9_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t sf_dw[GENX(3DSTATE_SF_length)];
   struct GEN9_3DSTATE_SF sf = {
      GEN9_3DSTATE_SF_header,
      .LineWidth = cmd_buffer->state.dynamic.line_width,
   };
   GEN9_3DSTATE_SF_pack(NULL, sf_dw, &sf);
   anv_batch_emit_merge(&cmd_buffer->batch, sf_dw,
                        cmd_buffer->state.pipeline->gen8.sf);
}
static void
__emit_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->device->info.is_cherryview)
      __emit_gen9_sf_state(cmd_buffer);
   else
      __emit_genx_sf_state(cmd_buffer);
}
void
genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   uint32_t *p;

   uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;

   assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);

   config_l3(cmd_buffer, false);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   if (vb_emit) {
      const uint32_t num_buffers = __builtin_popcount(vb_emit);
      const uint32_t num_dwords = 1 + num_buffers * 4;

      p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
                          GENX(3DSTATE_VERTEX_BUFFERS));
      uint32_t vb, i = 0;
      for_each_bit(vb, vb_emit) {
         struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
         uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;

         struct GENX(VERTEX_BUFFER_STATE) state = {
            .VertexBufferIndex = vb,
            .MemoryObjectControlState = GENX(MOCS),
            .AddressModifyEnable = true,
            .BufferPitch = pipeline->binding_stride[vb],
            .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
            .BufferSize = buffer->size - offset
         };

         GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
         i++;
      }
   }

   cmd_buffer->state.vb_dirty &= ~vb_emit;
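   /* Layout recap: 3DSTATE_VERTEX_BUFFERS is one header dword followed by a
    * four-dword VERTEX_BUFFER_STATE per buffer, hence num_dwords above.
    * E.g. three dirty bindings pack into 1 + 3 * 4 = 13 dwords.
    */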
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
      /* If somebody compiled a pipeline after starting a command buffer the
       * scratch bo may have grown since we started this cmd buffer (and
       * emitted STATE_BASE_ADDRESS). If we're binding that pipeline now,
       * reemit STATE_BASE_ADDRESS so that we use the bigger scratch bo. */
      if (cmd_buffer->state.scratch_size < pipeline->total_scratch)
         anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

      /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
       *
       *    "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
       *    the next 3DPRIMITIVE command after programming the
       *    3DSTATE_PUSH_CONSTANT_ALLOC_VS"
       *
       * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
       * pipeline setup, we need to dirty push constants.
       */
      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
   }
   /* We emit the binding tables and sampler tables first, then emit push
    * constants and then finally emit binding table and sampler table
    * pointers.  It has to happen in this order, since emitting the binding
    * tables may change the push constants (in case of storage images).  After
    * emitting push constants, on SKL+ we have to emit the corresponding
    * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
    */
   uint32_t dirty = 0;
   if (cmd_buffer->state.descriptors_dirty)
      dirty = gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer);

   if (cmd_buffer->state.push_constants_dirty)
      dirty |= cmd_buffer_flush_push_constants(cmd_buffer);

   if (dirty)
      gen7_cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
      gen8_cmd_buffer_emit_viewport(cmd_buffer);

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
      gen7_cmd_buffer_emit_scissor(cmd_buffer);

   genX(cmd_buffer_flush_dynamic_state)(cmd_buffer);
}
void
genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)) {
      __emit_sf_state(cmd_buffer);
   }
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
      uint32_t raster_dw[GENX(3DSTATE_RASTER_length)];
      struct GENX(3DSTATE_RASTER) raster = {
         GENX(3DSTATE_RASTER_header),
         .GlobalDepthOffsetConstant = cmd_buffer->state.dynamic.depth_bias.bias,
         .GlobalDepthOffsetScale = cmd_buffer->state.dynamic.depth_bias.slope,
         .GlobalDepthOffsetClamp = cmd_buffer->state.dynamic.depth_bias.clamp
      };
      GENX(3DSTATE_RASTER_pack)(NULL, raster_dw, &raster);
      anv_batch_emit_merge(&cmd_buffer->batch, raster_dw,
                           pipeline->gen8.raster);
   }
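   /* anv_batch_emit_merge packs the dynamic fields into a scratch dword
    * array and ORs it with the partial packet the pipeline baked at creation
    * time, so static and dynamic state land in a single hardware packet
    * without repacking the whole thing.
    */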
   /* Stencil reference values moved from COLOR_CALC_STATE in gen8 to
    * 3DSTATE_WM_DEPTH_STENCIL in gen9.  That means the dirty bits get split
    * across different state packets for gen8 and gen9.  We handle that by
    * using a big old #if switch here.
    */
#if ANV_GEN == 8
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GENX(COLOR_CALC_STATE_length) * 4,
                                            64);
      struct GENX(COLOR_CALC_STATE) cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
         .StencilReferenceValue = d->stencil_reference.front & 0xff,
         .BackFaceStencilReferenceValue = d->stencil_reference.back & 0xff,
      };
      GENX(COLOR_CALC_STATE_pack)(NULL, cc_state.map, &cc);

      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_CC_STATE_POINTERS),
                     .ColorCalcStatePointer = cc_state.offset,
                     .ColorCalcStatePointerValid = true);
   }
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
      uint32_t wm_depth_stencil_dw[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;

      struct GENX(3DSTATE_WM_DEPTH_STENCIL) wm_depth_stencil = {
         GENX(3DSTATE_WM_DEPTH_STENCIL_header),

         .StencilTestMask = d->stencil_compare_mask.front & 0xff,
         .StencilWriteMask = d->stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,
      };
      GENX(3DSTATE_WM_DEPTH_STENCIL_pack)(NULL, wm_depth_stencil_dw,
                                          &wm_depth_stencil);

      anv_batch_emit_merge(&cmd_buffer->batch, wm_depth_stencil_dw,
                           pipeline->gen8.wm_depth_stencil);
   }
#else
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GEN9_COLOR_CALC_STATE_length * 4,
                                            64);
      struct GEN9_COLOR_CALC_STATE cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
      };
      GEN9_COLOR_CALC_STATE_pack(NULL, cc_state.map, &cc);

      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit(&cmd_buffer->batch,
                     GEN9_3DSTATE_CC_STATE_POINTERS,
                     .ColorCalcStatePointer = cc_state.offset,
                     .ColorCalcStatePointerValid = true);
   }
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      uint32_t dwords[GEN9_3DSTATE_WM_DEPTH_STENCIL_length];
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
      struct GEN9_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil = {
         GEN9_3DSTATE_WM_DEPTH_STENCIL_header,

         .StencilTestMask = d->stencil_compare_mask.front & 0xff,
         .StencilWriteMask = d->stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,

         .StencilReferenceValue = d->stencil_reference.front & 0xff,
         .BackfaceStencilReferenceValue = d->stencil_reference.back & 0xff,
      };
      GEN9_3DSTATE_WM_DEPTH_STENCIL_pack(NULL, dwords, &wm_depth_stencil);

      anv_batch_emit_merge(&cmd_buffer->batch, dwords,
                           pipeline->gen9.wm_depth_stencil);
   }
#endif
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_INDEX_BUFFER)) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF),
                     .IndexedDrawCutIndexEnable = pipeline->primitive_restart,
                     .CutIndex = cmd_buffer->state.restart_index);
   }

   cmd_buffer->state.dirty = 0;
}
void genX(CmdBindIndexBuffer)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    VkIndexType                                 indexType)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   static const uint32_t vk_to_gen_index_type[] = {
      [VK_INDEX_TYPE_UINT16] = INDEX_WORD,
      [VK_INDEX_TYPE_UINT32] = INDEX_DWORD,
   };

   static const uint32_t restart_index_for_type[] = {
      [VK_INDEX_TYPE_UINT16] = UINT16_MAX,
      [VK_INDEX_TYPE_UINT32] = UINT32_MAX,
   };

   cmd_buffer->state.restart_index = restart_index_for_type[indexType];

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER),
                  .IndexFormat = vk_to_gen_index_type[indexType],
                  .MemoryObjectControlState = GENX(MOCS),
                  .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
                  .BufferSize = buffer->size - offset);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
}
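/* Example: vkCmdBindIndexBuffer(cmd, buf, 0, VK_INDEX_TYPE_UINT16) programs
 * INDEX_WORD as the format and arms a restart index of 0xffff; the CutIndex
 * itself only reaches the hardware when 3DSTATE_VF is re-emitted above.
 */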
static VkResult
flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = anv_cmd_buffer_emit_samplers(cmd_buffer,
                                         MESA_SHADER_COMPUTE, &samplers);
   if (result != VK_SUCCESS)
      return result;
   result = anv_cmd_buffer_emit_binding_table(cmd_buffer,
                                              MESA_SHADER_COMPUTE, &surfaces);
   if (result != VK_SUCCESS)
      return result;

   struct anv_state push_state = anv_cmd_buffer_cs_push_constants(cmd_buffer);

   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
   unsigned push_constant_data_size =
      (prog_data->nr_params + local_id_dwords) * 4;
   unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
   unsigned push_constant_regs = reg_aligned_constant_size / 32;

   if (push_state.alloc_size) {
      anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD),
                     .CURBETotalDataLength = push_state.alloc_size,
                     .CURBEDataStartAddress = push_state.offset);
   }

   assert(prog_data->total_shared <= 64 * 1024);
   uint32_t slm_size = 0;
   if (prog_data->total_shared > 0) {
      /* slm_size is in 4k increments, but must be a power of 2. */
      slm_size = 4 * 1024;
      while (slm_size < prog_data->total_shared)
         slm_size <<= 1;
      slm_size /= 4 * 1024;
   }
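   /* Worked example: total_shared = 9 KiB starts at 4096, doubles to 16384
    * (the first power-of-two multiple of 4k that fits), then divides down to
    * a SharedLocalMemorySize encoding of 4, i.e. 16k of SLM.
    */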
   struct anv_state state =
      anv_state_pool_emit(&device->dynamic_state_pool,
                          GENX(INTERFACE_DESCRIPTOR_DATA), 64,
                          .KernelStartPointer = pipeline->cs_simd,
                          .KernelStartPointerHigh = 0,
                          .BindingTablePointer = surfaces.offset,
                          .BindingTableEntryCount = 0,
                          .SamplerStatePointer = samplers.offset,
                          .SamplerCount = 0,
                          .ConstantIndirectURBEntryReadLength = push_constant_regs,
                          .ConstantURBEntryReadOffset = 0,
                          .BarrierEnable = cs_prog_data->uses_barrier,
                          .SharedLocalMemorySize = slm_size,
                          .NumberofThreadsinGPGPUThreadGroup =
                             pipeline->cs_thread_width_max);

   uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
   anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD),
                  .InterfaceDescriptorTotalLength = size,
                  .InterfaceDescriptorDataStartAddress = state.offset);

   return VK_SUCCESS;
}
void
genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   VkResult result;

   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);

   bool needs_slm = cs_prog_data->base.total_shared > 0;
   config_l3(cmd_buffer, needs_slm);

   if (cmd_buffer->state.current_pipeline != GPGPU) {
      /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
       *
       *   Software must clear the COLOR_CALC_STATE Valid field in
       *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
       *   with Pipeline Select set to GPGPU.
       *
       * The internal hardware docs recommend the same workaround for Gen9
       * hardware too.
       */
      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_CC_STATE_POINTERS));

      anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT),
#if ANV_GEN >= 9
                     .MaskBits = 3,
#endif
                     .PipelineSelection = GPGPU);
      cmd_buffer->state.current_pipeline = GPGPU;
   }

   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
      result = flush_compute_descriptor_set(cmd_buffer);
      assert(result == VK_SUCCESS);
      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
   }

   cmd_buffer->state.compute_dirty = 0;
}
void genX(CmdSetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     _event,
    VkPipelineStageFlags                        stageMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_event, event, _event);

   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WriteImmediateData,
                  .Address = {
                     &cmd_buffer->device->dynamic_state_block_pool.bo,
                     event->state.offset
                  },
                  .ImmediateData = VK_EVENT_SET);
}
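/* CmdSetEvent/CmdResetEvent write VK_EVENT_SET/VK_EVENT_RESET into the
 * event's dword in the dynamic state pool from the command streamer;
 * CmdWaitEvents below then polls that same dword with MI_SEMAPHORE_WAIT
 * until it reads back VK_EVENT_SET.
 */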
void genX(CmdResetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     _event,
    VkPipelineStageFlags                        stageMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_event, event, _event);

   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WriteImmediateData,
                  .Address = {
                     &cmd_buffer->device->dynamic_state_block_pool.bo,
                     event->state.offset
                  },
                  .ImmediateData = VK_EVENT_RESET);
}
void genX(CmdWaitEvents)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    eventCount,
    const VkEvent*                              pEvents,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    uint32_t                                    memoryBarrierCount,
    const VkMemoryBarrier*                      pMemoryBarriers,
    uint32_t                                    bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
    uint32_t                                    imageMemoryBarrierCount,
    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   for (uint32_t i = 0; i < eventCount; i++) {
      ANV_FROM_HANDLE(anv_event, event, pEvents[i]);

      anv_batch_emit(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT),
                     .WaitMode = PollingMode,
                     .CompareOperation = COMPARE_SAD_EQUAL_SDD,
                     .SemaphoreDataDword = VK_EVENT_SET,
                     .SemaphoreAddress = {
                        &cmd_buffer->device->dynamic_state_block_pool.bo,
                        event->state.offset
                     });
   }

   genX(CmdPipelineBarrier)(commandBuffer, srcStageMask, destStageMask,
                            false, /* byRegion */
                            memoryBarrierCount, pMemoryBarriers,
                            bufferMemoryBarrierCount, pBufferMemoryBarriers,
                            imageMemoryBarrierCount, pImageMemoryBarriers);
}