/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

static uint32_t
cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   static const uint32_t push_constant_opcodes[] = {
      [MESA_SHADER_VERTEX]    = 21,
      [MESA_SHADER_TESS_CTRL] = 25, /* HS */
      [MESA_SHADER_TESS_EVAL] = 26, /* DS */
      [MESA_SHADER_GEOMETRY]  = 22,
      [MESA_SHADER_FRAGMENT]  = 23,
      [MESA_SHADER_COMPUTE]   = 0,
   };

   VkShaderStageFlags flushed = 0;

   anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
      if (stage == MESA_SHADER_COMPUTE)
         continue;

      struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);

      if (state.offset == 0) {
         /* Nothing to push: emit the packet with no constant buffer bound. */
         anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS),
                        ._3DCommandSubOpcode = push_constant_opcodes[stage]);
      } else {
         anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS),
                        ._3DCommandSubOpcode = push_constant_opcodes[stage],
                        .ConstantBody = {
                           .PointerToConstantBuffer2 = {
                              &cmd_buffer->device->dynamic_state_block_pool.bo,
                              state.offset,
                           },
                           .ConstantBuffer2ReadLength =
                              DIV_ROUND_UP(state.alloc_size, 32),
                        });
      }

      flushed |= mesa_to_vk_shader_stage(stage);
   }

   cmd_buffer->state.push_constants_dirty &= ~flushed;

   return flushed;
}

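/* Sizing note (illustrative, not from the original source): push constants
 * are read by the hardware in 256-bit (32-byte) registers, so a stage whose
 * anv_state has alloc_size = 64 bytes programs
 * ConstantBuffer2ReadLength = DIV_ROUND_UP(64, 32) = 2 registers.
 */
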
static void
emit_viewport_state(struct anv_cmd_buffer *cmd_buffer,
                    uint32_t count, const VkViewport *viewports)
{
   struct anv_state sf_clip_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 64, 64);
   struct anv_state cc_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 8, 32);

   for (uint32_t i = 0; i < count; i++) {
      const VkViewport *vp = &viewports[i];

      /* The gen7 state struct has just the matrix and guardband fields, the
       * gen8 struct adds the min/max viewport fields. */
      struct GENX(SF_CLIP_VIEWPORT) sf_clip_viewport = {
         .ViewportMatrixElementm00 = vp->width / 2,
         .ViewportMatrixElementm11 = vp->height / 2,
         .ViewportMatrixElementm22 = 1.0,
         .ViewportMatrixElementm30 = vp->x + vp->width / 2,
         .ViewportMatrixElementm31 = vp->y + vp->height / 2,
         .ViewportMatrixElementm32 = 0.0,
         .XMinClipGuardband = -1.0f,
         .XMaxClipGuardband = 1.0f,
         .YMinClipGuardband = -1.0f,
         .YMaxClipGuardband = 1.0f,
         .XMinViewPort = vp->x,
         .XMaxViewPort = vp->x + vp->width - 1,
         .YMinViewPort = vp->y,
         .YMaxViewPort = vp->y + vp->height - 1,
      };

      struct GENX(CC_VIEWPORT) cc_viewport = {
         .MinimumDepth = vp->minDepth,
         .MaximumDepth = vp->maxDepth,
      };

      GENX(SF_CLIP_VIEWPORT_pack)(NULL, sf_clip_state.map + i * 64,
                                  &sf_clip_viewport);
      GENX(CC_VIEWPORT_pack)(NULL, cc_state.map + i * 8, &cc_viewport);
   }

   if (!cmd_buffer->device->info.has_llc) {
      anv_state_clflush(sf_clip_state);
      anv_state_clflush(cc_state);
   }

   anv_batch_emit(&cmd_buffer->batch,
                  GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC),
                  .CCViewportPointer = cc_state.offset);
   anv_batch_emit(&cmd_buffer->batch,
                  GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP),
                  .SFClipViewportPointer = sf_clip_state.offset);
}

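/* Worked example for the viewport transform above (illustrative values,
 * assuming VkViewport x = 0, y = 0, width = 1920, height = 1080):
 *
 *    m00 = 960, m11 = 540   (scale: half the viewport extent)
 *    m30 = 960, m31 = 540   (translate: viewport center)
 *    XMaxViewPort = 1919, YMaxViewPort = 1079   (inclusive bounds)
 *
 * so NDC (-1,-1) lands on pixel (0,0) and NDC (1,1) on (1920,1080).
 */
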
void
gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.dynamic.viewport.count > 0) {
      emit_viewport_state(cmd_buffer, cmd_buffer->state.dynamic.viewport.count,
                          cmd_buffer->state.dynamic.viewport.viewports);
   } else {
      /* If viewport count is 0, this is taken to mean "use the default" */
      emit_viewport_state(cmd_buffer, 1,
                          &(VkViewport) {
                             .x = 0.0f,
                             .y = 0.0f,
                             .width = cmd_buffer->state.framebuffer->width,
                             .height = cmd_buffer->state.framebuffer->height,
                             .minDepth = 0.0f,
                             .maxDepth = 1.0f,
                          });
   }
}

static void
emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM),
                  .RegisterOffset = reg,
                  .DataDWord = imm);
}

#define GEN8_L3CNTLREG 0x7034

static void
config_l3(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
{
   /* References for GL state:
    *
    * - commits e307cfa..228d5a3
    * - src/mesa/drivers/dri/i965/gen7_l3_state.c
    */

   uint32_t val = enable_slm ?
      /* All = 48 ways; URB = 16 ways; DC and RO = 0, SLM = 1 */
      0x60000021 :
      /* All = 48 ways; URB = 48 ways; DC, RO and SLM = 0 */
      0x60000060;
   bool changed = cmd_buffer->state.current_l3_config != val;

   if (changed) {
      /* According to the hardware docs, the L3 partitioning can only be
       * changed while the pipeline is completely drained and the caches are
       * flushed, which involves a first PIPE_CONTROL flush which stalls the
       * pipeline and initiates invalidation of the relevant caches...
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .TextureCacheInvalidationEnable = true,
                     .ConstantCacheInvalidationEnable = true,
                     .InstructionCacheInvalidateEnable = true,
                     .DCFlushEnable = true,
                     .PostSyncOperation = NoWrite,
                     .CommandStreamerStallEnable = true);

      /* ...followed by a second stalling flush which guarantees that
       * invalidation is complete when the L3 configuration registers are
       * modified.
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DCFlushEnable = true,
                     .PostSyncOperation = NoWrite,
                     .CommandStreamerStallEnable = true);

      emit_lri(&cmd_buffer->batch, GEN8_L3CNTLREG, val);
      cmd_buffer->state.current_l3_config = val;
   }
}

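/* Usage sketch: both flush paths below go through config_l3().  The 3D path
 * always passes enable_slm = false, while the compute path passes true
 * whenever the kernel uses shared local memory, so the repartitioning (and
 * its two stalling flushes) only happens when the value actually changes.
 */
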
static void
__emit_genx_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t sf_dw[GENX(3DSTATE_SF_length)];
   struct GENX(3DSTATE_SF) sf = {
      GENX(3DSTATE_SF_header),
      .LineWidth = cmd_buffer->state.dynamic.line_width,
   };
   GENX(3DSTATE_SF_pack)(NULL, sf_dw, &sf);

   anv_batch_emit_merge(&cmd_buffer->batch, sf_dw,
                        cmd_buffer->state.pipeline->gen8.sf);
}

#include "genxml/gen9_pack.h"
static void
__emit_gen9_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t sf_dw[GENX(3DSTATE_SF_length)];
   struct GEN9_3DSTATE_SF sf = {
      GEN9_3DSTATE_SF_header,
      .LineWidth = cmd_buffer->state.dynamic.line_width,
   };
   GEN9_3DSTATE_SF_pack(NULL, sf_dw, &sf);

   anv_batch_emit_merge(&cmd_buffer->batch, sf_dw,
                        cmd_buffer->state.pipeline->gen8.sf);
}

static void
__emit_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->device->info.is_cherryview)
      __emit_gen9_sf_state(cmd_buffer);
   else
      __emit_genx_sf_state(cmd_buffer);
}

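/* Note on the Cherryview special case above: CHV is a gen8 part, but its
 * 3DSTATE_SF takes the gen9 layout for the LineWidth field, which is
 * presumably why the GEN9 pack functions are used for it while the packed
 * dwords are still merged with the gen8 pipeline template.
 */
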
void
genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   uint32_t *p;

   uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;

   assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);

   config_l3(cmd_buffer, false);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   if (vb_emit) {
      const uint32_t num_buffers = __builtin_popcount(vb_emit);
      const uint32_t num_dwords = 1 + num_buffers * 4;

      p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
                          GENX(3DSTATE_VERTEX_BUFFERS));
      uint32_t vb, i = 0;
      for_each_bit(vb, vb_emit) {
         struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
         uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;

         struct GENX(VERTEX_BUFFER_STATE) state = {
            .VertexBufferIndex = vb,
            .MemoryObjectControlState = GENX(MOCS),
            .AddressModifyEnable = true,
            .BufferPitch = pipeline->binding_stride[vb],
            .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
            .BufferSize = buffer->size - offset,
         };

         GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4],
                                        &state);
         i++;
      }
   }

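   /* Sizing sketch for the packet above: 3DSTATE_VERTEX_BUFFERS is one
    * header dword plus one 4-dword VERTEX_BUFFER_STATE per buffer, hence
    * num_dwords = 1 + num_buffers * 4.  For example, three dirty bindings
    * yield a 13-dword packet, with buffer i packed at &p[1 + i * 4].
    */
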
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
      /* If somebody compiled a pipeline after starting a command buffer the
       * scratch bo may have grown since we started this cmd buffer (and
       * emitted STATE_BASE_ADDRESS).  If we're binding that pipeline now,
       * reemit STATE_BASE_ADDRESS so that we use the bigger scratch bo. */
      if (cmd_buffer->state.scratch_size < pipeline->total_scratch)
         anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

      /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
       *
       *    "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
       *    the next 3DPRIMITIVE command after programming the
       *    3DSTATE_PUSH_CONSTANT_ALLOC_VS"
       *
       * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
       * pipeline setup, we need to dirty push constants.
       */
      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
   }

   /* We emit the binding tables and sampler tables first, then emit push
    * constants and then finally emit binding table and sampler table
    * pointers.  It has to happen in this order, since emitting the binding
    * tables may change the push constants (in case of storage images).  After
    * emitting push constants, on SKL+ we have to emit the corresponding
    * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
    */
   uint32_t dirty = 0;
   if (cmd_buffer->state.descriptors_dirty)
      dirty = gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer);

   if (cmd_buffer->state.push_constants_dirty)
      dirty |= cmd_buffer_flush_push_constants(cmd_buffer);

   if (dirty)
      gen7_cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
      gen8_cmd_buffer_emit_viewport(cmd_buffer);

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
      gen7_cmd_buffer_emit_scissor(cmd_buffer);

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)) {
      __emit_sf_state(cmd_buffer);
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
      uint32_t raster_dw[GENX(3DSTATE_RASTER_length)];
      struct GENX(3DSTATE_RASTER) raster = {
         GENX(3DSTATE_RASTER_header),
         .GlobalDepthOffsetConstant = cmd_buffer->state.dynamic.depth_bias.bias,
         .GlobalDepthOffsetScale = cmd_buffer->state.dynamic.depth_bias.slope,
         .GlobalDepthOffsetClamp = cmd_buffer->state.dynamic.depth_bias.clamp,
      };
      GENX(3DSTATE_RASTER_pack)(NULL, raster_dw, &raster);
      anv_batch_emit_merge(&cmd_buffer->batch, raster_dw,
                           pipeline->gen8.raster);
   }

   /* Stencil reference values moved from COLOR_CALC_STATE in gen8 to
    * 3DSTATE_WM_DEPTH_STENCIL in gen9.  That means the dirty bits get split
    * across different state packets for gen8 and gen9.  We handle that by
    * using a big old #if switch here.
    */
#if GEN_GEN == 8
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GENX(COLOR_CALC_STATE_length) * 4,
                                            64);
      struct GENX(COLOR_CALC_STATE) cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
         .StencilReferenceValue = d->stencil_reference.front & 0xff,
         .BackFaceStencilReferenceValue = d->stencil_reference.back & 0xff,
      };
      GENX(COLOR_CALC_STATE_pack)(NULL, cc_state.map, &cc);

      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_CC_STATE_POINTERS),
                     .ColorCalcStatePointer = cc_state.offset,
                     .ColorCalcStatePointerValid = true);
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
      uint32_t wm_depth_stencil_dw[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;

      struct GENX(3DSTATE_WM_DEPTH_STENCIL) wm_depth_stencil = {
         GENX(3DSTATE_WM_DEPTH_STENCIL_header),

         .StencilTestMask = d->stencil_compare_mask.front & 0xff,
         .StencilWriteMask = d->stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,
      };
      GENX(3DSTATE_WM_DEPTH_STENCIL_pack)(NULL, wm_depth_stencil_dw,
                                          &wm_depth_stencil);

      anv_batch_emit_merge(&cmd_buffer->batch, wm_depth_stencil_dw,
                           pipeline->gen8.wm_depth_stencil);
   }
#else
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GEN9_COLOR_CALC_STATE_length * 4,
                                            64);
      struct GEN9_COLOR_CALC_STATE cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
      };
      GEN9_COLOR_CALC_STATE_pack(NULL, cc_state.map, &cc);

      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit(&cmd_buffer->batch,
                     GEN9_3DSTATE_CC_STATE_POINTERS,
                     .ColorCalcStatePointer = cc_state.offset,
                     .ColorCalcStatePointerValid = true);
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      uint32_t dwords[GEN9_3DSTATE_WM_DEPTH_STENCIL_length];
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
      struct GEN9_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil = {
         GEN9_3DSTATE_WM_DEPTH_STENCIL_header,

         .StencilBufferWriteEnable = d->stencil_write_mask.front != 0 ||
                                     d->stencil_write_mask.back != 0,

         .StencilTestMask = d->stencil_compare_mask.front & 0xff,
         .StencilWriteMask = d->stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,

         .StencilReferenceValue = d->stencil_reference.front & 0xff,
         .BackfaceStencilReferenceValue = d->stencil_reference.back & 0xff,
      };
      GEN9_3DSTATE_WM_DEPTH_STENCIL_pack(NULL, dwords, &wm_depth_stencil);

      anv_batch_emit_merge(&cmd_buffer->batch, dwords,
                           pipeline->gen9.wm_depth_stencil);
   }
#endif

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_INDEX_BUFFER)) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF),
                     .IndexedDrawCutIndexEnable = pipeline->primitive_restart,
                     .CutIndex = cmd_buffer->state.restart_index);
   }

   cmd_buffer->state.vb_dirty &= ~vb_emit;
   cmd_buffer->state.dirty = 0;
}

void genX(CmdBindIndexBuffer)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    VkIndexType                                 indexType)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   static const uint32_t vk_to_gen_index_type[] = {
      [VK_INDEX_TYPE_UINT16] = INDEX_WORD,
      [VK_INDEX_TYPE_UINT32] = INDEX_DWORD,
   };

   static const uint32_t restart_index_for_type[] = {
      [VK_INDEX_TYPE_UINT16] = UINT16_MAX,
      [VK_INDEX_TYPE_UINT32] = UINT32_MAX,
   };

   cmd_buffer->state.restart_index = restart_index_for_type[indexType];

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER),
                  .IndexFormat = vk_to_gen_index_type[indexType],
                  .MemoryObjectControlState = GENX(MOCS),
                  .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
                  .BufferSize = buffer->size - offset);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
}

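/* Illustration: binding an index buffer with VK_INDEX_TYPE_UINT16 sets
 * restart_index = 0xffff, so a subsequent 3DSTATE_VF emitted by
 * cmd_buffer_flush_state with IndexedDrawCutIndexEnable set will cut
 * primitives at that index value.
 */
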
static VkResult
flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = anv_cmd_buffer_emit_samplers(cmd_buffer,
                                         MESA_SHADER_COMPUTE, &samplers);
   if (result != VK_SUCCESS)
      return result;
   result = anv_cmd_buffer_emit_binding_table(cmd_buffer,
                                              MESA_SHADER_COMPUTE, &surfaces);
   if (result != VK_SUCCESS)
      return result;

   struct anv_state push_state = anv_cmd_buffer_cs_push_constants(cmd_buffer);

   const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
   unsigned push_constant_data_size =
      (prog_data->nr_params + local_id_dwords) * 4;
   unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
   unsigned push_constant_regs = reg_aligned_constant_size / 32;

   if (push_state.alloc_size) {
      anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD),
                     .CURBETotalDataLength = push_state.alloc_size,
                     .CURBEDataStartAddress = push_state.offset);
   }

   assert(prog_data->total_shared <= 64 * 1024);
   uint32_t slm_size = 0;
   if (prog_data->total_shared > 0) {
      /* slm_size is in 4k increments, but must be a power of 2. */
      slm_size = 4 * 1024;
      while (slm_size < prog_data->total_shared)
         slm_size <<= 1;
      slm_size /= 4 * 1024;
   }

   struct anv_state state =
      anv_state_pool_emit(&device->dynamic_state_pool,
                          GENX(INTERFACE_DESCRIPTOR_DATA), 64,
                          .KernelStartPointer = pipeline->cs_simd,
                          .KernelStartPointerHigh = 0,
                          .BindingTablePointer = surfaces.offset,
                          .BindingTableEntryCount = 0,
                          .SamplerStatePointer = samplers.offset,
                          .SamplerCount = 0,
                          .ConstantIndirectURBEntryReadLength = push_constant_regs,
                          .ConstantURBEntryReadOffset = 0,
                          .BarrierEnable = cs_prog_data->uses_barrier,
                          .SharedLocalMemorySize = slm_size,
                          .NumberofThreadsinGPGPUThreadGroup =
                             pipeline->cs_thread_width_max);

   uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
   anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD),
                  .InterfaceDescriptorTotalLength = size,
                  .InterfaceDescriptorDataStartAddress = state.offset);

   return VK_SUCCESS;
}

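/* Worked example for the SLM sizing above (illustrative): a kernel with
 * total_shared = 9 KB starts slm_size at 4096 and doubles it (8192, then
 * 16384) until it covers the allocation, then divides by 4096, programming
 * SharedLocalMemorySize with 4 (i.e. 16 KB expressed in 4k units).
 */
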
void
genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   VkResult result;

   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);

   bool needs_slm = pipeline->cs_prog_data.base.total_shared > 0;
   config_l3(cmd_buffer, needs_slm);

   if (cmd_buffer->state.current_pipeline != GPGPU) {
      /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
       *
       *   Software must clear the COLOR_CALC_STATE Valid field in
       *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
       *   with Pipeline Select set to GPGPU.
       *
       * The internal hardware docs recommend the same workaround for Gen9
       * hardware too.
       */
      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_CC_STATE_POINTERS));

      anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT),
#if GEN_GEN >= 9
                     .MaskBits = 3,
#endif
                     .PipelineSelection = GPGPU);
      cmd_buffer->state.current_pipeline = GPGPU;
   }

   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
      result = flush_compute_descriptor_set(cmd_buffer);
      assert(result == VK_SUCCESS);
      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
   }

   cmd_buffer->state.compute_dirty = 0;
}

static void
emit_ps_depth_count(struct anv_batch *batch,
                    struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WritePSDepthCount,
                  .DepthStallEnable = true,
                  .Address = { bo, offset });
}

static void
emit_query_availability(struct anv_batch *batch,
                        struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WriteImmediateData,
                  .Address = { bo, offset },
                  .ImmediateData = 1);
}

void genX(CmdBeginQuery)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query,
    VkQueryControlFlags                         flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   /* Workaround: When meta uses the pipeline with the VS disabled, it seems
    * that the pipelining of the depth write breaks.  What we see is that
    * samples from the render pass clear leak into the first query
    * immediately after the clear.  Doing a pipecontrol with a post-sync
    * operation and DepthStallEnable seems to work around the issue.
    */
   if (cmd_buffer->state.need_query_wa) {
      cmd_buffer->state.need_query_wa = false;
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DepthCacheFlushEnable = true,
                     .DepthStallEnable = true);
   }

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
                          query * sizeof(struct anv_query_pool_slot));
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      unreachable("");
   }
}

void genX(CmdEndQuery)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
                          query * sizeof(struct anv_query_pool_slot) + 8);

      emit_query_availability(&cmd_buffer->batch, &pool->bo,
                              query * sizeof(struct anv_query_pool_slot) + 16);
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      unreachable("");
   }
}

#define TIMESTAMP 0x2358

void genX(CmdWriteTimestamp)(
    VkCommandBuffer                             commandBuffer,
    VkPipelineStageFlagBits                     pipelineStage,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   uint32_t offset = query * sizeof(struct anv_query_pool_slot);

   assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);

   switch (pipelineStage) {
   case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
                     .RegisterAddress = TIMESTAMP,
                     .MemoryAddress = { &pool->bo, offset });
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
                     .RegisterAddress = TIMESTAMP + 4,
                     .MemoryAddress = { &pool->bo, offset + 4 });
      break;

   default:
      /* Everything else is bottom-of-pipe */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DestinationAddressType = DAT_PPGTT,
                     .PostSyncOperation = WriteTimestamp,
                     .Address = { &pool->bo, offset });
      break;
   }

   emit_query_availability(&cmd_buffer->batch, &pool->bo, offset + 16);
}

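/* The TIMESTAMP register is 64 bits wide but MI_STORE_REGISTER_MEM moves a
 * single dword per command, which is why the top-of-pipe path above issues
 * two stores: the low dword at offset and the high dword at offset + 4.
 */
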
#define alu_opcode(v)   __gen_uint((v), 20, 31)
#define alu_operand1(v) __gen_uint((v), 10, 19)
#define alu_operand2(v) __gen_uint((v), 0, 9)
#define alu(opcode, operand1, operand2) \
   alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)

#define OPCODE_NOOP     0x000
#define OPCODE_LOAD     0x080
#define OPCODE_LOADINV  0x480
#define OPCODE_LOAD0    0x081
#define OPCODE_LOAD1    0x481
#define OPCODE_ADD      0x100
#define OPCODE_SUB      0x101
#define OPCODE_AND      0x102
#define OPCODE_OR       0x103
#define OPCODE_XOR      0x104
#define OPCODE_STORE    0x180
#define OPCODE_STOREINV 0x580

#define OPERAND_R0   0x00
#define OPERAND_R1   0x01
#define OPERAND_R2   0x02
#define OPERAND_R3   0x03
#define OPERAND_R4   0x04
#define OPERAND_SRCA 0x20
#define OPERAND_SRCB 0x21
#define OPERAND_ACCU 0x31
#define OPERAND_ZF   0x32
#define OPERAND_CF   0x33

#define CS_GPR(n) (0x2600 + (n) * 8)

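/* Encoding sketch: alu() packs one MI_MATH instruction dword.  For example,
 *
 *    alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1)
 *       = (0x080 << 20) | (0x20 << 10) | 0x01
 *       = 0x08008001
 *
 * i.e. "load ALU source A from CS GPR1".
 */
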
static void
emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
                      struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
                  .RegisterAddress = reg,
                  .MemoryAddress = { bo, offset });
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
                  .RegisterAddress = reg + 4,
                  .MemoryAddress = { bo, offset + 4 });
}

static void
store_query_result(struct anv_batch *batch, uint32_t reg,
                   struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
{
   anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
                  .RegisterAddress = reg,
                  .MemoryAddress = { bo, offset });

   if (flags & VK_QUERY_RESULT_64_BIT)
      anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
                     .RegisterAddress = reg + 4,
                     .MemoryAddress = { bo, offset + 4 });
}

void genX(CmdCopyQueryPoolResults)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    firstQuery,
    uint32_t                                    queryCount,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset,
    VkDeviceSize                                destStride,
    VkQueryResultFlags                          flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
   uint32_t slot_offset, dst_offset;

   if (flags & VK_QUERY_RESULT_WAIT_BIT)
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .CommandStreamerStallEnable = true,
                     .StallAtPixelScoreboard = true);

   dst_offset = buffer->offset + destOffset;
   for (uint32_t i = 0; i < queryCount; i++) {

      slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
      switch (pool->type) {
      case VK_QUERY_TYPE_OCCLUSION:
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(0), &pool->bo, slot_offset);
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(1), &pool->bo, slot_offset + 8);

         /* FIXME: We need to clamp the result for 32 bit. */

         uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
         dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);  /* srcA = end */
         dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);  /* srcB = begin */
         dw[3] = alu(OPCODE_SUB, 0, 0);                       /* accu = srcA - srcB */
         dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU); /* R2 = accu */
         break;

      case VK_QUERY_TYPE_TIMESTAMP:
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(2), &pool->bo, slot_offset);
         break;

      default:
         unreachable("unhandled query type");
      }

      store_query_result(&cmd_buffer->batch,
                         CS_GPR(2), buffer->bo, dst_offset, flags);

      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
         emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
                               &pool->bo, slot_offset + 16);
         if (flags & VK_QUERY_RESULT_64_BIT)
            store_query_result(&cmd_buffer->batch,
                               CS_GPR(0), buffer->bo, dst_offset + 8, flags);
         else
            store_query_result(&cmd_buffer->batch,
                               CS_GPR(0), buffer->bo, dst_offset + 4, flags);
      }

      dst_offset += destStride;
   }
}

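/* Slot layout assumed throughout the query code above: each
 * anv_query_pool_slot holds the begin value at +0, the end value at +8 and
 * the availability word at +16.  That is why the occlusion path loads
 * slot_offset and slot_offset + 8 into GPR0/GPR1 and the MI_MATH program
 * stores end - begin into GPR2 for store_query_result().
 */
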
void genX(CmdSetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     _event,
    VkPipelineStageFlags                        stageMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_event, event, _event);

   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WriteImmediateData,
                  .Address = {
                     &cmd_buffer->device->dynamic_state_block_pool.bo,
                     event->state.offset,
                  },
                  .ImmediateData = VK_EVENT_SET);
}

void genX(CmdResetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     _event,
    VkPipelineStageFlags                        stageMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_event, event, _event);

   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WriteImmediateData,
                  .Address = {
                     &cmd_buffer->device->dynamic_state_block_pool.bo,
                     event->state.offset,
                  },
                  .ImmediateData = VK_EVENT_RESET);
}

void genX(CmdWaitEvents)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    eventCount,
    const VkEvent*                              pEvents,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    uint32_t                                    memoryBarrierCount,
    const VkMemoryBarrier*                      pMemoryBarriers,
    uint32_t                                    bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
    uint32_t                                    imageMemoryBarrierCount,
    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   for (uint32_t i = 0; i < eventCount; i++) {
      ANV_FROM_HANDLE(anv_event, event, pEvents[i]);

      anv_batch_emit(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT),
                     .WaitMode = PollingMode,
                     .CompareOperation = COMPARE_SAD_EQUAL_SDD,
                     .SemaphoreDataDword = VK_EVENT_SET,
                     .SemaphoreAddress = {
                        &cmd_buffer->device->dynamic_state_block_pool.bo,
                        event->state.offset,
                     });
   }

   genX(CmdPipelineBarrier)(commandBuffer, srcStageMask, destStageMask,
                            false, /* byRegion */
                            memoryBarrierCount, pMemoryBarriers,
                            bufferMemoryBarrierCount, pBufferMemoryBarriers,
                            imageMemoryBarrierCount, pImageMemoryBarriers);
}