/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "anv_private.h"

/* gen8_pack.h and gen9_pack.h define the same GENX() symbols, so only one of
 * them may be included per compilation unit.
 */
#if (ANV_GEN == 8)
#  include "genxml/gen8_pack.h"
#else
#  include "genxml/gen9_pack.h"
#endif
static VkShaderStageFlags
cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   static const uint32_t push_constant_opcodes[] = {
      [MESA_SHADER_VERTEX]    = 21,
      [MESA_SHADER_TESS_CTRL] = 25, /* HS */
      [MESA_SHADER_TESS_EVAL] = 26, /* DS */
      [MESA_SHADER_GEOMETRY]  = 22,
      [MESA_SHADER_FRAGMENT]  = 23,
      [MESA_SHADER_COMPUTE]   = 0,
   };

   VkShaderStageFlags flushed = 0;

   anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
      if (stage == MESA_SHADER_COMPUTE)
         continue;

      struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);

      if (state.offset == 0)
         continue;

      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS),
                     ._3DCommandSubOpcode = push_constant_opcodes[stage],
                     .ConstantBody = {
                        .PointerToConstantBuffer2 = { &cmd_buffer->device->dynamic_state_block_pool.bo,
                                                      state.offset },
                        .ConstantBuffer2ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
                     });

      flushed |= mesa_to_vk_shader_stage(stage);
   }

   cmd_buffer->state.push_constants_dirty &= ~flushed;

   return flushed;
}
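/* Note on the emit above: the 3DSTATE_CONSTANT_* packets for all stages
 * share one layout and differ only in the 3D command sub-opcode, so a single
 * 3DSTATE_CONSTANT_VS template is emitted and ._3DCommandSubOpcode is
 * patched with the per-stage value from push_constant_opcodes[]
 * (21 = VS, 25 = HS, 26 = DS, 22 = GS, 23 = PS).
 */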
static void
emit_viewport_state(struct anv_cmd_buffer *cmd_buffer,
                    uint32_t count, const VkViewport *viewports)
{
   struct anv_state sf_clip_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 64, 64);
   struct anv_state cc_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 8, 32);

   for (uint32_t i = 0; i < count; i++) {
      const VkViewport *vp = &viewports[i];

      /* The gen7 state struct has just the matrix and guardband fields, the
       * gen8 struct adds the min/max viewport fields.
       */
      struct GENX(SF_CLIP_VIEWPORT) sf_clip_viewport = {
         .ViewportMatrixElementm00 = vp->width / 2,
         .ViewportMatrixElementm11 = vp->height / 2,
         .ViewportMatrixElementm22 = 1.0,
         .ViewportMatrixElementm30 = vp->x + vp->width / 2,
         .ViewportMatrixElementm31 = vp->y + vp->height / 2,
         .ViewportMatrixElementm32 = 0.0,
         .XMinClipGuardband = -1.0f,
         .XMaxClipGuardband = 1.0f,
         .YMinClipGuardband = -1.0f,
         .YMaxClipGuardband = 1.0f,
         .XMinViewPort = vp->x,
         .XMaxViewPort = vp->x + vp->width - 1,
         .YMinViewPort = vp->y,
         .YMaxViewPort = vp->y + vp->height - 1,
      };
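      /* Worked example of the matrix above: with x = 0, y = 0, width = 1920,
       * height = 1080, an NDC x of -1 maps to m00 * -1 + m30 = -960 + 960 = 0
       * and an NDC x of +1 maps to 960 + 960 = 1920, i.e. [-1, 1] is scaled
       * and translated to [x, x + width] (and likewise for y).
       */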
      struct GENX(CC_VIEWPORT) cc_viewport = {
         .MinimumDepth = vp->minDepth,
         .MaximumDepth = vp->maxDepth
      };

      GENX(SF_CLIP_VIEWPORT_pack)(NULL, sf_clip_state.map + i * 64,
                                  &sf_clip_viewport);
      GENX(CC_VIEWPORT_pack)(NULL, cc_state.map + i * 8, &cc_viewport);
   }

   if (!cmd_buffer->device->info.has_llc) {
      anv_state_clflush(sf_clip_state);
      anv_state_clflush(cc_state);
   }

   anv_batch_emit(&cmd_buffer->batch,
                  GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC),
                  .CCViewportPointer = cc_state.offset);
   anv_batch_emit(&cmd_buffer->batch,
                  GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP),
                  .SFClipViewportPointer = sf_clip_state.offset);
}
void
gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.dynamic.viewport.count > 0) {
      emit_viewport_state(cmd_buffer, cmd_buffer->state.dynamic.viewport.count,
                          cmd_buffer->state.dynamic.viewport.viewports);
   } else {
      /* If viewport count is 0, this is taken to mean "use the default" */
      emit_viewport_state(cmd_buffer, 1,
                          &(VkViewport) {
                             .x = 0.0f,
                             .y = 0.0f,
                             .width = cmd_buffer->state.framebuffer->width,
                             .height = cmd_buffer->state.framebuffer->height,
                             .minDepth = 0.0f,
                             .maxDepth = 1.0f,
                          });
   }
}
static void
emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM),
                  .RegisterOffset = reg,
                  .DataDWord = imm);
}
#define GEN8_L3CNTLREG 0x7034

static void
config_l3(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
{
   /* References for GL state:
    *
    * - commits e307cfa..228d5a3
    * - src/mesa/drivers/dri/i965/gen7_l3_state.c
    */

   /* Register values per the i965 L3 partitioning code referenced above. */
   uint32_t val = enable_slm ?
      /* All = 48 ways; URB = 16 ways; DC and RO = 0, SLM = 1 */
      0x60000121 :
      /* All = 48 ways; URB = 48 ways; DC, RO and SLM = 0 */
      0x60000160;
   bool changed = cmd_buffer->state.current_l3_config != val;

   if (changed) {
      /* According to the hardware docs, the L3 partitioning can only be
       * changed while the pipeline is completely drained and the caches are
       * flushed, which involves a first PIPE_CONTROL flush which stalls the
       * pipeline and initiates invalidation of the relevant caches...
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .TextureCacheInvalidationEnable = true,
                     .ConstantCacheInvalidationEnable = true,
                     .InstructionCacheInvalidateEnable = true,
                     .DCFlushEnable = true,
                     .PostSyncOperation = NoWrite,
                     .CommandStreamerStallEnable = true);

      /* ...followed by a second stalling flush which guarantees that
       * invalidation is complete when the L3 configuration registers are
       * modified.
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DCFlushEnable = true,
                     .PostSyncOperation = NoWrite,
                     .CommandStreamerStallEnable = true);

      emit_lri(&cmd_buffer->batch, GEN8_L3CNTLREG, val);
      cmd_buffer->state.current_l3_config = val;
   }
}
static void
__emit_genx_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t sf_dw[GENX(3DSTATE_SF_length)];
   struct GENX(3DSTATE_SF) sf = {
      GENX(3DSTATE_SF_header),
      .LineWidth = cmd_buffer->state.dynamic.line_width,
   };

   GENX(3DSTATE_SF_pack)(NULL, sf_dw, &sf);

   anv_batch_emit_merge(&cmd_buffer->batch, sf_dw,
                        cmd_buffer->state.pipeline->gen8.sf);
}
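/* anv_batch_emit_merge() combines the dynamic dwords packed above with the
 * dwords baked into the pipeline at creation time (pipeline->gen8.sf) by
 * OR-ing the two arrays together, so each field may be written by only one
 * of the two halves.
 */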
static void
__emit_gen9_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t sf_dw[GENX(3DSTATE_SF_length)];
   struct GEN9_3DSTATE_SF sf = {
      GEN9_3DSTATE_SF_header,
      .LineWidth = cmd_buffer->state.dynamic.line_width,
   };

   GEN9_3DSTATE_SF_pack(NULL, sf_dw, &sf);

   anv_batch_emit_merge(&cmd_buffer->batch, sf_dw,
                        cmd_buffer->state.pipeline->gen8.sf);
}
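/* Cherryview appears to use the gen9 bit layout for the line width field of
 * 3DSTATE_SF, which is why the dispatch below takes the GEN9_* path on that
 * platform even though this file is otherwise built with the gen8 pack
 * structs.
 */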
static void
__emit_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->device->info.is_cherryview)
      __emit_gen9_sf_state(cmd_buffer);
   else
      __emit_genx_sf_state(cmd_buffer);
}
void
genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   uint32_t *p;

   uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;

   assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);

   config_l3(cmd_buffer, false);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   if (vb_emit) {
      const uint32_t num_buffers = __builtin_popcount(vb_emit);
      const uint32_t num_dwords = 1 + num_buffers * 4;

      p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
                          GENX(3DSTATE_VERTEX_BUFFERS));
      uint32_t vb, i = 0;
      for_each_bit(vb, vb_emit) {
         struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
         uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;

         struct GENX(VERTEX_BUFFER_STATE) state = {
            .VertexBufferIndex = vb,
            .MemoryObjectControlState = GENX(MOCS),
            .AddressModifyEnable = true,
            .BufferPitch = pipeline->binding_stride[vb],
            .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
            .BufferSize = buffer->size - offset
         };

         GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4],
                                        &state);
         i++;
      }
   }
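   /* Layout note on the block above: 3DSTATE_VERTEX_BUFFERS is one header
    * dword followed by a 4-dword VERTEX_BUFFER_STATE per buffer, hence
    * num_dwords = 1 + num_buffers * 4 and the &p[1 + i * 4] pack destination.
    */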
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
      /* If somebody compiled a pipeline after starting a command buffer the
       * scratch bo may have grown since we started this cmd buffer (and
       * emitted STATE_BASE_ADDRESS). If we're binding that pipeline now,
       * reemit STATE_BASE_ADDRESS so that we use the bigger scratch bo.
       */
      if (cmd_buffer->state.scratch_size < pipeline->total_scratch)
         anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
   }
   /* We emit the binding tables and sampler tables first, then emit push
    * constants and then finally emit binding table and sampler table
    * pointers. It has to happen in this order, since emitting the binding
    * tables may change the push constants (in case of storage images). After
    * emitting push constants, on SKL+ we have to emit the corresponding
    * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
    */
   VkShaderStageFlags dirty = 0;
   if (cmd_buffer->state.descriptors_dirty)
      dirty = gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer);

   if (cmd_buffer->state.push_constants_dirty)
      dirty |= cmd_buffer_flush_push_constants(cmd_buffer);

   if (dirty)
      gen7_cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
      gen8_cmd_buffer_emit_viewport(cmd_buffer);

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
      gen7_cmd_buffer_emit_scissor(cmd_buffer);
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)) {
      __emit_sf_state(cmd_buffer);
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
      bool enable_bias = cmd_buffer->state.dynamic.depth_bias.bias != 0.0f ||
                         cmd_buffer->state.dynamic.depth_bias.slope != 0.0f;

      uint32_t raster_dw[GENX(3DSTATE_RASTER_length)];
      struct GENX(3DSTATE_RASTER) raster = {
         GENX(3DSTATE_RASTER_header),
         .GlobalDepthOffsetEnableSolid = enable_bias,
         .GlobalDepthOffsetEnableWireframe = enable_bias,
         .GlobalDepthOffsetEnablePoint = enable_bias,
         .GlobalDepthOffsetConstant = cmd_buffer->state.dynamic.depth_bias.bias,
         .GlobalDepthOffsetScale = cmd_buffer->state.dynamic.depth_bias.slope,
         .GlobalDepthOffsetClamp = cmd_buffer->state.dynamic.depth_bias.clamp
      };

      GENX(3DSTATE_RASTER_pack)(NULL, raster_dw, &raster);
      anv_batch_emit_merge(&cmd_buffer->batch, raster_dw,
                           pipeline->gen8.raster);
   }
   /* Stencil reference values moved from COLOR_CALC_STATE in gen8 to
    * 3DSTATE_WM_DEPTH_STENCIL in gen9. That means the dirty bits get split
    * across different state packets for gen8 and gen9. We handle that by
    * using a big old #if switch here.
    */
#if ANV_GEN == 8
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GEN8_COLOR_CALC_STATE_length * 4,
                                            64);
      struct GEN8_COLOR_CALC_STATE cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
         .StencilReferenceValue =
            cmd_buffer->state.dynamic.stencil_reference.front,
         .BackFaceStencilReferenceValue =
            cmd_buffer->state.dynamic.stencil_reference.back,
      };
      GEN8_COLOR_CALC_STATE_pack(NULL, cc_state.map, &cc);

      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit(&cmd_buffer->batch,
                     GEN8_3DSTATE_CC_STATE_POINTERS,
                     .ColorCalcStatePointer = cc_state.offset,
                     .ColorCalcStatePointerValid = true);
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
      uint32_t wm_depth_stencil_dw[GEN8_3DSTATE_WM_DEPTH_STENCIL_length];

      struct GEN8_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil = {
         GEN8_3DSTATE_WM_DEPTH_STENCIL_header,

         /* Is this what we need to do? */
         .StencilBufferWriteEnable =
            cmd_buffer->state.dynamic.stencil_write_mask.front != 0,

         .StencilTestMask =
            cmd_buffer->state.dynamic.stencil_compare_mask.front & 0xff,
         .StencilWriteMask =
            cmd_buffer->state.dynamic.stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask =
            cmd_buffer->state.dynamic.stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask =
            cmd_buffer->state.dynamic.stencil_write_mask.back & 0xff,
      };
      GEN8_3DSTATE_WM_DEPTH_STENCIL_pack(NULL, wm_depth_stencil_dw,
                                         &wm_depth_stencil);

      anv_batch_emit_merge(&cmd_buffer->batch, wm_depth_stencil_dw,
                           pipeline->gen8.wm_depth_stencil);
   }
#else
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GEN9_COLOR_CALC_STATE_length * 4,
                                            64);
      struct GEN9_COLOR_CALC_STATE cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
      };
      GEN9_COLOR_CALC_STATE_pack(NULL, cc_state.map, &cc);

      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit(&cmd_buffer->batch,
                     GEN9_3DSTATE_CC_STATE_POINTERS,
                     .ColorCalcStatePointer = cc_state.offset,
                     .ColorCalcStatePointerValid = true);
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      uint32_t dwords[GEN9_3DSTATE_WM_DEPTH_STENCIL_length];
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
      struct GEN9_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil = {
         GEN9_3DSTATE_WM_DEPTH_STENCIL_header,

         .StencilBufferWriteEnable = d->stencil_write_mask.front != 0 ||
                                     d->stencil_write_mask.back != 0,

         .StencilTestMask = d->stencil_compare_mask.front & 0xff,
         .StencilWriteMask = d->stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,

         .StencilReferenceValue = d->stencil_reference.front,
         .BackfaceStencilReferenceValue = d->stencil_reference.back
      };
      GEN9_3DSTATE_WM_DEPTH_STENCIL_pack(NULL, dwords, &wm_depth_stencil);

      anv_batch_emit_merge(&cmd_buffer->batch, dwords,
                           pipeline->gen9.wm_depth_stencil);
   }
#endif
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_INDEX_BUFFER)) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF),
                     .IndexedDrawCutIndexEnable = pipeline->primitive_restart,
                     .CutIndex = cmd_buffer->state.restart_index);
   }

   cmd_buffer->state.vb_dirty &= ~vb_emit;
   cmd_buffer->state.dirty = 0;
}
void genX(CmdBindIndexBuffer)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    VkIndexType                                 indexType)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   static const uint32_t vk_to_gen_index_type[] = {
      [VK_INDEX_TYPE_UINT16] = INDEX_WORD,
      [VK_INDEX_TYPE_UINT32] = INDEX_DWORD,
   };

   static const uint32_t restart_index_for_type[] = {
      [VK_INDEX_TYPE_UINT16] = UINT16_MAX,
      [VK_INDEX_TYPE_UINT32] = UINT32_MAX,
   };

   cmd_buffer->state.restart_index = restart_index_for_type[indexType];

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER),
                  .IndexFormat = vk_to_gen_index_type[indexType],
                  .MemoryObjectControlState = GENX(MOCS),
                  .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
                  .BufferSize = buffer->size - offset);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
}
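/* For illustration, from the application side: a call such as
 * vkCmdBindIndexBuffer(cmd, buffer, 0, VK_INDEX_TYPE_UINT16) lands here,
 * sets restart_index to UINT16_MAX, and programs 3DSTATE_INDEX_BUFFER, so a
 * 0xffff index cuts the primitive once 3DSTATE_VF enables primitive restart.
 */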
static VkResult
flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = anv_cmd_buffer_emit_samplers(cmd_buffer,
                                         MESA_SHADER_COMPUTE, &samplers);
   if (result != VK_SUCCESS)
      return result;

   result = anv_cmd_buffer_emit_binding_table(cmd_buffer,
                                              MESA_SHADER_COMPUTE, &surfaces);
   if (result != VK_SUCCESS)
      return result;

   struct anv_state push_state = anv_cmd_buffer_cs_push_constants(cmd_buffer);

   const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
   unsigned push_constant_data_size =
      (prog_data->nr_params + local_id_dwords) * 4;
   unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
   unsigned push_constant_regs = reg_aligned_constant_size / 32;
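   /* Worked example with hypothetical numbers: for nr_params = 4 and one
    * local-invocation-ID register, local_id_dwords = 8, so
    * push_constant_data_size = (4 + 8) * 4 = 48 bytes,
    * reg_aligned_constant_size = ALIGN(48, 32) = 64, and
    * push_constant_regs = 64 / 32 = 2 (each register holds 32 bytes).
    */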
   if (push_state.alloc_size) {
      anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD),
                     .CURBETotalDataLength = push_state.alloc_size,
                     .CURBEDataStartAddress = push_state.offset);
   }
   assert(prog_data->total_shared <= 64 * 1024);
   uint32_t slm_size = 0;
   if (prog_data->total_shared > 0) {
      /* slm_size is in 4k increments, but must be a power of 2. */
      slm_size = 4 * 1024;
      while (slm_size < prog_data->total_shared)
         slm_size <<= 1;
      slm_size /= 4 * 1024;
   }
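   /* Worked example: total_shared = 5 KiB rounds up through 4096 -> 8192,
    * and 8192 / 4096 = 2, so SharedLocalMemorySize below is programmed in
    * power-of-two multiples of 4k (1, 2, 4, 8 or 16).
    */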
   struct anv_state state =
      anv_state_pool_emit(&device->dynamic_state_pool,
                          GENX(INTERFACE_DESCRIPTOR_DATA), 64,
                          .KernelStartPointer = pipeline->cs_simd,
                          .KernelStartPointerHigh = 0,
                          .BindingTablePointer = surfaces.offset,
                          .BindingTableEntryCount = 0,
                          .SamplerStatePointer = samplers.offset,
                          .ConstantIndirectURBEntryReadLength = push_constant_regs,
                          .ConstantURBEntryReadOffset = 0,
                          .BarrierEnable = cs_prog_data->uses_barrier,
                          .SharedLocalMemorySize = slm_size,
                          .NumberofThreadsinGPGPUThreadGroup =
                             pipeline->cs_thread_width_max);

   uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
   anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD),
                  .InterfaceDescriptorTotalLength = size,
                  .InterfaceDescriptorDataStartAddress = state.offset);

   return VK_SUCCESS;
}
void
genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   VkResult result;

   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);

   bool needs_slm = pipeline->cs_prog_data.base.total_shared > 0;
   config_l3(cmd_buffer, needs_slm);

   if (cmd_buffer->state.current_pipeline != GPGPU) {
      /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
       *
       *   Software must clear the COLOR_CALC_STATE Valid field in
       *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
       *   with Pipeline Select set to GPGPU.
       *
       * The internal hardware docs recommend the same workaround for Gen9
       * hardware too.
       */
      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_CC_STATE_POINTERS));

      anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT),
                     .PipelineSelection = GPGPU);
      cmd_buffer->state.current_pipeline = GPGPU;
   }

   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
      result = flush_compute_descriptor_set(cmd_buffer);
      assert(result == VK_SUCCESS);
      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
   }

   cmd_buffer->state.compute_dirty = 0;
}
static void
emit_ps_depth_count(struct anv_batch *batch,
                    struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WritePSDepthCount,
                  .DepthStallEnable = true,
                  .Address = { bo, offset });
}
static void
emit_query_availability(struct anv_batch *batch,
                        struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WriteImmediateData,
                  .Address = { bo, offset },
                  .ImmediateData = 1);
}
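/* Slot layout implied by the emits in this file: for an occlusion query the
 * begin depth count lands at slot offset 0, the end depth count at offset 8,
 * and the availability dword at offset 16; CmdCopyQueryPoolResults below
 * reads back the same offsets.
 */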
void genX(CmdBeginQuery)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query,
    VkQueryControlFlags                         flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   /* Workaround: When meta uses the pipeline with the VS disabled, it seems
    * that the pipelining of the depth write breaks. What we see is that
    * samples from the render pass clear leak into the first query
    * immediately after the clear. Doing a pipecontrol with a post-sync
    * operation and DepthStallEnable seems to work around the issue.
    */
   if (cmd_buffer->state.need_query_wa) {
      cmd_buffer->state.need_query_wa = false;
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DepthCacheFlushEnable = true,
                     .DepthStallEnable = true);
   }

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
                          query * sizeof(struct anv_query_pool_slot));
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      unreachable("");
   }
}
void genX(CmdEndQuery)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
                          query * sizeof(struct anv_query_pool_slot) + 8);

      emit_query_availability(&cmd_buffer->batch, &pool->bo,
                              query * sizeof(struct anv_query_pool_slot) + 16);
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      unreachable("");
   }
}
#define TIMESTAMP 0x2358

void genX(CmdWriteTimestamp)(
    VkCommandBuffer                             commandBuffer,
    VkPipelineStageFlagBits                     pipelineStage,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   uint32_t offset = query * sizeof(struct anv_query_pool_slot);

   assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);

   switch (pipelineStage) {
   case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
                     .RegisterAddress = TIMESTAMP,
                     .MemoryAddress = { &pool->bo, offset });
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
                     .RegisterAddress = TIMESTAMP + 4,
                     .MemoryAddress = { &pool->bo, offset + 4 });
      break;
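      /* The two MI_STORE_REGISTER_MEM packets above copy the low and high
       * dwords of the 64-bit TIMESTAMP register (0x2358) separately; the two
       * halves are not captured atomically.
       */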
   default:
      /* Everything else is bottom-of-pipe */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DestinationAddressType = DAT_PPGTT,
                     .PostSyncOperation = WriteTimestamp,
                     .Address = { &pool->bo, offset });
      break;
   }

   emit_query_availability(&cmd_buffer->batch, &pool->bo, query + 16);
}
#define alu_opcode(v)   __gen_uint((v), 20, 31)
#define alu_operand1(v) __gen_uint((v), 10, 19)
#define alu_operand2(v) __gen_uint((v), 0, 9)
#define alu(opcode, operand1, operand2) \
   alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)

#define OPCODE_NOOP      0x000
#define OPCODE_LOAD      0x080
#define OPCODE_LOADINV   0x480
#define OPCODE_LOAD0     0x081
#define OPCODE_LOAD1     0x481
#define OPCODE_ADD       0x100
#define OPCODE_SUB       0x101
#define OPCODE_AND       0x102
#define OPCODE_OR        0x103
#define OPCODE_XOR       0x104
#define OPCODE_STORE     0x180
#define OPCODE_STOREINV  0x580

#define OPERAND_R0   0x00
#define OPERAND_R1   0x01
#define OPERAND_R2   0x02
#define OPERAND_R3   0x03
#define OPERAND_R4   0x04
#define OPERAND_SRCA 0x20
#define OPERAND_SRCB 0x21
#define OPERAND_ACCU 0x31
#define OPERAND_ZF   0x32
#define OPERAND_CF   0x33

#define CS_GPR(n) (0x2600 + (n) * 8)
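/* Worked example of the encoding above:
 *   alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1)
 *     = 0x080 << 20 | 0x20 << 10 | 0x01
 *     = 0x08008001
 * i.e. a single MI_MATH instruction dword that loads GPR1 into source A.
 */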
static void
emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
                      struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
                  .RegisterAddress = reg,
                  .MemoryAddress = { bo, offset });
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
                  .RegisterAddress = reg + 4,
                  .MemoryAddress = { bo, offset + 4 });
}
static void
store_query_result(struct anv_batch *batch, uint32_t reg,
                   struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
{
   anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
                  .RegisterAddress = reg,
                  .MemoryAddress = { bo, offset });

   if (flags & VK_QUERY_RESULT_64_BIT)
      anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
                     .RegisterAddress = reg + 4,
                     .MemoryAddress = { bo, offset + 4 });
}
void genX(CmdCopyQueryPoolResults)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    firstQuery,
    uint32_t                                    queryCount,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset,
    VkDeviceSize                                destStride,
    VkQueryResultFlags                          flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
   uint32_t slot_offset, dst_offset;

   if (flags & VK_QUERY_RESULT_WAIT_BIT)
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .CommandStreamerStallEnable = true,
                     .StallAtPixelScoreboard = true);

   dst_offset = buffer->offset + destOffset;
   for (uint32_t i = 0; i < queryCount; i++) {
      slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);

      switch (pool->type) {
      case VK_QUERY_TYPE_OCCLUSION:
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(0), &pool->bo, slot_offset);
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(1), &pool->bo, slot_offset + 8);

         /* FIXME: We need to clamp the result for 32 bit. */
         uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
         dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
         dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
         dw[3] = alu(OPCODE_SUB, 0, 0);
         dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
         break;
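         /* The MI_MATH program above computes end - begin: it loads the end
          * depth count (GPR1) into source A and the begin count (GPR0) into
          * source B, subtracts, and stores the accumulator to GPR2, which
          * store_query_result() then writes to the destination buffer.
          */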
      case VK_QUERY_TYPE_TIMESTAMP:
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(2), &pool->bo, slot_offset);
         break;

      default:
         unreachable("unhandled query type");
      }

      store_query_result(&cmd_buffer->batch,
                         CS_GPR(2), buffer->bo, dst_offset, flags);

      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
         emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
                               &pool->bo, slot_offset + 16);
         if (flags & VK_QUERY_RESULT_64_BIT)
            store_query_result(&cmd_buffer->batch,
                               CS_GPR(0), buffer->bo, dst_offset + 8, flags);
         else
            store_query_result(&cmd_buffer->batch,
                               CS_GPR(0), buffer->bo, dst_offset + 4, flags);
      }

      dst_offset += destStride;
   }
}
void genX(CmdSetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     _event,
    VkPipelineStageFlags                        stageMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_event, event, _event);

   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WriteImmediateData,
                  .Address = {
                     &cmd_buffer->device->dynamic_state_block_pool.bo,
                     event->state.offset
                  },
                  .ImmediateData = VK_EVENT_SET);
}
void genX(CmdResetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     _event,
    VkPipelineStageFlags                        stageMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_event, event, _event);

   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WriteImmediateData,
                  .Address = {
                     &cmd_buffer->device->dynamic_state_block_pool.bo,
                     event->state.offset
                  },
                  .ImmediateData = VK_EVENT_RESET);
}
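/* Sketch of how the wait below pairs with the two entrypoints above:
 * MI_SEMAPHORE_WAIT in polling mode with COMPARE_SAD_EQUAL_SDD keeps
 * re-reading the event's dword in the dynamic state pool and stalls the
 * command streamer until it equals VK_EVENT_SET, i.e. until the
 * PIPE_CONTROL write from CmdSetEvent lands.
 */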
void genX(CmdWaitEvents)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    eventCount,
    const VkEvent*                              pEvents,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    uint32_t                                    memoryBarrierCount,
    const VkMemoryBarrier*                      pMemoryBarriers,
    uint32_t                                    bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
    uint32_t                                    imageMemoryBarrierCount,
    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   for (uint32_t i = 0; i < eventCount; i++) {
      ANV_FROM_HANDLE(anv_event, event, pEvents[i]);

      anv_batch_emit(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT),
                     .WaitMode = PollingMode,
                     .CompareOperation = COMPARE_SAD_EQUAL_SDD,
                     .SemaphoreDataDword = VK_EVENT_SET,
                     .SemaphoreAddress = {
                        &cmd_buffer->device->dynamic_state_block_pool.bo,
                        event->state.offset
                     });
   }

   genX(CmdPipelineBarrier)(commandBuffer, srcStageMask, destStageMask,
                            false, /* byRegion */
                            memoryBarrierCount, pMemoryBarriers,
                            bufferMemoryBarrierCount, pBufferMemoryBarriers,
                            imageMemoryBarrierCount, pImageMemoryBarriers);
}