/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdbool.h>

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"
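/* Every graphics stage gets its own 3DSTATE_CONSTANT_* packet, but they all
 * share the 3DSTATE_CONSTANT_VS bit layout; only the 3D command sub-opcode
 * differs.  That is why the helper below can emit the VS variant for every
 * stage and simply patch _3DCommandSubOpcode from a per-stage lookup table.
 */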
static VkShaderStageFlags
cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   static const uint32_t push_constant_opcodes[] = {
      [MESA_SHADER_VERTEX]    = 21,
      [MESA_SHADER_TESS_CTRL] = 25, /* HS */
      [MESA_SHADER_TESS_EVAL] = 26, /* DS */
      [MESA_SHADER_GEOMETRY]  = 22,
      [MESA_SHADER_FRAGMENT]  = 23,
      [MESA_SHADER_COMPUTE]   = 0,
   };

   VkShaderStageFlags flushed = 0;

   anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
      if (stage == MESA_SHADER_COMPUTE)
         continue;

      struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);

      if (state.offset == 0) {
         /* Clear the stage's push constants with an empty packet. */
         anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS),
                        ._3DCommandSubOpcode = push_constant_opcodes[stage]);
      } else {
         anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS),
                        ._3DCommandSubOpcode = push_constant_opcodes[stage],
                        .ConstantBody = {
                           .PointerToConstantBuffer2 = {
                              &cmd_buffer->device->dynamic_state_block_pool.bo,
                              state.offset,
                           },
                           .ConstantBuffer2ReadLength =
                              DIV_ROUND_UP(state.alloc_size, 32),
                        });
      }

      flushed |= mesa_to_vk_shader_stage(stage);
   }

   cmd_buffer->state.push_constants_dirty &= ~VK_SHADER_STAGE_ALL_GRAPHICS;

   return flushed;
}
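/* The SF_CLIP viewport matrix below is the usual NDC-to-window transform:
 *
 *    x_window = m00 * x_ndc + m30 = (width / 2) * x_ndc + (x + width / 2)
 *    y_window = m11 * y_ndc + m31 = (height / 2) * y_ndc + (y + height / 2)
 *
 * so NDC x in [-1, 1] maps onto [x, x + width].  (Illustrative derivation
 * from the field values used below, not quoted from the PRM.)
 */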
static void
emit_viewport_state(struct anv_cmd_buffer *cmd_buffer,
                    uint32_t count, const VkViewport *viewports)
{
   struct anv_state sf_clip_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 64, 64);
   struct anv_state cc_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 8, 32);

   for (uint32_t i = 0; i < count; i++) {
      const VkViewport *vp = &viewports[i];

      /* The gen7 state struct has just the matrix and guardband fields, the
       * gen8 struct adds the min/max viewport fields. */
      struct GENX(SF_CLIP_VIEWPORT) sf_clip_viewport = {
         .ViewportMatrixElementm00 = vp->width / 2,
         .ViewportMatrixElementm11 = vp->height / 2,
         .ViewportMatrixElementm22 = 1.0,
         .ViewportMatrixElementm30 = vp->x + vp->width / 2,
         .ViewportMatrixElementm31 = vp->y + vp->height / 2,
         .ViewportMatrixElementm32 = 0.0,
         .XMinClipGuardband = -1.0f,
         .XMaxClipGuardband = 1.0f,
         .YMinClipGuardband = -1.0f,
         .YMaxClipGuardband = 1.0f,
         .XMinViewPort = vp->x,
         .XMaxViewPort = vp->x + vp->width - 1,
         .YMinViewPort = vp->y,
         .YMaxViewPort = vp->y + vp->height - 1,
      };

      struct GENX(CC_VIEWPORT) cc_viewport = {
         .MinimumDepth = vp->minDepth,
         .MaximumDepth = vp->maxDepth,
      };

      GENX(SF_CLIP_VIEWPORT_pack)(NULL, sf_clip_state.map + i * 64,
                                  &sf_clip_viewport);
      GENX(CC_VIEWPORT_pack)(NULL, cc_state.map + i * 8, &cc_viewport);
   }

   if (!cmd_buffer->device->info.has_llc) {
      anv_state_clflush(sf_clip_state);
      anv_state_clflush(cc_state);
   }

   anv_batch_emit(&cmd_buffer->batch,
                  GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC),
                  .CCViewportPointer = cc_state.offset);
   anv_batch_emit(&cmd_buffer->batch,
                  GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP),
                  .SFClipViewportPointer = sf_clip_state.offset);
}
void
gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.dynamic.viewport.count > 0) {
      emit_viewport_state(cmd_buffer, cmd_buffer->state.dynamic.viewport.count,
                          cmd_buffer->state.dynamic.viewport.viewports);
   } else {
      /* If viewport count is 0, this is taken to mean "use the default" */
      emit_viewport_state(cmd_buffer, 1,
                          &(VkViewport) {
                             .x = 0.0f,
                             .y = 0.0f,
                             .width = cmd_buffer->state.framebuffer->width,
                             .height = cmd_buffer->state.framebuffer->height,
                             .minDepth = 0.0f,
                             .maxDepth = 1.0f,
                          });
   }
}
static void
emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM),
                  .RegisterOffset = reg,
                  .DataDWord = imm);
}

#define GEN8_L3CNTLREG 0x7034
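/* MI_LOAD_REGISTER_IMM makes the command streamer write a 32-bit immediate
 * into an MMIO register; config_l3() below uses it to reprogram the L3
 * control register in between the two required stalling flushes.
 */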
static void
config_l3(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
{
   /* References for GL state:
    *
    * - commits e307cfa..228d5a3
    * - src/mesa/drivers/dri/i965/gen7_l3_state.c
    */

   uint32_t val = enable_slm ?
      /* All = 48 ways; URB = 16 ways; DC and RO = 0, SLM = 1 */
      0x60000021 :
      /* All = 48 ways; URB = 48 ways; DC, RO and SLM = 0 */
      0x60000060;
   bool changed = cmd_buffer->state.current_l3_config != val;

   if (changed) {
      /* According to the hardware docs, the L3 partitioning can only be
       * changed while the pipeline is completely drained and the caches are
       * flushed, which involves a first PIPE_CONTROL flush which stalls the
       * pipeline and initiates invalidation of the relevant caches...
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .TextureCacheInvalidationEnable = true,
                     .ConstantCacheInvalidationEnable = true,
                     .InstructionCacheInvalidateEnable = true,
                     .DCFlushEnable = true,
                     .PostSyncOperation = NoWrite,
                     .CommandStreamerStallEnable = true);

      /* ...followed by a second stalling flush which guarantees that
       * invalidation is complete when the L3 configuration registers are
       * modified.
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DCFlushEnable = true,
                     .PostSyncOperation = NoWrite,
                     .CommandStreamerStallEnable = true);

      emit_lri(&cmd_buffer->batch, GEN8_L3CNTLREG, val);
      cmd_buffer->state.current_l3_config = val;
   }
}
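/* Caching the last value written in state.current_l3_config means that a
 * sequence of draws or dispatches which keeps the same SLM setting skips
 * the two full-pipeline stalls above entirely.
 */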
static void
__emit_genx_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t sf_dw[GENX(3DSTATE_SF_length)];
   struct GENX(3DSTATE_SF) sf = {
      GENX(3DSTATE_SF_header),
      .LineWidth = cmd_buffer->state.dynamic.line_width,
   };
   GENX(3DSTATE_SF_pack)(NULL, sf_dw, &sf);
   anv_batch_emit_merge(&cmd_buffer->batch, sf_dw,
                        cmd_buffer->state.pipeline->gen8.sf);
}
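/* Cherryview is a gen8 part, but it takes its line width in the gen9
 * 3DSTATE_SF location, so we also pull in the gen9 pack functions and pick
 * the right variant at runtime in __emit_sf_state() below.  Note that the
 * merge still uses the gen8 pipeline dwords; this presumably works because
 * the pipeline leaves the dynamic line-width bits zero.
 */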
#include "genxml/gen9_pack.h"
static void
__emit_gen9_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t sf_dw[GENX(3DSTATE_SF_length)];
   struct GEN9_3DSTATE_SF sf = {
      GEN9_3DSTATE_SF_header,
      .LineWidth = cmd_buffer->state.dynamic.line_width,
   };
   GEN9_3DSTATE_SF_pack(NULL, sf_dw, &sf);
   anv_batch_emit_merge(&cmd_buffer->batch, sf_dw,
                        cmd_buffer->state.pipeline->gen8.sf);
}

static void
__emit_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->device->info.is_cherryview)
      __emit_gen9_sf_state(cmd_buffer);
   else
      __emit_genx_sf_state(cmd_buffer);
}
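/* The vkCmd* entry points mostly just record state into anv_cmd_state and
 * set ANV_CMD_DIRTY_* / descriptors_dirty / push_constants_dirty bits; this
 * function then re-emits only the packets whose inputs actually changed and
 * clears the dirty bits at the end.
 */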
void
genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   uint32_t *p;

   uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;

   assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);

   config_l3(cmd_buffer, false);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   if (vb_emit) {
      const uint32_t num_buffers = __builtin_popcount(vb_emit);
      const uint32_t num_dwords = 1 + num_buffers * 4;

      p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
                          GENX(3DSTATE_VERTEX_BUFFERS));
      uint32_t vb, i = 0;
      for_each_bit(vb, vb_emit) {
         struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
         uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;

         struct GENX(VERTEX_BUFFER_STATE) state = {
            .VertexBufferIndex = vb,
            .MemoryObjectControlState = GENX(MOCS),
            .AddressModifyEnable = true,
            .BufferPitch = pipeline->binding_stride[vb],
            .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
            .BufferSize = buffer->size - offset,
         };

         GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4],
                                        &state);
         i++;
      }
   }
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
      /* If somebody compiled a pipeline after starting a command buffer the
       * scratch bo may have grown since we started this cmd buffer (and
       * emitted STATE_BASE_ADDRESS).  If we're binding that pipeline now,
       * reemit STATE_BASE_ADDRESS so that we use the bigger scratch bo. */
      if (cmd_buffer->state.scratch_size < pipeline->total_scratch)
         anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

      /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
       *
       *    "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
       *    the next 3DPRIMITIVE command after programming the
       *    3DSTATE_PUSH_CONSTANT_ALLOC_VS"
       *
       * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
       * pipeline setup, we need to dirty push constants.
       */
      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
   }

   /* We emit the binding tables and sampler tables first, then emit push
    * constants and then finally emit binding table and sampler table
    * pointers.  It has to happen in this order, since emitting the binding
    * tables may change the push constants (in case of storage images).
    * After emitting push constants, on SKL+ we have to emit the
    * corresponding 3DSTATE_BINDING_TABLE_POINTER_* for the push constants
    * to take effect.
    */
   uint32_t dirty = 0;
   if (cmd_buffer->state.descriptors_dirty)
      dirty = gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer);

   if (cmd_buffer->state.push_constants_dirty)
      dirty |= cmd_buffer_flush_push_constants(cmd_buffer);

   if (dirty)
      gen7_cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
      gen8_cmd_buffer_emit_viewport(cmd_buffer);

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
      gen7_cmd_buffer_emit_scissor(cmd_buffer);

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)) {
      __emit_sf_state(cmd_buffer);
   }
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
      bool enable_bias = cmd_buffer->state.dynamic.depth_bias.bias != 0.0f ||
                         cmd_buffer->state.dynamic.depth_bias.slope != 0.0f;

      uint32_t raster_dw[GENX(3DSTATE_RASTER_length)];
      struct GENX(3DSTATE_RASTER) raster = {
         GENX(3DSTATE_RASTER_header),
         .GlobalDepthOffsetEnableSolid = enable_bias,
         .GlobalDepthOffsetEnableWireframe = enable_bias,
         .GlobalDepthOffsetEnablePoint = enable_bias,
         .GlobalDepthOffsetConstant = cmd_buffer->state.dynamic.depth_bias.bias,
         .GlobalDepthOffsetScale = cmd_buffer->state.dynamic.depth_bias.slope,
         .GlobalDepthOffsetClamp = cmd_buffer->state.dynamic.depth_bias.clamp,
      };
      GENX(3DSTATE_RASTER_pack)(NULL, raster_dw, &raster);
      anv_batch_emit_merge(&cmd_buffer->batch, raster_dw,
                           pipeline->gen8.raster);
   }
   /* Stencil reference values moved from COLOR_CALC_STATE in gen8 to
    * 3DSTATE_WM_DEPTH_STENCIL in gen9.  That means the dirty bits get split
    * across different state packets for gen8 and gen9.  We handle that by
    * using a big old #if switch here.
    */
#if GEN_GEN == 8
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GENX(COLOR_CALC_STATE_length) * 4,
                                            64);
      struct GENX(COLOR_CALC_STATE) cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
         .StencilReferenceValue = d->stencil_reference.front,
         .BackFaceStencilReferenceValue = d->stencil_reference.back,
      };
      GENX(COLOR_CALC_STATE_pack)(NULL, cc_state.map, &cc);

      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_CC_STATE_POINTERS),
                     .ColorCalcStatePointer = cc_state.offset,
                     .ColorCalcStatePointerValid = true);
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
      uint32_t wm_depth_stencil_dw[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;

      struct GENX(3DSTATE_WM_DEPTH_STENCIL) wm_depth_stencil = {
         GENX(3DSTATE_WM_DEPTH_STENCIL_header),

         .StencilBufferWriteEnable = d->stencil_write_mask.front != 0 ||
                                     d->stencil_write_mask.back != 0,

         .StencilTestMask = d->stencil_compare_mask.front & 0xff,
         .StencilWriteMask = d->stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,
      };
      GENX(3DSTATE_WM_DEPTH_STENCIL_pack)(NULL, wm_depth_stencil_dw,
                                          &wm_depth_stencil);

      anv_batch_emit_merge(&cmd_buffer->batch, wm_depth_stencil_dw,
                           pipeline->gen8.wm_depth_stencil);
   }
#else
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GEN9_COLOR_CALC_STATE_length * 4,
                                            64);
      struct GEN9_COLOR_CALC_STATE cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
      };
      GEN9_COLOR_CALC_STATE_pack(NULL, cc_state.map, &cc);

      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit(&cmd_buffer->batch,
                     GEN9_3DSTATE_CC_STATE_POINTERS,
                     .ColorCalcStatePointer = cc_state.offset,
                     .ColorCalcStatePointerValid = true);
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      uint32_t dwords[GEN9_3DSTATE_WM_DEPTH_STENCIL_length];
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
      struct GEN9_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil = {
         GEN9_3DSTATE_WM_DEPTH_STENCIL_header,

         .StencilBufferWriteEnable = d->stencil_write_mask.front != 0 ||
                                     d->stencil_write_mask.back != 0,

         .StencilTestMask = d->stencil_compare_mask.front & 0xff,
         .StencilWriteMask = d->stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,

         .StencilReferenceValue = d->stencil_reference.front,
         .BackfaceStencilReferenceValue = d->stencil_reference.back,
      };
      GEN9_3DSTATE_WM_DEPTH_STENCIL_pack(NULL, dwords, &wm_depth_stencil);

      anv_batch_emit_merge(&cmd_buffer->batch, dwords,
                           pipeline->gen9.wm_depth_stencil);
   }
#endif
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_INDEX_BUFFER)) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF),
                     .IndexedDrawCutIndexEnable = pipeline->primitive_restart,
                     .CutIndex = cmd_buffer->state.restart_index);
   }

   cmd_buffer->state.vb_dirty &= ~vb_emit;
   cmd_buffer->state.dirty = 0;
}
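/* Vulkan pins the primitive restart index to the all-ones value for the
 * bound index type, so CmdBindIndexBuffer below only has to stash it;
 * cmd_buffer_flush_state() programs it into 3DSTATE_VF together with
 * IndexedDrawCutIndexEnable from the pipeline.
 */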
void genX(CmdBindIndexBuffer)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    VkIndexType                                 indexType)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   static const uint32_t vk_to_gen_index_type[] = {
      [VK_INDEX_TYPE_UINT16] = INDEX_WORD,
      [VK_INDEX_TYPE_UINT32] = INDEX_DWORD,
   };

   static const uint32_t restart_index_for_type[] = {
      [VK_INDEX_TYPE_UINT16] = UINT16_MAX,
      [VK_INDEX_TYPE_UINT32] = UINT32_MAX,
   };

   cmd_buffer->state.restart_index = restart_index_for_type[indexType];

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER),
                  .IndexFormat = vk_to_gen_index_type[indexType],
                  .MemoryObjectControlState = GENX(MOCS),
                  .BufferStartingAddress = { buffer->bo,
                                             buffer->offset + offset },
                  .BufferSize = buffer->size - offset);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
}
static VkResult
flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = anv_cmd_buffer_emit_samplers(cmd_buffer,
                                         MESA_SHADER_COMPUTE, &samplers);
   if (result != VK_SUCCESS)
      return result;
   result = anv_cmd_buffer_emit_binding_table(cmd_buffer,
                                              MESA_SHADER_COMPUTE, &surfaces);
   if (result != VK_SUCCESS)
      return result;

   struct anv_state push_state = anv_cmd_buffer_cs_push_constants(cmd_buffer);

   const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
   unsigned push_constant_data_size =
      (prog_data->nr_params + local_id_dwords) * 4;
   unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
   unsigned push_constant_regs = reg_aligned_constant_size / 32;
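   /* Worked example with made-up numbers: nr_params = 6 uniform dwords plus
    * one local-invocation-ID register (local_id_dwords = 8) gives
    * (6 + 8) * 4 = 56 bytes of push data; ALIGN(56, 32) = 64, so
    * push_constant_regs = 2 GRFs of CURBE data per thread.
    */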
   if (push_state.alloc_size) {
      anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD),
                     .CURBETotalDataLength = push_state.alloc_size,
                     .CURBEDataStartAddress = push_state.offset);
   }

   assert(prog_data->total_shared <= 64 * 1024);
   uint32_t slm_size = 0;
   if (prog_data->total_shared > 0) {
      /* slm_size is in 4k increments, but must be a power of 2. */
      slm_size = 4 * 1024;
      while (slm_size < prog_data->total_shared)
         slm_size <<= 1;
      slm_size /= 4 * 1024;
   }
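   /* E.g. total_shared = 5 KiB: slm_size grows 4k -> 8k in the loop above,
    * and 8192 / 4096 = 2 is what gets programmed as SharedLocalMemorySize
    * below (a power-of-two count of 4k blocks, per the comment above).
    */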
   struct anv_state state =
      anv_state_pool_emit(&device->dynamic_state_pool,
                          GENX(INTERFACE_DESCRIPTOR_DATA), 64,
                          .KernelStartPointer = pipeline->cs_simd,
                          .KernelStartPointerHigh = 0,
                          .BindingTablePointer = surfaces.offset,
                          .BindingTableEntryCount = 0,
                          .SamplerStatePointer = samplers.offset,
                          .ConstantIndirectURBEntryReadLength = push_constant_regs,
                          .ConstantURBEntryReadOffset = 0,
                          .BarrierEnable = cs_prog_data->uses_barrier,
                          .SharedLocalMemorySize = slm_size,
                          .NumberofThreadsinGPGPUThreadGroup =
                             pipeline->cs_thread_width_max);

   uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
   anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD),
                  .InterfaceDescriptorTotalLength = size,
                  .InterfaceDescriptorDataStartAddress = state.offset);

   return VK_SUCCESS;
}
void
genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   VkResult result;

   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);

   bool needs_slm = pipeline->cs_prog_data.base.total_shared > 0;
   config_l3(cmd_buffer, needs_slm);

   if (cmd_buffer->state.current_pipeline != GPGPU) {
      /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
       *
       *   Software must clear the COLOR_CALC_STATE Valid field in
       *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
       *   with Pipeline Select set to GPGPU.
       *
       * The internal hardware docs recommend the same workaround for Gen9
       * hardware too.
       */
      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_CC_STATE_POINTERS));

      anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT),
                     .PipelineSelection = GPGPU);
      cmd_buffer->state.current_pipeline = GPGPU;
   }

   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
      result = flush_compute_descriptor_set(cmd_buffer);
      assert(result == VK_SUCCESS);
      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
   }

   cmd_buffer->state.compute_dirty = 0;
}
static void
emit_ps_depth_count(struct anv_batch *batch,
                    struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WritePSDepthCount,
                  .DepthStallEnable = true,
                  .Address = { bo, offset });
}
static void
emit_query_availability(struct anv_batch *batch,
                        struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WriteImmediateData,
                  .Address = { bo, offset },
                  .ImmediateData = 1);
}
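/* Each anv_query_pool_slot holds a begin and an end value at offsets 0 and
 * 8 plus an availability word at offset 16, which is why Begin/End/Copy
 * below address the pool at query * sizeof(slot) plus 0, 8 or 16.  (Layout
 * inferred from the offsets used in this file, not restated from
 * anv_private.h.)
 */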
void genX(CmdBeginQuery)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query,
    VkQueryControlFlags                         flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   /* Workaround: When meta uses the pipeline with the VS disabled, it seems
    * that the pipelining of the depth write breaks.  What we see is that
    * samples from the render pass clear leak into the first query
    * immediately after the clear.  Doing a pipecontrol with a post-sync
    * operation and DepthStallEnable seems to work around the issue.
    */
   if (cmd_buffer->state.need_query_wa) {
      cmd_buffer->state.need_query_wa = false;
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DepthCacheFlushEnable = true,
                     .DepthStallEnable = true);
   }

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
                          query * sizeof(struct anv_query_pool_slot));
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      unreachable("unhandled query type");
   }
}
void genX(CmdEndQuery)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
                          query * sizeof(struct anv_query_pool_slot) + 8);

      emit_query_availability(&cmd_buffer->batch, &pool->bo,
                              query * sizeof(struct anv_query_pool_slot) + 16);
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      unreachable("unhandled query type");
   }
}
#define TIMESTAMP 0x2358

void genX(CmdWriteTimestamp)(
    VkCommandBuffer                             commandBuffer,
    VkPipelineStageFlagBits                     pipelineStage,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   uint32_t offset = query * sizeof(struct anv_query_pool_slot);

   assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);

   switch (pipelineStage) {
   case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
                     .RegisterAddress = TIMESTAMP,
                     .MemoryAddress = { &pool->bo, offset });
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
                     .RegisterAddress = TIMESTAMP + 4,
                     .MemoryAddress = { &pool->bo, offset + 4 });
      break;

   default:
      /* Everything else is bottom-of-pipe */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DestinationAddressType = DAT_PPGTT,
                     .PostSyncOperation = WriteTimestamp,
                     .Address = { &pool->bo, offset });
      break;
   }

   emit_query_availability(&cmd_buffer->batch, &pool->bo, offset + 16);
}
#define alu_opcode(v)   __gen_uint((v), 20, 31)
#define alu_operand1(v) __gen_uint((v), 10, 19)
#define alu_operand2(v) __gen_uint((v), 0, 9)
#define alu(opcode, operand1, operand2) \
   alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)

#define OPCODE_NOOP     0x000
#define OPCODE_LOAD     0x080
#define OPCODE_LOADINV  0x480
#define OPCODE_LOAD0    0x081
#define OPCODE_LOAD1    0x481
#define OPCODE_ADD      0x100
#define OPCODE_SUB      0x101
#define OPCODE_AND      0x102
#define OPCODE_OR       0x103
#define OPCODE_XOR      0x104
#define OPCODE_STORE    0x180
#define OPCODE_STOREINV 0x580

#define OPERAND_R0   0x00
#define OPERAND_R1   0x01
#define OPERAND_R2   0x02
#define OPERAND_R3   0x03
#define OPERAND_R4   0x04
#define OPERAND_SRCA 0x20
#define OPERAND_SRCB 0x21
#define OPERAND_ACCU 0x31
#define OPERAND_ZF   0x32
#define OPERAND_CF   0x33

#define CS_GPR(n) (0x2600 + (n) * 8)
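/* An MI_MATH ALU instruction is one dword: opcode in bits 31:20 and the two
 * operands in bits 19:10 and 9:0.  For example
 *
 *    alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1)
 *       == (0x080 << 20) | (0x20 << 10) | 0x01
 *
 * loads GPR1 into the ALU's source-A input.  CS_GPR(n) is the MMIO address
 * of 64-bit general-purpose register n, which is why the helpers below load
 * and store it as two 32-bit halves.
 */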
static void
emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
                      struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
                  .RegisterAddress = reg,
                  .MemoryAddress = { bo, offset });
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
                  .RegisterAddress = reg + 4,
                  .MemoryAddress = { bo, offset + 4 });
}
static void
store_query_result(struct anv_batch *batch, uint32_t reg,
                   struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
{
   anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
                  .RegisterAddress = reg,
                  .MemoryAddress = { bo, offset });

   if (flags & VK_QUERY_RESULT_64_BIT)
      anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
                     .RegisterAddress = reg + 4,
                     .MemoryAddress = { bo, offset + 4 });
}
void genX(CmdCopyQueryPoolResults)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    firstQuery,
    uint32_t                                    queryCount,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset,
    VkDeviceSize                                destStride,
    VkQueryResultFlags                          flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
   uint32_t slot_offset, dst_offset;

   if (flags & VK_QUERY_RESULT_WAIT_BIT)
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .CommandStreamerStallEnable = true,
                     .StallAtPixelScoreboard = true);

   dst_offset = buffer->offset + destOffset;
   for (uint32_t i = 0; i < queryCount; i++) {
      slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
      switch (pool->type) {
      case VK_QUERY_TYPE_OCCLUSION:
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(0), &pool->bo, slot_offset);
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(1), &pool->bo, slot_offset + 8);

         /* FIXME: We need to clamp the result for 32 bit. */
         uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
         dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
         dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
         dw[3] = alu(OPCODE_SUB, 0, 0);
         dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
         break;

      case VK_QUERY_TYPE_TIMESTAMP:
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(2), &pool->bo, slot_offset);
         break;

      default:
         unreachable("unhandled query type");
      }

      store_query_result(&cmd_buffer->batch,
                         CS_GPR(2), buffer->bo, dst_offset, flags);

      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
         emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
                               &pool->bo, slot_offset + 16);
         if (flags & VK_QUERY_RESULT_64_BIT)
            store_query_result(&cmd_buffer->batch,
                               CS_GPR(0), buffer->bo, dst_offset + 8, flags);
         else
            store_query_result(&cmd_buffer->batch,
                               CS_GPR(0), buffer->bo, dst_offset + 4, flags);
      }

      dst_offset += destStride;
   }
}
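/* Events are backed by a location in the dynamic state block pool: Set and
 * Reset write VK_EVENT_SET / VK_EVENT_RESET with a post-sync PIPE_CONTROL
 * write, and WaitEvents polls the same location with MI_SEMAPHORE_WAIT
 * until it reads back VK_EVENT_SET.
 */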
void genX(CmdSetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     _event,
    VkPipelineStageFlags                        stageMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_event, event, _event);

   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WriteImmediateData,
                  .Address = {
                     &cmd_buffer->device->dynamic_state_block_pool.bo,
                     event->state.offset,
                  },
                  .ImmediateData = VK_EVENT_SET);
}
)(
873 VkCommandBuffer commandBuffer
,
875 VkPipelineStageFlags stageMask
)
877 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
878 ANV_FROM_HANDLE(anv_event
, event
, _event
);
880 anv_batch_emit(&cmd_buffer
->batch
, GENX(PIPE_CONTROL
),
881 .DestinationAddressType
= DAT_PPGTT
,
882 .PostSyncOperation
= WriteImmediateData
,
884 &cmd_buffer
->device
->dynamic_state_block_pool
.bo
,
887 .ImmediateData
= VK_EVENT_RESET
);
void genX(CmdWaitEvents)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    eventCount,
    const VkEvent*                              pEvents,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    uint32_t                                    memoryBarrierCount,
    const VkMemoryBarrier*                      pMemoryBarriers,
    uint32_t                                    bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
    uint32_t                                    imageMemoryBarrierCount,
    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   for (uint32_t i = 0; i < eventCount; i++) {
      ANV_FROM_HANDLE(anv_event, event, pEvents[i]);

      anv_batch_emit(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT),
                     .WaitMode = PollingMode,
                     .CompareOperation = COMPARE_SAD_EQUAL_SDD,
                     .SemaphoreDataDword = VK_EVENT_SET,
                     .SemaphoreAddress = {
                        &cmd_buffer->device->dynamic_state_block_pool.bo,
                        event->state.offset,
                     });
   }

   genX(CmdPipelineBarrier)(commandBuffer, srcStageMask, destStageMask,
                            false, /* byRegion */
                            memoryBarrierCount, pMemoryBarriers,
                            bufferMemoryBarrierCount, pBufferMemoryBarriers,
                            imageMemoryBarrierCount, pImageMemoryBarriers);
}