/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"
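
/* Flush dirty push constants for all active graphics stages.  The
 * 3DSTATE_CONSTANT_* packets share a single layout, so the VS variant is
 * emitted for every stage with the _3DCommandSubOpcode patched from the
 * table below.  Returns the mask of stages that were actually flushed.
 */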
static uint32_t
cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   static const uint32_t push_constant_opcodes[] = {
      [MESA_SHADER_VERTEX]    = 21,
      [MESA_SHADER_TESS_CTRL] = 25, /* HS */
      [MESA_SHADER_TESS_EVAL] = 26, /* DS */
      [MESA_SHADER_GEOMETRY]  = 22,
      [MESA_SHADER_FRAGMENT]  = 23,
      [MESA_SHADER_COMPUTE]   = 0,
   };

   VkShaderStageFlags flushed = 0;

   anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
      if (stage == MESA_SHADER_COMPUTE)
         continue;

      struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);

      if (state.offset == 0) {
         anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS),
                        ._3DCommandSubOpcode = push_constant_opcodes[stage]);
      } else {
         anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS),
                        ._3DCommandSubOpcode = push_constant_opcodes[stage],
                        .ConstantBody = {
                           .PointerToConstantBuffer2 = {
                              &cmd_buffer->device->dynamic_state_block_pool.bo,
                              state.offset,
                           },
                           .ConstantBuffer2ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
                        });
      }

      flushed |= mesa_to_vk_shader_stage(stage);
   }

   cmd_buffer->state.push_constants_dirty &= ~flushed;

   return flushed;
}
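
/* Fill out SF_CLIP_VIEWPORT and CC_VIEWPORT elements for each viewport and
 * point the hardware at the resulting arrays.  The 64- and 8-byte strides
 * below are the packed sizes of the two structs.
 */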
static void
emit_viewport_state(struct anv_cmd_buffer *cmd_buffer,
                    uint32_t count, const VkViewport *viewports)
{
   struct anv_state sf_clip_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 64, 64);
   struct anv_state cc_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 8, 32);

   for (uint32_t i = 0; i < count; i++) {
      const VkViewport *vp = &viewports[i];

      /* The gen7 state struct has just the matrix and guardband fields, the
       * gen8 struct adds the min/max viewport fields. */
      struct GENX(SF_CLIP_VIEWPORT) sf_clip_viewport = {
         .ViewportMatrixElementm00 = vp->width / 2,
         .ViewportMatrixElementm11 = vp->height / 2,
         .ViewportMatrixElementm22 = 1.0,
         .ViewportMatrixElementm30 = vp->x + vp->width / 2,
         .ViewportMatrixElementm31 = vp->y + vp->height / 2,
         .ViewportMatrixElementm32 = 0.0,
         .XMinClipGuardband = -1.0f,
         .XMaxClipGuardband = 1.0f,
         .YMinClipGuardband = -1.0f,
         .YMaxClipGuardband = 1.0f,
         .XMinViewPort = vp->x,
         .XMaxViewPort = vp->x + vp->width - 1,
         .YMinViewPort = vp->y,
         .YMaxViewPort = vp->y + vp->height - 1,
      };

      struct GENX(CC_VIEWPORT) cc_viewport = {
         .MinimumDepth = vp->minDepth,
         .MaximumDepth = vp->maxDepth
      };

      GENX(SF_CLIP_VIEWPORT_pack)(NULL, sf_clip_state.map + i * 64,
                                  &sf_clip_viewport);
      GENX(CC_VIEWPORT_pack)(NULL, cc_state.map + i * 8, &cc_viewport);
   }

   if (!cmd_buffer->device->info.has_llc) {
      anv_state_clflush(sf_clip_state);
      anv_state_clflush(cc_state);
   }

   anv_batch_emit(&cmd_buffer->batch,
                  GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC),
                  .CCViewportPointer = cc_state.offset);
   anv_batch_emit(&cmd_buffer->batch,
                  GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP),
                  .SFClipViewportPointer = sf_clip_state.offset);
}
void
gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.dynamic.viewport.count > 0) {
      emit_viewport_state(cmd_buffer, cmd_buffer->state.dynamic.viewport.count,
                          cmd_buffer->state.dynamic.viewport.viewports);
   } else {
      /* If viewport count is 0, this is taken to mean "use the default" */
      emit_viewport_state(cmd_buffer, 1,
                          &(VkViewport) {
                             .x = 0.0f,
                             .y = 0.0f,
                             .width = cmd_buffer->state.framebuffer->width,
                             .height = cmd_buffer->state.framebuffer->height,
                             .minDepth = 0.0f,
                             .maxDepth = 1.0f,
                          });
   }
}
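
/* Write an immediate value to an MMIO register from the command streamer. */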
static void
emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM),
                  .RegisterOffset = reg,
                  .DataDWord = imm);
}
#define GEN8_L3CNTLREG 0x7034
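
/* Program the L3 cache partitioning.  Shared local memory for compute
 * kernels is carved out of L3, so the register value depends on whether the
 * bound pipeline uses SLM.  The LRI is skipped when the current
 * configuration already matches.
 */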
static void
config_l3(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
{
   /* References for GL state:
    *
    * - commits e307cfa..228d5a3
    * - src/mesa/drivers/dri/i965/gen7_l3_state.c
    */

   uint32_t val = enable_slm ?
      /* All = 48 ways; URB = 16 ways; DC and RO = 0, SLM = 1 */
      0x60000021 :
      /* All = 48 ways; URB = 48 ways; DC, RO and SLM = 0 */
      0x60000060;
   bool changed = cmd_buffer->state.current_l3_config != val;

   if (changed) {
      /* According to the hardware docs, the L3 partitioning can only be
       * changed while the pipeline is completely drained and the caches are
       * flushed, which involves a first PIPE_CONTROL flush which stalls the
       * pipeline and initiates invalidation of the relevant caches...
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .TextureCacheInvalidationEnable = true,
                     .ConstantCacheInvalidationEnable = true,
                     .InstructionCacheInvalidateEnable = true,
                     .DCFlushEnable = true,
                     .PostSyncOperation = NoWrite,
                     .CommandStreamerStallEnable = true);

      /* ...followed by a second stalling flush which guarantees that
       * invalidation is complete when the L3 configuration registers are
       * modified.
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DCFlushEnable = true,
                     .PostSyncOperation = NoWrite,
                     .CommandStreamerStallEnable = true);

      emit_lri(&cmd_buffer->batch, GEN8_L3CNTLREG, val);
      cmd_buffer->state.current_l3_config = val;
   }
}
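
/* Cherryview is a gen8 part whose 3DSTATE_SF uses the gen9 layout, so both
 * packings are compiled here and __emit_sf_state() picks one at runtime.
 */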
static void
__emit_genx_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t sf_dw[GENX(3DSTATE_SF_length)];
   struct GENX(3DSTATE_SF) sf = {
      GENX(3DSTATE_SF_header),
      .LineWidth = cmd_buffer->state.dynamic.line_width,
   };
   GENX(3DSTATE_SF_pack)(NULL, sf_dw, &sf);
   anv_batch_emit_merge(&cmd_buffer->batch, sf_dw,
                        cmd_buffer->state.pipeline->gen8.sf);
}

#include "genxml/gen9_pack.h"
static void
__emit_gen9_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t sf_dw[GENX(3DSTATE_SF_length)];
   struct GEN9_3DSTATE_SF sf = {
      GEN9_3DSTATE_SF_header,
      .LineWidth = cmd_buffer->state.dynamic.line_width,
   };
   GEN9_3DSTATE_SF_pack(NULL, sf_dw, &sf);
   anv_batch_emit_merge(&cmd_buffer->batch, sf_dw,
                        cmd_buffer->state.pipeline->gen8.sf);
}

static void
__emit_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->device->info.is_cherryview)
      __emit_gen9_sf_state(cmd_buffer);
   else
      __emit_genx_sf_state(cmd_buffer);
}
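
/* Flush all dirty 3D state to the batch before a draw: vertex buffers, the
 * pipeline batch, descriptors, push constants, and the dynamic state packets
 * guarded by the dirty bits below.
 */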
void
genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   uint32_t *p;

   uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;

   assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);

   config_l3(cmd_buffer, false);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   if (vb_emit) {
      const uint32_t num_buffers = __builtin_popcount(vb_emit);
      const uint32_t num_dwords = 1 + num_buffers * 4;

      p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
                          GENX(3DSTATE_VERTEX_BUFFERS));
      uint32_t vb, i = 0;
      for_each_bit(vb, vb_emit) {
         struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
         uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;

         struct GENX(VERTEX_BUFFER_STATE) state = {
            .VertexBufferIndex = vb,
            .MemoryObjectControlState = GENX(MOCS),
            .AddressModifyEnable = true,
            .BufferPitch = pipeline->binding_stride[vb],
            .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
            .BufferSize = buffer->size - offset
         };

         GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
         i++;
      }
   }
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
      /* If somebody compiled a pipeline after starting a command buffer the
       * scratch bo may have grown since we started this cmd buffer (and
       * emitted STATE_BASE_ADDRESS). If we're binding that pipeline now,
       * reemit STATE_BASE_ADDRESS so that we use the bigger scratch bo. */
      if (cmd_buffer->state.scratch_size < pipeline->total_scratch)
         anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

      /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
       *
       *    "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
       *    the next 3DPRIMITIVE command after programming the
       *    3DSTATE_PUSH_CONSTANT_ALLOC_VS"
       *
       * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
       * pipeline setup, we need to dirty push constants.
       */
      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
   }
   /* We emit the binding tables and sampler tables first, then emit push
    * constants and then finally emit binding table and sampler table
    * pointers.  It has to happen in this order, since emitting the binding
    * tables may change the push constants (in case of storage images).  After
    * emitting push constants, on SKL+ we have to emit the corresponding
    * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
    */
   uint32_t dirty = 0;
   if (cmd_buffer->state.descriptors_dirty)
      dirty = gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer);

   if (cmd_buffer->state.push_constants_dirty)
      dirty |= cmd_buffer_flush_push_constants(cmd_buffer);

   if (dirty)
      gen7_cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
      gen8_cmd_buffer_emit_viewport(cmd_buffer);

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
      gen7_cmd_buffer_emit_scissor(cmd_buffer);

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)) {
      __emit_sf_state(cmd_buffer);
   }
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
      uint32_t raster_dw[GENX(3DSTATE_RASTER_length)];
      struct GENX(3DSTATE_RASTER) raster = {
         GENX(3DSTATE_RASTER_header),
         .GlobalDepthOffsetConstant = cmd_buffer->state.dynamic.depth_bias.bias,
         .GlobalDepthOffsetScale = cmd_buffer->state.dynamic.depth_bias.slope,
         .GlobalDepthOffsetClamp = cmd_buffer->state.dynamic.depth_bias.clamp
      };
      GENX(3DSTATE_RASTER_pack)(NULL, raster_dw, &raster);
      anv_batch_emit_merge(&cmd_buffer->batch, raster_dw,
                           pipeline->gen8.raster);
   }
   /* Stencil reference values moved from COLOR_CALC_STATE in gen8 to
    * 3DSTATE_WM_DEPTH_STENCIL in gen9. That means the dirty bits get split
    * across different state packets for gen8 and gen9. We handle that by
    * using a big old #if switch here.
    */
#if GEN_GEN == 8
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GENX(COLOR_CALC_STATE_length) * 4,
                                            64);
      struct GENX(COLOR_CALC_STATE) cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
         .StencilReferenceValue = d->stencil_reference.front,
         .BackFaceStencilReferenceValue = d->stencil_reference.back,
      };
      GENX(COLOR_CALC_STATE_pack)(NULL, cc_state.map, &cc);

      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_CC_STATE_POINTERS),
                     .ColorCalcStatePointer = cc_state.offset,
                     .ColorCalcStatePointerValid = true);
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
      uint32_t wm_depth_stencil_dw[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;

      struct GENX(3DSTATE_WM_DEPTH_STENCIL) wm_depth_stencil = {
         GENX(3DSTATE_WM_DEPTH_STENCIL_header),

         .StencilBufferWriteEnable = d->stencil_write_mask.front != 0 ||
                                     d->stencil_write_mask.back != 0,

         .StencilTestMask = d->stencil_compare_mask.front & 0xff,
         .StencilWriteMask = d->stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,
      };
      GENX(3DSTATE_WM_DEPTH_STENCIL_pack)(NULL, wm_depth_stencil_dw,
                                          &wm_depth_stencil);

      anv_batch_emit_merge(&cmd_buffer->batch, wm_depth_stencil_dw,
                           pipeline->gen8.wm_depth_stencil);
   }
#else
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GEN9_COLOR_CALC_STATE_length * 4,
                                            64);
      struct GEN9_COLOR_CALC_STATE cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
      };
      GEN9_COLOR_CALC_STATE_pack(NULL, cc_state.map, &cc);

      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit(&cmd_buffer->batch,
                     GEN9_3DSTATE_CC_STATE_POINTERS,
                     .ColorCalcStatePointer = cc_state.offset,
                     .ColorCalcStatePointerValid = true);
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      uint32_t dwords[GEN9_3DSTATE_WM_DEPTH_STENCIL_length];
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
      struct GEN9_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil = {
         GEN9_3DSTATE_WM_DEPTH_STENCIL_header,

         .StencilBufferWriteEnable = d->stencil_write_mask.front != 0 ||
                                     d->stencil_write_mask.back != 0,

         .StencilTestMask = d->stencil_compare_mask.front & 0xff,
         .StencilWriteMask = d->stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,

         .StencilReferenceValue = d->stencil_reference.front,
         .BackfaceStencilReferenceValue = d->stencil_reference.back
      };
      GEN9_3DSTATE_WM_DEPTH_STENCIL_pack(NULL, dwords, &wm_depth_stencil);

      anv_batch_emit_merge(&cmd_buffer->batch, dwords,
                           pipeline->gen9.wm_depth_stencil);
   }
#endif
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_INDEX_BUFFER)) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF),
                     .IndexedDrawCutIndexEnable = pipeline->primitive_restart,
                     .CutIndex = cmd_buffer->state.restart_index);
   }

   cmd_buffer->state.vb_dirty &= ~vb_emit;
   cmd_buffer->state.dirty = 0;
}
void genX(CmdBindIndexBuffer)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    VkIndexType                                 indexType)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   static const uint32_t vk_to_gen_index_type[] = {
      [VK_INDEX_TYPE_UINT16] = INDEX_WORD,
      [VK_INDEX_TYPE_UINT32] = INDEX_DWORD,
   };

   static const uint32_t restart_index_for_type[] = {
      [VK_INDEX_TYPE_UINT16] = UINT16_MAX,
      [VK_INDEX_TYPE_UINT32] = UINT32_MAX,
   };

   cmd_buffer->state.restart_index = restart_index_for_type[indexType];

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER),
                  .IndexFormat = vk_to_gen_index_type[indexType],
                  .MemoryObjectControlState = GENX(MOCS),
                  .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
                  .BufferSize = buffer->size - offset);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
}
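
/* Emit the compute binding table, samplers, CURBE push constants, and the
 * INTERFACE_DESCRIPTOR_DATA that ties them all together for a GPGPU walk.
 */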
static VkResult
flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = anv_cmd_buffer_emit_samplers(cmd_buffer,
                                         MESA_SHADER_COMPUTE, &samplers);
   if (result != VK_SUCCESS)
      return result;
   result = anv_cmd_buffer_emit_binding_table(cmd_buffer,
                                              MESA_SHADER_COMPUTE, &surfaces);
   if (result != VK_SUCCESS)
      return result;

   struct anv_state push_state = anv_cmd_buffer_cs_push_constants(cmd_buffer);

   const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
   unsigned push_constant_data_size =
      (prog_data->nr_params + local_id_dwords) * 4;
   unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
   unsigned push_constant_regs = reg_aligned_constant_size / 32;

   if (push_state.alloc_size) {
      anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD),
                     .CURBETotalDataLength = push_state.alloc_size,
                     .CURBEDataStartAddress = push_state.offset);
   }

   assert(prog_data->total_shared <= 64 * 1024);
   uint32_t slm_size = 0;
   if (prog_data->total_shared > 0) {
      /* slm_size is in 4k increments, but must be a power of 2. */
      slm_size = 4 * 1024;
      while (slm_size < prog_data->total_shared)
         slm_size <<= 1;
      slm_size /= 4 * 1024;
   }

   struct anv_state state =
      anv_state_pool_emit(&device->dynamic_state_pool,
                          GENX(INTERFACE_DESCRIPTOR_DATA), 64,
                          .KernelStartPointer = pipeline->cs_simd,
                          .KernelStartPointerHigh = 0,
                          .BindingTablePointer = surfaces.offset,
                          .BindingTableEntryCount = 0,
                          .SamplerStatePointer = samplers.offset,
                          .SamplerCount = 0,
                          .ConstantIndirectURBEntryReadLength = push_constant_regs,
                          .ConstantURBEntryReadOffset = 0,
                          .BarrierEnable = cs_prog_data->uses_barrier,
                          .SharedLocalMemorySize = slm_size,
                          .NumberofThreadsinGPGPUThreadGroup =
                             pipeline->cs_thread_width_max);

   uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
   anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD),
                  .InterfaceDescriptorTotalLength = size,
                  .InterfaceDescriptorDataStartAddress = state.offset);

   return VK_SUCCESS;
}
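
/* Compute analog of cmd_buffer_flush_state: switch to the GPGPU pipeline if
 * needed, then flush the pipeline batch and the compute descriptors.
 */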
void
genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   VkResult result;

   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);

   bool needs_slm = pipeline->cs_prog_data.base.total_shared > 0;
   config_l3(cmd_buffer, needs_slm);

   if (cmd_buffer->state.current_pipeline != GPGPU) {
      /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
       *
       *   Software must clear the COLOR_CALC_STATE Valid field in
       *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
       *   with Pipeline Select set to GPGPU.
       *
       * The internal hardware docs recommend the same workaround for Gen9
       * hardware too.
       */
      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_CC_STATE_POINTERS));

      anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT),
#if GEN_GEN >= 9
                     .MaskBits = 3,
#endif
                     .PipelineSelection = GPGPU);
      cmd_buffer->state.current_pipeline = GPGPU;
   }

   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
      result = flush_compute_descriptor_set(cmd_buffer);
      assert(result == VK_SUCCESS);
      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
   }

   cmd_buffer->state.compute_dirty = 0;
}
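
/* Occlusion queries are implemented by snapshotting the PS depth count (the
 * number of samples that passed the depth test) at the start and end of the
 * query; CmdCopyQueryPoolResults computes the difference on the GPU.
 */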
static void
emit_ps_depth_count(struct anv_batch *batch,
                    struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WritePSDepthCount,
                  .DepthStallEnable = true,
                  .Address = { bo, offset });
}
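
/* Mark a query slot as available by writing a non-zero immediate once all
 * prior work has reached the end of the pipe.
 */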
static void
emit_query_availability(struct anv_batch *batch,
                        struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WriteImmediateData,
                  .Address = { bo, offset },
                  .ImmediateData = 1);
}
void genX(CmdBeginQuery)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query,
    VkQueryControlFlags                         flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   /* Workaround: When meta uses the pipeline with the VS disabled, it seems
    * that the pipelining of the depth write breaks. What we see is that
    * samples from the render pass clear leak into the first query
    * immediately after the clear. Doing a pipecontrol with a post-sync
    * operation and DepthStallEnable seems to work around the issue.
    */
   if (cmd_buffer->state.need_query_wa) {
      cmd_buffer->state.need_query_wa = false;
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DepthCacheFlushEnable = true,
                     .DepthStallEnable = true);
   }

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
                          query * sizeof(struct anv_query_pool_slot));
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      unreachable("");
   }
}
void genX(CmdEndQuery)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
                          query * sizeof(struct anv_query_pool_slot) + 8);

      emit_query_availability(&cmd_buffer->batch, &pool->bo,
                              query * sizeof(struct anv_query_pool_slot) + 16);
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      unreachable("");
   }
}
#define TIMESTAMP 0x2358
void genX(CmdWriteTimestamp)(
    VkCommandBuffer                             commandBuffer,
    VkPipelineStageFlagBits                     pipelineStage,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   uint32_t offset = query * sizeof(struct anv_query_pool_slot);

   assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);

   switch (pipelineStage) {
   case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
                     .RegisterAddress = TIMESTAMP,
                     .MemoryAddress = { &pool->bo, offset });
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
                     .RegisterAddress = TIMESTAMP + 4,
                     .MemoryAddress = { &pool->bo, offset + 4 });
      break;

   default:
      /* Everything else is bottom-of-pipe */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DestinationAddressType = DAT_PPGTT,
                     .PostSyncOperation = WriteTimestamp,
                     .Address = { &pool->bo, offset });
      break;
   }

   emit_query_availability(&cmd_buffer->batch, &pool->bo, offset + 16);
}
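
/* MI_MATH is a small ALU in the command streamer.  Each instruction is one
 * dword packing an opcode in bits 31:20 and two operands in bits 19:10 and
 * 9:0; a typical program LOADs SRCA/SRCB from GPRs, performs an operation,
 * and STOREs the accumulator back to a GPR.
 */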
#define alu_opcode(v)   __gen_uint((v), 20, 31)
#define alu_operand1(v) __gen_uint((v), 10, 19)
#define alu_operand2(v) __gen_uint((v), 0, 9)
#define alu(opcode, operand1, operand2) \
   alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)

#define OPCODE_NOOP      0x000
#define OPCODE_LOAD      0x080
#define OPCODE_LOADINV   0x480
#define OPCODE_LOAD0     0x081
#define OPCODE_LOAD1     0x481
#define OPCODE_ADD       0x100
#define OPCODE_SUB       0x101
#define OPCODE_AND       0x102
#define OPCODE_OR        0x103
#define OPCODE_XOR       0x104
#define OPCODE_STORE     0x180
#define OPCODE_STOREINV  0x580

#define OPERAND_R0   0x00
#define OPERAND_R1   0x01
#define OPERAND_R2   0x02
#define OPERAND_R3   0x03
#define OPERAND_R4   0x04
#define OPERAND_SRCA 0x20
#define OPERAND_SRCB 0x21
#define OPERAND_ACCU 0x31
#define OPERAND_ZF   0x32
#define OPERAND_CF   0x33

#define CS_GPR(n) (0x2600 + (n) * 8)
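
/* The command streamer GPRs are 64 bits wide but only addressable one dword
 * at a time, so a 64-bit load takes two MI_LOAD_REGISTER_MEM commands.
 */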
static void
emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
                      struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
                  .RegisterAddress = reg,
                  .MemoryAddress = { bo, offset });
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
                  .RegisterAddress = reg + 4,
                  .MemoryAddress = { bo, offset + 4 });
}
static void
store_query_result(struct anv_batch *batch, uint32_t reg,
                   struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
{
   anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
                  .RegisterAddress = reg,
                  .MemoryAddress = { bo, offset });

   if (flags & VK_QUERY_RESULT_64_BIT)
      anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
                     .RegisterAddress = reg + 4,
                     .MemoryAddress = { bo, offset + 4 });
}
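
/* Resolve query results on the GPU: the begin/end snapshots are loaded into
 * GPR0/GPR1, MI_MATH computes end - begin into GPR2, and the result (plus,
 * optionally, the availability word) is stored to the destination buffer.
 */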
void genX(CmdCopyQueryPoolResults)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    firstQuery,
    uint32_t                                    queryCount,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset,
    VkDeviceSize                                destStride,
    VkQueryResultFlags                          flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
   uint32_t slot_offset, dst_offset;

   if (flags & VK_QUERY_RESULT_WAIT_BIT)
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .CommandStreamerStallEnable = true,
                     .StallAtPixelScoreboard = true);

   dst_offset = buffer->offset + destOffset;
   for (uint32_t i = 0; i < queryCount; i++) {

      slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
      switch (pool->type) {
      case VK_QUERY_TYPE_OCCLUSION:
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(0), &pool->bo, slot_offset);
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(1), &pool->bo, slot_offset + 8);

         /* FIXME: We need to clamp the result for 32 bit. */
         uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
         dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
         dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
         dw[3] = alu(OPCODE_SUB, 0, 0);
         dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
         break;

      case VK_QUERY_TYPE_TIMESTAMP:
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(2), &pool->bo, slot_offset);
         break;

      default:
         unreachable("unhandled query type");
      }

      store_query_result(&cmd_buffer->batch,
                         CS_GPR(2), buffer->bo, dst_offset, flags);

      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
         emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
                               &pool->bo, slot_offset + 16);
         if (flags & VK_QUERY_RESULT_64_BIT)
            store_query_result(&cmd_buffer->batch,
                               CS_GPR(0), buffer->bo, dst_offset + 8, flags);
         else
            store_query_result(&cmd_buffer->batch,
                               CS_GPR(0), buffer->bo, dst_offset + 4, flags);
      }

      dst_offset += destStride;
   }
}
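
/* Events are backed by a dword of dynamic state: PIPE_CONTROL post-sync
 * writes set or reset it from the GPU side, and CmdWaitEvents polls it with
 * MI_SEMAPHORE_WAIT.
 */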
void genX(CmdSetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     _event,
    VkPipelineStageFlags                        stageMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_event, event, _event);

   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WriteImmediateData,
                  .Address = {
                     &cmd_buffer->device->dynamic_state_block_pool.bo,
                     event->state.offset
                  },
                  .ImmediateData = VK_EVENT_SET);
}
void genX(CmdResetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     _event,
    VkPipelineStageFlags                        stageMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_event, event, _event);

   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WriteImmediateData,
                  .Address = {
                     &cmd_buffer->device->dynamic_state_block_pool.bo,
                     event->state.offset
                  },
                  .ImmediateData = VK_EVENT_RESET);
}
void genX(CmdWaitEvents)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    eventCount,
    const VkEvent*                              pEvents,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    uint32_t                                    memoryBarrierCount,
    const VkMemoryBarrier*                      pMemoryBarriers,
    uint32_t                                    bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
    uint32_t                                    imageMemoryBarrierCount,
    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   for (uint32_t i = 0; i < eventCount; i++) {
      ANV_FROM_HANDLE(anv_event, event, pEvents[i]);

      anv_batch_emit(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT),
                     .WaitMode = PollingMode,
                     .CompareOperation = COMPARE_SAD_EQUAL_SDD,
                     .SemaphoreDataDword = VK_EVENT_SET,
                     .SemaphoreAddress = {
                        &cmd_buffer->device->dynamic_state_block_pool.bo,
                        event->state.offset
                     });
   }

   genX(CmdPipelineBarrier)(commandBuffer, srcStageMask, destStageMask,
                            false, /* byRegion */
                            memoryBarrierCount, pMemoryBarriers,
                            bufferMemoryBarrierCount, pBufferMemoryBarriers,
                            imageMemoryBarrierCount, pImageMemoryBarriers);
}