/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"
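/* Flush dirty push constants for the graphics stages.  The per-stage
 * 3DSTATE_CONSTANT_* packets share one layout and differ only in their
 * 3DCommandSubOpcode, so we pack the VS packet and patch the sub-opcode in
 * from the table below.  Returns the mask of stages that were flushed. */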
static VkShaderStageFlags
cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   static const uint32_t push_constant_opcodes[] = {
      [MESA_SHADER_VERTEX]    = 21,
      [MESA_SHADER_TESS_CTRL] = 25, /* HS */
      [MESA_SHADER_TESS_EVAL] = 26, /* DS */
      [MESA_SHADER_GEOMETRY]  = 22,
      [MESA_SHADER_FRAGMENT]  = 23,
      [MESA_SHADER_COMPUTE]   = 0,
   };

   VkShaderStageFlags flushed = 0;

   anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
      if (stage == MESA_SHADER_COMPUTE)
         continue;

      struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);

      if (state.offset == 0)
         continue;

      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS),
                     ._3DCommandSubOpcode = push_constant_opcodes[stage],
                     .ConstantBody = {
                        .PointerToConstantBuffer2 = { &cmd_buffer->device->dynamic_state_block_pool.bo,
                                                      state.offset },
                        .ConstantBuffer2ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
                     });

      flushed |= mesa_to_vk_shader_stage(stage);
   }

   cmd_buffer->state.push_constants_dirty &= ~flushed;

   return flushed;
}
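/* Upload the SF_CLIP_VIEWPORT and CC_VIEWPORT arrays (64 and 8 bytes per
 * viewport, respectively) into dynamic state and point the fixed-function
 * units at them. */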
static void
emit_viewport_state(struct anv_cmd_buffer *cmd_buffer,
                    uint32_t count, const VkViewport *viewports)
{
   struct anv_state sf_clip_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 64, 64);
   struct anv_state cc_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 8, 32);

   for (uint32_t i = 0; i < count; i++) {
      const VkViewport *vp = &viewports[i];

      /* The gen7 state struct has just the matrix and guardband fields, the
       * gen8 struct adds the min/max viewport fields. */
      struct GENX(SF_CLIP_VIEWPORT) sf_clip_viewport = {
         .ViewportMatrixElementm00 = vp->width / 2,
         .ViewportMatrixElementm11 = vp->height / 2,
         .ViewportMatrixElementm22 = 1.0,
         .ViewportMatrixElementm30 = vp->x + vp->width / 2,
         .ViewportMatrixElementm31 = vp->y + vp->height / 2,
         .ViewportMatrixElementm32 = 0.0,
         .XMinClipGuardband = -1.0f,
         .XMaxClipGuardband = 1.0f,
         .YMinClipGuardband = -1.0f,
         .YMaxClipGuardband = 1.0f,
         .XMinViewPort = vp->x,
         .XMaxViewPort = vp->x + vp->width - 1,
         .YMinViewPort = vp->y,
         .YMaxViewPort = vp->y + vp->height - 1,
      };

      struct GENX(CC_VIEWPORT) cc_viewport = {
         .MinimumDepth = vp->minDepth,
         .MaximumDepth = vp->maxDepth
      };

      GENX(SF_CLIP_VIEWPORT_pack)(NULL, sf_clip_state.map + i * 64,
                                  &sf_clip_viewport);
      GENX(CC_VIEWPORT_pack)(NULL, cc_state.map + i * 8, &cc_viewport);
   }

   if (!cmd_buffer->device->info.has_llc) {
      anv_state_clflush(sf_clip_state);
      anv_state_clflush(cc_state);
   }

   anv_batch_emit(&cmd_buffer->batch,
                  GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC),
                  .CCViewportPointer = cc_state.offset);
   anv_batch_emit(&cmd_buffer->batch,
                  GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP),
                  .SFClipViewportPointer = sf_clip_state.offset);
}
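/* Emit either the app-provided dynamic viewports or, when the dynamic
 * viewport count is zero, a single default viewport covering the whole
 * framebuffer. */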
void
gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.dynamic.viewport.count > 0) {
      emit_viewport_state(cmd_buffer, cmd_buffer->state.dynamic.viewport.count,
                          cmd_buffer->state.dynamic.viewport.viewports);
   } else {
      /* If viewport count is 0, this is taken to mean "use the default" */
      emit_viewport_state(cmd_buffer, 1,
                          &(VkViewport) {
                             .x = 0.0f,
                             .y = 0.0f,
                             .width = cmd_buffer->state.framebuffer->width,
                             .height = cmd_buffer->state.framebuffer->height,
                             .minDepth = 0.0f,
                             .maxDepth = 1.0f,
                          });
   }
}
static void
emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM),
                  .RegisterOffset = reg,
                  .DataDWord = imm);
}

#define GEN8_L3CNTLREG 0x7034
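/* Switch between the two static L3 configurations we use: one with a chunk
 * of L3 carved out as SLM for compute, one without.  The L3 partitioning can
 * only be changed while the pipeline is idle, hence the stalling flushes
 * below. */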
static void
config_l3(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
{
   /* References for GL state:
    *
    * - commits e307cfa..228d5a3
    * - src/mesa/drivers/dri/i965/gen7_l3_state.c
    */

   uint32_t val = enable_slm ?
      /* All = 48 ways; URB = 16 ways; DC and RO = 0, SLM = 1 */
      0x60000021 :
      /* All = 48 ways; URB = 48 ways; DC, RO and SLM = 0 */
      0x60000060;
   bool changed = cmd_buffer->state.current_l3_config != val;

   if (changed) {
      /* According to the hardware docs, the L3 partitioning can only be
       * changed while the pipeline is completely drained and the caches are
       * flushed, which involves a first PIPE_CONTROL flush which stalls the
       * pipeline and initiates invalidation of the relevant caches...
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .TextureCacheInvalidationEnable = true,
                     .ConstantCacheInvalidationEnable = true,
                     .InstructionCacheInvalidateEnable = true,
                     .DCFlushEnable = true,
                     .PostSyncOperation = NoWrite,
                     .CommandStreamerStallEnable = true);

      /* ...followed by a second stalling flush which guarantees that
       * invalidation is complete when the L3 configuration registers are
       * modified. */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DCFlushEnable = true,
                     .PostSyncOperation = NoWrite,
                     .CommandStreamerStallEnable = true);

      emit_lri(&cmd_buffer->batch, GEN8_L3CNTLREG, val);
      cmd_buffer->state.current_l3_config = val;
   }
}
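/* Cherryview programs 3DSTATE_SF with the gen9 layout (the LineWidth
 * encoding appears to differ from stock gen8), so __emit_sf_state() picks
 * the right variant at runtime. */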
static void
__emit_genx_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t sf_dw[GENX(3DSTATE_SF_length)];
   struct GENX(3DSTATE_SF) sf = {
      GENX(3DSTATE_SF_header),
      .LineWidth = cmd_buffer->state.dynamic.line_width,
   };
   GENX(3DSTATE_SF_pack)(NULL, sf_dw, &sf);
   anv_batch_emit_merge(&cmd_buffer->batch, sf_dw,
                        cmd_buffer->state.pipeline->gen8.sf);
}

#include "genxml/gen9_pack.h"
static void
__emit_gen9_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t sf_dw[GENX(3DSTATE_SF_length)];
   struct GEN9_3DSTATE_SF sf = {
      GEN9_3DSTATE_SF_header,
      .LineWidth = cmd_buffer->state.dynamic.line_width,
   };
   GEN9_3DSTATE_SF_pack(NULL, sf_dw, &sf);
   anv_batch_emit_merge(&cmd_buffer->batch, sf_dw,
                        cmd_buffer->state.pipeline->gen8.sf);
}

static void
__emit_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->device->info.is_cherryview)
      __emit_gen9_sf_state(cmd_buffer);
   else
      __emit_genx_sf_state(cmd_buffer);
}
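/* Flush all dirty 3D state to the batch.  Called before each draw to bring
 * vertex buffers, the pipeline batch, descriptors, push constants and
 * dynamic state up to date. */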
void
genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   uint32_t *p;

   uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;

   assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);

   config_l3(cmd_buffer, false);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   if (vb_emit) {
      const uint32_t num_buffers = __builtin_popcount(vb_emit);
      const uint32_t num_dwords = 1 + num_buffers * 4;

      p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
                          GENX(3DSTATE_VERTEX_BUFFERS));
      uint32_t vb, i = 0;
      for_each_bit(vb, vb_emit) {
         struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
         uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;

         struct GENX(VERTEX_BUFFER_STATE) state = {
            .VertexBufferIndex = vb,
            .MemoryObjectControlState = GENX(MOCS),
            .AddressModifyEnable = true,
            .BufferPitch = pipeline->binding_stride[vb],
            .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
            .BufferSize = buffer->size - offset
         };

         GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
         i++;
      }
   }
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
      /* If somebody compiled a pipeline after starting a command buffer the
       * scratch bo may have grown since we started this cmd buffer (and
       * emitted STATE_BASE_ADDRESS).  If we're binding that pipeline now,
       * reemit STATE_BASE_ADDRESS so that we use the bigger scratch bo. */
      if (cmd_buffer->state.scratch_size < pipeline->total_scratch)
         anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
   }

   /* We emit the binding tables and sampler tables first, then emit push
    * constants and then finally emit binding table and sampler table
    * pointers.  It has to happen in this order, since emitting the binding
    * tables may change the push constants (in case of storage images).  After
    * emitting push constants, on SKL+ we have to emit the corresponding
    * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
    */
   VkShaderStageFlags dirty = 0;
   if (cmd_buffer->state.descriptors_dirty)
      dirty = gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer);

   if (cmd_buffer->state.push_constants_dirty)
      dirty |= cmd_buffer_flush_push_constants(cmd_buffer);

   if (dirty)
      gen7_cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
      gen8_cmd_buffer_emit_viewport(cmd_buffer);

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
      gen7_cmd_buffer_emit_scissor(cmd_buffer);

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)) {
      __emit_sf_state(cmd_buffer);
   }
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
      bool enable_bias = cmd_buffer->state.dynamic.depth_bias.bias != 0.0f ||
                         cmd_buffer->state.dynamic.depth_bias.slope != 0.0f;

      uint32_t raster_dw[GENX(3DSTATE_RASTER_length)];
      struct GENX(3DSTATE_RASTER) raster = {
         GENX(3DSTATE_RASTER_header),
         .GlobalDepthOffsetEnableSolid = enable_bias,
         .GlobalDepthOffsetEnableWireframe = enable_bias,
         .GlobalDepthOffsetEnablePoint = enable_bias,
         .GlobalDepthOffsetConstant = cmd_buffer->state.dynamic.depth_bias.bias,
         .GlobalDepthOffsetScale = cmd_buffer->state.dynamic.depth_bias.slope,
         .GlobalDepthOffsetClamp = cmd_buffer->state.dynamic.depth_bias.clamp
      };
      GENX(3DSTATE_RASTER_pack)(NULL, raster_dw, &raster);
      anv_batch_emit_merge(&cmd_buffer->batch, raster_dw,
                           pipeline->gen8.raster);
   }
   /* Stencil reference values moved from COLOR_CALC_STATE in gen8 to
    * 3DSTATE_WM_DEPTH_STENCIL in gen9.  That means the dirty bits get split
    * across different state packets for gen8 and gen9, and we handle that by
    * using a big old #if switch here.
    */
#if GEN_GEN == 8
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GENX(COLOR_CALC_STATE_length) * 4,
                                            64);
      struct GENX(COLOR_CALC_STATE) cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
         .StencilReferenceValue =
            cmd_buffer->state.dynamic.stencil_reference.front,
         .BackFaceStencilReferenceValue =
            cmd_buffer->state.dynamic.stencil_reference.back,
      };
      GENX(COLOR_CALC_STATE_pack)(NULL, cc_state.map, &cc);

      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_CC_STATE_POINTERS),
                     .ColorCalcStatePointer = cc_state.offset,
                     .ColorCalcStatePointerValid = true);
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
      uint32_t wm_depth_stencil_dw[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];

      struct GENX(3DSTATE_WM_DEPTH_STENCIL) wm_depth_stencil = {
         GENX(3DSTATE_WM_DEPTH_STENCIL_header),

         /* Is this what we need to do? */
         .StencilBufferWriteEnable =
            cmd_buffer->state.dynamic.stencil_write_mask.front != 0,

         .StencilTestMask =
            cmd_buffer->state.dynamic.stencil_compare_mask.front & 0xff,
         .StencilWriteMask =
            cmd_buffer->state.dynamic.stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask =
            cmd_buffer->state.dynamic.stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask =
            cmd_buffer->state.dynamic.stencil_write_mask.back & 0xff,
      };
      GENX(3DSTATE_WM_DEPTH_STENCIL_pack)(NULL, wm_depth_stencil_dw,
                                          &wm_depth_stencil);

      anv_batch_emit_merge(&cmd_buffer->batch, wm_depth_stencil_dw,
                           pipeline->gen8.wm_depth_stencil);
   }
#else
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GEN9_COLOR_CALC_STATE_length * 4,
                                            64);
      struct GEN9_COLOR_CALC_STATE cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
      };
      GEN9_COLOR_CALC_STATE_pack(NULL, cc_state.map, &cc);

      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit(&cmd_buffer->batch,
                     GEN9_3DSTATE_CC_STATE_POINTERS,
                     .ColorCalcStatePointer = cc_state.offset,
                     .ColorCalcStatePointerValid = true);
   }
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      uint32_t dwords[GEN9_3DSTATE_WM_DEPTH_STENCIL_length];
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
      struct GEN9_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil = {
         GEN9_3DSTATE_WM_DEPTH_STENCIL_header,

         .StencilBufferWriteEnable = d->stencil_write_mask.front != 0 ||
                                     d->stencil_write_mask.back != 0,

         .StencilTestMask = d->stencil_compare_mask.front & 0xff,
         .StencilWriteMask = d->stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,

         .StencilReferenceValue = d->stencil_reference.front,
         .BackfaceStencilReferenceValue = d->stencil_reference.back
      };
      GEN9_3DSTATE_WM_DEPTH_STENCIL_pack(NULL, dwords, &wm_depth_stencil);

      anv_batch_emit_merge(&cmd_buffer->batch, dwords,
                           pipeline->gen9.wm_depth_stencil);
   }
#endif
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_INDEX_BUFFER)) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF),
                     .IndexedDrawCutIndexEnable = pipeline->primitive_restart,
                     .CutIndex = cmd_buffer->state.restart_index);
   }

   cmd_buffer->state.vb_dirty &= ~vb_emit;
   cmd_buffer->state.dirty = 0;
}
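/* Vulkan exposes no explicit primitive-restart index, so we remember the
 * all-ones value for the bound index type here and program it as the cut
 * index in 3DSTATE_VF at flush time. */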
void genX(CmdBindIndexBuffer)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    VkIndexType                                 indexType)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   static const uint32_t vk_to_gen_index_type[] = {
      [VK_INDEX_TYPE_UINT16] = INDEX_WORD,
      [VK_INDEX_TYPE_UINT32] = INDEX_DWORD,
   };

   static const uint32_t restart_index_for_type[] = {
      [VK_INDEX_TYPE_UINT16] = UINT16_MAX,
      [VK_INDEX_TYPE_UINT32] = UINT32_MAX,
   };

   cmd_buffer->state.restart_index = restart_index_for_type[indexType];

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER),
                  .IndexFormat = vk_to_gen_index_type[indexType],
                  .MemoryObjectControlState = GENX(MOCS),
                  .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
                  .BufferSize = buffer->size - offset);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
}
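/* Re-emit everything the compute pipeline needs: samplers, the binding
 * table, push constants (CURBE) and the interface descriptor.  Fails only
 * if binding table or sampler allocation fails. */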
static VkResult
flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = anv_cmd_buffer_emit_samplers(cmd_buffer,
                                         MESA_SHADER_COMPUTE, &samplers);
   if (result != VK_SUCCESS)
      return result;
   result = anv_cmd_buffer_emit_binding_table(cmd_buffer,
                                              MESA_SHADER_COMPUTE, &surfaces);
   if (result != VK_SUCCESS)
      return result;

   struct anv_state push_state = anv_cmd_buffer_cs_push_constants(cmd_buffer);

   const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
   unsigned push_constant_data_size =
      (prog_data->nr_params + local_id_dwords) * 4;
   unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
   unsigned push_constant_regs = reg_aligned_constant_size / 32;

   if (push_state.alloc_size) {
      anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD),
                     .CURBETotalDataLength = push_state.alloc_size,
                     .CURBEDataStartAddress = push_state.offset);
   }

   assert(prog_data->total_shared <= 64 * 1024);
   uint32_t slm_size = 0;
   if (prog_data->total_shared > 0) {
      /* slm_size is in 4k increments, but must be a power of 2. */
      slm_size = 4 * 1024;
      while (slm_size < prog_data->total_shared)
         slm_size <<= 1;
      slm_size /= 4 * 1024;
   }

   struct anv_state state =
      anv_state_pool_emit(&device->dynamic_state_pool,
                          GENX(INTERFACE_DESCRIPTOR_DATA), 64,
                          .KernelStartPointer = pipeline->cs_simd,
                          .KernelStartPointerHigh = 0,
                          .BindingTablePointer = surfaces.offset,
                          .BindingTableEntryCount = 0,
                          .SamplerStatePointer = samplers.offset,
                          .ConstantIndirectURBEntryReadLength = push_constant_regs,
                          .ConstantURBEntryReadOffset = 0,
                          .BarrierEnable = cs_prog_data->uses_barrier,
                          .SharedLocalMemorySize = slm_size,
                          .NumberofThreadsinGPGPUThreadGroup =
                             pipeline->cs_thread_width_max);

   uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
   anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD),
                  .InterfaceDescriptorTotalLength = size,
                  .InterfaceDescriptorDataStartAddress = state.offset);

   return VK_SUCCESS;
}
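/* Flush dirty compute state before a dispatch: switch to the GPGPU pipeline
 * if needed (with the COLOR_CALC_STATE workaround), emit the pipeline batch
 * and re-flush the compute descriptors. */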
void
genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   VkResult result;

   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);

   bool needs_slm = pipeline->cs_prog_data.base.total_shared > 0;
   config_l3(cmd_buffer, needs_slm);

   if (cmd_buffer->state.current_pipeline != GPGPU) {
      /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
       *
       *   Software must clear the COLOR_CALC_STATE Valid field in
       *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
       *   with Pipeline Select set to GPGPU.
       *
       * The internal hardware docs recommend the same workaround for Gen9
       * hardware too.
       */
      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_CC_STATE_POINTERS));

      anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT),
                     .PipelineSelection = GPGPU);
      cmd_buffer->state.current_pipeline = GPGPU;
   }

   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
      result = flush_compute_descriptor_set(cmd_buffer);
      assert(result == VK_SUCCESS);
      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
   }

   cmd_buffer->state.compute_dirty = 0;
}
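/* Occlusion queries work by writing the PS depth count at begin and end and
 * subtracting; the depth stall ensures prior depth writes have landed before
 * the snapshot is taken. */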
static void
emit_ps_depth_count(struct anv_batch *batch,
                    struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WritePSDepthCount,
                  .DepthStallEnable = true,
                  .Address = { bo, offset });
}
static void
emit_query_availability(struct anv_batch *batch,
                        struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WriteImmediateData,
                  .Address = { bo, offset },
                  .ImmediateData = 1);
}
void genX(CmdBeginQuery)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query,
    VkQueryControlFlags                         flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   /* Workaround: When meta uses the pipeline with the VS disabled, it seems
    * that the pipelining of the depth write breaks.  What we see is that
    * samples from the render pass clear leak into the first query
    * immediately after the clear.  Doing a pipecontrol with a post-sync
    * operation and DepthStallEnable seems to work around the issue.
    */
   if (cmd_buffer->state.need_query_wa) {
      cmd_buffer->state.need_query_wa = false;
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DepthCacheFlushEnable = true,
                     .DepthStallEnable = true);
   }

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
                          query * sizeof(struct anv_query_pool_slot));
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      unreachable("");
   }
}
void genX(CmdEndQuery)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
                          query * sizeof(struct anv_query_pool_slot) + 8);

      emit_query_availability(&cmd_buffer->batch, &pool->bo,
                              query * sizeof(struct anv_query_pool_slot) + 16);
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      unreachable("");
   }
}
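/* TIMESTAMP is the render command streamer's timestamp register.  For
 * top-of-pipe timestamps we read it directly with MI_STORE_REGISTER_MEM
 * (two 32-bit halves); everything else goes through a PIPE_CONTROL
 * post-sync write, which lands at bottom-of-pipe. */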
#define TIMESTAMP 0x2358

void genX(CmdWriteTimestamp)(
    VkCommandBuffer                             commandBuffer,
    VkPipelineStageFlagBits                     pipelineStage,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   uint32_t offset = query * sizeof(struct anv_query_pool_slot);

   assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);

   switch (pipelineStage) {
   case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
                     .RegisterAddress = TIMESTAMP,
                     .MemoryAddress = { &pool->bo, offset });
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
                     .RegisterAddress = TIMESTAMP + 4,
                     .MemoryAddress = { &pool->bo, offset + 4 });
      break;

   default:
      /* Everything else is bottom-of-pipe */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DestinationAddressType = DAT_PPGTT,
                     .PostSyncOperation = WriteTimestamp,
                     .Address = { &pool->bo, offset });
      break;
   }

   emit_query_availability(&cmd_buffer->batch, &pool->bo, offset + 16);
}
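/* Helpers for building MI_MATH ALU instructions: each instruction dword
 * encodes the opcode in bits 31:20, operand1 in 19:10 and operand2 in 9:0.
 * The query-copy path below uses them to compute (end - begin) on the
 * command streamer. */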
#define alu_opcode(v)   __gen_uint((v), 20, 31)
#define alu_operand1(v) __gen_uint((v), 10, 19)
#define alu_operand2(v) __gen_uint((v), 0, 9)
#define alu(opcode, operand1, operand2) \
   alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)

#define OPCODE_NOOP     0x000
#define OPCODE_LOAD     0x080
#define OPCODE_LOADINV  0x480
#define OPCODE_LOAD0    0x081
#define OPCODE_LOAD1    0x481
#define OPCODE_ADD      0x100
#define OPCODE_SUB      0x101
#define OPCODE_AND      0x102
#define OPCODE_OR       0x103
#define OPCODE_XOR      0x104
#define OPCODE_STORE    0x180
#define OPCODE_STOREINV 0x580

#define OPERAND_R0   0x00
#define OPERAND_R1   0x01
#define OPERAND_R2   0x02
#define OPERAND_R3   0x03
#define OPERAND_R4   0x04
#define OPERAND_SRCA 0x20
#define OPERAND_SRCB 0x21
#define OPERAND_ACCU 0x31
#define OPERAND_ZF   0x32
#define OPERAND_CF   0x33

#define CS_GPR(n) (0x2600 + (n) * 8)
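/* Load a 64-bit value from memory into a command-streamer GPR as two 32-bit
 * halves. */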
static void
emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
                      struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
                  .RegisterAddress = reg,
                  .MemoryAddress = { bo, offset });
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
                  .RegisterAddress = reg + 4,
                  .MemoryAddress = { bo, offset + 4 });
}
static void
store_query_result(struct anv_batch *batch, uint32_t reg,
                   struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
{
   anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
                  .RegisterAddress = reg,
                  .MemoryAddress = { bo, offset });

   if (flags & VK_QUERY_RESULT_64_BIT)
      anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
                     .RegisterAddress = reg + 4,
                     .MemoryAddress = { bo, offset + 4 });
}
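/* Copy query results on the GPU: load the slot into CS GPRs, compute
 * end - begin with MI_MATH for occlusion queries, and store the value (plus
 * optional availability) to the destination buffer. */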
void genX(CmdCopyQueryPoolResults)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    firstQuery,
    uint32_t                                    queryCount,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset,
    VkDeviceSize                                destStride,
    VkQueryResultFlags                          flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
   uint32_t slot_offset, dst_offset;

   if (flags & VK_QUERY_RESULT_WAIT_BIT)
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .CommandStreamerStallEnable = true,
                     .StallAtPixelScoreboard = true);

   dst_offset = buffer->offset + destOffset;
   for (uint32_t i = 0; i < queryCount; i++) {

      slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
      switch (pool->type) {
      case VK_QUERY_TYPE_OCCLUSION:
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(0), &pool->bo, slot_offset);
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(1), &pool->bo, slot_offset + 8);

         /* FIXME: We need to clamp the result for 32 bit. */

         uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
         dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
         dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
         dw[3] = alu(OPCODE_SUB, 0, 0);
         dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
         break;

      case VK_QUERY_TYPE_TIMESTAMP:
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(2), &pool->bo, slot_offset);
         break;

      default:
         unreachable("unhandled query type");
      }

      store_query_result(&cmd_buffer->batch,
                         CS_GPR(2), buffer->bo, dst_offset, flags);

      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
         emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
                               &pool->bo, slot_offset + 16);
         if (flags & VK_QUERY_RESULT_64_BIT)
            store_query_result(&cmd_buffer->batch,
                               CS_GPR(0), buffer->bo, dst_offset + 8, flags);
         else
            store_query_result(&cmd_buffer->batch,
                               CS_GPR(0), buffer->bo, dst_offset + 4, flags);
      }

      dst_offset += destStride;
   }
}
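/* Events are backed by a dword of dynamic state: set and reset are
 * post-sync PIPE_CONTROL immediate writes, and waits poll the same dword
 * with MI_SEMAPHORE_WAIT. */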
void genX(CmdSetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     _event,
    VkPipelineStageFlags                        stageMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_event, event, _event);

   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WriteImmediateData,
                  .Address = {
                     &cmd_buffer->device->dynamic_state_block_pool.bo,
                     event->state.offset
                  },
                  .ImmediateData = VK_EVENT_SET);
}
void genX(CmdResetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     _event,
    VkPipelineStageFlags                        stageMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_event, event, _event);

   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WriteImmediateData,
                  .Address = {
                     &cmd_buffer->device->dynamic_state_block_pool.bo,
                     event->state.offset
                  },
                  .ImmediateData = VK_EVENT_RESET);
}
void genX(CmdWaitEvents)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    eventCount,
    const VkEvent*                              pEvents,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    uint32_t                                    memoryBarrierCount,
    const VkMemoryBarrier*                      pMemoryBarriers,
    uint32_t                                    bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
    uint32_t                                    imageMemoryBarrierCount,
    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   for (uint32_t i = 0; i < eventCount; i++) {
      ANV_FROM_HANDLE(anv_event, event, pEvents[i]);

      anv_batch_emit(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT),
                     .WaitMode = PollingMode,
                     .CompareOperation = COMPARE_SAD_EQUAL_SDD,
                     .SemaphoreDataDword = VK_EVENT_SET,
                     .SemaphoreAddress = {
                        &cmd_buffer->device->dynamic_state_block_pool.bo,
                        event->state.offset
                     });
   }

   genX(CmdPipelineBarrier)(commandBuffer, srcStageMask, destStageMask,
                            false, /* byRegion */
                            memoryBarrierCount, pMemoryBarriers,
                            bufferMemoryBarrierCount, pBufferMemoryBarriers,
                            imageMemoryBarrierCount, pImageMemoryBarriers);
}