/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"
#include "vk_format_info.h"

#include "common/gen_l3_config.h"
#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"
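
/* Small helpers for loading a GPU register from a buffer object
 * (MI_LOAD_REGISTER_MEM), from an immediate value (MI_LOAD_REGISTER_IMM),
 * or from another register (MI_LOAD_REGISTER_REG).
 */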

static void
emit_lrm(struct anv_batch *batch,
         uint32_t reg, struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress = reg;
      lrm.MemoryAddress   = (struct anv_address) { bo, offset };
   }
}

static void
emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
      lri.RegisterOffset = reg;
      lri.DataDWord      = imm;
   }
}

#if GEN_IS_HASWELL || GEN_GEN >= 8
static void
emit_lrr(struct anv_batch *batch, uint32_t dst, uint32_t src)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_REG), lrr) {
      lrr.SourceRegisterAddress      = src;
      lrr.DestinationRegisterAddress = dst;
   }
}
#endif

void
genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;

   /* Emit a render target cache flush.
    *
    * This isn't documented anywhere in the PRM.  However, it seems to be
    * necessary prior to changing the surface state base address.  Without
    * this, we get GPU hangs when using multi-level command buffers which
    * clear depth, reset state base address, and then go render stuff.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DCFlushEnable = true;
      pc.RenderTargetCacheFlushEnable = true;
      pc.CommandStreamerStallEnable = true;
   }

   anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) {
      sba.GeneralStateBaseAddress = (struct anv_address) { NULL, 0 };
      sba.GeneralStateMemoryObjectControlState = GENX(MOCS);
      sba.GeneralStateBaseAddressModifyEnable = true;

      sba.SurfaceStateBaseAddress =
         anv_cmd_buffer_surface_base_address(cmd_buffer);
      sba.SurfaceStateMemoryObjectControlState = GENX(MOCS);
      sba.SurfaceStateBaseAddressModifyEnable = true;

      sba.DynamicStateBaseAddress =
         (struct anv_address) { &device->dynamic_state_pool.block_pool.bo, 0 };
      sba.DynamicStateMemoryObjectControlState = GENX(MOCS);
      sba.DynamicStateBaseAddressModifyEnable = true;

      sba.IndirectObjectBaseAddress = (struct anv_address) { NULL, 0 };
      sba.IndirectObjectMemoryObjectControlState = GENX(MOCS);
      sba.IndirectObjectBaseAddressModifyEnable = true;

      sba.InstructionBaseAddress =
         (struct anv_address) { &device->instruction_state_pool.block_pool.bo, 0 };
      sba.InstructionMemoryObjectControlState = GENX(MOCS);
      sba.InstructionBaseAddressModifyEnable = true;

#if GEN_GEN >= 8
      /* Broadwell requires that we specify a buffer size for a bunch of
       * these fields.  However, since we will be growing the BO's live, we
       * just set them all to the maximum.
       */
      sba.GeneralStateBufferSize                = 0xfffff;
      sba.GeneralStateBufferSizeModifyEnable    = true;
      sba.DynamicStateBufferSize                = 0xfffff;
      sba.DynamicStateBufferSizeModifyEnable    = true;
      sba.IndirectObjectBufferSize              = 0xfffff;
      sba.IndirectObjectBufferSizeModifyEnable  = true;
      sba.InstructionBufferSize                 = 0xfffff;
      sba.InstructionBuffersizeModifyEnable     = true;
#endif
   }

   /* After re-setting the surface state base address, we have to do some
    * cache flushing so that the sampler engine will pick up the new
    * SURFACE_STATE objects and binding tables.  From the Broadwell PRM,
    * Shared Function > 3D Sampler > State > State Caching (page 96):
    *
    *    Coherency with system memory in the state cache, like the texture
    *    cache is handled partially by software.  It is expected that the
    *    command stream or shader will issue Cache Flush operation or
    *    Cache_Flush sampler message to ensure that the L1 cache remains
    *    coherent with system memory.
    *
    *    [...]
    *
    *    Whenever the value of the Dynamic_State_Base_Addr,
    *    Surface_State_Base_Addr are altered, the L1 state cache must be
    *    invalidated to ensure the new surface or sampler state is fetched
    *    from system memory.
    *
    * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
    * which, according to the PIPE_CONTROL instruction documentation in the
    * Broadwell PRM:
    *
    *    Setting this bit is independent of any other bit in this packet.
    *    This bit controls the invalidation of the L1 and L2 state caches
    *    at the top of the pipe i.e. at the parsing time.
    *
    * Unfortunately, experimentation seems to indicate that state cache
    * invalidation through a PIPE_CONTROL does nothing whatsoever in
    * regards to surface state and binding tables.  Instead, it seems that
    * invalidating the texture cache is what is actually needed.
    *
    * XXX: As far as we have been able to determine through experimentation,
    * flushing the texture cache appears to be sufficient.  The theory here
    * is that all of the sampling/rendering units cache the binding table in
    * the texture cache.  However, we have yet to be able to actually
    * confirm this.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.TextureCacheInvalidationEnable = true;
      pc.ConstantCacheInvalidationEnable = true;
      pc.StateCacheInvalidationEnable = true;
   }
}
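
/* Record a relocation for the buffer address that is baked into a
 * SURFACE_STATE, so the kernel can patch it once the BO receives its final
 * GPU address.  Failures are recorded on the batch.
 */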

static void
add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_state state,
                        struct anv_bo *bo, uint32_t offset)
{
   const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;

   VkResult result =
      anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
                         state.offset + isl_dev->ss.addr_offset, bo, offset);
   if (result != VK_SUCCESS)
      anv_batch_set_error(&cmd_buffer->batch, result);
}

static void
add_image_relocs(struct anv_cmd_buffer * const cmd_buffer,
                 const struct anv_image * const image,
                 const VkImageAspectFlags aspect_mask,
                 const enum isl_aux_usage aux_usage,
                 const struct anv_state state)
{
   const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
   const uint32_t surf_offset = image->offset +
      anv_image_get_surface_for_aspect_mask(image, aspect_mask)->offset;

   add_surface_state_reloc(cmd_buffer, state, image->bo, surf_offset);

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      uint32_t aux_offset = image->offset + image->aux_surface.offset;

      /* On gen7 and prior, the bottom 12 bits of the MCS base address are
       * used to store other information.  This should be ok, however,
       * because surface buffer addresses are always 4K page aligned.
       */
      assert((aux_offset & 0xfff) == 0);
      uint32_t *aux_addr_dw = state.map + isl_dev->ss.aux_addr_offset;
      aux_offset += *aux_addr_dw & 0xfff;

      VkResult result =
         anv_reloc_list_add(&cmd_buffer->surface_relocs,
                            &cmd_buffer->pool->alloc,
                            state.offset + isl_dev->ss.aux_addr_offset,
                            image->bo, aux_offset);
      if (result != VK_SUCCESS)
         anv_batch_set_error(&cmd_buffer->batch, result);
   }
}
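
/* Returns true if every channel of the clear color is exactly 0 or 1;
 * Broadwell and earlier can only fast-clear with such colors (see
 * color_attachment_compute_aux_usage below).
 */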

static bool
color_is_zero_one(VkClearColorValue value, enum isl_format format)
{
   if (isl_format_has_int_channel(format)) {
      for (unsigned i = 0; i < 4; i++) {
         if (value.int32[i] != 0 && value.int32[i] != 1)
            return false;
      }
   } else {
      for (unsigned i = 0; i < 4; i++) {
         if (value.float32[i] != 0.0f && value.float32[i] != 1.0f)
            return false;
      }
   }

   return true;
}
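
/* Pick the auxiliary surface usage (MCS, CCS_E, CCS_D or none) for a color
 * attachment and decide whether its pending clear can be performed as a
 * fast clear, based on the render area, miplevel/layer range and the clear
 * color itself.
 */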

static void
color_attachment_compute_aux_usage(struct anv_device *device,
                                   struct anv_attachment_state *att_state,
                                   struct anv_image_view *iview,
                                   VkRect2D render_area,
                                   union isl_color_value *fast_clear_color)
{
   if (iview->image->aux_surface.isl.size == 0) {
      att_state->aux_usage = ISL_AUX_USAGE_NONE;
      att_state->input_aux_usage = ISL_AUX_USAGE_NONE;
      att_state->fast_clear = false;
      return;
   } else if (iview->image->aux_usage == ISL_AUX_USAGE_MCS) {
      att_state->aux_usage = ISL_AUX_USAGE_MCS;
      att_state->input_aux_usage = ISL_AUX_USAGE_MCS;
      att_state->fast_clear = false;
      return;
   }

   assert(iview->image->aux_surface.isl.usage & ISL_SURF_USAGE_CCS_BIT);

   att_state->clear_color_is_zero_one =
      color_is_zero_one(att_state->clear_value.color, iview->isl.format);

   if (att_state->pending_clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
      /* Start off assuming fast clears are possible */
      att_state->fast_clear = true;

      /* Potentially, we could do partial fast-clears but doing so has crazy
       * alignment restrictions.  It's easier to just restrict to full size
       * fast clears for now.
       */
      if (render_area.offset.x != 0 ||
          render_area.offset.y != 0 ||
          render_area.extent.width != iview->extent.width ||
          render_area.extent.height != iview->extent.height)
         att_state->fast_clear = false;

      /* On gen7, we can't do multi-LOD or multi-layer fast-clears.  We
       * technically can, but it comes with crazy restrictions that we
       * don't want to deal with now.
       */
      if (iview->isl.base_level > 0 ||
          iview->isl.base_array_layer > 0 ||
          iview->isl.array_len > 1)
         att_state->fast_clear = false;

      /* On Broadwell and earlier, we can only handle 0/1 clear colors */
      if (GEN_GEN <= 8 && !att_state->clear_color_is_zero_one)
         att_state->fast_clear = false;

      if (att_state->fast_clear) {
         memcpy(fast_clear_color->u32, att_state->clear_value.color.uint32,
                sizeof(fast_clear_color->u32));
      }
   } else {
      att_state->fast_clear = false;
   }

   /**
    * TODO: Consider using a heuristic to determine if temporarily enabling
    * CCS_E for this image view would be beneficial.
    *
    * While fast-clear resolves and partial resolves are fairly cheap in the
    * case where you render to most of the pixels, full resolves are not
    * because they potentially involve reading and writing the entire
    * framebuffer.  If we can't texture with CCS_E, we should leave it off
    * and limit ourselves to fast clears.
    */
   if (iview->image->aux_usage == ISL_AUX_USAGE_CCS_E) {
      att_state->aux_usage = ISL_AUX_USAGE_CCS_E;
      att_state->input_aux_usage = ISL_AUX_USAGE_CCS_E;
   } else if (att_state->fast_clear) {
      att_state->aux_usage = ISL_AUX_USAGE_CCS_D;
      /* From the Sky Lake PRM, RENDER_SURFACE_STATE::AuxiliarySurfaceMode:
       *
       *    "If Number of Multisamples is MULTISAMPLECOUNT_1, AUX_CCS_D
       *    setting is only allowed if Surface Format supported for Fast
       *    Clear. In addition, if the surface is bound to the sampling
       *    engine, Surface Format must be supported for Render Target
       *    Compression for surfaces bound to the sampling engine."
       *
       * In other words, we can only sample from a fast-cleared image if it
       * also supports color compression.
       */
      if (isl_format_supports_ccs_e(&device->info, iview->isl.format))
         att_state->input_aux_usage = ISL_AUX_USAGE_CCS_D;
      else
         att_state->input_aux_usage = ISL_AUX_USAGE_NONE;
   } else {
      att_state->aux_usage = ISL_AUX_USAGE_NONE;
      att_state->input_aux_usage = ISL_AUX_USAGE_NONE;
   }
}

static bool
need_input_attachment_state(const struct anv_render_pass_attachment *att)
{
   if (!(att->usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT))
      return false;

   /* We only allocate input attachment states for color surfaces.
    * Compression is not yet enabled for depth textures and stencil doesn't
    * allow compression so we can just use the texture surface state from
    * the view.
    */
   return vk_format_is_color(att->format);
}

/* Transitions a HiZ-enabled depth buffer from one layout to another.  Unless
 * the initial layout is undefined, the HiZ buffer and depth buffer will
 * represent the same data at the end of this operation.
 */
static void
transition_depth_buffer(struct anv_cmd_buffer *cmd_buffer,
                        const struct anv_image *image,
                        VkImageLayout initial_layout,
                        VkImageLayout final_layout)
{
   /* A transition is a no-op if HiZ is not enabled, or if the initial and
    * final layouts are equal.
    *
    * The undefined layout indicates that the user doesn't care about the
    * data that's currently in the buffer.  Therefore, a data-preserving
    * resolve operation is not needed.
    */
   if (image->aux_usage != ISL_AUX_USAGE_HIZ || initial_layout == final_layout)
      return;

   const bool hiz_enabled = ISL_AUX_USAGE_HIZ ==
      anv_layout_to_aux_usage(&cmd_buffer->device->info, image, image->aspects,
                              initial_layout);
   const bool enable_hiz = ISL_AUX_USAGE_HIZ ==
      anv_layout_to_aux_usage(&cmd_buffer->device->info, image, image->aspects,
                              final_layout);

   enum blorp_hiz_op hiz_op;
   if (hiz_enabled && !enable_hiz) {
      hiz_op = BLORP_HIZ_OP_DEPTH_RESOLVE;
   } else if (!hiz_enabled && enable_hiz) {
      hiz_op = BLORP_HIZ_OP_HIZ_RESOLVE;
   } else {
      assert(hiz_enabled == enable_hiz);
      /* If the same buffer will be used, no resolves are necessary. */
      hiz_op = BLORP_HIZ_OP_NONE;
   }

   if (hiz_op != BLORP_HIZ_OP_NONE)
      anv_gen8_hiz_op_resolve(cmd_buffer, image, hiz_op);
}

static void
transition_color_buffer(struct anv_cmd_buffer *cmd_buffer,
                        const struct anv_image *image,
                        const uint32_t base_level, uint32_t level_count,
                        uint32_t base_layer, uint32_t layer_count,
                        VkImageLayout initial_layout,
                        VkImageLayout final_layout)
{
   if (image->aux_usage != ISL_AUX_USAGE_CCS_E)
      return;

   if (initial_layout != VK_IMAGE_LAYOUT_UNDEFINED &&
       initial_layout != VK_IMAGE_LAYOUT_PREINITIALIZED)
      return;

   /* A transition of a 3D subresource works on all slices at a time. */
   if (image->type == VK_IMAGE_TYPE_3D) {
      base_layer = 0;
      layer_count = anv_minify(image->extent.depth, base_level);
   }

   /* We're transitioning from an undefined layout so it doesn't really
    * matter what data ends up in the color buffer.  We do, however, need to
    * ensure that the CCS has valid data in it.  One easy way to do that is
    * to fast-clear the specified range.
    */
   anv_image_ccs_clear(cmd_buffer, image, base_level, level_count,
                       base_layer, layer_count);
}
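
/* Note on layout: the per-render-pass surface states allocated below live
 * in a single anv_state_stream allocation.  Slot 0 holds the null surface
 * state, followed by one RENDER_SURFACE_STATE per color attachment and one
 * per input attachment, each ss_stride (aligned SURFACE_STATE size) apart.
 */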

/**
 * Setup anv_cmd_state::attachments for vkCmdBeginRenderPass.
 */
static VkResult
genX(cmd_buffer_setup_attachments)(struct anv_cmd_buffer *cmd_buffer,
                                   struct anv_render_pass *pass,
                                   const VkRenderPassBeginInfo *begin)
{
   const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
   struct anv_cmd_state *state = &cmd_buffer->state;

   vk_free(&cmd_buffer->pool->alloc, state->attachments);

   if (pass->attachment_count > 0) {
      state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
                                    pass->attachment_count *
                                       sizeof(state->attachments[0]),
                                    8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (state->attachments == NULL) {
         /* Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
         return anv_batch_set_error(&cmd_buffer->batch,
                                    VK_ERROR_OUT_OF_HOST_MEMORY);
      }
   } else {
      state->attachments = NULL;
   }

   /* Reserve one for the NULL state. */
   unsigned num_states = 1;
   for (uint32_t i = 0; i < pass->attachment_count; ++i) {
      if (vk_format_is_color(pass->attachments[i].format))
         num_states++;

      if (need_input_attachment_state(&pass->attachments[i]))
         num_states++;
   }

   const uint32_t ss_stride = align_u32(isl_dev->ss.size, isl_dev->ss.align);
   state->render_pass_states =
      anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
                             num_states * ss_stride, isl_dev->ss.align);

   struct anv_state next_state = state->render_pass_states;
   next_state.alloc_size = isl_dev->ss.size;

   state->null_surface_state = next_state;
   next_state.offset += ss_stride;
   next_state.map += ss_stride;

   for (uint32_t i = 0; i < pass->attachment_count; ++i) {
      if (vk_format_is_color(pass->attachments[i].format)) {
         state->attachments[i].color_rt_state = next_state;
         next_state.offset += ss_stride;
         next_state.map += ss_stride;
      }

      if (need_input_attachment_state(&pass->attachments[i])) {
         state->attachments[i].input_att_state = next_state;
         next_state.offset += ss_stride;
         next_state.map += ss_stride;
      }
   }
   assert(next_state.offset == state->render_pass_states.offset +
                               state->render_pass_states.alloc_size);

   if (begin) {
      ANV_FROM_HANDLE(anv_framebuffer, framebuffer, begin->framebuffer);
      assert(pass->attachment_count == framebuffer->attachment_count);

      struct GENX(RENDER_SURFACE_STATE) null_ss = {
         .SurfaceType = SURFTYPE_NULL,
         .SurfaceArray = framebuffer->layers > 0,
         .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM,
         .TiledSurface = true,
         .Width = framebuffer->width - 1,
         .Height = framebuffer->height - 1,
         .Depth = framebuffer->layers - 1,
         .RenderTargetViewExtent = framebuffer->layers - 1,
      };
      GENX(RENDER_SURFACE_STATE_pack)(NULL, state->null_surface_state.map,
                                      &null_ss);

      for (uint32_t i = 0; i < pass->attachment_count; ++i) {
         struct anv_render_pass_attachment *att = &pass->attachments[i];
         VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
         VkImageAspectFlags clear_aspects = 0;

         if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
            /* color attachment */
            if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
               clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
            }
         } else {
            /* depthstencil attachment */
            if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
                att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
               clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
            }
            if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
                att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
               clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
            }
         }

         state->attachments[i].current_layout = att->initial_layout;
         state->attachments[i].pending_clear_aspects = clear_aspects;
         if (clear_aspects)
            state->attachments[i].clear_value = begin->pClearValues[i];

         struct anv_image_view *iview = framebuffer->attachments[i];
         anv_assert(iview->vk_format == att->format);

         union isl_color_value clear_color = { .u32 = { 0, } };
         if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
            color_attachment_compute_aux_usage(cmd_buffer->device,
                                               &state->attachments[i],
                                               iview, begin->renderArea,
                                               &clear_color);

            struct isl_view view = iview->isl;
            view.usage |= ISL_SURF_USAGE_RENDER_TARGET_BIT;
            view.swizzle = anv_swizzle_for_render(view.swizzle);
            isl_surf_fill_state(isl_dev,
                                state->attachments[i].color_rt_state.map,
                                .surf = &iview->image->color_surface.isl,
                                .view = &view,
                                .aux_surf = &iview->image->aux_surface.isl,
                                .aux_usage = state->attachments[i].aux_usage,
                                .clear_color = clear_color,
                                .mocs = cmd_buffer->device->default_mocs);

            add_image_relocs(cmd_buffer, iview->image, iview->aspect_mask,
                             state->attachments[i].aux_usage,
                             state->attachments[i].color_rt_state);
         } else {
            /* This field will be initialized after the first subpass
             * transition.
             */
            state->attachments[i].aux_usage = ISL_AUX_USAGE_NONE;

            state->attachments[i].input_aux_usage = ISL_AUX_USAGE_NONE;
         }

         if (need_input_attachment_state(&pass->attachments[i])) {
            struct isl_view view = iview->isl;
            view.usage |= ISL_SURF_USAGE_TEXTURE_BIT;
            isl_surf_fill_state(isl_dev,
                                state->attachments[i].input_att_state.map,
                                .surf = &iview->image->color_surface.isl,
                                .view = &view,
                                .aux_surf = &iview->image->aux_surface.isl,
                                .aux_usage = state->attachments[i].input_aux_usage,
                                .clear_color = clear_color,
                                .mocs = cmd_buffer->device->default_mocs);

            add_image_relocs(cmd_buffer, iview->image, iview->aspect_mask,
                             state->attachments[i].input_aux_usage,
                             state->attachments[i].input_att_state);
         }
      }

      anv_state_flush(cmd_buffer->device, state->render_pass_states);
   }

   return VK_SUCCESS;
}

VkResult
genX(BeginCommandBuffer)(
    VkCommandBuffer                             commandBuffer,
    const VkCommandBufferBeginInfo*             pBeginInfo)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* If this is the first vkBeginCommandBuffer, we must *initialize* the
    * command buffer's state.  Otherwise, we must *reset* its state.  In
    * both cases we reset it.
    *
    * From the Vulkan 1.0 spec:
    *
    *    If a command buffer is in the executable state and the command
    *    buffer was allocated from a command pool with the
    *    VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT flag set, then
    *    vkBeginCommandBuffer implicitly resets the command buffer, behaving
    *    as if vkResetCommandBuffer had been called with
    *    VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT not set. It then puts
    *    the command buffer in the recording state.
    */
   anv_cmd_buffer_reset(cmd_buffer);

   cmd_buffer->usage_flags = pBeginInfo->flags;

   assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
          !(cmd_buffer->usage_flags &
            VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT));

   genX(cmd_buffer_emit_state_base_address)(cmd_buffer);

   /* We sometimes store vertex data in the dynamic state buffer for blorp
    * operations and our dynamic state stream may re-use data from previous
    * command buffers.  In order to prevent stale cache data, we flush the
    * VF cache.  We could do this on every blorp call but that's not really
    * needed as all of the data will get written by the CPU prior to the GPU
    * executing anything.  The chances are fairly high that they will use
    * blorp at least once per primary command buffer so it shouldn't be
    * wasted.
    */
   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
      cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;

   VkResult result = VK_SUCCESS;
   if (cmd_buffer->usage_flags &
       VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
      cmd_buffer->state.pass =
         anv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
      cmd_buffer->state.subpass =
         &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
      cmd_buffer->state.framebuffer = NULL;

      result = genX(cmd_buffer_setup_attachments)(cmd_buffer,
                                                  cmd_buffer->state.pass, NULL);

      cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
   }

   return result;
}

VkResult
genX(EndCommandBuffer)(
    VkCommandBuffer                             commandBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (anv_batch_has_error(&cmd_buffer->batch))
      return cmd_buffer->batch.status;

   /* We want every command buffer to start with the PMA fix in a known
    * state, so we disable it at the end of the command buffer.
    */
   genX(cmd_buffer_enable_pma_fix)(cmd_buffer, false);

   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

   anv_cmd_buffer_end_batch_buffer(cmd_buffer);

   return VK_SUCCESS;
}

void genX(CmdExecuteCommands)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCmdBuffers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);

   assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);

   if (anv_batch_has_error(&primary->batch))
      return;

   /* The secondary command buffers will assume that the PMA fix is disabled
    * when they begin executing.  Make sure this is true.
    */
   genX(cmd_buffer_enable_pma_fix)(primary, false);

   /* The secondary command buffer doesn't know which textures etc. have
    * been flushed prior to their execution.  Apply those flushes now.
    */
   genX(cmd_buffer_apply_pipe_flushes)(primary);

   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);

      assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
      assert(!anv_batch_has_error(&secondary->batch));

      if (secondary->usage_flags &
          VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
         /* If we're continuing a render pass from the primary, we need to
          * copy the surface states for the current subpass into the storage
          * we allocated for them in BeginCommandBuffer.
          */
         struct anv_bo *ss_bo =
            &primary->device->surface_state_pool.block_pool.bo;
         struct anv_state src_state = primary->state.render_pass_states;
         struct anv_state dst_state = secondary->state.render_pass_states;
         assert(src_state.alloc_size == dst_state.alloc_size);

         genX(cmd_buffer_so_memcpy)(primary, ss_bo, dst_state.offset,
                                    ss_bo, src_state.offset,
                                    src_state.alloc_size);
      }

      anv_cmd_buffer_add_secondary(primary, secondary);
   }

   /* Each of the secondary command buffers will use its own state base
    * address.  We need to re-emit state base address for the primary after
    * all of the secondaries are done.
    *
    * TODO: Maybe we want to make this a dirty bit to avoid extra state base
    * address emits.
    */
   genX(cmd_buffer_emit_state_base_address)(primary);
}

#define IVB_L3SQCREG1_SQGHPCI_DEFAULT  0x00730000
#define VLV_L3SQCREG1_SQGHPCI_DEFAULT  0x00d30000
#define HSW_L3SQCREG1_SQGHPCI_DEFAULT  0x00610000

/**
 * Program the hardware to use the specified L3 configuration.
 */
void
genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer,
                           const struct gen_l3_config *cfg)
{
   if (cfg == cmd_buffer->state.current_l3_config)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_L3)) {
      fprintf(stderr, "L3 config transition: ");
      gen_dump_l3_config(cfg, stderr);
   }

   const bool has_slm = cfg->n[GEN_L3P_SLM];

   /* According to the hardware docs, the L3 partitioning can only be
    * changed while the pipeline is completely drained and the caches are
    * flushed, which involves a first PIPE_CONTROL flush which stalls the
    * pipeline...
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DCFlushEnable = true;
      pc.PostSyncOperation = NoWrite;
      pc.CommandStreamerStallEnable = true;
   }

   /* ...followed by a second pipelined PIPE_CONTROL that initiates
    * invalidation of the relevant caches.  Note that because RO invalidation
    * happens at the top of the pipeline (i.e. right away as the PIPE_CONTROL
    * command is processed by the CS) we cannot combine it with the previous
    * stalling flush as the hardware documentation suggests, because that
    * would cause the CS to stall on previous rendering *after* RO
    * invalidation and wouldn't prevent the RO caches from being polluted by
    * concurrent rendering before the stall completes.  This intentionally
    * doesn't implement the SKL+ hardware workaround suggesting to enable CS
    * stall on PIPE_CONTROLs with the texture cache invalidation bit set for
    * GPGPU workloads because the previous and subsequent PIPE_CONTROLs
    * already guarantee that there is no concurrent GPGPU kernel execution
    * (see SKL HSD 2132585).
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.TextureCacheInvalidationEnable = true;
      pc.ConstantCacheInvalidationEnable = true;
      pc.InstructionCacheInvalidateEnable = true;
      pc.StateCacheInvalidationEnable = true;
      pc.PostSyncOperation = NoWrite;
   }

   /* Now send a third stalling flush to make sure that invalidation is
    * complete when the L3 configuration registers are modified.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DCFlushEnable = true;
      pc.PostSyncOperation = NoWrite;
      pc.CommandStreamerStallEnable = true;
   }

#if GEN_GEN >= 8

   assert(!cfg->n[GEN_L3P_IS] && !cfg->n[GEN_L3P_C] && !cfg->n[GEN_L3P_T]);

   uint32_t l3cr;
   anv_pack_struct(&l3cr, GENX(L3CNTLREG),
                   .SLMEnable = has_slm,
                   .URBAllocation = cfg->n[GEN_L3P_URB],
                   .ROAllocation = cfg->n[GEN_L3P_RO],
                   .DCAllocation = cfg->n[GEN_L3P_DC],
                   .AllAllocation = cfg->n[GEN_L3P_ALL]);

   /* Set up the L3 partitioning. */
   emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG_num), l3cr);

#else

   const bool has_dc = cfg->n[GEN_L3P_DC] || cfg->n[GEN_L3P_ALL];
   const bool has_is = cfg->n[GEN_L3P_IS] || cfg->n[GEN_L3P_RO] ||
                       cfg->n[GEN_L3P_ALL];
   const bool has_c = cfg->n[GEN_L3P_C] || cfg->n[GEN_L3P_RO] ||
                      cfg->n[GEN_L3P_ALL];
   const bool has_t = cfg->n[GEN_L3P_T] || cfg->n[GEN_L3P_RO] ||
                      cfg->n[GEN_L3P_ALL];

   assert(!cfg->n[GEN_L3P_ALL]);

   /* When enabled SLM only uses a portion of the L3 on half of the banks,
    * the matching space on the remaining banks has to be allocated to a
    * client (URB for all validated configurations) set to the
    * lower-bandwidth 2-bank address hashing mode.
    */
   const struct gen_device_info *devinfo = &cmd_buffer->device->info;
   const bool urb_low_bw = has_slm && !devinfo->is_baytrail;
   assert(!urb_low_bw || cfg->n[GEN_L3P_URB] == cfg->n[GEN_L3P_SLM]);

   /* Minimum number of ways that can be allocated to the URB. */
   MAYBE_UNUSED const unsigned n0_urb = devinfo->is_baytrail ? 32 : 0;
   assert(cfg->n[GEN_L3P_URB] >= n0_urb);

   uint32_t l3sqcr1, l3cr2, l3cr3;
   anv_pack_struct(&l3sqcr1, GENX(L3SQCREG1),
                   .ConvertDC_UC = !has_dc,
                   .ConvertIS_UC = !has_is,
                   .ConvertC_UC = !has_c,
                   .ConvertT_UC = !has_t);
   l3sqcr1 |=
      GEN_IS_HASWELL ? HSW_L3SQCREG1_SQGHPCI_DEFAULT :
      devinfo->is_baytrail ? VLV_L3SQCREG1_SQGHPCI_DEFAULT :
      IVB_L3SQCREG1_SQGHPCI_DEFAULT;

   anv_pack_struct(&l3cr2, GENX(L3CNTLREG2),
                   .SLMEnable = has_slm,
                   .URBLowBandwidth = urb_low_bw,
                   .URBAllocation = cfg->n[GEN_L3P_URB] - n0_urb,
                   .ALLAllocation = cfg->n[GEN_L3P_ALL],
                   .ROAllocation = cfg->n[GEN_L3P_RO],
                   .DCAllocation = cfg->n[GEN_L3P_DC]);

   anv_pack_struct(&l3cr3, GENX(L3CNTLREG3),
                   .ISAllocation = cfg->n[GEN_L3P_IS],
                   .CAllocation = cfg->n[GEN_L3P_C],
                   .TAllocation = cfg->n[GEN_L3P_T]);

   /* Set up the L3 partitioning. */
   emit_lri(&cmd_buffer->batch, GENX(L3SQCREG1_num), l3sqcr1);
   emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG2_num), l3cr2);
   emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG3_num), l3cr3);

#if GEN_IS_HASWELL
   if (cmd_buffer->device->instance->physicalDevice.cmd_parser_version >= 4) {
      /* Enable L3 atomics on HSW if we have a DC partition, otherwise keep
       * them disabled to avoid crashing the system hard.
       */
      uint32_t scratch1, chicken3;
      anv_pack_struct(&scratch1, GENX(SCRATCH1),
                      .L3AtomicDisable = !has_dc);
      anv_pack_struct(&chicken3, GENX(CHICKEN3),
                      .L3AtomicDisableMask = true,
                      .L3AtomicDisable = !has_dc);
      emit_lri(&cmd_buffer->batch, GENX(SCRATCH1_num), scratch1);
      emit_lri(&cmd_buffer->batch, GENX(CHICKEN3_num), chicken3);
   }
#endif

#endif

   cmd_buffer->state.current_l3_config = cfg;
}
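
/* Turn the accumulated pending_pipe_bits into actual PIPE_CONTROLs.  Flushes
 * (which are pipelined) are emitted first and force a CS stall before any
 * invalidations, which take effect immediately at the top of the pipe.
 */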

void
genX(cmd_buffer_apply_pipe_flushes)(struct anv_cmd_buffer *cmd_buffer)
{
   enum anv_pipe_bits bits = cmd_buffer->state.pending_pipe_bits;

   /* Flushes are pipelined while invalidations are handled immediately.
    * Therefore, if we're flushing anything then we need to schedule a stall
    * before any invalidations can happen.
    */
   if (bits & ANV_PIPE_FLUSH_BITS)
      bits |= ANV_PIPE_NEEDS_CS_STALL_BIT;

   /* If we're going to do an invalidate and we have a pending CS stall that
    * has yet to be resolved, we do the CS stall now.
    */
   if ((bits & ANV_PIPE_INVALIDATE_BITS) &&
       (bits & ANV_PIPE_NEEDS_CS_STALL_BIT)) {
      bits |= ANV_PIPE_CS_STALL_BIT;
      bits &= ~ANV_PIPE_NEEDS_CS_STALL_BIT;
   }

   if (bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT)) {
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
         pipe.DepthCacheFlushEnable = bits & ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
         pipe.DCFlushEnable = bits & ANV_PIPE_DATA_CACHE_FLUSH_BIT;
         pipe.RenderTargetCacheFlushEnable =
            bits & ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;

         pipe.DepthStallEnable = bits & ANV_PIPE_DEPTH_STALL_BIT;
         pipe.CommandStreamerStallEnable = bits & ANV_PIPE_CS_STALL_BIT;
         pipe.StallAtPixelScoreboard = bits & ANV_PIPE_STALL_AT_SCOREBOARD_BIT;

         /*
          * According to the Broadwell documentation, any PIPE_CONTROL with
          * the "Command Streamer Stall" bit set must also have another bit
          * set, with five different options:
          *
          *  - Render Target Cache Flush
          *  - Depth Cache Flush
          *  - Stall at Pixel Scoreboard
          *  - Post-Sync Operation
          *  - Depth Stall
          *  - DC Flush Enable
          *
          * I chose "Stall at Pixel Scoreboard" since that's what we use in
          * mesa and it seems to work fine.  The choice is fairly arbitrary.
          */
         if ((bits & ANV_PIPE_CS_STALL_BIT) &&
             !(bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_DEPTH_STALL_BIT |
                       ANV_PIPE_STALL_AT_SCOREBOARD_BIT)))
            pipe.StallAtPixelScoreboard = true;
      }

      bits &= ~(ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT);
   }

   if (bits & ANV_PIPE_INVALIDATE_BITS) {
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
         pipe.StateCacheInvalidationEnable =
            bits & ANV_PIPE_STATE_CACHE_INVALIDATE_BIT;
         pipe.ConstantCacheInvalidationEnable =
            bits & ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
         pipe.VFCacheInvalidationEnable =
            bits & ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
         pipe.TextureCacheInvalidationEnable =
            bits & ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
         pipe.InstructionCacheInvalidateEnable =
            bits & ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT;
      }

      bits &= ~ANV_PIPE_INVALIDATE_BITS;
   }

   cmd_buffer->state.pending_pipe_bits = bits;
}
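
/* vkCmdPipelineBarrier: perform any required depth/color layout transitions
 * for the image barriers, then fold all source/destination access masks into
 * pending flush and invalidate bits for the next apply_pipe_flushes().
 */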

void genX(CmdPipelineBarrier)(
    VkCommandBuffer                             commandBuffer,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    VkDependencyFlags                           dependencyFlags,
    uint32_t                                    memoryBarrierCount,
    const VkMemoryBarrier*                      pMemoryBarriers,
    uint32_t                                    bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
    uint32_t                                    imageMemoryBarrierCount,
    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* XXX: Right now, we're really dumb and just flush whatever categories
    * the app asks for.  One of these days we may make this a bit better
    * but right now that's all the hardware allows for in most areas.
    */
   VkAccessFlags src_flags = 0;
   VkAccessFlags dst_flags = 0;

   for (uint32_t i = 0; i < memoryBarrierCount; i++) {
      src_flags |= pMemoryBarriers[i].srcAccessMask;
      dst_flags |= pMemoryBarriers[i].dstAccessMask;
   }

   for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
      src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
      dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
   }

   for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
      src_flags |= pImageMemoryBarriers[i].srcAccessMask;
      dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
      ANV_FROM_HANDLE(anv_image, image, pImageMemoryBarriers[i].image);
      const VkImageSubresourceRange *range =
         &pImageMemoryBarriers[i].subresourceRange;

      if (range->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
         transition_depth_buffer(cmd_buffer, image,
                                 pImageMemoryBarriers[i].oldLayout,
                                 pImageMemoryBarriers[i].newLayout);
      } else if (range->aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
         transition_color_buffer(cmd_buffer, image,
                                 range->baseMipLevel,
                                 anv_get_levelCount(image, range),
                                 range->baseArrayLayer,
                                 anv_get_layerCount(image, range),
                                 pImageMemoryBarriers[i].oldLayout,
                                 pImageMemoryBarriers[i].newLayout);
      }
   }

   cmd_buffer->state.pending_pipe_bits |=
      anv_pipe_flush_bits_for_access_flags(src_flags) |
      anv_pipe_invalidate_bits_for_access_flags(dst_flags);
}
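
/* Example (illustrative numbers only): on a 32 KB part with VS, GS and FS
 * active, num_stages = 3 and size_per_stage = 32 / 3 = 10 KB (already a
 * multiple of 2 KB).  The VS..GS loop below then allocates VS = [0, 10) KB
 * and GS = [10, 20) KB, and the PS allocation receives the remaining
 * [20, 32) KB.
 */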

static void
cmd_buffer_alloc_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   VkShaderStageFlags stages = cmd_buffer->state.pipeline->active_stages;

   /* In order to avoid thrash, we assume that vertex and fragment stages
    * always exist.  In the rare case where one is missing *and* the other
    * uses push constants, this may be suboptimal.  However, avoiding stalls
    * seems more important.
    */
   stages |= VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_VERTEX_BIT;

   if (stages == cmd_buffer->state.push_constant_stages)
      return;

#if GEN_GEN >= 8
   const unsigned push_constant_kb = 32;
#elif GEN_IS_HASWELL
   const unsigned push_constant_kb = cmd_buffer->device->info.gt == 3 ? 32 : 16;
#else
   const unsigned push_constant_kb = 16;
#endif

   const unsigned num_stages =
      _mesa_bitcount(stages & VK_SHADER_STAGE_ALL_GRAPHICS);
   unsigned size_per_stage = push_constant_kb / num_stages;

   /* Broadwell+ and Haswell gt3 require that the push constant sizes be in
    * units of 2KB.  Incidentally, these are the same platforms that have
    * 32KB worth of push constant space.
    */
   if (push_constant_kb == 32)
      size_per_stage &= ~1u;

   uint32_t kb_used = 0;
   for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_FRAGMENT; i++) {
      unsigned push_size = (stages & (1 << i)) ? size_per_stage : 0;
      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
         alloc._3DCommandSubOpcode  = 18 + i;
         alloc.ConstantBufferOffset = (push_size > 0) ? kb_used : 0;
         alloc.ConstantBufferSize   = push_size;
      }
      kb_used += push_size;
   }

   anv_batch_emit(&cmd_buffer->batch,
                  GENX(3DSTATE_PUSH_CONSTANT_ALLOC_PS), alloc) {
      alloc.ConstantBufferOffset = kb_used;
      alloc.ConstantBufferSize = push_constant_kb - kb_used;
   }

   cmd_buffer->state.push_constant_stages = stages;

   /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
    *
    *    "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
    *    the next 3DPRIMITIVE command after programming the
    *    3DSTATE_PUSH_CONSTANT_ALLOC_VS"
    *
    * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
    * pipeline setup, we need to dirty push constants.
    */
   cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
}
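
/* Build the binding table for one shader stage: every surface slot in the
 * pipeline's bind map gets the offset of a SURFACE_STATE, covering render
 * targets, sampled and storage images, buffer views and dynamic buffers.
 */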

static VkResult
emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
                   gl_shader_stage stage,
                   struct anv_state *bt_state)
{
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   struct anv_pipeline *pipeline;
   uint32_t bias, state_offset;

   switch (stage) {
   case MESA_SHADER_COMPUTE:
      pipeline = cmd_buffer->state.compute_pipeline;
      bias = 1;
      break;
   default:
      pipeline = cmd_buffer->state.pipeline;
      bias = 0;
      break;
   }

   if (!anv_pipeline_has_stage(pipeline, stage)) {
      *bt_state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map;
   if (bias + map->surface_count == 0) {
      *bt_state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
                                                  bias + map->surface_count,
                                                  &state_offset);
   uint32_t *bt_map = bt_state->map;

   if (bt_state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   if (stage == MESA_SHADER_COMPUTE &&
       get_cs_prog_data(cmd_buffer->state.compute_pipeline)->uses_num_work_groups) {
      struct anv_bo *bo = cmd_buffer->state.num_workgroups_bo;
      uint32_t bo_offset = cmd_buffer->state.num_workgroups_offset;

      struct anv_state surface_state;
      surface_state =
         anv_cmd_buffer_alloc_surface_state(cmd_buffer);

      const enum isl_format format =
         anv_isl_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
      anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
                                    format, bo_offset, 12, 1);

      bt_map[0] = surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
   }

   if (map->surface_count == 0)
      goto out;

   if (map->image_count > 0) {
      VkResult result =
         anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images);
      if (result != VK_SUCCESS)
         return result;

      cmd_buffer->state.push_constants_dirty |= 1 << stage;
   }

   uint32_t image = 0;
   for (uint32_t s = 0; s < map->surface_count; s++) {
      struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];

      struct anv_state surface_state;

      if (binding->set == ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS) {
         /* Color attachment binding */
         assert(stage == MESA_SHADER_FRAGMENT);
         assert(binding->binding == 0);
         if (binding->index < subpass->color_count) {
            const unsigned att =
               subpass->color_attachments[binding->index].attachment;

            /* From the Vulkan 1.0.46 spec:
             *
             *    "If any color or depth/stencil attachments are
             *    VK_ATTACHMENT_UNUSED, then no writes occur for those
             *    attachments."
             */
            if (att == VK_ATTACHMENT_UNUSED) {
               surface_state = cmd_buffer->state.null_surface_state;
            } else {
               surface_state = cmd_buffer->state.attachments[att].color_rt_state;
            }
         } else {
            surface_state = cmd_buffer->state.null_surface_state;
         }

         bt_map[bias + s] = surface_state.offset + state_offset;
         continue;
      }

      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      uint32_t offset = set->layout->binding[binding->binding].descriptor_index;
      struct anv_descriptor *desc = &set->descriptors[offset + binding->index];

      switch (desc->type) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         /* Nothing for us to do here */
         continue;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
         surface_state = desc->aux_usage == ISL_AUX_USAGE_NONE ?
            desc->image_view->no_aux_sampler_surface_state :
            desc->image_view->sampler_surface_state;
         assert(surface_state.alloc_size);
         add_image_relocs(cmd_buffer, desc->image_view->image,
                          desc->image_view->aspect_mask,
                          desc->aux_usage, surface_state);
         break;

      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         assert(stage == MESA_SHADER_FRAGMENT);
         if (desc->image_view->aspect_mask != VK_IMAGE_ASPECT_COLOR_BIT) {
            /* For depth and stencil input attachments, we treat it like any
             * old texture that a user may have bound.
             */
            surface_state = desc->aux_usage == ISL_AUX_USAGE_NONE ?
               desc->image_view->no_aux_sampler_surface_state :
               desc->image_view->sampler_surface_state;
            assert(surface_state.alloc_size);
            add_image_relocs(cmd_buffer, desc->image_view->image,
                             desc->image_view->aspect_mask,
                             desc->aux_usage, surface_state);
         } else {
            /* For color input attachments, we create the surface state at
             * vkBeginRenderPass time so that we can include aux and clear
             * color information.
             */
            assert(binding->input_attachment_index < subpass->input_count);
            const unsigned subpass_att = binding->input_attachment_index;
            const unsigned att = subpass->input_attachments[subpass_att].attachment;
            surface_state = cmd_buffer->state.attachments[att].input_att_state;
         }
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
         surface_state = (binding->write_only)
            ? desc->image_view->writeonly_storage_surface_state
            : desc->image_view->storage_surface_state;
         assert(surface_state.alloc_size);
         add_image_relocs(cmd_buffer, desc->image_view->image,
                          desc->image_view->aspect_mask,
                          desc->image_view->image->aux_usage, surface_state);

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         *image_param = desc->image_view->storage_image_param;
         image_param->surface_idx = bias + s;
         break;
      }

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         surface_state = desc->buffer_view->surface_state;
         assert(surface_state.alloc_size);
         add_surface_state_reloc(cmd_buffer, surface_state,
                                 desc->buffer_view->bo,
                                 desc->buffer_view->offset);
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
         uint32_t dynamic_offset_idx =
            pipeline->layout->set[binding->set].dynamic_offset_start +
            set->layout->binding[binding->binding].dynamic_offset_index +
            binding->index;

         /* Compute the offset within the buffer */
         uint64_t offset = desc->offset +
            cmd_buffer->state.dynamic_offsets[dynamic_offset_idx];
         /* Clamp to the buffer size */
         offset = MIN2(offset, desc->buffer->size);
         /* Clamp the range to the buffer size */
         uint32_t range = MIN2(desc->range, desc->buffer->size - offset);

         surface_state =
            anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
         enum isl_format format =
            anv_isl_format_for_descriptor_type(desc->type);

         anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
                                       format, offset, range, 1);
         add_surface_state_reloc(cmd_buffer, surface_state,
                                 desc->buffer->bo,
                                 desc->buffer->offset + offset);
         break;
      }

      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
         surface_state = (binding->write_only)
            ? desc->buffer_view->writeonly_storage_surface_state
            : desc->buffer_view->storage_surface_state;
         assert(surface_state.alloc_size);
         add_surface_state_reloc(cmd_buffer, surface_state,
                                 desc->buffer_view->bo,
                                 desc->buffer_view->offset);

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         *image_param = desc->buffer_view->storage_image_param;
         image_param->surface_idx = bias + s;
         break;
      }

      default:
         assert(!"Invalid descriptor type");
         continue;
      }

      bt_map[bias + s] = surface_state.offset + state_offset;
   }
   assert(image == map->image_count);

 out:
   anv_state_flush(cmd_buffer->device, *bt_state);

   return VK_SUCCESS;
}
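
/* Upload the SAMPLER_STATE table for one shader stage by copying the
 * pre-baked 16-byte sampler state of every bound sampler into dynamic
 * state memory.
 */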

static VkResult
emit_samplers(struct anv_cmd_buffer *cmd_buffer,
              gl_shader_stage stage,
              struct anv_state *state)
{
   struct anv_pipeline *pipeline;

   if (stage == MESA_SHADER_COMPUTE)
      pipeline = cmd_buffer->state.compute_pipeline;
   else
      pipeline = cmd_buffer->state.pipeline;

   if (!anv_pipeline_has_stage(pipeline, stage)) {
      *state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map;
   if (map->sampler_count == 0) {
      *state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   uint32_t size = map->sampler_count * 16;
   *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);

   if (state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   for (uint32_t s = 0; s < map->sampler_count; s++) {
      struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s];
      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      uint32_t offset = set->layout->binding[binding->binding].descriptor_index;
      struct anv_descriptor *desc = &set->descriptors[offset + binding->index];

      if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
          desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
         continue;

      struct anv_sampler *sampler = desc->sampler;

      /* This can happen if we have an unfilled slot since TYPE_SAMPLER
       * happens to be zero.
       */
      if (sampler == NULL)
         continue;

      memcpy(state->map + (s * 16),
             sampler->state, sizeof(sampler->state));
   }

   anv_state_flush(cmd_buffer->device, *state);

   return VK_SUCCESS;
}
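
/* Emit samplers and binding tables for every dirty graphics stage.  If the
 * current binding table block runs out of space, grab a new block, re-emit
 * state base addresses and retry with all active stages.  Returns the mask
 * of stages that were (re-)emitted so their pointers can be re-sent.
 */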

static uint32_t
flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
{
   VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
                              cmd_buffer->state.pipeline->active_stages;

   VkResult result = VK_SUCCESS;
   anv_foreach_stage(s, dirty) {
      result = emit_samplers(cmd_buffer, s, &cmd_buffer->state.samplers[s]);
      if (result != VK_SUCCESS)
         break;
      result = emit_binding_table(cmd_buffer, s,
                                  &cmd_buffer->state.binding_tables[s]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);

      result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      if (result != VK_SUCCESS)
         return 0;

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      genX(cmd_buffer_emit_state_base_address)(cmd_buffer);

      /* Re-emit all active binding tables */
      dirty |= cmd_buffer->state.pipeline->active_stages;
      anv_foreach_stage(s, dirty) {
         result = emit_samplers(cmd_buffer, s, &cmd_buffer->state.samplers[s]);
         if (result != VK_SUCCESS) {
            anv_batch_set_error(&cmd_buffer->batch, result);
            return 0;
         }
         result = emit_binding_table(cmd_buffer, s,
                                     &cmd_buffer->state.binding_tables[s]);
         if (result != VK_SUCCESS) {
            anv_batch_set_error(&cmd_buffer->batch, result);
            return 0;
         }
      }
   }

   cmd_buffer->state.descriptors_dirty &= ~dirty;

   return dirty;
}
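
/* Emit 3DSTATE_SAMPLER_STATE_POINTERS_* and 3DSTATE_BINDING_TABLE_POINTERS_*
 * for the given stages; the per-stage variant is selected by patching the
 * 3D command sub-opcode.
 */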

static void
cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,
                                    uint32_t stages)
{
   static const uint32_t sampler_state_opcodes[] = {
      [MESA_SHADER_VERTEX]    = 43,
      [MESA_SHADER_TESS_CTRL] = 44, /* HS */
      [MESA_SHADER_TESS_EVAL] = 45, /* DS */
      [MESA_SHADER_GEOMETRY]  = 46,
      [MESA_SHADER_FRAGMENT]  = 47,
      [MESA_SHADER_COMPUTE]   = 0,
   };

   static const uint32_t binding_table_opcodes[] = {
      [MESA_SHADER_VERTEX]    = 38,
      [MESA_SHADER_TESS_CTRL] = 39,
      [MESA_SHADER_TESS_EVAL] = 40,
      [MESA_SHADER_GEOMETRY]  = 41,
      [MESA_SHADER_FRAGMENT]  = 42,
      [MESA_SHADER_COMPUTE]   = 0,
   };

   anv_foreach_stage(s, stages) {
      if (cmd_buffer->state.samplers[s].alloc_size > 0) {
         anv_batch_emit(&cmd_buffer->batch,
                        GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ssp) {
            ssp._3DCommandSubOpcode = sampler_state_opcodes[s];
            ssp.PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset;
         }
      }

      /* Always emit binding table pointers if we're asked to, since on SKL
       * this is what flushes push constants. */
      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), btp) {
         btp._3DCommandSubOpcode = binding_table_opcodes[s];
         btp.PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset;
      }
   }
}

static VkShaderStageFlags
cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   static const uint32_t push_constant_opcodes[] = {
      [MESA_SHADER_VERTEX]    = 21,
      [MESA_SHADER_TESS_CTRL] = 25, /* HS */
      [MESA_SHADER_TESS_EVAL] = 26, /* DS */
      [MESA_SHADER_GEOMETRY]  = 22,
      [MESA_SHADER_FRAGMENT]  = 23,
      [MESA_SHADER_COMPUTE]   = 0,
   };

   VkShaderStageFlags flushed = 0;

   anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
      if (stage == MESA_SHADER_COMPUTE)
         continue;

      struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);

      if (state.offset == 0) {
         anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c)
            c._3DCommandSubOpcode = push_constant_opcodes[stage];
      } else {
         anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c) {
            c._3DCommandSubOpcode = push_constant_opcodes[stage],
            c.ConstantBody = (struct GENX(3DSTATE_CONSTANT_BODY)) {
#if GEN_GEN >= 9
               .Buffer[2] = { &cmd_buffer->device->dynamic_state_pool.block_pool.bo, state.offset },
               .ReadLength[2] = DIV_ROUND_UP(state.alloc_size, 32),
#else
               .Buffer[0] = { .offset = state.offset },
               .ReadLength[0] = DIV_ROUND_UP(state.alloc_size, 32),
#endif
            };
         }
      }

      flushed |= mesa_to_vk_shader_stage(stage);
   }

   cmd_buffer->state.push_constants_dirty &= ~VK_SHADER_STAGE_ALL_GRAPHICS;

   return flushed;
}
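
/* Flush all dirty 3D state before a draw: vertex buffers, the pipeline
 * batch, descriptors, push constants, viewport/scissor state and any
 * pending pipe flushes.
 */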

void
genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   uint32_t *p;

   uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;

   assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);

   genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->urb.l3_config);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   if (vb_emit) {
      const uint32_t num_buffers = __builtin_popcount(vb_emit);
      const uint32_t num_dwords = 1 + num_buffers * 4;

      p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
                          GENX(3DSTATE_VERTEX_BUFFERS));
      uint32_t vb, i = 0;
      for_each_bit(vb, vb_emit) {
         struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
         uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;

         struct GENX(VERTEX_BUFFER_STATE) state = {
            .VertexBufferIndex = vb,

#if GEN_GEN >= 8
            .MemoryObjectControlState = GENX(MOCS),
#else
            .BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA,
            /* Our implementation of VK_KHR_multiview uses instancing to draw
             * the different views.  If the client asks for instancing, we
             * need to use the Instance Data Step Rate to ensure that we
             * repeat the client's per-instance data once for each view.
             */
            .InstanceDataStepRate = anv_subpass_view_count(pipeline->subpass),
            .VertexBufferMemoryObjectControlState = GENX(MOCS),
#endif

            .AddressModifyEnable = true,
            .BufferPitch = pipeline->binding_stride[vb],
            .BufferStartingAddress = { buffer->bo, buffer->offset + offset },

#if GEN_GEN >= 8
            .BufferSize = buffer->size - offset
#else
            .EndAddress = { buffer->bo, buffer->offset + buffer->size - 1},
#endif
         };

         GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
         i++;
      }
   }

   cmd_buffer->state.vb_dirty &= ~vb_emit;

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

      /* The exact descriptor layout is pulled from the pipeline, so we need
       * to re-emit binding tables on every pipeline change.
       */
      cmd_buffer->state.descriptors_dirty |=
         cmd_buffer->state.pipeline->active_stages;

      /* If the pipeline changed, we may need to re-allocate push constant
       * space in the URB.
       */
      cmd_buffer_alloc_push_constants(cmd_buffer);
   }

   if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
       cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
      /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
       *
       *    "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
       *    stall needs to be sent just prior to any 3DSTATE_VS,
       *    3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
       *    3DSTATE_BINDING_TABLE_POINTER_VS,
       *    3DSTATE_SAMPLER_STATE_POINTER_VS command.  Only one
       *    PIPE_CONTROL needs to be sent before any combination of VS
       *    associated 3DSTATE."
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.DepthStallEnable  = true;
         pc.PostSyncOperation = WriteImmediateData;
         pc.Address           =
            (struct anv_address) { &cmd_buffer->device->workaround_bo, 0 };
      }
   }

   /* Render targets live in the same binding table as fragment descriptors */
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_RENDER_TARGETS)
      cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;

   /* We emit the binding tables and sampler tables first, then emit push
    * constants and then finally emit binding table and sampler table
    * pointers.  It has to happen in this order, since emitting the binding
    * tables may change the push constants (in case of storage images). After
    * emitting push constants, on SKL+ we have to emit the corresponding
    * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
    */
   uint32_t dirty = 0;
   if (cmd_buffer->state.descriptors_dirty)
      dirty = flush_descriptor_sets(cmd_buffer);

   if (cmd_buffer->state.push_constants_dirty) {
#if GEN_GEN >= 9
      /* On Sky Lake and later, the binding table pointers commands are
       * what actually flush the changes to push constant state so we need
       * to dirty them so they get re-emitted below.
       */
      dirty |= cmd_buffer_flush_push_constants(cmd_buffer);
#else
      cmd_buffer_flush_push_constants(cmd_buffer);
#endif
   }

   if (dirty)
      cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
      gen8_cmd_buffer_emit_viewport(cmd_buffer);

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_VIEWPORT |
                                  ANV_CMD_DIRTY_PIPELINE)) {
      gen8_cmd_buffer_emit_depth_viewport(cmd_buffer,
                                          pipeline->depth_clamp_enable);
   }

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
      gen7_cmd_buffer_emit_scissor(cmd_buffer);

   genX(cmd_buffer_flush_dynamic_state)(cmd_buffer);

   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
}
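
/* Helpers that bind small driver-generated vertex buffers: one carrying the
 * base vertex/base instance pair and one carrying the draw index, using the
 * reserved ANV_SVGS_VB_INDEX and ANV_DRAWID_VB_INDEX slots.
 */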

static void
emit_vertex_bo(struct anv_cmd_buffer *cmd_buffer,
               struct anv_bo *bo, uint32_t offset,
               uint32_t size, uint32_t index)
{
   uint32_t *p = anv_batch_emitn(&cmd_buffer->batch, 5,
                                 GENX(3DSTATE_VERTEX_BUFFERS));

   GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1,
      &(struct GENX(VERTEX_BUFFER_STATE)) {
         .VertexBufferIndex = index,
         .AddressModifyEnable = true,
         .BufferPitch = 0,
#if GEN_GEN >= 8
         .MemoryObjectControlState = GENX(MOCS),
         .BufferStartingAddress = { bo, offset },
         .BufferSize = size
#else
         .VertexBufferMemoryObjectControlState = GENX(MOCS),
         .BufferStartingAddress = { bo, offset },
         .EndAddress = { bo, offset + size },
#endif
      });
}

static void
emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_bo *bo, uint32_t offset)
{
   emit_vertex_bo(cmd_buffer, bo, offset, 8, ANV_SVGS_VB_INDEX);
}
static void
emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer,
                          uint32_t base_vertex, uint32_t base_instance)
{
   struct anv_state id_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 8, 4);

   ((uint32_t *)id_state.map)[0] = base_vertex;
   ((uint32_t *)id_state.map)[1] = base_instance;

   anv_state_flush(cmd_buffer->device, id_state);

   emit_base_vertex_instance_bo(cmd_buffer,
      &cmd_buffer->device->dynamic_state_pool.block_pool.bo, id_state.offset);
}
static void
emit_draw_index(struct anv_cmd_buffer *cmd_buffer, uint32_t draw_index)
{
   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 4, 4);

   ((uint32_t *)state.map)[0] = draw_index;

   anv_state_flush(cmd_buffer->device, state);

   emit_vertex_bo(cmd_buffer,
                  &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
                  state.offset, 4, ANV_DRAWID_VB_INDEX);
}
void genX(CmdDraw)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    vertexCount,
    uint32_t                                    instanceCount,
    uint32_t                                    firstVertex,
    uint32_t                                    firstInstance)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

   if (anv_batch_has_error(&cmd_buffer->batch))
      return;

   genX(cmd_buffer_flush_state)(cmd_buffer);

   if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
      emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);
   if (vs_prog_data->uses_drawid)
      emit_draw_index(cmd_buffer, 0);

   /* Our implementation of VK_KHR_multiview uses instancing to draw the
    * different views.  We need to multiply instanceCount by the view count.
    */
   instanceCount *= anv_subpass_view_count(cmd_buffer->state.subpass);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.VertexAccessType         = SEQUENTIAL;
      prim.PrimitiveTopologyType    = pipeline->topology;
      prim.VertexCountPerInstance   = vertexCount;
      prim.StartVertexLocation      = firstVertex;
      prim.InstanceCount            = instanceCount;
      prim.StartInstanceLocation    = firstInstance;
      prim.BaseVertexLocation       = 0;
   }
}
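/* Note: with the multiview-by-instancing scheme above, a draw recorded with
 * instanceCount = 3 in a subpass with two views reaches the hardware with
 * InstanceCount = 6; steering each instance to its view is handled by the
 * pipeline/shader setup rather than by anything in this function.
 */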
void genX(CmdDrawIndexed)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    indexCount,
    uint32_t                                    instanceCount,
    uint32_t                                    firstIndex,
    int32_t                                     vertexOffset,
    uint32_t                                    firstInstance)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

   if (anv_batch_has_error(&cmd_buffer->batch))
      return;

   genX(cmd_buffer_flush_state)(cmd_buffer);

   if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
      emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance);
   if (vs_prog_data->uses_drawid)
      emit_draw_index(cmd_buffer, 0);

   /* Our implementation of VK_KHR_multiview uses instancing to draw the
    * different views.  We need to multiply instanceCount by the view count.
    */
   instanceCount *= anv_subpass_view_count(cmd_buffer->state.subpass);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.VertexAccessType         = RANDOM;
      prim.PrimitiveTopologyType    = pipeline->topology;
      prim.VertexCountPerInstance   = indexCount;
      prim.StartVertexLocation      = firstIndex;
      prim.InstanceCount            = instanceCount;
      prim.StartInstanceLocation    = firstInstance;
      prim.BaseVertexLocation       = vertexOffset;
   }
}
/* Auto-Draw / Indirect Registers */
#define GEN7_3DPRIM_END_OFFSET          0x2420
#define GEN7_3DPRIM_START_VERTEX        0x2430
#define GEN7_3DPRIM_VERTEX_COUNT        0x2434
#define GEN7_3DPRIM_INSTANCE_COUNT      0x2438
#define GEN7_3DPRIM_START_INSTANCE      0x243C
#define GEN7_3DPRIM_BASE_VERTEX         0x2440
/* MI_MATH only exists on Haswell+ */
#if GEN_IS_HASWELL || GEN_GEN >= 8

static uint32_t
mi_alu(uint32_t opcode, uint32_t op1, uint32_t op2)
{
   struct GENX(MI_MATH_ALU_INSTRUCTION) instr = {
      .ALUOpcode = opcode,
      .Operand1 = op1,
      .Operand2 = op2,
   };

   uint32_t dw;
   GENX(MI_MATH_ALU_INSTRUCTION_pack)(NULL, &dw, &instr);

   return dw;
}
#define CS_GPR(n) (0x2600 + (n) * 8)

/* Emit dwords to multiply GPR0 by N */
static void
build_alu_multiply_gpr0(uint32_t *dw, unsigned *dw_count, uint32_t N)
{
   VK_OUTARRAY_MAKE(out, dw, dw_count);

#define append_alu(opcode, operand1, operand2) \
   vk_outarray_append(&out, alu_dw) *alu_dw = mi_alu(opcode, operand1, operand2)

   assert(N > 0);
   unsigned top_bit = 31 - __builtin_clz(N);
   for (int i = top_bit - 1; i >= 0; i--) {
      /* We get our initial data in GPR0 and we write the final data out to
       * GPR0 but we use GPR1 as our scratch register.
       */
      unsigned src_reg = i == top_bit - 1 ? MI_ALU_REG0 : MI_ALU_REG1;
      unsigned dst_reg = i == 0 ? MI_ALU_REG0 : MI_ALU_REG1;

      /* Shift the current value left by 1 */
      append_alu(MI_ALU_LOAD, MI_ALU_SRCA, src_reg);
      append_alu(MI_ALU_LOAD, MI_ALU_SRCB, src_reg);
      append_alu(MI_ALU_ADD, 0, 0);

      if (N & (1 << i)) {
         /* Store ACCU to R1 and add R0 to R1 */
         append_alu(MI_ALU_STORE, MI_ALU_REG1, MI_ALU_ACCU);
         append_alu(MI_ALU_LOAD, MI_ALU_SRCA, MI_ALU_REG0);
         append_alu(MI_ALU_LOAD, MI_ALU_SRCB, MI_ALU_REG1);
         append_alu(MI_ALU_ADD, 0, 0);
      }

      append_alu(MI_ALU_STORE, dst_reg, MI_ALU_ACCU);
   }

#undef append_alu
}
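/* Note: the loop above is a left-to-right shift-and-add multiply.  Worked
 * example for N = 6 (binary 110, top_bit = 2) with x initially in GPR0:
 *
 *    i = 1: ACCU = R0 + R0 = 2x; bit 1 is set, so R1 = 2x and
 *           ACCU = R0 + R1 = 3x; 3x is stored to R1
 *    i = 0: ACCU = R1 + R1 = 6x; bit 0 is clear; 6x is stored to R0
 *
 * leaving GPR0 = 6x, with GPR1 clobbered as scratch.
 */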
static void
emit_mul_gpr0(struct anv_batch *batch, uint32_t N)
{
   uint32_t num_dwords;
   build_alu_multiply_gpr0(NULL, &num_dwords, N);

   uint32_t *dw = anv_batch_emitn(batch, 1 + num_dwords, GENX(MI_MATH));
   build_alu_multiply_gpr0(dw + 1, &num_dwords, N);
}

#endif /* GEN_IS_HASWELL || GEN_GEN >= 8 */
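/* Note: emit_mul_gpr0() above calls build_alu_multiply_gpr0() twice on
 * purpose, following the VK_OUTARRAY_MAKE convention: the first call with a
 * NULL buffer only counts the ALU dwords, the second fills them in after the
 * MI_MATH header emitted by anv_batch_emitn().
 */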
static void
load_indirect_parameters(struct anv_cmd_buffer *cmd_buffer,
                         struct anv_buffer *buffer, uint64_t offset,
                         bool indexed)
{
   struct anv_batch *batch = &cmd_buffer->batch;
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;

   emit_lrm(batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);

   unsigned view_count = anv_subpass_view_count(cmd_buffer->state.subpass);
   if (view_count > 1) {
#if GEN_IS_HASWELL || GEN_GEN >= 8
      emit_lrm(batch, CS_GPR(0), bo, bo_offset + 4);
      emit_mul_gpr0(batch, view_count);
      emit_lrr(batch, GEN7_3DPRIM_INSTANCE_COUNT, CS_GPR(0));
#else
      anv_finishme("Multiview + indirect draw requires MI_MATH\n"
                   "MI_MATH is not supported on Ivy Bridge");
      emit_lrm(batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
#endif
   } else {
      emit_lrm(batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
   }

   emit_lrm(batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);

   if (indexed) {
      emit_lrm(batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
      emit_lrm(batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
   } else {
      emit_lrm(batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
      emit_lri(batch, GEN7_3DPRIM_BASE_VERTEX, 0);
   }
}
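/* Note: the offsets above line up with the tightly packed 32-bit fields of
 * the Vulkan indirect draw structs:
 *
 *    VkDrawIndirectCommand        { vertexCount, instanceCount,
 *                                   firstVertex, firstInstance }
 *    VkDrawIndexedIndirectCommand { indexCount, instanceCount, firstIndex,
 *                                   vertexOffset, firstInstance }
 *
 * so +0 feeds 3DPRIM_VERTEX_COUNT, +4 the instance count (scaled for
 * multiview when needed), +8 the start vertex/index, and +12/+16 the base
 * vertex and start instance depending on whether the draw is indexed.
 */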
void genX(CmdDrawIndirect)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    uint32_t                                    drawCount,
    uint32_t                                    stride)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

   if (anv_batch_has_error(&cmd_buffer->batch))
      return;

   genX(cmd_buffer_flush_state)(cmd_buffer);

   for (uint32_t i = 0; i < drawCount; i++) {
      struct anv_bo *bo = buffer->bo;
      uint32_t bo_offset = buffer->offset + offset;

      if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
         emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);
      if (vs_prog_data->uses_drawid)
         emit_draw_index(cmd_buffer, i);

      load_indirect_parameters(cmd_buffer, buffer, offset, false);

      anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
         prim.IndirectParameterEnable  = true;
         prim.VertexAccessType         = SEQUENTIAL;
         prim.PrimitiveTopologyType    = pipeline->topology;
      }

      offset += stride;
   }
}
void genX(CmdDrawIndexedIndirect)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    uint32_t                                    drawCount,
    uint32_t                                    stride)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

   if (anv_batch_has_error(&cmd_buffer->batch))
      return;

   genX(cmd_buffer_flush_state)(cmd_buffer);

   for (uint32_t i = 0; i < drawCount; i++) {
      struct anv_bo *bo = buffer->bo;
      uint32_t bo_offset = buffer->offset + offset;

      /* TODO: We need to stomp base vertex to 0 somehow */
      if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
         emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);
      if (vs_prog_data->uses_drawid)
         emit_draw_index(cmd_buffer, i);

      load_indirect_parameters(cmd_buffer, buffer, offset, true);

      anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
         prim.IndirectParameterEnable  = true;
         prim.VertexAccessType         = RANDOM;
         prim.PrimitiveTopologyType    = pipeline->topology;
      }

      offset += stride;
   }
}
static VkResult
flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = emit_binding_table(cmd_buffer, MESA_SHADER_COMPUTE, &surfaces);
   if (result != VK_SUCCESS) {
      assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);

      result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      genX(cmd_buffer_emit_state_base_address)(cmd_buffer);

      result = emit_binding_table(cmd_buffer, MESA_SHADER_COMPUTE, &surfaces);
      if (result != VK_SUCCESS) {
         anv_batch_set_error(&cmd_buffer->batch, result);
         return result;
      }
   }

   result = emit_samplers(cmd_buffer, MESA_SHADER_COMPUTE, &samplers);
   if (result != VK_SUCCESS) {
      anv_batch_set_error(&cmd_buffer->batch, result);
      return result;
   }

   uint32_t iface_desc_data_dw[GENX(INTERFACE_DESCRIPTOR_DATA_length)];
   struct GENX(INTERFACE_DESCRIPTOR_DATA) desc = {
      .BindingTablePointer = surfaces.offset,
      .SamplerStatePointer = samplers.offset,
   };
   GENX(INTERFACE_DESCRIPTOR_DATA_pack)(NULL, iface_desc_data_dw, &desc);

   struct anv_state state =
      anv_cmd_buffer_merge_dynamic(cmd_buffer, iface_desc_data_dw,
                                   pipeline->interface_descriptor_data,
                                   GENX(INTERFACE_DESCRIPTOR_DATA_length),
                                   64);

   uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
   anv_batch_emit(&cmd_buffer->batch,
                  GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), mid) {
      mid.InterfaceDescriptorTotalLength        = size;
      mid.InterfaceDescriptorDataStartAddress   = state.offset;
   }

   return VK_SUCCESS;
}
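/* Note: anv_cmd_buffer_merge_dynamic() above appears to combine the
 * INTERFACE_DESCRIPTOR_DATA template baked into the pipeline with the
 * binding-table and sampler pointers computed here, so the descriptor that
 * MEDIA_INTERFACE_DESCRIPTOR_LOAD points at reflects both.
 */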
void
genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   MAYBE_UNUSED VkResult result;

   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);

   genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->urb.l3_config);

   genX(flush_pipeline_select_gpgpu)(cmd_buffer);

   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE) {
      /* From the Sky Lake PRM Vol 2a, MEDIA_VFE_STATE:
       *
       *    "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
       *    the only bits that are changed are scoreboard related: Scoreboard
       *    Enable, Scoreboard Type, Scoreboard Mask, Scoreboard * Delta. For
       *    these scoreboard related states, a MEDIA_STATE_FLUSH is
       *    sufficient."
       */
      cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_CS_STALL_BIT;
      genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
   }

   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
      /* FIXME: figure out descriptors for gen7 */
      result = flush_compute_descriptor_set(cmd_buffer);
      if (result != VK_SUCCESS)
         return;

      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
   }

   if (cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_COMPUTE_BIT) {
      struct anv_state push_state =
         anv_cmd_buffer_cs_push_constants(cmd_buffer);

      if (push_state.alloc_size) {
         anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) {
            curbe.CURBETotalDataLength    = push_state.alloc_size;
            curbe.CURBEDataStartAddress   = push_state.offset;
         }
      }
   }

   cmd_buffer->state.compute_dirty = 0;

   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
}
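/* Note: unlike the graphics stages, compute push constants are delivered via
 * MEDIA_CURBE_LOAD above rather than through 3DSTATE_CONSTANT_* packets.
 */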
#if GEN_GEN == 7

static VkResult
verify_cmd_parser(const struct anv_device *device,
                  int required_version,
                  const char *function)
{
   if (device->instance->physicalDevice.cmd_parser_version < required_version) {
      return vk_errorf(VK_ERROR_FEATURE_NOT_PRESENT,
                       "cmd parser version %d is required for %s",
                       required_version, function);
   } else {
      return VK_SUCCESS;
   }
}

#endif
void genX(CmdDispatch)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    x,
    uint32_t                                    y,
    uint32_t                                    z)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);

   if (anv_batch_has_error(&cmd_buffer->batch))
      return;

   if (prog_data->uses_num_work_groups) {
      struct anv_state state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 12, 4);
      uint32_t *sizes = state.map;
      sizes[0] = x;
      sizes[1] = y;
      sizes[2] = z;
      anv_state_flush(cmd_buffer->device, state);
      cmd_buffer->state.num_workgroups_offset = state.offset;
      cmd_buffer->state.num_workgroups_bo =
         &cmd_buffer->device->dynamic_state_pool.block_pool.bo;
   }

   genX(cmd_buffer_flush_compute_state)(cmd_buffer);

   anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER), ggw) {
      ggw.SIMDSize                     = prog_data->simd_size / 16;
      ggw.ThreadDepthCounterMaximum    = 0;
      ggw.ThreadHeightCounterMaximum   = 0;
      ggw.ThreadWidthCounterMaximum    = prog_data->threads - 1;
      ggw.ThreadGroupIDXDimension      = x;
      ggw.ThreadGroupIDYDimension      = y;
      ggw.ThreadGroupIDZDimension      = z;
      ggw.RightExecutionMask           = pipeline->cs_right_mask;
      ggw.BottomExecutionMask          = 0xffffffff;
   }

   anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH), msf);
}
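/* Note: the SIMDSize programming above relies on prog_data->simd_size being
 * 8, 16 or 32, so dividing by 16 yields the 0/1/2 encoding the walker uses
 * for SIMD8/SIMD16/SIMD32.
 */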
#define GPGPU_DISPATCHDIMX 0x2500
#define GPGPU_DISPATCHDIMY 0x2504
#define GPGPU_DISPATCHDIMZ 0x2508

#define MI_PREDICATE_SRC0  0x2400
#define MI_PREDICATE_SRC1  0x2408
void genX(CmdDispatchIndirect)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;
   struct anv_batch *batch = &cmd_buffer->batch;

#if GEN_GEN == 7
   /* Linux 4.4 added command parser version 5 which allows the GPGPU
    * indirect dispatch registers to be written.
    */
   if (verify_cmd_parser(cmd_buffer->device, 5,
                         "vkCmdDispatchIndirect") != VK_SUCCESS)
      return;
#endif

   if (prog_data->uses_num_work_groups) {
      cmd_buffer->state.num_workgroups_offset = bo_offset;
      cmd_buffer->state.num_workgroups_bo = bo;
   }

   genX(cmd_buffer_flush_compute_state)(cmd_buffer);

   emit_lrm(batch, GPGPU_DISPATCHDIMX, bo, bo_offset);
   emit_lrm(batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4);
   emit_lrm(batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8);
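   /* Note for the block below: the three MI_PREDICATE sequences OR together
    * (x == 0), (y == 0) and (z == 0), and the final LOAD_LOADINV pass with
    * COMPARE_FALSE inverts the accumulated result, so the predicated
    * GPGPU_WALKER only runs when every dispatch dimension is non-zero.
    */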
#if GEN_GEN <= 7
   /* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */
   emit_lri(batch, MI_PREDICATE_SRC0 + 4, 0);
   emit_lri(batch, MI_PREDICATE_SRC1 + 0, 0);
   emit_lri(batch, MI_PREDICATE_SRC1 + 4, 0);

   /* Load compute_dispatch_indirect_x_size into SRC0 */
   emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 0);

   /* predicate = (compute_dispatch_indirect_x_size == 0); */
   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOAD;
      mip.CombineOperation = COMBINE_SET;
      mip.CompareOperation = COMPARE_SRCS_EQUAL;
   }

   /* Load compute_dispatch_indirect_y_size into SRC0 */
   emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 4);

   /* predicate |= (compute_dispatch_indirect_y_size == 0); */
   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOAD;
      mip.CombineOperation = COMBINE_OR;
      mip.CompareOperation = COMPARE_SRCS_EQUAL;
   }

   /* Load compute_dispatch_indirect_z_size into SRC0 */
   emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 8);

   /* predicate |= (compute_dispatch_indirect_z_size == 0); */
   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOAD;
      mip.CombineOperation = COMBINE_OR;
      mip.CompareOperation = COMPARE_SRCS_EQUAL;
   }

   /* predicate = !predicate; */
#define COMPARE_FALSE                           1
   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOADINV;
      mip.CombineOperation = COMBINE_OR;
      mip.CompareOperation = COMPARE_FALSE;
   }
#endif

   anv_batch_emit(batch, GENX(GPGPU_WALKER), ggw) {
      ggw.IndirectParameterEnable      = true;
      ggw.PredicateEnable              = GEN_GEN <= 7;
      ggw.SIMDSize                     = prog_data->simd_size / 16;
      ggw.ThreadDepthCounterMaximum    = 0;
      ggw.ThreadHeightCounterMaximum   = 0;
      ggw.ThreadWidthCounterMaximum    = prog_data->threads - 1;
      ggw.RightExecutionMask           = pipeline->cs_right_mask;
      ggw.BottomExecutionMask          = 0xffffffff;
   }

   anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH), msf);
}
static void
genX(flush_pipeline_select)(struct anv_cmd_buffer *cmd_buffer,
                            uint32_t pipeline)
{
   if (cmd_buffer->state.current_pipeline == pipeline)
      return;

#if GEN_GEN >= 8 && GEN_GEN < 10
   /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
    *
    *   Software must clear the COLOR_CALC_STATE Valid field in
    *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
    *   with Pipeline Select set to GPGPU.
    *
    * The internal hardware docs recommend the same workaround for Gen9
    * hardware too.
    */
   if (pipeline == GPGPU)
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
#endif

   /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
    * PIPELINE_SELECT [DevBWR+]":
    *
    *   Project: DEVSNB+
    *
    *   Software must ensure all the write caches are flushed through a
    *   stalling PIPE_CONTROL command followed by another PIPE_CONTROL
    *   command to invalidate read only caches prior to programming
    *   MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.RenderTargetCacheFlushEnable  = true;
      pc.DepthCacheFlushEnable         = true;
      pc.DCFlushEnable                 = true;
      pc.PostSyncOperation             = NoWrite;
      pc.CommandStreamerStallEnable    = true;
   }

   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.TextureCacheInvalidationEnable   = true;
      pc.ConstantCacheInvalidationEnable  = true;
      pc.StateCacheInvalidationEnable     = true;
      pc.InstructionCacheInvalidateEnable = true;
      pc.PostSyncOperation                = NoWrite;
   }

   anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
#if GEN_GEN >= 9
      ps.MaskBits = 3;
#endif
      ps.PipelineSelection = pipeline;
   }

   cmd_buffer->state.current_pipeline = pipeline;
}
void
genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
{
   genX(flush_pipeline_select)(cmd_buffer, _3D);
}

void
genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
{
   genX(flush_pipeline_select)(cmd_buffer, GPGPU);
}
void
genX(cmd_buffer_emit_gen7_depth_flush)(struct anv_cmd_buffer *cmd_buffer)
{
   if (GEN_GEN >= 8)
      return;

   /* From the Haswell PRM, documentation for 3DSTATE_DEPTH_BUFFER:
    *
    *    "Restriction: Prior to changing Depth/Stencil Buffer state (i.e., any
    *    combination of 3DSTATE_DEPTH_BUFFER, 3DSTATE_CLEAR_PARAMS,
    *    3DSTATE_STENCIL_BUFFER, 3DSTATE_HIER_DEPTH_BUFFER) SW must first
    *    issue a pipelined depth stall (PIPE_CONTROL with Depth Stall bit
    *    set), followed by a pipelined depth cache flush (PIPE_CONTROL with
    *    Depth Flush Bit set, followed by another pipelined depth stall
    *    (PIPE_CONTROL with Depth Stall Bit set), unless SW can otherwise
    *    guarantee that the pipeline from WM onwards is already flushed (e.g.,
    *    via a preceding MI_FLUSH)."
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
      pipe.DepthStallEnable = true;
   }
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
      pipe.DepthCacheFlushEnable = true;
   }
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
      pipe.DepthStallEnable = true;
   }
}
static void
cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   const struct anv_image_view *iview =
      anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
   const struct anv_image *image = iview ? iview->image : NULL;

   /* FIXME: Width and Height are wrong */

   genX(cmd_buffer_emit_gen7_depth_flush)(cmd_buffer);

   uint32_t *dw = anv_batch_emit_dwords(&cmd_buffer->batch,
                                        device->isl_dev.ds.size / 4);
   if (dw == NULL)
      return;

   struct isl_depth_stencil_hiz_emit_info info = {
      .mocs = device->default_mocs,
   };

   if (iview)
      info.view = &iview->isl;

   if (image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
      info.depth_surf = &image->depth_surface.isl;

      info.depth_address =
         anv_batch_emit_reloc(&cmd_buffer->batch,
                              dw + device->isl_dev.ds.depth_offset / 4,
                              image->bo,
                              image->offset + image->depth_surface.offset);

      const uint32_t ds =
         cmd_buffer->state.subpass->depth_stencil_attachment.attachment;
      info.hiz_usage = cmd_buffer->state.attachments[ds].aux_usage;
      if (info.hiz_usage == ISL_AUX_USAGE_HIZ) {
         info.hiz_surf = &image->aux_surface.isl;

         info.hiz_address =
            anv_batch_emit_reloc(&cmd_buffer->batch,
                                 dw + device->isl_dev.ds.hiz_offset / 4,
                                 image->bo,
                                 image->offset + image->aux_surface.offset);

         info.depth_clear_value = ANV_HZ_FC_VAL;
      }
   }

   if (image && (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT)) {
      info.stencil_surf = &image->stencil_surface.isl;

      info.stencil_address =
         anv_batch_emit_reloc(&cmd_buffer->batch,
                              dw + device->isl_dev.ds.stencil_offset / 4,
                              image->bo,
                              image->offset + image->stencil_surface.offset);
   }

   isl_emit_depth_stencil_hiz_s(&device->isl_dev, dw, &info);

   cmd_buffer->state.hiz_enabled = info.hiz_usage == ISL_AUX_USAGE_HIZ;
}
/**
 * @brief Perform any layout transitions required at the beginning and/or end
 *        of the current subpass for depth buffers.
 *
 * TODO: Consider preprocessing the attachment reference array at render pass
 *       create time to determine if no layout transition is needed at the
 *       beginning and/or end of each subpass.
 *
 * @param cmd_buffer The command buffer the transition is happening within.
 * @param subpass_end If true, marks that the transition is happening at the
 *                    end of the subpass.
 */
static void
cmd_buffer_subpass_transition_layouts(struct anv_cmd_buffer * const cmd_buffer,
                                      const bool subpass_end)
{
   /* We need a non-NULL command buffer. */
   assert(cmd_buffer);

   const struct anv_cmd_state * const cmd_state = &cmd_buffer->state;
   const struct anv_subpass * const subpass = cmd_state->subpass;

   /* This function must be called within a subpass. */
   assert(subpass);

   /* If there are attachment references, the array shouldn't be NULL.
    */
   if (subpass->attachment_count > 0)
      assert(subpass->attachments);

   /* Iterate over the array of attachment references. */
   for (const VkAttachmentReference *att_ref = subpass->attachments;
        att_ref < subpass->attachments + subpass->attachment_count; att_ref++) {

      /* If the attachment is unused, we can't perform a layout transition. */
      if (att_ref->attachment == VK_ATTACHMENT_UNUSED)
         continue;

      /* This attachment index shouldn't go out of bounds. */
      assert(att_ref->attachment < cmd_state->pass->attachment_count);

      const struct anv_render_pass_attachment * const att_desc =
         &cmd_state->pass->attachments[att_ref->attachment];
      struct anv_attachment_state * const att_state =
         &cmd_buffer->state.attachments[att_ref->attachment];

      /* The attachment should not be used in a subpass after its last. */
      assert(att_desc->last_subpass_idx >= anv_get_subpass_id(cmd_state));

      if (subpass_end && anv_get_subpass_id(cmd_state) <
          att_desc->last_subpass_idx) {
         /* We're calling this function on a buffer twice in one subpass and
          * this is not the last use of the buffer. The layout should not have
          * changed from the first call and no transition is necessary.
          */
         assert(att_ref->layout == att_state->current_layout);
         continue;
      }

      /* Get the appropriate target layout for this attachment. */
      const VkImageLayout target_layout = subpass_end ?
         att_desc->final_layout : att_ref->layout;

      /* The attachment index must be less than the number of attachments
       * within the framebuffer.
       */
      assert(att_ref->attachment < cmd_state->framebuffer->attachment_count);

      const struct anv_image_view * const iview =
         cmd_state->framebuffer->attachments[att_ref->attachment];
      const struct anv_image * const image = iview->image;

      /* Perform the layout transition. */
      if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
         transition_depth_buffer(cmd_buffer, image,
                                 att_state->current_layout, target_layout);
         att_state->aux_usage =
            anv_layout_to_aux_usage(&cmd_buffer->device->info, image,
                                    image->aspects, target_layout);
      } else if (image->aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
         transition_color_buffer(cmd_buffer, image,
                                 iview->isl.base_level, 1,
                                 iview->isl.base_array_layer,
                                 iview->isl.array_len,
                                 att_state->current_layout, target_layout);
      }

      att_state->current_layout = target_layout;
   }
}
static void
genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_subpass *subpass)
{
   cmd_buffer->state.subpass = subpass;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;

   /* Our implementation of VK_KHR_multiview uses instancing to draw the
    * different views.  If the client asks for instancing, we need to use the
    * Instance Data Step Rate to ensure that we repeat the client's
    * per-instance data once for each view.  Since this bit is in
    * VERTEX_BUFFER_STATE on gen7, we need to dirty vertex buffers at the top
    * of each subpass.
    */
   if (GEN_GEN == 7)
      cmd_buffer->state.vb_dirty |= ~0;

   /* Perform transitions to the subpass layout before any writes have
    * occurred.
    */
   cmd_buffer_subpass_transition_layouts(cmd_buffer, false);

   cmd_buffer_emit_depth_stencil(cmd_buffer);

   anv_cmd_buffer_clear_subpass(cmd_buffer);
}
void genX(CmdBeginRenderPass)(
    VkCommandBuffer                             commandBuffer,
    const VkRenderPassBeginInfo*                pRenderPassBegin,
    VkSubpassContents                           contents)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
   ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);

   cmd_buffer->state.framebuffer = framebuffer;
   cmd_buffer->state.pass = pass;
   cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
   VkResult result =
      genX(cmd_buffer_setup_attachments)(cmd_buffer, pass, pRenderPassBegin);

   /* If we failed to setup the attachments we should not try to go further */
   if (result != VK_SUCCESS) {
      assert(anv_batch_has_error(&cmd_buffer->batch));
      return;
   }

   genX(flush_pipeline_select_3d)(cmd_buffer);

   genX(cmd_buffer_set_subpass)(cmd_buffer, pass->subpasses);

   cmd_buffer->state.pending_pipe_bits |=
      cmd_buffer->state.pass->subpass_flushes[0];
}
void genX(CmdNextSubpass)(
    VkCommandBuffer                             commandBuffer,
    VkSubpassContents                           contents)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (anv_batch_has_error(&cmd_buffer->batch))
      return;

   assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);

   anv_cmd_buffer_resolve_subpass(cmd_buffer);

   /* Perform transitions to the final layout after all writes have occurred.
    */
   cmd_buffer_subpass_transition_layouts(cmd_buffer, true);

   genX(cmd_buffer_set_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1);

   uint32_t subpass_id = anv_get_subpass_id(&cmd_buffer->state);
   cmd_buffer->state.pending_pipe_bits |=
      cmd_buffer->state.pass->subpass_flushes[subpass_id];
}
void genX(CmdEndRenderPass)(
    VkCommandBuffer                             commandBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (anv_batch_has_error(&cmd_buffer->batch))
      return;

   anv_cmd_buffer_resolve_subpass(cmd_buffer);

   /* Perform transitions to the final layout after all writes have occurred.
    */
   cmd_buffer_subpass_transition_layouts(cmd_buffer, true);

   cmd_buffer->state.pending_pipe_bits |=
      cmd_buffer->state.pass->subpass_flushes[cmd_buffer->state.pass->subpass_count];

   cmd_buffer->state.hiz_enabled = false;

#ifndef NDEBUG
   anv_dump_add_framebuffer(cmd_buffer, cmd_buffer->state.framebuffer);
#endif

   /* Remove references to render pass specific state. This enables us to
    * detect whether or not we're in a renderpass.
    */
   cmd_buffer->state.framebuffer = NULL;
   cmd_buffer->state.pass = NULL;
   cmd_buffer->state.subpass = NULL;
}