anv/cmd_buffer: Use some pre-existing pipeline temporaries
mesa.git: src/intel/vulkan/genX_cmd_buffer.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>

#include "anv_private.h"
#include "vk_format_info.h"
#include "vk_util.h"

#include "common/gen_l3_config.h"
#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

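/* Load a 32-bit register from a buffer object via MI_LOAD_REGISTER_MEM. */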
static void
emit_lrm(struct anv_batch *batch,
         uint32_t reg, struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress = reg;
      lrm.MemoryAddress = (struct anv_address) { bo, offset };
   }
}

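/* Load an immediate 32-bit value into a register via MI_LOAD_REGISTER_IMM. */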
static void
emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
      lri.RegisterOffset = reg;
      lri.DataDWord = imm;
   }
}

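/* Copy one register into another via MI_LOAD_REGISTER_REG. This command
 * only exists on Haswell and later, hence the guard below.
 */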
#if GEN_IS_HASWELL || GEN_GEN >= 8
static void
emit_lrr(struct anv_batch *batch, uint32_t dst, uint32_t src)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_REG), lrr) {
      lrr.SourceRegisterAddress = src;
      lrr.DestinationRegisterAddress = dst;
   }
}
#endif

void
genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;

   /* Emit a render target cache flush.
    *
    * This isn't documented anywhere in the PRM. However, it seems to be
    * necessary prior to changing the surface state base address. Without
    * this, we get GPU hangs when using multi-level command buffers which
    * clear depth, reset state base address, and then go render stuff.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DCFlushEnable = true;
      pc.RenderTargetCacheFlushEnable = true;
      pc.CommandStreamerStallEnable = true;
   }

   anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) {
      sba.GeneralStateBaseAddress = (struct anv_address) { NULL, 0 };
      sba.GeneralStateMemoryObjectControlState = GENX(MOCS);
      sba.GeneralStateBaseAddressModifyEnable = true;

      sba.SurfaceStateBaseAddress =
         anv_cmd_buffer_surface_base_address(cmd_buffer);
      sba.SurfaceStateMemoryObjectControlState = GENX(MOCS);
      sba.SurfaceStateBaseAddressModifyEnable = true;

      sba.DynamicStateBaseAddress =
         (struct anv_address) { &device->dynamic_state_pool.block_pool.bo, 0 };
      sba.DynamicStateMemoryObjectControlState = GENX(MOCS);
      sba.DynamicStateBaseAddressModifyEnable = true;

      sba.IndirectObjectBaseAddress = (struct anv_address) { NULL, 0 };
      sba.IndirectObjectMemoryObjectControlState = GENX(MOCS);
      sba.IndirectObjectBaseAddressModifyEnable = true;

      sba.InstructionBaseAddress =
         (struct anv_address) { &device->instruction_state_pool.block_pool.bo, 0 };
      sba.InstructionMemoryObjectControlState = GENX(MOCS);
      sba.InstructionBaseAddressModifyEnable = true;

#if (GEN_GEN >= 8)
      /* Broadwell requires that we specify a buffer size for a bunch of
       * these fields. However, since we will be growing the BOs live, we
       * just set them all to the maximum.
       */
      sba.GeneralStateBufferSize = 0xfffff;
      sba.GeneralStateBufferSizeModifyEnable = true;
      sba.DynamicStateBufferSize = 0xfffff;
      sba.DynamicStateBufferSizeModifyEnable = true;
      sba.IndirectObjectBufferSize = 0xfffff;
      sba.IndirectObjectBufferSizeModifyEnable = true;
      sba.InstructionBufferSize = 0xfffff;
      sba.InstructionBuffersizeModifyEnable = true;
#endif
   }

   /* After re-setting the surface state base address, we have to do some
    * cache flushing so that the sampler engine will pick up the new
    * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
    * Shared Function > 3D Sampler > State > State Caching (page 96):
    *
    *    Coherency with system memory in the state cache, like the texture
    *    cache is handled partially by software. It is expected that the
    *    command stream or shader will issue Cache Flush operation or
    *    Cache_Flush sampler message to ensure that the L1 cache remains
    *    coherent with system memory.
    *
    *    [...]
    *
    *    Whenever the value of the Dynamic_State_Base_Addr,
    *    Surface_State_Base_Addr are altered, the L1 state cache must be
    *    invalidated to ensure the new surface or sampler state is fetched
    *    from system memory.
    *
    * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
    * which, according to the PIPE_CONTROL instruction documentation in the
    * Broadwell PRM:
    *
    *    Setting this bit is independent of any other bit in this packet.
    *    This bit controls the invalidation of the L1 and L2 state caches
    *    at the top of the pipe i.e. at the parsing time.
    *
    * Unfortunately, experimentation seems to indicate that state cache
    * invalidation through a PIPE_CONTROL does nothing whatsoever in
    * regards to surface state and binding tables. Instead, it seems that
    * invalidating the texture cache is what is actually needed.
    *
    * XXX: As far as we have been able to determine through
    * experimentation, flushing the texture cache appears to be
    * sufficient. The theory here is that all of the sampling/rendering
    * units cache the binding table in the texture cache. However, we have
    * yet to be able to actually confirm this.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.TextureCacheInvalidationEnable = true;
      pc.ConstantCacheInvalidationEnable = true;
      pc.StateCacheInvalidationEnable = true;
   }
}

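/* Record a relocation for the address field of a surface state object so
 * that it gets patched to the surface's actual BO address at submit time.
 */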
static void
add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_state state,
                        struct anv_bo *bo, uint32_t offset)
{
   const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;

   VkResult result =
      anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
                         state.offset + isl_dev->ss.addr_offset, bo, offset);
   if (result != VK_SUCCESS)
      anv_batch_set_error(&cmd_buffer->batch, result);
}

static void
add_image_view_relocs(struct anv_cmd_buffer *cmd_buffer,
                      const struct anv_image_view *image_view,
                      const uint32_t plane,
                      struct anv_surface_state state)
{
   const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
   const struct anv_image *image = image_view->image;
   uint32_t image_plane = image_view->planes[plane].image_plane;

   add_surface_state_reloc(cmd_buffer, state.state,
                           image->planes[image_plane].bo, state.address);

   if (state.aux_address) {
      VkResult result =
         anv_reloc_list_add(&cmd_buffer->surface_relocs,
                            &cmd_buffer->pool->alloc,
                            state.state.offset + isl_dev->ss.aux_addr_offset,
                            image->planes[image_plane].bo, state.aux_address);
      if (result != VK_SUCCESS)
         anv_batch_set_error(&cmd_buffer->batch, result);
   }
}

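/* Returns true if every channel of the clear color is either 0 or 1. Only
 * such colors are eligible for fast clears on gen8 and earlier.
 */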
static bool
color_is_zero_one(VkClearColorValue value, enum isl_format format)
{
   if (isl_format_has_int_channel(format)) {
      for (unsigned i = 0; i < 4; i++) {
         if (value.int32[i] != 0 && value.int32[i] != 1)
            return false;
      }
   } else {
      for (unsigned i = 0; i < 4; i++) {
         if (value.float32[i] != 0.0f && value.float32[i] != 1.0f)
            return false;
      }
   }

   return true;
}

static void
color_attachment_compute_aux_usage(struct anv_device * device,
                                   struct anv_cmd_state * cmd_state,
                                   uint32_t att, VkRect2D render_area,
                                   union isl_color_value *fast_clear_color)
{
   struct anv_attachment_state *att_state = &cmd_state->attachments[att];
   struct anv_image_view *iview = cmd_state->framebuffer->attachments[att];

   assert(iview->n_planes == 1);

   if (iview->planes[0].isl.base_array_layer >=
       anv_image_aux_layers(iview->image, VK_IMAGE_ASPECT_COLOR_BIT,
                            iview->planes[0].isl.base_level)) {
      /* There is no aux buffer which corresponds to the level and layer(s)
       * being accessed.
       */
      att_state->aux_usage = ISL_AUX_USAGE_NONE;
      att_state->input_aux_usage = ISL_AUX_USAGE_NONE;
      att_state->fast_clear = false;
      return;
   } else if (iview->image->planes[0].aux_usage == ISL_AUX_USAGE_MCS) {
      att_state->aux_usage = ISL_AUX_USAGE_MCS;
      att_state->input_aux_usage = ISL_AUX_USAGE_MCS;
      att_state->fast_clear = false;
      return;
   } else if (iview->image->planes[0].aux_usage == ISL_AUX_USAGE_CCS_E) {
      att_state->aux_usage = ISL_AUX_USAGE_CCS_E;
      att_state->input_aux_usage = ISL_AUX_USAGE_CCS_E;
   } else {
      att_state->aux_usage = ISL_AUX_USAGE_CCS_D;
      /* From the Sky Lake PRM, RENDER_SURFACE_STATE::AuxiliarySurfaceMode:
       *
       *    "If Number of Multisamples is MULTISAMPLECOUNT_1, AUX_CCS_D
       *    setting is only allowed if Surface Format supported for Fast
       *    Clear. In addition, if the surface is bound to the sampling
       *    engine, Surface Format must be supported for Render Target
       *    Compression for surfaces bound to the sampling engine."
       *
       * In other words, we can only sample from a fast-cleared image if it
       * also supports color compression.
       */
      if (isl_format_supports_ccs_e(&device->info, iview->planes[0].isl.format)) {
         att_state->input_aux_usage = ISL_AUX_USAGE_CCS_D;

         /* While fast-clear resolves and partial resolves are fairly cheap in the
          * case where you render to most of the pixels, full resolves are not
          * because they potentially involve reading and writing the entire
          * framebuffer. If we can't texture with CCS_E, we should leave it off and
          * limit ourselves to fast clears.
          */
         if (cmd_state->pass->attachments[att].first_subpass_layout ==
             VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
            anv_perf_warn(device->instance, iview->image,
                          "Not temporarily enabling CCS_E.");
         }
      } else {
         att_state->input_aux_usage = ISL_AUX_USAGE_NONE;
      }
   }

   assert(iview->image->planes[0].aux_surface.isl.usage & ISL_SURF_USAGE_CCS_BIT);

   att_state->clear_color_is_zero_one =
      color_is_zero_one(att_state->clear_value.color, iview->planes[0].isl.format);
   att_state->clear_color_is_zero =
      att_state->clear_value.color.uint32[0] == 0 &&
      att_state->clear_value.color.uint32[1] == 0 &&
      att_state->clear_value.color.uint32[2] == 0 &&
      att_state->clear_value.color.uint32[3] == 0;

   if (att_state->pending_clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
      /* Start off assuming fast clears are possible */
      att_state->fast_clear = true;

      /* Potentially, we could do partial fast-clears but doing so has crazy
       * alignment restrictions. It's easier to just restrict to full size
       * fast clears for now.
       */
      if (render_area.offset.x != 0 ||
          render_area.offset.y != 0 ||
          render_area.extent.width != iview->extent.width ||
          render_area.extent.height != iview->extent.height)
         att_state->fast_clear = false;

      /* On Broadwell and earlier, we can only handle 0/1 clear colors */
      if (GEN_GEN <= 8 && !att_state->clear_color_is_zero_one)
         att_state->fast_clear = false;

      /* We allow fast clears when all aux layers of the miplevel are targeted.
       * See add_fast_clear_state_buffer() for more information. Also, because
       * we only either do a fast clear or a normal clear and not both, this
       * complies with the gen7 restriction of not fast-clearing multiple
       * layers.
       */
      if (cmd_state->framebuffer->layers !=
          anv_image_aux_layers(iview->image, VK_IMAGE_ASPECT_COLOR_BIT,
                               iview->planes[0].isl.base_level)) {
         att_state->fast_clear = false;
         if (GEN_GEN == 7) {
            anv_perf_warn(device->instance, iview->image,
                          "Not fast-clearing the first layer in "
                          "a multi-layer fast clear.");
         }
      }

      /* We only allow fast clears in the GENERAL layout if the auxiliary
       * buffer is always enabled and the fast-clear value is all 0's. See
       * add_fast_clear_state_buffer() for more information.
       */
      if (cmd_state->pass->attachments[att].first_subpass_layout ==
          VK_IMAGE_LAYOUT_GENERAL &&
          (!att_state->clear_color_is_zero ||
           iview->image->planes[0].aux_usage == ISL_AUX_USAGE_NONE)) {
         att_state->fast_clear = false;
      }

      if (att_state->fast_clear) {
         memcpy(fast_clear_color->u32, att_state->clear_value.color.uint32,
                sizeof(fast_clear_color->u32));
      }
   } else {
      att_state->fast_clear = false;
   }
}

static bool
need_input_attachment_state(const struct anv_render_pass_attachment *att)
{
   if (!(att->usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT))
      return false;

   /* We only allocate input attachment states for color surfaces. Compression
    * is not yet enabled for depth textures and stencil doesn't allow
    * compression so we can just use the texture surface state from the view.
    */
   return vk_format_is_color(att->format);
}

/* Transitions a HiZ-enabled depth buffer from one layout to another. Unless
 * the initial layout is undefined, the HiZ buffer and depth buffer will
 * represent the same data at the end of this operation.
 */
static void
transition_depth_buffer(struct anv_cmd_buffer *cmd_buffer,
                        const struct anv_image *image,
                        VkImageLayout initial_layout,
                        VkImageLayout final_layout)
{
   assert(image);

   /* A transition is a no-op if HiZ is not enabled, or if the initial and
    * final layouts are equal.
    *
    * The undefined layout indicates that the user doesn't care about the data
    * that's currently in the buffer. Therefore, a data-preserving resolve
    * operation is not needed.
    */
   if (image->planes[0].aux_usage != ISL_AUX_USAGE_HIZ || initial_layout == final_layout)
      return;

   const bool hiz_enabled = ISL_AUX_USAGE_HIZ ==
      anv_layout_to_aux_usage(&cmd_buffer->device->info, image,
                              VK_IMAGE_ASPECT_DEPTH_BIT, initial_layout);
   const bool enable_hiz = ISL_AUX_USAGE_HIZ ==
      anv_layout_to_aux_usage(&cmd_buffer->device->info, image,
                              VK_IMAGE_ASPECT_DEPTH_BIT, final_layout);

   enum blorp_hiz_op hiz_op;
   if (hiz_enabled && !enable_hiz) {
      hiz_op = BLORP_HIZ_OP_DEPTH_RESOLVE;
   } else if (!hiz_enabled && enable_hiz) {
      hiz_op = BLORP_HIZ_OP_HIZ_RESOLVE;
   } else {
      assert(hiz_enabled == enable_hiz);
      /* If the same buffer will be used, no resolves are necessary. */
      hiz_op = BLORP_HIZ_OP_NONE;
   }

   if (hiz_op != BLORP_HIZ_OP_NONE)
      anv_gen8_hiz_op_resolve(cmd_buffer, image, hiz_op);
}

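/* MMIO offsets of the MI_PREDICATE source registers. Each is a 64-bit
 * register, so the upper half of each lives at offset +4.
 */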
#define MI_PREDICATE_SRC0  0x2400
#define MI_PREDICATE_SRC1  0x2408

/* Manages the state of a color image subresource to ensure resolves are
 * performed properly.
 */
static void
genX(set_image_needs_resolve)(struct anv_cmd_buffer *cmd_buffer,
                              const struct anv_image *image,
                              VkImageAspectFlagBits aspect,
                              unsigned level, bool needs_resolve)
{
   assert(cmd_buffer && image);
   assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
   assert(level < anv_image_aux_levels(image, aspect));

   /* The HW docs say that there is no way to guarantee the completion of
    * the following command. We use it nevertheless because it shows no
    * issues in testing and is currently being used in the GL driver.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdi) {
      sdi.Address = anv_image_get_needs_resolve_addr(cmd_buffer->device,
                                                     image, aspect, level);
      sdi.ImmediateData = needs_resolve;
   }
}

static void
genX(load_needs_resolve_predicate)(struct anv_cmd_buffer *cmd_buffer,
                                   const struct anv_image *image,
                                   VkImageAspectFlagBits aspect,
                                   unsigned level)
{
   assert(cmd_buffer && image);
   assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
   assert(level < anv_image_aux_levels(image, aspect));

   const struct anv_address resolve_flag_addr =
      anv_image_get_needs_resolve_addr(cmd_buffer->device,
                                       image, aspect, level);

   /* Make the pending predicated resolve a no-op if one is not needed.
    * predicate = do_resolve = resolve_flag != 0;
    */
   emit_lri(&cmd_buffer->batch, MI_PREDICATE_SRC1    , 0);
   emit_lri(&cmd_buffer->batch, MI_PREDICATE_SRC1 + 4, 0);
   emit_lri(&cmd_buffer->batch, MI_PREDICATE_SRC0    , 0);
   emit_lrm(&cmd_buffer->batch, MI_PREDICATE_SRC0 + 4,
            resolve_flag_addr.bo, resolve_flag_addr.offset);
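   /* SRC0 now holds the resolve flag and SRC1 holds zero. LOAD_LOADINV
    * inverts the result of COMPARE_SRCS_EQUAL, so the predicate is set
    * exactly when the flag is non-zero.
    */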
   anv_batch_emit(&cmd_buffer->batch, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOADINV;
      mip.CombineOperation = COMBINE_SET;
      mip.CompareOperation = COMPARE_SRCS_EQUAL;
   }
}

static void
init_fast_clear_state_entry(struct anv_cmd_buffer *cmd_buffer,
                            const struct anv_image *image,
                            VkImageAspectFlagBits aspect,
                            unsigned level)
{
   assert(cmd_buffer && image);
   assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
   assert(level < anv_image_aux_levels(image, aspect));

   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   enum isl_aux_usage aux_usage = image->planes[plane].aux_usage;

   /* The resolve flag should be updated to signify that fast-clear/compression
    * data needs to be removed when leaving the undefined layout. Such data
    * may need to be removed if it would cause accesses to the color buffer
    * to return incorrect data. The fast clear data in CCS_D buffers should
    * be removed because CCS_D isn't enabled all the time.
    */
   genX(set_image_needs_resolve)(cmd_buffer, image, aspect, level,
                                 aux_usage == ISL_AUX_USAGE_NONE);

   /* The fast clear value dword(s) will be copied into a surface state object.
    * Ensure that the restrictions of the fields in the dword(s) are followed.
    *
    * CCS buffers on SKL+ can have any value set for the clear colors.
    */
   if (image->samples == 1 && GEN_GEN >= 9)
      return;

   /* Other combinations of auxiliary buffers and platforms require specific
    * values in the clear value dword(s).
    */
   struct anv_address addr =
      anv_image_get_clear_color_addr(cmd_buffer->device, image, aspect, level);
   unsigned i = 0;
   for (; i < cmd_buffer->device->isl_dev.ss.clear_value_size; i += 4) {
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdi) {
         sdi.Address = addr;

         if (GEN_GEN >= 9) {
            /* MCS buffers on SKL+ can only have 1/0 clear colors. */
            assert(aux_usage == ISL_AUX_USAGE_MCS);
            sdi.ImmediateData = 0;
         } else if (GEN_VERSIONx10 >= 75) {
            /* Pre-SKL, the dword containing the clear values also contains
             * other fields, so we need to initialize those fields to match the
             * values that would be in a color attachment.
             */
            assert(i == 0);
            sdi.ImmediateData = ISL_CHANNEL_SELECT_RED   << 25 |
                                ISL_CHANNEL_SELECT_GREEN << 22 |
                                ISL_CHANNEL_SELECT_BLUE  << 19 |
                                ISL_CHANNEL_SELECT_ALPHA << 16;
         } else if (GEN_VERSIONx10 == 70) {
            /* On IVB, the dword containing the clear values also contains
             * other fields that must be zero or can be zero.
             */
            assert(i == 0);
            sdi.ImmediateData = 0;
         }
      }

      addr.offset += 4;
   }
}

/* Copy the fast-clear value dword(s) between a surface state object and an
 * image's fast clear state buffer.
 */
static void
genX(copy_fast_clear_dwords)(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_state surface_state,
                             const struct anv_image *image,
                             VkImageAspectFlagBits aspect,
                             unsigned level,
                             bool copy_from_surface_state)
{
   assert(cmd_buffer && image);
   assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
   assert(level < anv_image_aux_levels(image, aspect));

   struct anv_bo *ss_bo =
      &cmd_buffer->device->surface_state_pool.block_pool.bo;
   uint32_t ss_clear_offset = surface_state.offset +
      cmd_buffer->device->isl_dev.ss.clear_value_offset;
   const struct anv_address entry_addr =
      anv_image_get_clear_color_addr(cmd_buffer->device, image, aspect, level);
   unsigned copy_size = cmd_buffer->device->isl_dev.ss.clear_value_size;

   if (copy_from_surface_state) {
      genX(cmd_buffer_mi_memcpy)(cmd_buffer, entry_addr.bo, entry_addr.offset,
                                 ss_bo, ss_clear_offset, copy_size);
   } else {
      genX(cmd_buffer_mi_memcpy)(cmd_buffer, ss_bo, ss_clear_offset,
                                 entry_addr.bo, entry_addr.offset, copy_size);

      /* Updating a surface state object may require that the state cache be
       * invalidated. From the SKL PRM, Shared Functions -> State -> State
       * Caching:
       *
       *    Whenever the RENDER_SURFACE_STATE object in memory pointed to by
       *    the Binding Table Pointer (BTP) and Binding Table Index (BTI) is
       *    modified [...], the L1 state cache must be invalidated to ensure
       *    the new surface or sampler state is fetched from system memory.
       *
       * In testing, SKL doesn't actually seem to need this, but HSW does.
       */
      cmd_buffer->state.pending_pipe_bits |=
         ANV_PIPE_STATE_CACHE_INVALIDATE_BIT;
   }
}

/**
 * @brief Transitions a color buffer from one layout to another.
 *
 * See section 6.1.1. Image Layout Transitions of the Vulkan 1.0.50 spec for
 * more information.
 *
 * @param level_count VK_REMAINING_MIP_LEVELS isn't supported.
 * @param layer_count VK_REMAINING_ARRAY_LAYERS isn't supported. For 3D images,
 *                    this represents the maximum layers to transition at each
 *                    specified miplevel.
 */
static void
transition_color_buffer(struct anv_cmd_buffer *cmd_buffer,
                        const struct anv_image *image,
                        VkImageAspectFlagBits aspect,
                        const uint32_t base_level, uint32_t level_count,
                        uint32_t base_layer, uint32_t layer_count,
                        VkImageLayout initial_layout,
                        VkImageLayout final_layout)
{
   /* Validate the inputs. */
   assert(cmd_buffer);
   assert(image && image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
   /* These values aren't supported for simplicity's sake. */
   assert(level_count != VK_REMAINING_MIP_LEVELS &&
          layer_count != VK_REMAINING_ARRAY_LAYERS);
   /* Ensure the subresource range is valid. */
   uint64_t last_level_num = base_level + level_count;
   const uint32_t max_depth = anv_minify(image->extent.depth, base_level);
   UNUSED const uint32_t image_layers = MAX2(image->array_size, max_depth);
   assert((uint64_t)base_layer + layer_count <= image_layers);
   assert(last_level_num <= image->levels);
   /* The spec disallows these final layouts. */
   assert(final_layout != VK_IMAGE_LAYOUT_UNDEFINED &&
          final_layout != VK_IMAGE_LAYOUT_PREINITIALIZED);

   /* No work is necessary if the layout stays the same or if this subresource
    * range lacks auxiliary data.
    */
   if (initial_layout == final_layout)
      return;

   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);

   if (image->planes[plane].shadow_surface.isl.size > 0 &&
       final_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
      /* This surface is a linear compressed image with a tiled shadow surface
       * for texturing. The client is about to use it in READ_ONLY_OPTIMAL so
       * we need to ensure the shadow copy is up-to-date.
       */
      assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
      assert(image->planes[plane].surface.isl.tiling == ISL_TILING_LINEAR);
      assert(image->planes[plane].shadow_surface.isl.tiling != ISL_TILING_LINEAR);
      assert(isl_format_is_compressed(image->planes[plane].surface.isl.format));
      assert(plane == 0);
      anv_image_copy_to_shadow(cmd_buffer, image,
                               base_level, level_count,
                               base_layer, layer_count);
   }

   if (base_layer >= anv_image_aux_layers(image, aspect, base_level))
      return;

   /* A transition of a 3D subresource works on all slices at a time. */
   if (image->type == VK_IMAGE_TYPE_3D) {
      base_layer = 0;
      layer_count = anv_minify(image->extent.depth, base_level);
   }

   /* We're interested in the subresource range subset that has aux data. */
   level_count = MIN2(level_count, anv_image_aux_levels(image, aspect) - base_level);
   layer_count = MIN2(layer_count,
                      anv_image_aux_layers(image, aspect, base_level) - base_layer);
   last_level_num = base_level + level_count;

   /* Record whether or not the layout is undefined. Pre-initialized images
    * with auxiliary buffers have a non-linear layout and are thus undefined.
    */
   assert(image->tiling == VK_IMAGE_TILING_OPTIMAL);
   const bool undef_layout = initial_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
                             initial_layout == VK_IMAGE_LAYOUT_PREINITIALIZED;

   /* Do preparatory work before the resolve operation or return early if no
    * resolve is actually needed.
    */
   if (undef_layout) {
      /* A subresource in the undefined layout may have been aliased and
       * populated with any arrangement of bits. Therefore, we must initialize
       * the related aux buffer and clear buffer entry with desirable values.
       *
       * Initialize the relevant clear buffer entries.
       */
      for (unsigned level = base_level; level < last_level_num; level++)
         init_fast_clear_state_entry(cmd_buffer, image, aspect, level);

      /* Initialize the aux buffers to enable correct rendering. This operation
       * requires up to two steps: one to rid the aux buffer of data that may
       * cause GPU hangs, and another to ensure that writes done without aux
       * will be visible to reads done with aux.
       *
       * Having an aux buffer with invalid data is possible for CCS buffers
       * SKL+ and for MCS buffers with certain sample counts (2x and 8x). One
       * easy way to get to a valid state is to fast-clear the specified range.
       *
       * Even for MCS buffers that have sample counts that don't require
       * certain bits to be reserved (4x and 16x), we're unsure if the
       * hardware will be okay with the sample mappings given by the
       * undefined buffer. We don't have any data to show that this is a
       * problem, but we want to avoid causing difficult-to-debug problems.
       */
      if ((GEN_GEN >= 9 && image->samples == 1) || image->samples > 1) {
         if (image->samples == 4 || image->samples == 16) {
            anv_perf_warn(cmd_buffer->device->instance, image,
                          "Doing a potentially unnecessary fast-clear to "
                          "define an MCS buffer.");
         }

         anv_image_fast_clear(cmd_buffer, image, aspect,
                              base_level, level_count,
                              base_layer, layer_count);
      }
      /* At this point, some elements of the CCS buffer may have the fast-clear
       * bit-arrangement. As the user writes to a subresource, we need to have
       * the associated CCS elements enter the ambiguated state. This enables
       * reads (implicit or explicit) to reflect the user-written data instead
       * of the clear color. The only time such elements will not change their
       * state as described above, is in a final layout that doesn't have CCS
       * enabled. In this case, we must force the associated CCS buffers of the
       * specified range to enter the ambiguated state in advance.
       */
      if (image->samples == 1 &&
          image->planes[plane].aux_usage != ISL_AUX_USAGE_CCS_E &&
          final_layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
         /* The CCS_D buffer may not be enabled in the final layout. Continue
          * executing this function to perform a resolve.
          */
         anv_perf_warn(cmd_buffer->device->instance, image,
                       "Performing an additional resolve for CCS_D layout "
                       "transition. Consider always leaving it on or "
                       "performing an ambiguation pass.");
      } else {
         /* Writes in the final layout will be aware of the auxiliary buffer.
          * In addition, the clear buffer entries and the auxiliary buffers
          * have been populated with values that will result in correct
          * rendering.
          */
         return;
      }
   } else if (initial_layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
      /* Resolves are only necessary if the subresource may contain blocks
       * fast-cleared to values unsupported in other layouts. This only occurs
       * if the initial layout is COLOR_ATTACHMENT_OPTIMAL.
       */
      return;
   } else if (image->samples > 1) {
      /* MCS buffers don't need resolving. */
      return;
   }

   /* Perform a resolve to synchronize data between the main and aux buffer.
    * Before we begin, we must satisfy the cache flushing requirement specified
    * in the Sky Lake PRM Vol. 7, "MCS Buffer for Render Target(s)":
    *
    *    Any transition from any value in {Clear, Render, Resolve} to a
    *    different value in {Clear, Render, Resolve} requires end of pipe
    *    synchronization.
    *
    * We perform a flush of the write cache before and after the clear and
    * resolve operations to meet this requirement.
    *
    * Unlike other drawing, fast clear operations are not properly
    * synchronized. The first PIPE_CONTROL here likely ensures that the
    * contents of the previous render or clear hit the render target before we
    * resolve and the second likely ensures that the resolve is complete before
    * we do any more rendering or clearing.
    */
   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   for (uint32_t level = base_level; level < last_level_num; level++) {

      /* The number of layers changes at each 3D miplevel. */
      if (image->type == VK_IMAGE_TYPE_3D) {
         layer_count = MIN2(layer_count, anv_image_aux_layers(image, aspect, level));
      }

      genX(load_needs_resolve_predicate)(cmd_buffer, image, aspect, level);

      anv_ccs_resolve(cmd_buffer, image, aspect, level, base_layer, layer_count,
                      image->planes[plane].aux_usage == ISL_AUX_USAGE_CCS_E ?
                      BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL :
                      BLORP_FAST_CLEAR_OP_RESOLVE_FULL);

      genX(set_image_needs_resolve)(cmd_buffer, image, aspect, level, false);
   }

   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
}

/**
 * Setup anv_cmd_state::attachments for vkCmdBeginRenderPass.
 */
static VkResult
genX(cmd_buffer_setup_attachments)(struct anv_cmd_buffer *cmd_buffer,
                                   struct anv_render_pass *pass,
                                   const VkRenderPassBeginInfo *begin)
{
   const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
   struct anv_cmd_state *state = &cmd_buffer->state;

   vk_free(&cmd_buffer->pool->alloc, state->attachments);

   if (pass->attachment_count > 0) {
      state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
                                    pass->attachment_count *
                                       sizeof(state->attachments[0]),
                                    8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (state->attachments == NULL) {
         /* Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
         return anv_batch_set_error(&cmd_buffer->batch,
                                    VK_ERROR_OUT_OF_HOST_MEMORY);
      }
   } else {
      state->attachments = NULL;
   }

   /* Reserve one for the NULL state. */
   unsigned num_states = 1;
   for (uint32_t i = 0; i < pass->attachment_count; ++i) {
      if (vk_format_is_color(pass->attachments[i].format))
         num_states++;

      if (need_input_attachment_state(&pass->attachments[i]))
         num_states++;
   }

   const uint32_t ss_stride = align_u32(isl_dev->ss.size, isl_dev->ss.align);
   state->render_pass_states =
      anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
                             num_states * ss_stride, isl_dev->ss.align);

   struct anv_state next_state = state->render_pass_states;
   next_state.alloc_size = isl_dev->ss.size;

   state->null_surface_state = next_state;
   next_state.offset += ss_stride;
   next_state.map += ss_stride;

   for (uint32_t i = 0; i < pass->attachment_count; ++i) {
      if (vk_format_is_color(pass->attachments[i].format)) {
         state->attachments[i].color.state = next_state;
         next_state.offset += ss_stride;
         next_state.map += ss_stride;
      }

      if (need_input_attachment_state(&pass->attachments[i])) {
         state->attachments[i].input.state = next_state;
         next_state.offset += ss_stride;
         next_state.map += ss_stride;
      }
   }
   assert(next_state.offset == state->render_pass_states.offset +
                               state->render_pass_states.alloc_size);

   if (begin) {
      ANV_FROM_HANDLE(anv_framebuffer, framebuffer, begin->framebuffer);
      assert(pass->attachment_count == framebuffer->attachment_count);

      isl_null_fill_state(isl_dev, state->null_surface_state.map,
                          isl_extent3d(framebuffer->width,
                                       framebuffer->height,
                                       framebuffer->layers));

      for (uint32_t i = 0; i < pass->attachment_count; ++i) {
         struct anv_render_pass_attachment *att = &pass->attachments[i];
         VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
         VkImageAspectFlags clear_aspects = 0;

         if (att_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
            /* color attachment */
            if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
               clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
            }
         } else {
            /* depthstencil attachment */
            if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
                att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
               clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
            }
            if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
                att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
               clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
            }
         }

         state->attachments[i].current_layout = att->initial_layout;
         state->attachments[i].pending_clear_aspects = clear_aspects;
         if (clear_aspects)
            state->attachments[i].clear_value = begin->pClearValues[i];

         struct anv_image_view *iview = framebuffer->attachments[i];
         anv_assert(iview->vk_format == att->format);
         anv_assert(iview->n_planes == 1);

         union isl_color_value clear_color = { .u32 = { 0, } };
         if (att_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
            assert(att_aspects == VK_IMAGE_ASPECT_COLOR_BIT);
            color_attachment_compute_aux_usage(cmd_buffer->device,
                                               state, i, begin->renderArea,
                                               &clear_color);

            anv_image_fill_surface_state(cmd_buffer->device,
                                         iview->image,
                                         VK_IMAGE_ASPECT_COLOR_BIT,
                                         &iview->planes[0].isl,
                                         ISL_SURF_USAGE_RENDER_TARGET_BIT,
                                         state->attachments[i].aux_usage,
                                         &clear_color,
                                         0,
                                         &state->attachments[i].color,
                                         NULL);

            add_image_view_relocs(cmd_buffer, iview, 0,
                                  state->attachments[i].color);
         } else {
            /* This field will be initialized after the first subpass
             * transition.
             */
            state->attachments[i].aux_usage = ISL_AUX_USAGE_NONE;

            state->attachments[i].input_aux_usage = ISL_AUX_USAGE_NONE;
         }

         if (need_input_attachment_state(&pass->attachments[i])) {
            anv_image_fill_surface_state(cmd_buffer->device,
                                         iview->image,
                                         VK_IMAGE_ASPECT_COLOR_BIT,
                                         &iview->planes[0].isl,
                                         ISL_SURF_USAGE_TEXTURE_BIT,
                                         state->attachments[i].input_aux_usage,
                                         &clear_color,
                                         0,
                                         &state->attachments[i].input,
                                         NULL);

            add_image_view_relocs(cmd_buffer, iview, 0,
                                  state->attachments[i].input);
         }
      }
   }

   return VK_SUCCESS;
}

VkResult
genX(BeginCommandBuffer)(
    VkCommandBuffer                             commandBuffer,
    const VkCommandBufferBeginInfo*             pBeginInfo)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* If this is the first vkBeginCommandBuffer, we must *initialize* the
    * command buffer's state. Otherwise, we must *reset* its state. In both
    * cases we reset it.
    *
    * From the Vulkan 1.0 spec:
    *
    *    If a command buffer is in the executable state and the command buffer
    *    was allocated from a command pool with the
    *    VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT flag set, then
    *    vkBeginCommandBuffer implicitly resets the command buffer, behaving
    *    as if vkResetCommandBuffer had been called with
    *    VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT not set. It then puts
    *    the command buffer in the recording state.
    */
   anv_cmd_buffer_reset(cmd_buffer);

   cmd_buffer->usage_flags = pBeginInfo->flags;

   assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
          !(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT));

   genX(cmd_buffer_emit_state_base_address)(cmd_buffer);

   /* We sometimes store vertex data in the dynamic state buffer for blorp
    * operations and our dynamic state stream may re-use data from previous
    * command buffers. In order to prevent stale cache data, we flush the VF
    * cache. We could do this on every blorp call but that's not really
    * needed as all of the data will get written by the CPU prior to the GPU
    * executing anything. The chances are fairly high that they will use
    * blorp at least once per primary command buffer so it shouldn't be
    * wasted.
    */
   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
      cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;

   VkResult result = VK_SUCCESS;
   if (cmd_buffer->usage_flags &
       VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
      assert(pBeginInfo->pInheritanceInfo);
      cmd_buffer->state.pass =
         anv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
      cmd_buffer->state.subpass =
         &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];

      /* This is optional in the inheritance info. */
      cmd_buffer->state.framebuffer =
         anv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);

      result = genX(cmd_buffer_setup_attachments)(cmd_buffer,
                                                  cmd_buffer->state.pass, NULL);

      /* Record that HiZ is enabled if we can. */
      if (cmd_buffer->state.framebuffer) {
         const struct anv_image_view * const iview =
            anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);

         if (iview) {
            VkImageLayout layout =
               cmd_buffer->state.subpass->depth_stencil_attachment.layout;

            enum isl_aux_usage aux_usage =
               anv_layout_to_aux_usage(&cmd_buffer->device->info, iview->image,
                                       VK_IMAGE_ASPECT_DEPTH_BIT, layout);

            cmd_buffer->state.hiz_enabled = aux_usage == ISL_AUX_USAGE_HIZ;
         }
      }

      cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
   }

   return result;
}

VkResult
genX(EndCommandBuffer)(
    VkCommandBuffer                             commandBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (anv_batch_has_error(&cmd_buffer->batch))
      return cmd_buffer->batch.status;

   /* We want every command buffer to start with the PMA fix in a known state,
    * so we disable it at the end of the command buffer.
    */
   genX(cmd_buffer_enable_pma_fix)(cmd_buffer, false);

   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

   anv_cmd_buffer_end_batch_buffer(cmd_buffer);

   return VK_SUCCESS;
}

void
genX(CmdExecuteCommands)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCmdBuffers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);

   assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);

   if (anv_batch_has_error(&primary->batch))
      return;

   /* The secondary command buffers will assume that the PMA fix is disabled
    * when they begin executing. Make sure this is true.
    */
   genX(cmd_buffer_enable_pma_fix)(primary, false);

   /* The secondary command buffer doesn't know which textures etc. have been
    * flushed prior to their execution. Apply those flushes now.
    */
   genX(cmd_buffer_apply_pipe_flushes)(primary);

   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);

      assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
      assert(!anv_batch_has_error(&secondary->batch));

      if (secondary->usage_flags &
          VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
         /* If we're continuing a render pass from the primary, we need to
          * copy the surface states for the current subpass into the storage
          * we allocated for them in BeginCommandBuffer.
          */
         struct anv_bo *ss_bo =
            &primary->device->surface_state_pool.block_pool.bo;
         struct anv_state src_state = primary->state.render_pass_states;
         struct anv_state dst_state = secondary->state.render_pass_states;
         assert(src_state.alloc_size == dst_state.alloc_size);

         genX(cmd_buffer_so_memcpy)(primary, ss_bo, dst_state.offset,
                                    ss_bo, src_state.offset,
                                    src_state.alloc_size);
      }

      anv_cmd_buffer_add_secondary(primary, secondary);
   }

   /* The secondary may have selected a different pipeline (3D or compute) and
    * may have changed the current L3$ configuration. Reset our tracking
    * variables to invalid values to ensure that we re-emit these in the case
    * where we do any draws or compute dispatches from the primary after the
    * secondary has returned.
    */
   primary->state.current_pipeline = UINT32_MAX;
   primary->state.current_l3_config = NULL;

   /* Each of the secondary command buffers will use its own state base
    * address. We need to re-emit state base address for the primary after
    * all of the secondaries are done.
    *
    * TODO: Maybe we want to make this a dirty bit to avoid extra state base
    * address calls?
    */
   genX(cmd_buffer_emit_state_base_address)(primary);
}

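/* Per-platform default values for the SQGHPCI field of L3SQCREG1; these are
 * ORed into the register below together with the convert-to-UC bits.
 */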
#define IVB_L3SQCREG1_SQGHPCI_DEFAULT  0x00730000
#define VLV_L3SQCREG1_SQGHPCI_DEFAULT  0x00d30000
#define HSW_L3SQCREG1_SQGHPCI_DEFAULT  0x00610000

/**
 * Program the hardware to use the specified L3 configuration.
 */
void
genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer,
                           const struct gen_l3_config *cfg)
{
   assert(cfg);
   if (cfg == cmd_buffer->state.current_l3_config)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_L3)) {
      intel_logd("L3 config transition: ");
      gen_dump_l3_config(cfg, stderr);
   }

   const bool has_slm = cfg->n[GEN_L3P_SLM];

   /* According to the hardware docs, the L3 partitioning can only be changed
    * while the pipeline is completely drained and the caches are flushed,
    * which involves a first PIPE_CONTROL flush which stalls the pipeline...
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DCFlushEnable = true;
      pc.PostSyncOperation = NoWrite;
      pc.CommandStreamerStallEnable = true;
   }

   /* ...followed by a second pipelined PIPE_CONTROL that initiates
    * invalidation of the relevant caches. Note that because RO invalidation
    * happens at the top of the pipeline (i.e. right away as the PIPE_CONTROL
    * command is processed by the CS) we cannot combine it with the previous
    * stalling flush as the hardware documentation suggests, because that
    * would cause the CS to stall on previous rendering *after* RO
    * invalidation and wouldn't prevent the RO caches from being polluted by
    * concurrent rendering before the stall completes. This intentionally
    * doesn't implement the SKL+ hardware workaround suggesting to enable CS
    * stall on PIPE_CONTROLs with the texture cache invalidation bit set for
    * GPGPU workloads because the previous and subsequent PIPE_CONTROLs
    * already guarantee that there is no concurrent GPGPU kernel execution
    * (see SKL HSD 2132585).
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.TextureCacheInvalidationEnable = true;
      pc.ConstantCacheInvalidationEnable = true;
      pc.InstructionCacheInvalidateEnable = true;
      pc.StateCacheInvalidationEnable = true;
      pc.PostSyncOperation = NoWrite;
   }

   /* Now send a third stalling flush to make sure that invalidation is
    * complete when the L3 configuration registers are modified.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DCFlushEnable = true;
      pc.PostSyncOperation = NoWrite;
      pc.CommandStreamerStallEnable = true;
   }

#if GEN_GEN >= 8

   assert(!cfg->n[GEN_L3P_IS] && !cfg->n[GEN_L3P_C] && !cfg->n[GEN_L3P_T]);

   uint32_t l3cr;
   anv_pack_struct(&l3cr, GENX(L3CNTLREG),
                   .SLMEnable = has_slm,
                   .URBAllocation = cfg->n[GEN_L3P_URB],
                   .ROAllocation = cfg->n[GEN_L3P_RO],
                   .DCAllocation = cfg->n[GEN_L3P_DC],
                   .AllAllocation = cfg->n[GEN_L3P_ALL]);

   /* Set up the L3 partitioning. */
   emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG_num), l3cr);

#else

   const bool has_dc = cfg->n[GEN_L3P_DC] || cfg->n[GEN_L3P_ALL];
   const bool has_is = cfg->n[GEN_L3P_IS] || cfg->n[GEN_L3P_RO] ||
                       cfg->n[GEN_L3P_ALL];
   const bool has_c = cfg->n[GEN_L3P_C] || cfg->n[GEN_L3P_RO] ||
                      cfg->n[GEN_L3P_ALL];
   const bool has_t = cfg->n[GEN_L3P_T] || cfg->n[GEN_L3P_RO] ||
                      cfg->n[GEN_L3P_ALL];

   assert(!cfg->n[GEN_L3P_ALL]);

   /* When enabled SLM only uses a portion of the L3 on half of the banks,
    * the matching space on the remaining banks has to be allocated to a
    * client (URB for all validated configurations) set to the
    * lower-bandwidth 2-bank address hashing mode.
    */
   const struct gen_device_info *devinfo = &cmd_buffer->device->info;
   const bool urb_low_bw = has_slm && !devinfo->is_baytrail;
   assert(!urb_low_bw || cfg->n[GEN_L3P_URB] == cfg->n[GEN_L3P_SLM]);

   /* Minimum number of ways that can be allocated to the URB. */
   MAYBE_UNUSED const unsigned n0_urb = devinfo->is_baytrail ? 32 : 0;
   assert(cfg->n[GEN_L3P_URB] >= n0_urb);

   uint32_t l3sqcr1, l3cr2, l3cr3;
   anv_pack_struct(&l3sqcr1, GENX(L3SQCREG1),
                   .ConvertDC_UC = !has_dc,
                   .ConvertIS_UC = !has_is,
                   .ConvertC_UC = !has_c,
                   .ConvertT_UC = !has_t);
   l3sqcr1 |=
      GEN_IS_HASWELL ? HSW_L3SQCREG1_SQGHPCI_DEFAULT :
      devinfo->is_baytrail ? VLV_L3SQCREG1_SQGHPCI_DEFAULT :
      IVB_L3SQCREG1_SQGHPCI_DEFAULT;

   anv_pack_struct(&l3cr2, GENX(L3CNTLREG2),
                   .SLMEnable = has_slm,
                   .URBLowBandwidth = urb_low_bw,
                   .URBAllocation = cfg->n[GEN_L3P_URB] - n0_urb,
#if !GEN_IS_HASWELL
                   .ALLAllocation = cfg->n[GEN_L3P_ALL],
#endif
                   .ROAllocation = cfg->n[GEN_L3P_RO],
                   .DCAllocation = cfg->n[GEN_L3P_DC]);

   anv_pack_struct(&l3cr3, GENX(L3CNTLREG3),
                   .ISAllocation = cfg->n[GEN_L3P_IS],
                   .ISLowBandwidth = 0,
                   .CAllocation = cfg->n[GEN_L3P_C],
                   .CLowBandwidth = 0,
                   .TAllocation = cfg->n[GEN_L3P_T],
                   .TLowBandwidth = 0);

   /* Set up the L3 partitioning. */
   emit_lri(&cmd_buffer->batch, GENX(L3SQCREG1_num), l3sqcr1);
   emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG2_num), l3cr2);
   emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG3_num), l3cr3);

#if GEN_IS_HASWELL
   if (cmd_buffer->device->instance->physicalDevice.cmd_parser_version >= 4) {
      /* Enable L3 atomics on HSW if we have a DC partition, otherwise keep
       * them disabled to avoid crashing the system hard.
       */
      uint32_t scratch1, chicken3;
      anv_pack_struct(&scratch1, GENX(SCRATCH1),
                      .L3AtomicDisable = !has_dc);
      anv_pack_struct(&chicken3, GENX(CHICKEN3),
                      .L3AtomicDisableMask = true,
                      .L3AtomicDisable = !has_dc);
      emit_lri(&cmd_buffer->batch, GENX(SCRATCH1_num), scratch1);
      emit_lri(&cmd_buffer->batch, GENX(CHICKEN3_num), chicken3);
   }
#endif

#endif

   cmd_buffer->state.current_l3_config = cfg;
}

void
genX(cmd_buffer_apply_pipe_flushes)(struct anv_cmd_buffer *cmd_buffer)
{
   enum anv_pipe_bits bits = cmd_buffer->state.pending_pipe_bits;

   /* Flushes are pipelined while invalidations are handled immediately.
    * Therefore, if we're flushing anything then we need to schedule a stall
    * before any invalidations can happen.
    */
   if (bits & ANV_PIPE_FLUSH_BITS)
      bits |= ANV_PIPE_NEEDS_CS_STALL_BIT;

   /* If we're going to do an invalidate and we have a pending CS stall that
    * has yet to be resolved, we do the CS stall now.
    */
   if ((bits & ANV_PIPE_INVALIDATE_BITS) &&
       (bits & ANV_PIPE_NEEDS_CS_STALL_BIT)) {
      bits |= ANV_PIPE_CS_STALL_BIT;
      bits &= ~ANV_PIPE_NEEDS_CS_STALL_BIT;
   }

   if (bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT)) {
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
         pipe.DepthCacheFlushEnable = bits & ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
         pipe.DCFlushEnable = bits & ANV_PIPE_DATA_CACHE_FLUSH_BIT;
         pipe.RenderTargetCacheFlushEnable =
            bits & ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;

         pipe.DepthStallEnable = bits & ANV_PIPE_DEPTH_STALL_BIT;
         pipe.CommandStreamerStallEnable = bits & ANV_PIPE_CS_STALL_BIT;
         pipe.StallAtPixelScoreboard = bits & ANV_PIPE_STALL_AT_SCOREBOARD_BIT;

         /*
          * According to the Broadwell documentation, any PIPE_CONTROL with the
          * "Command Streamer Stall" bit set must also have one of the
          * following bits set:
          *
          *  - Render Target Cache Flush
          *  - Depth Cache Flush
          *  - Stall at Pixel Scoreboard
          *  - Post-Sync Operation
          *  - Depth Stall
          *  - DC Flush Enable
          *
          * I chose "Stall at Pixel Scoreboard" since that's what we use in
          * mesa and it seems to work fine. The choice is fairly arbitrary.
          */
         if ((bits & ANV_PIPE_CS_STALL_BIT) &&
             !(bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_DEPTH_STALL_BIT |
                       ANV_PIPE_STALL_AT_SCOREBOARD_BIT)))
            pipe.StallAtPixelScoreboard = true;
      }

      bits &= ~(ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT);
   }

   if (bits & ANV_PIPE_INVALIDATE_BITS) {
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
         pipe.StateCacheInvalidationEnable =
            bits & ANV_PIPE_STATE_CACHE_INVALIDATE_BIT;
         pipe.ConstantCacheInvalidationEnable =
            bits & ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
         pipe.VFCacheInvalidationEnable =
            bits & ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
         pipe.TextureCacheInvalidationEnable =
            bits & ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
         pipe.InstructionCacheInvalidateEnable =
            bits & ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT;
      }

      bits &= ~ANV_PIPE_INVALIDATE_BITS;
   }

   cmd_buffer->state.pending_pipe_bits = bits;
}

void genX(CmdPipelineBarrier)(
    VkCommandBuffer                             commandBuffer,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    VkBool32                                    byRegion,
    uint32_t                                    memoryBarrierCount,
    const VkMemoryBarrier*                      pMemoryBarriers,
    uint32_t                                    bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
    uint32_t                                    imageMemoryBarrierCount,
    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* XXX: Right now, we're really dumb and just flush whatever categories
    * the app asks for. One of these days we may make this a bit better
    * but right now that's all the hardware allows for in most areas.
    */
   VkAccessFlags src_flags = 0;
   VkAccessFlags dst_flags = 0;

   for (uint32_t i = 0; i < memoryBarrierCount; i++) {
      src_flags |= pMemoryBarriers[i].srcAccessMask;
      dst_flags |= pMemoryBarriers[i].dstAccessMask;
   }

   for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
      src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
      dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
   }

   for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
      src_flags |= pImageMemoryBarriers[i].srcAccessMask;
      dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
      ANV_FROM_HANDLE(anv_image, image, pImageMemoryBarriers[i].image);
      const VkImageSubresourceRange *range =
         &pImageMemoryBarriers[i].subresourceRange;

      if (range->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
         transition_depth_buffer(cmd_buffer, image,
                                 pImageMemoryBarriers[i].oldLayout,
                                 pImageMemoryBarriers[i].newLayout);
      } else if (range->aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
         VkImageAspectFlags color_aspects =
            anv_image_expand_aspects(image, range->aspectMask);
         uint32_t aspect_bit;

         anv_foreach_image_aspect_bit(aspect_bit, image, color_aspects) {
            transition_color_buffer(cmd_buffer, image, 1UL << aspect_bit,
                                    range->baseMipLevel,
                                    anv_get_levelCount(image, range),
                                    range->baseArrayLayer,
                                    anv_get_layerCount(image, range),
                                    pImageMemoryBarriers[i].oldLayout,
                                    pImageMemoryBarriers[i].newLayout);
         }
      }
   }

   cmd_buffer->state.pending_pipe_bits |=
      anv_pipe_flush_bits_for_access_flags(src_flags) |
      anv_pipe_invalidate_bits_for_access_flags(dst_flags);
}

static void
cmd_buffer_alloc_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   VkShaderStageFlags stages = cmd_buffer->state.pipeline->active_stages;

   /* In order to avoid thrash, we assume that vertex and fragment stages
    * always exist. In the rare case where one is missing *and* the other
    * uses push constants, this may be suboptimal. However, avoiding stalls
    * seems more important.
    */
   stages |= VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_VERTEX_BIT;

   if (stages == cmd_buffer->state.push_constant_stages)
      return;

#if GEN_GEN >= 8
   const unsigned push_constant_kb = 32;
#elif GEN_IS_HASWELL
   const unsigned push_constant_kb = cmd_buffer->device->info.gt == 3 ? 32 : 16;
#else
   const unsigned push_constant_kb = 16;
#endif

   const unsigned num_stages =
      _mesa_bitcount(stages & VK_SHADER_STAGE_ALL_GRAPHICS);
   unsigned size_per_stage = push_constant_kb / num_stages;
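   /* For example, on Broadwell (push_constant_kb == 32) with all five
    * graphics stages enabled, each stage gets 32 / 5 = 6KB. The loop below
    * assigns 6KB each to VS through GS, and the remaining 8KB goes to the
    * fragment shader.
    */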

   /* Broadwell+ and Haswell gt3 require that the push constant sizes be in
    * units of 2KB. Incidentally, these are the same platforms that have
    * 32KB worth of push constant space.
    */
   if (push_constant_kb == 32)
      size_per_stage &= ~1u;

   uint32_t kb_used = 0;
   for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_FRAGMENT; i++) {
      unsigned push_size = (stages & (1 << i)) ? size_per_stage : 0;
      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
         alloc._3DCommandSubOpcode  = 18 + i;
         alloc.ConstantBufferOffset = (push_size > 0) ? kb_used : 0;
         alloc.ConstantBufferSize   = push_size;
      }
      kb_used += push_size;
   }

   anv_batch_emit(&cmd_buffer->batch,
                  GENX(3DSTATE_PUSH_CONSTANT_ALLOC_PS), alloc) {
      alloc.ConstantBufferOffset = kb_used;
      alloc.ConstantBufferSize = push_constant_kb - kb_used;
   }

   cmd_buffer->state.push_constant_stages = stages;

   /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
    *
    *    "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
    *    the next 3DPRIMITIVE command after programming the
    *    3DSTATE_PUSH_CONSTANT_ALLOC_VS"
    *
    * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
    * pipeline setup, we need to dirty push constants.
    */
   cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
}

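/* Resolve a pipeline binding to the descriptor it points at within the
 * currently bound descriptor sets.
 */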
1464 static const struct anv_descriptor *
1465 anv_descriptor_for_binding(const struct anv_cmd_buffer *cmd_buffer,
1466 const struct anv_pipeline_binding *binding)
1467 {
1468 assert(binding->set < MAX_SETS);
1469 const struct anv_descriptor_set *set =
1470 cmd_buffer->state.descriptors[binding->set];
1471 const uint32_t offset =
1472 set->layout->binding[binding->binding].descriptor_index;
1473 return &set->descriptors[offset + binding->index];
1474 }
1475
1476 static uint32_t
1477 dynamic_offset_for_binding(const struct anv_cmd_buffer *cmd_buffer,
1478 const struct anv_pipeline *pipeline,
1479 const struct anv_pipeline_binding *binding)
1480 {
1481 assert(binding->set < MAX_SETS);
1482 const struct anv_descriptor_set *set =
1483 cmd_buffer->state.descriptors[binding->set];
1484
1485 uint32_t dynamic_offset_idx =
1486 pipeline->layout->set[binding->set].dynamic_offset_start +
1487 set->layout->binding[binding->binding].dynamic_offset_index +
1488 binding->index;
1489
1490 return cmd_buffer->state.dynamic_offsets[dynamic_offset_idx];
1491 }
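/* Index math sketch (hypothetical numbers): if set 1's dynamic offsets
 * begin at slot 4 of the flattened pipeline array (dynamic_offset_start = 4),
 * the binding's first dynamic descriptor is at index 1 within the set
 * (dynamic_offset_index = 1), and we want array element 0, then the value
 * read is cmd_buffer->state.dynamic_offsets[4 + 1 + 0], i.e. the offsets
 * in the order the client passed them to vkCmdBindDescriptorSets.
 */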
1492
1493 static VkResult
1494 emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
1495 gl_shader_stage stage,
1496 struct anv_state *bt_state)
1497 {
1498 struct anv_subpass *subpass = cmd_buffer->state.subpass;
1499 struct anv_pipeline *pipeline;
1500 uint32_t bias, state_offset;
1501
1502 switch (stage) {
1503 case MESA_SHADER_COMPUTE:
1504 pipeline = cmd_buffer->state.compute_pipeline;
1505 bias = 1;
1506 break;
1507 default:
1508 pipeline = cmd_buffer->state.pipeline;
1509 bias = 0;
1510 break;
1511 }
1512
1513 if (!anv_pipeline_has_stage(pipeline, stage)) {
1514 *bt_state = (struct anv_state) { 0, };
1515 return VK_SUCCESS;
1516 }
1517
1518 struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map;
1519 if (bias + map->surface_count == 0) {
1520 *bt_state = (struct anv_state) { 0, };
1521 return VK_SUCCESS;
1522 }
1523
1524 *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
1525 bias + map->surface_count,
1526 &state_offset);
1527 uint32_t *bt_map = bt_state->map;
1528
1529 if (bt_state->map == NULL)
1530 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
1531
1532 if (stage == MESA_SHADER_COMPUTE &&
1533 get_cs_prog_data(pipeline)->uses_num_work_groups) {
1534 struct anv_bo *bo = cmd_buffer->state.num_workgroups_bo;
1535 uint32_t bo_offset = cmd_buffer->state.num_workgroups_offset;
1536
1537 struct anv_state surface_state;
1538 surface_state =
1539 anv_cmd_buffer_alloc_surface_state(cmd_buffer);
1540
1541 const enum isl_format format =
1542 anv_isl_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
1543 anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
1544 format, bo_offset, 12, 1);
1545
1546 bt_map[0] = surface_state.offset + state_offset;
1547 add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
1548 }
1549
1550 if (map->surface_count == 0)
1551 goto out;
1552
1553 if (map->image_count > 0) {
1554 VkResult result =
1555 anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images);
1556 if (result != VK_SUCCESS)
1557 return result;
1558
1559 cmd_buffer->state.push_constants_dirty |= 1 << stage;
1560 }
1561
1562 uint32_t image = 0;
1563 for (uint32_t s = 0; s < map->surface_count; s++) {
1564 struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];
1565
1566 struct anv_state surface_state;
1567
1568 if (binding->set == ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS) {
1569 /* Color attachment binding */
1570 assert(stage == MESA_SHADER_FRAGMENT);
1571 assert(binding->binding == 0);
1572 if (binding->index < subpass->color_count) {
1573 const unsigned att =
1574 subpass->color_attachments[binding->index].attachment;
1575
1576 /* From the Vulkan 1.0.46 spec:
1577 *
1578 * "If any color or depth/stencil attachments are
1579 * VK_ATTACHMENT_UNUSED, then no writes occur for those
1580 * attachments."
1581 */
1582 if (att == VK_ATTACHMENT_UNUSED) {
1583 surface_state = cmd_buffer->state.null_surface_state;
1584 } else {
1585 surface_state = cmd_buffer->state.attachments[att].color.state;
1586 }
1587 } else {
1588 surface_state = cmd_buffer->state.null_surface_state;
1589 }
1590
1591 bt_map[bias + s] = surface_state.offset + state_offset;
1592 continue;
1593 }
1594
1595 const struct anv_descriptor *desc =
1596 anv_descriptor_for_binding(cmd_buffer, binding);
1597
1598 switch (desc->type) {
1599 case VK_DESCRIPTOR_TYPE_SAMPLER:
1600 /* Nothing for us to do here */
1601 continue;
1602
1603 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1604 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: {
1605 struct anv_surface_state sstate =
1606 (desc->layout == VK_IMAGE_LAYOUT_GENERAL) ?
1607 desc->image_view->planes[binding->plane].general_sampler_surface_state :
1608 desc->image_view->planes[binding->plane].optimal_sampler_surface_state;
1609 surface_state = sstate.state;
1610 assert(surface_state.alloc_size);
1611 add_image_view_relocs(cmd_buffer, desc->image_view,
1612 binding->plane, sstate);
1613 break;
1614 }
1615 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1616 assert(stage == MESA_SHADER_FRAGMENT);
1617 if ((desc->image_view->aspect_mask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) == 0) {
1618 /* For depth and stencil input attachments, we treat them like any
1619 * old texture that a user may have bound.
1620 */
1621 struct anv_surface_state sstate =
1622 (desc->layout == VK_IMAGE_LAYOUT_GENERAL) ?
1623 desc->image_view->planes[binding->plane].general_sampler_surface_state :
1624 desc->image_view->planes[binding->plane].optimal_sampler_surface_state;
1625 surface_state = sstate.state;
1626 assert(surface_state.alloc_size);
1627 add_image_view_relocs(cmd_buffer, desc->image_view,
1628 binding->plane, sstate);
1629 } else {
1630 /* For color input attachments, we create the surface state at
1631 * vkBeginRenderPass time so that we can include aux and clear
1632 * color information.
1633 */
1634 assert(binding->input_attachment_index < subpass->input_count);
1635 const unsigned subpass_att = binding->input_attachment_index;
1636 const unsigned att = subpass->input_attachments[subpass_att].attachment;
1637 surface_state = cmd_buffer->state.attachments[att].input.state;
1638 }
1639 break;
1640
1641 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
1642 struct anv_surface_state sstate = (binding->write_only)
1643 ? desc->image_view->planes[binding->plane].writeonly_storage_surface_state
1644 : desc->image_view->planes[binding->plane].storage_surface_state;
1645 surface_state = sstate.state;
1646 assert(surface_state.alloc_size);
1647 add_image_view_relocs(cmd_buffer, desc->image_view,
1648 binding->plane, sstate);
1649
1650 struct brw_image_param *image_param =
1651 &cmd_buffer->state.push_constants[stage]->images[image++];
1652
1653 *image_param = desc->image_view->planes[binding->plane].storage_image_param;
1654 image_param->surface_idx = bias + s;
1655 break;
1656 }
1657
1658 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1659 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1660 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1661 surface_state = desc->buffer_view->surface_state;
1662 assert(surface_state.alloc_size);
1663 add_surface_state_reloc(cmd_buffer, surface_state,
1664 desc->buffer_view->bo,
1665 desc->buffer_view->offset);
1666 break;
1667
1668 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1669 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
1670 /* Compute the offset within the buffer */
1671 uint32_t dynamic_offset =
1672 dynamic_offset_for_binding(cmd_buffer, pipeline, binding);
1673 uint64_t offset = desc->offset + dynamic_offset;
1674 /* Clamp to the buffer size */
1675 offset = MIN2(offset, desc->buffer->size);
1676 /* Clamp the range to the buffer size */
1677 uint32_t range = MIN2(desc->range, desc->buffer->size - offset);
1678
1679 surface_state =
1680 anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
1681 enum isl_format format =
1682 anv_isl_format_for_descriptor_type(desc->type);
1683
1684 anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
1685 format, offset, range, 1);
1686 add_surface_state_reloc(cmd_buffer, surface_state,
1687 desc->buffer->bo,
1688 desc->buffer->offset + offset);
1689 break;
1690 }
1691
1692 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1693 surface_state = (binding->write_only)
1694 ? desc->buffer_view->writeonly_storage_surface_state
1695 : desc->buffer_view->storage_surface_state;
1696 assert(surface_state.alloc_size);
1697 add_surface_state_reloc(cmd_buffer, surface_state,
1698 desc->buffer_view->bo,
1699 desc->buffer_view->offset);
1700
1701 struct brw_image_param *image_param =
1702 &cmd_buffer->state.push_constants[stage]->images[image++];
1703
1704 *image_param = desc->buffer_view->storage_image_param;
1705 image_param->surface_idx = bias + s;
1706 break;
1707
1708 default:
1709 assert(!"Invalid descriptor type");
1710 continue;
1711 }
1712
1713 bt_map[bias + s] = surface_state.offset + state_offset;
1714 }
1715 assert(image == map->image_count);
1716
1717 out:
1718 anv_state_flush(cmd_buffer->device, *bt_state);
1719
1720 return VK_SUCCESS;
1721 }
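/* Resulting binding table layout, roughly: for compute, slot 0 (the bias)
 * holds the gl_NumWorkGroups surface when the shader needs it; graphics
 * stages use bias 0. Slots [bias, bias + surface_count) then follow
 * map->surface_to_descriptor order, with fragment color attachments
 * resolving to either the per-attachment surface state or the null
 * surface state for VK_ATTACHMENT_UNUSED.
 */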
1722
1723 static VkResult
1724 emit_samplers(struct anv_cmd_buffer *cmd_buffer,
1725 gl_shader_stage stage,
1726 struct anv_state *state)
1727 {
1728 struct anv_pipeline *pipeline;
1729
1730 if (stage == MESA_SHADER_COMPUTE)
1731 pipeline = cmd_buffer->state.compute_pipeline;
1732 else
1733 pipeline = cmd_buffer->state.pipeline;
1734
1735 if (!anv_pipeline_has_stage(pipeline, stage)) {
1736 *state = (struct anv_state) { 0, };
1737 return VK_SUCCESS;
1738 }
1739
1740 struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map;
1741 if (map->sampler_count == 0) {
1742 *state = (struct anv_state) { 0, };
1743 return VK_SUCCESS;
1744 }
1745
1746 uint32_t size = map->sampler_count * 16;
1747 *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);
1748
1749 if (state->map == NULL)
1750 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
1751
1752 for (uint32_t s = 0; s < map->sampler_count; s++) {
1753 struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s];
1754 struct anv_descriptor_set *set =
1755 cmd_buffer->state.descriptors[binding->set];
1756 uint32_t offset = set->layout->binding[binding->binding].descriptor_index;
1757 struct anv_descriptor *desc = &set->descriptors[offset + binding->index];
1758
1759 if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
1760 desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
1761 continue;
1762
1763 struct anv_sampler *sampler = desc->sampler;
1764
1765 /* This can happen if we have an unfilled slot since TYPE_SAMPLER
1766 * happens to be zero.
1767 */
1768 if (sampler == NULL)
1769 continue;
1770
1771 memcpy(state->map + (s * 16),
1772 sampler->state[binding->plane], sizeof(sampler->state[0]));
1773 }
1774
1775 anv_state_flush(cmd_buffer->device, *state);
1776
1777 return VK_SUCCESS;
1778 }
1779
1780 static uint32_t
1781 flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
1782 {
1783 VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
1784 cmd_buffer->state.pipeline->active_stages;
1785
1786 VkResult result = VK_SUCCESS;
1787 anv_foreach_stage(s, dirty) {
1788 result = emit_samplers(cmd_buffer, s, &cmd_buffer->state.samplers[s]);
1789 if (result != VK_SUCCESS)
1790 break;
1791 result = emit_binding_table(cmd_buffer, s,
1792 &cmd_buffer->state.binding_tables[s]);
1793 if (result != VK_SUCCESS)
1794 break;
1795 }
1796
1797 if (result != VK_SUCCESS) {
1798 assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);
1799
1800 result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
1801 if (result != VK_SUCCESS)
1802 return 0;
1803
1804 /* Re-emit state base addresses so we get the new surface state base
1805 * address before we start emitting binding tables etc.
1806 */
1807 genX(cmd_buffer_emit_state_base_address)(cmd_buffer);
1808
1809 /* Re-emit all active binding tables */
1810 dirty |= cmd_buffer->state.pipeline->active_stages;
1811 anv_foreach_stage(s, dirty) {
1812 result = emit_samplers(cmd_buffer, s, &cmd_buffer->state.samplers[s]);
1813 if (result != VK_SUCCESS) {
1814 anv_batch_set_error(&cmd_buffer->batch, result);
1815 return 0;
1816 }
1817 result = emit_binding_table(cmd_buffer, s,
1818 &cmd_buffer->state.binding_tables[s]);
1819 if (result != VK_SUCCESS) {
1820 anv_batch_set_error(&cmd_buffer->batch, result);
1821 return 0;
1822 }
1823 }
1824 }
1825
1826 cmd_buffer->state.descriptors_dirty &= ~dirty;
1827
1828 return dirty;
1829 }
1830
1831 static void
1832 cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,
1833 uint32_t stages)
1834 {
1835 static const uint32_t sampler_state_opcodes[] = {
1836 [MESA_SHADER_VERTEX] = 43,
1837 [MESA_SHADER_TESS_CTRL] = 44, /* HS */
1838 [MESA_SHADER_TESS_EVAL] = 45, /* DS */
1839 [MESA_SHADER_GEOMETRY] = 46,
1840 [MESA_SHADER_FRAGMENT] = 47,
1841 [MESA_SHADER_COMPUTE] = 0,
1842 };
1843
1844 static const uint32_t binding_table_opcodes[] = {
1845 [MESA_SHADER_VERTEX] = 38,
1846 [MESA_SHADER_TESS_CTRL] = 39,
1847 [MESA_SHADER_TESS_EVAL] = 40,
1848 [MESA_SHADER_GEOMETRY] = 41,
1849 [MESA_SHADER_FRAGMENT] = 42,
1850 [MESA_SHADER_COMPUTE] = 0,
1851 };
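/* Both tables rely on the per-stage packets sharing a single layout: we
 * pack the VS flavor of each packet below and patch _3DCommandSubOpcode
 * to retarget it (38..42 for binding table pointers, 43..47 for sampler
 * state pointers).
 */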
1852
1853 anv_foreach_stage(s, stages) {
1854 assert(s < ARRAY_SIZE(binding_table_opcodes));
1855 assert(binding_table_opcodes[s] > 0);
1856
1857 if (cmd_buffer->state.samplers[s].alloc_size > 0) {
1858 anv_batch_emit(&cmd_buffer->batch,
1859 GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ssp) {
1860 ssp._3DCommandSubOpcode = sampler_state_opcodes[s];
1861 ssp.PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset;
1862 }
1863 }
1864
1865 /* Always emit binding table pointers if we're asked to, since on SKL
1866 * this is what flushes push constants. */
1867 anv_batch_emit(&cmd_buffer->batch,
1868 GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), btp) {
1869 btp._3DCommandSubOpcode = binding_table_opcodes[s];
1870 btp.PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset;
1871 }
1872 }
1873 }
1874
1875 static void
1876 cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer,
1877 VkShaderStageFlags dirty_stages)
1878 {
1879 const struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
1880
1881 static const uint32_t push_constant_opcodes[] = {
1882 [MESA_SHADER_VERTEX] = 21,
1883 [MESA_SHADER_TESS_CTRL] = 25, /* HS */
1884 [MESA_SHADER_TESS_EVAL] = 26, /* DS */
1885 [MESA_SHADER_GEOMETRY] = 22,
1886 [MESA_SHADER_FRAGMENT] = 23,
1887 [MESA_SHADER_COMPUTE] = 0,
1888 };
1889
1890 VkShaderStageFlags flushed = 0;
1891
1892 anv_foreach_stage(stage, dirty_stages) {
1893 assert(stage < ARRAY_SIZE(push_constant_opcodes));
1894 assert(push_constant_opcodes[stage] > 0);
1895
1896 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c) {
1897 c._3DCommandSubOpcode = push_constant_opcodes[stage];
1898
1899 if (anv_pipeline_has_stage(pipeline, stage)) {
1900 #if GEN_GEN >= 8 || GEN_IS_HASWELL
1901 const struct brw_stage_prog_data *prog_data =
1902 pipeline->shaders[stage]->prog_data;
1903 const struct anv_pipeline_bind_map *bind_map =
1904 &pipeline->shaders[stage]->bind_map;
1905
1906 /* The Skylake PRM contains the following restriction:
1907 *
1908 * "The driver must ensure The following case does not occur
1909 * without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
1910 * buffer 3 read length equal to zero committed followed by a
1911 * 3DSTATE_CONSTANT_* with buffer 0 read length not equal to
1912 * zero committed."
1913 *
1914 * To avoid this, we program the buffers in the highest slots.
1915 * This way, slot 0 is only used if slot 3 is also used.
1916 */
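/* Packing sketch: with nonzero lengths only in ubo_ranges[0] and
 * ubo_ranges[1], the loop below places range 1 in Buffer[3] and range 0
 * in Buffer[2]; any remaining push constant data then lands in Buffer[1].
 */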
1917 int n = 3;
1918
1919 for (int i = 3; i >= 0; i--) {
1920 const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
1921 if (range->length == 0)
1922 continue;
1923
1924 const unsigned surface =
1925 prog_data->binding_table.ubo_start + range->block;
1926
1927 assert(surface <= bind_map->surface_count);
1928 const struct anv_pipeline_binding *binding =
1929 &bind_map->surface_to_descriptor[surface];
1930
1931 const struct anv_descriptor *desc =
1932 anv_descriptor_for_binding(cmd_buffer, binding);
1933
1934 struct anv_address read_addr;
1935 uint32_t read_len;
1936 if (desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
1937 read_len = MIN2(range->length,
1938 DIV_ROUND_UP(desc->buffer_view->range, 32) - range->start);
1939 read_addr = (struct anv_address) {
1940 .bo = desc->buffer_view->bo,
1941 .offset = desc->buffer_view->offset +
1942 range->start * 32,
1943 };
1944 } else {
1945 assert(desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
1946
1947 uint32_t dynamic_offset =
1948 dynamic_offset_for_binding(cmd_buffer, pipeline, binding);
1949 uint32_t buf_offset =
1950 MIN2(desc->offset + dynamic_offset, desc->buffer->size);
1951 uint32_t buf_range =
1952 MIN2(desc->range, desc->buffer->size - buf_offset);
1953
1954 read_len = MIN2(range->length,
1955 DIV_ROUND_UP(buf_range, 32) - range->start);
1956 read_addr = (struct anv_address) {
1957 .bo = desc->buffer->bo,
1958 .offset = desc->buffer->offset + buf_offset +
1959 range->start * 32,
1960 };
1961 }
1962
1963 if (read_len > 0) {
1964 c.ConstantBody.Buffer[n] = read_addr;
1965 c.ConstantBody.ReadLength[n] = read_len;
1966 n--;
1967 }
1968 }
1969
1970 struct anv_state state =
1971 anv_cmd_buffer_push_constants(cmd_buffer, stage);
1972
1973 if (state.alloc_size > 0) {
1974 c.ConstantBody.Buffer[n] = (struct anv_address) {
1975 .bo = &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
1976 .offset = state.offset,
1977 };
1978 c.ConstantBody.ReadLength[n] =
1979 DIV_ROUND_UP(state.alloc_size, 32);
1980 }
1981 #else
1982 /* For Ivy Bridge, the push constants packets have a different
1983 * rule that would require us to iterate in the other direction
1984 * and possibly mess around with dynamic state base address.
1985 * Don't bother; just emit regular push constants at n = 0.
1986 */
1987 struct anv_state state =
1988 anv_cmd_buffer_push_constants(cmd_buffer, stage);
1989
1990 if (state.alloc_size > 0) {
1991 c.ConstantBody.Buffer[0].offset = state.offset;
1992 c.ConstantBody.ReadLength[0] =
1993 DIV_ROUND_UP(state.alloc_size, 32);
1994 }
1995 #endif
1996 }
1997 }
1998
1999 flushed |= mesa_to_vk_shader_stage(stage);
2000 }
2001
2002 cmd_buffer->state.push_constants_dirty &= ~flushed;
2003 }
2004
2005 void
2006 genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
2007 {
2008 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
2009 uint32_t *p;
2010
2011 uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;
2012
2013 assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);
2014
2015 genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->urb.l3_config);
2016
2017 genX(flush_pipeline_select_3d)(cmd_buffer);
2018
2019 if (vb_emit) {
2020 const uint32_t num_buffers = __builtin_popcount(vb_emit);
2021 const uint32_t num_dwords = 1 + num_buffers * 4;
2022
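/* 3DSTATE_VERTEX_BUFFERS is a one-dword header followed by one 4-dword
 * VERTEX_BUFFER_STATE per buffer, hence 1 + num_buffers * 4; each entry
 * is packed at p[1 + i * 4] below.
 */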
2023 p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
2024 GENX(3DSTATE_VERTEX_BUFFERS));
2025 uint32_t vb, i = 0;
2026 for_each_bit(vb, vb_emit) {
2027 struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
2028 uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;
2029
2030 struct GENX(VERTEX_BUFFER_STATE) state = {
2031 .VertexBufferIndex = vb,
2032
2033 #if GEN_GEN >= 8
2034 .MemoryObjectControlState = GENX(MOCS),
2035 #else
2036 .BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA,
2037 /* Our implementation of VK_KHR_multiview uses instancing to draw
2038 * the different views. If the client asks for instancing, we
2039 * need to use the Instance Data Step Rate to ensure that we
2040 * repeat the client's per-instance data once for each view.
2041 */
2042 .InstanceDataStepRate = anv_subpass_view_count(pipeline->subpass),
2043 .VertexBufferMemoryObjectControlState = GENX(MOCS),
2044 #endif
2045
2046 .AddressModifyEnable = true,
2047 .BufferPitch = pipeline->binding_stride[vb],
2048 .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
2049
2050 #if GEN_GEN >= 8
2051 .BufferSize = buffer->size - offset
2052 #else
2053 .EndAddress = { buffer->bo, buffer->offset + buffer->size - 1},
2054 #endif
2055 };
2056
2057 GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
2058 i++;
2059 }
2060 }
2061
2062 cmd_buffer->state.vb_dirty &= ~vb_emit;
2063
2064 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
2065 anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
2066
2067 /* The exact descriptor layout is pulled from the pipeline, so we need
2068 * to re-emit binding tables on every pipeline change.
2069 */
2070 cmd_buffer->state.descriptors_dirty |= pipeline->active_stages;
2071
2072 /* If the pipeline changed, we may need to re-allocate push constant
2073 * space in the URB.
2074 */
2075 cmd_buffer_alloc_push_constants(cmd_buffer);
2076 }
2077
2078 #if GEN_GEN <= 7
2079 if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
2080 cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
2081 /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
2082 *
2083 * "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
2084 * stall needs to be sent just prior to any 3DSTATE_VS,
2085 * 3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
2086 * 3DSTATE_BINDING_TABLE_POINTER_VS,
2087 * 3DSTATE_SAMPLER_STATE_POINTER_VS command. Only one
2088 * PIPE_CONTROL needs to be sent before any combination of VS
2089 * associated 3DSTATE."
2090 */
2091 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
2092 pc.DepthStallEnable = true;
2093 pc.PostSyncOperation = WriteImmediateData;
2094 pc.Address =
2095 (struct anv_address) { &cmd_buffer->device->workaround_bo, 0 };
2096 }
2097 }
2098 #endif
2099
2100 /* Render targets live in the same binding table as fragment descriptors */
2101 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_RENDER_TARGETS)
2102 cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
2103
2104 /* We emit the binding tables and sampler tables first, then emit push
2105 * constants and then finally emit binding table and sampler table
2106 * pointers. It has to happen in this order, since emitting the binding
2107 * tables may change the push constants (in case of storage images). After
2108 * emitting push constants, on SKL+ we have to emit the corresponding
2109 * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
2110 */
2111 uint32_t dirty = 0;
2112 if (cmd_buffer->state.descriptors_dirty)
2113 dirty = flush_descriptor_sets(cmd_buffer);
2114
2115 if (dirty || cmd_buffer->state.push_constants_dirty) {
2116 /* Because we're pushing UBOs, we have to push whenever either
2117 * descriptors or push constants is dirty.
2118 */
2119 dirty |= cmd_buffer->state.push_constants_dirty;
2120 dirty &= ANV_STAGE_MASK & VK_SHADER_STAGE_ALL_GRAPHICS;
2121 cmd_buffer_flush_push_constants(cmd_buffer, dirty);
2122 }
2123
2124 if (dirty)
2125 cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);
2126
2127 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
2128 gen8_cmd_buffer_emit_viewport(cmd_buffer);
2129
2130 if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_VIEWPORT |
2131 ANV_CMD_DIRTY_PIPELINE)) {
2132 gen8_cmd_buffer_emit_depth_viewport(cmd_buffer,
2133 pipeline->depth_clamp_enable);
2134 }
2135
2136 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
2137 gen7_cmd_buffer_emit_scissor(cmd_buffer);
2138
2139 genX(cmd_buffer_flush_dynamic_state)(cmd_buffer);
2140
2141 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
2142 }
2143
2144 static void
2145 emit_vertex_bo(struct anv_cmd_buffer *cmd_buffer,
2146 struct anv_bo *bo, uint32_t offset,
2147 uint32_t size, uint32_t index)
2148 {
2149 uint32_t *p = anv_batch_emitn(&cmd_buffer->batch, 5,
2150 GENX(3DSTATE_VERTEX_BUFFERS));
2151
2152 GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1,
2153 &(struct GENX(VERTEX_BUFFER_STATE)) {
2154 .VertexBufferIndex = index,
2155 .AddressModifyEnable = true,
2156 .BufferPitch = 0,
2157 #if (GEN_GEN >= 8)
2158 .MemoryObjectControlState = GENX(MOCS),
2159 .BufferStartingAddress = { bo, offset },
2160 .BufferSize = size
2161 #else
2162 .VertexBufferMemoryObjectControlState = GENX(MOCS),
2163 .BufferStartingAddress = { bo, offset },
2164 .EndAddress = { bo, offset + size },
2165 #endif
2166 });
2167 }
2168
2169 static void
2170 emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
2171 struct anv_bo *bo, uint32_t offset)
2172 {
2173 emit_vertex_bo(cmd_buffer, bo, offset, 8, ANV_SVGS_VB_INDEX);
2174 }
2175
2176 static void
2177 emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer,
2178 uint32_t base_vertex, uint32_t base_instance)
2179 {
2180 struct anv_state id_state =
2181 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 8, 4);
2182
2183 ((uint32_t *)id_state.map)[0] = base_vertex;
2184 ((uint32_t *)id_state.map)[1] = base_instance;
2185
2186 anv_state_flush(cmd_buffer->device, id_state);
2187
2188 emit_base_vertex_instance_bo(cmd_buffer,
2189 &cmd_buffer->device->dynamic_state_pool.block_pool.bo, id_state.offset);
2190 }
2191
2192 static void
2193 emit_draw_index(struct anv_cmd_buffer *cmd_buffer, uint32_t draw_index)
2194 {
2195 struct anv_state state =
2196 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 4, 4);
2197
2198 ((uint32_t *)state.map)[0] = draw_index;
2199
2200 anv_state_flush(cmd_buffer->device, state);
2201
2202 emit_vertex_bo(cmd_buffer,
2203 &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
2204 state.offset, 4, ANV_DRAWID_VB_INDEX);
2205 }
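/* Together, these two helpers feed the draw parameters that the shader
 * compiler expects as hidden vertex attributes: an 8-byte {base vertex,
 * base instance} pair at ANV_SVGS_VB_INDEX and a 4-byte draw index at
 * ANV_DRAWID_VB_INDEX, backing gl_BaseVertex/gl_BaseInstance and
 * gl_DrawID style system values in the VS.
 */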
2206
2207 void genX(CmdDraw)(
2208 VkCommandBuffer commandBuffer,
2209 uint32_t vertexCount,
2210 uint32_t instanceCount,
2211 uint32_t firstVertex,
2212 uint32_t firstInstance)
2213 {
2214 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
2215 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
2216 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
2217
2218 if (anv_batch_has_error(&cmd_buffer->batch))
2219 return;
2220
2221 genX(cmd_buffer_flush_state)(cmd_buffer);
2222
2223 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
2224 emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);
2225 if (vs_prog_data->uses_drawid)
2226 emit_draw_index(cmd_buffer, 0);
2227
2228 /* Our implementation of VK_KHR_multiview uses instancing to draw the
2229 * different views. We need to multiply instanceCount by the view count.
2230 */
2231 instanceCount *= anv_subpass_view_count(cmd_buffer->state.subpass);
2232
2233 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
2234 prim.VertexAccessType = SEQUENTIAL;
2235 prim.PrimitiveTopologyType = pipeline->topology;
2236 prim.VertexCountPerInstance = vertexCount;
2237 prim.StartVertexLocation = firstVertex;
2238 prim.InstanceCount = instanceCount;
2239 prim.StartInstanceLocation = firstInstance;
2240 prim.BaseVertexLocation = 0;
2241 }
2242 }
2243
2244 void genX(CmdDrawIndexed)(
2245 VkCommandBuffer commandBuffer,
2246 uint32_t indexCount,
2247 uint32_t instanceCount,
2248 uint32_t firstIndex,
2249 int32_t vertexOffset,
2250 uint32_t firstInstance)
2251 {
2252 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
2253 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
2254 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
2255
2256 if (anv_batch_has_error(&cmd_buffer->batch))
2257 return;
2258
2259 genX(cmd_buffer_flush_state)(cmd_buffer);
2260
2261 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
2262 emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance);
2263 if (vs_prog_data->uses_drawid)
2264 emit_draw_index(cmd_buffer, 0);
2265
2266 /* Our implementation of VK_KHR_multiview uses instancing to draw the
2267 * different views. We need to multiply instanceCount by the view count.
2268 */
2269 instanceCount *= anv_subpass_view_count(cmd_buffer->state.subpass);
2270
2271 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
2272 prim.VertexAccessType = RANDOM;
2273 prim.PrimitiveTopologyType = pipeline->topology;
2274 prim.VertexCountPerInstance = indexCount;
2275 prim.StartVertexLocation = firstIndex;
2276 prim.InstanceCount = instanceCount;
2277 prim.StartInstanceLocation = firstInstance;
2278 prim.BaseVertexLocation = vertexOffset;
2279 }
2280 }
2281
2282 /* Auto-Draw / Indirect Registers */
2283 #define GEN7_3DPRIM_END_OFFSET 0x2420
2284 #define GEN7_3DPRIM_START_VERTEX 0x2430
2285 #define GEN7_3DPRIM_VERTEX_COUNT 0x2434
2286 #define GEN7_3DPRIM_INSTANCE_COUNT 0x2438
2287 #define GEN7_3DPRIM_START_INSTANCE 0x243C
2288 #define GEN7_3DPRIM_BASE_VERTEX 0x2440
2289
2290 /* MI_MATH only exists on Haswell+ */
2291 #if GEN_IS_HASWELL || GEN_GEN >= 8
2292
2293 static uint32_t
2294 mi_alu(uint32_t opcode, uint32_t op1, uint32_t op2)
2295 {
2296 struct GENX(MI_MATH_ALU_INSTRUCTION) instr = {
2297 .ALUOpcode = opcode,
2298 .Operand1 = op1,
2299 .Operand2 = op2,
2300 };
2301
2302 uint32_t dw;
2303 GENX(MI_MATH_ALU_INSTRUCTION_pack)(NULL, &dw, &instr);
2304
2305 return dw;
2306 }
2307
2308 #define CS_GPR(n) (0x2600 + (n) * 8)
2309
2310 /* Emit dwords to multiply GPR0 by N */
2311 static void
2312 build_alu_multiply_gpr0(uint32_t *dw, unsigned *dw_count, uint32_t N)
2313 {
2314 VK_OUTARRAY_MAKE(out, dw, dw_count);
2315
2316 #define append_alu(opcode, operand1, operand2) \
2317 vk_outarray_append(&out, alu_dw) *alu_dw = mi_alu(opcode, operand1, operand2)
2318
2319 assert(N > 0);
2320 unsigned top_bit = 31 - __builtin_clz(N);
2321 for (int i = top_bit - 1; i >= 0; i--) {
2322 /* We get our initial data in GPR0 and we write the final data out to
2323 * GPR0 but we use GPR1 as our scratch register.
2324 */
2325 unsigned src_reg = i == top_bit - 1 ? MI_ALU_REG0 : MI_ALU_REG1;
2326 unsigned dst_reg = i == 0 ? MI_ALU_REG0 : MI_ALU_REG1;
2327
2328 /* Shift the current value left by 1 */
2329 append_alu(MI_ALU_LOAD, MI_ALU_SRCA, src_reg);
2330 append_alu(MI_ALU_LOAD, MI_ALU_SRCB, src_reg);
2331 append_alu(MI_ALU_ADD, 0, 0);
2332
2333 if (N & (1 << i)) {
2334 /* Store ACCU to R1 and add R0 to R1 */
2335 append_alu(MI_ALU_STORE, MI_ALU_REG1, MI_ALU_ACCU);
2336 append_alu(MI_ALU_LOAD, MI_ALU_SRCA, MI_ALU_REG0);
2337 append_alu(MI_ALU_LOAD, MI_ALU_SRCB, MI_ALU_REG1);
2338 append_alu(MI_ALU_ADD, 0, 0);
2339 }
2340
2341 append_alu(MI_ALU_STORE, dst_reg, MI_ALU_ACCU);
2342 }
2343
2344 #undef append_alu
2345 }
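/* Worked example (N = 6, binary 110, top_bit = 2):
 *   i = 1: ACCU = R0 + R0; bit 1 of N is set, so R1 = ACCU (2x), then
 *          ACCU = R0 + R1 (3x); ACCU is stored to R1.
 *   i = 0: ACCU = R1 + R1 (6x); bit 0 is clear; ACCU is stored to R0.
 * GPR0 ends up holding 6 times its input, built from ADDs alone.
 */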
2346
2347 static void
2348 emit_mul_gpr0(struct anv_batch *batch, uint32_t N)
2349 {
2350 uint32_t num_dwords;
2351 build_alu_multiply_gpr0(NULL, &num_dwords, N);
2352
2353 uint32_t *dw = anv_batch_emitn(batch, 1 + num_dwords, GENX(MI_MATH));
2354 build_alu_multiply_gpr0(dw + 1, &num_dwords, N);
2355 }
2356
2357 #endif /* GEN_IS_HASWELL || GEN_GEN >= 8 */
2358
2359 static void
2360 load_indirect_parameters(struct anv_cmd_buffer *cmd_buffer,
2361 struct anv_buffer *buffer, uint64_t offset,
2362 bool indexed)
2363 {
2364 struct anv_batch *batch = &cmd_buffer->batch;
2365 struct anv_bo *bo = buffer->bo;
2366 uint32_t bo_offset = buffer->offset + offset;
2367
2368 emit_lrm(batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
2369
2370 unsigned view_count = anv_subpass_view_count(cmd_buffer->state.subpass);
2371 if (view_count > 1) {
2372 #if GEN_IS_HASWELL || GEN_GEN >= 8
2373 emit_lrm(batch, CS_GPR(0), bo, bo_offset + 4);
2374 emit_mul_gpr0(batch, view_count);
2375 emit_lrr(batch, GEN7_3DPRIM_INSTANCE_COUNT, CS_GPR(0));
2376 #else
2377 anv_finishme("Multiview + indirect draw requires MI_MATH; "
2378 "MI_MATH is not supported on Ivy Bridge");
2379 emit_lrm(batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
2380 #endif
2381 } else {
2382 emit_lrm(batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
2383 }
2384
2385 emit_lrm(batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
2386
2387 if (indexed) {
2388 emit_lrm(batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
2389 emit_lrm(batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
2390 } else {
2391 emit_lrm(batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
2392 emit_lri(batch, GEN7_3DPRIM_BASE_VERTEX, 0);
2393 }
2394 }
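/* The dword layout consumed above is exactly the Vulkan indirect structs:
 *   VkDrawIndirectCommand        { vertexCount, instanceCount,
 *                                  firstVertex, firstInstance }
 *   VkDrawIndexedIndirectCommand { indexCount, instanceCount, firstIndex,
 *                                  vertexOffset, firstInstance }
 * which is why the non-indexed path loads START_INSTANCE from offset 12
 * and stomps BASE_VERTEX to 0 instead of loading it.
 */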
2395
2396 void genX(CmdDrawIndirect)(
2397 VkCommandBuffer commandBuffer,
2398 VkBuffer _buffer,
2399 VkDeviceSize offset,
2400 uint32_t drawCount,
2401 uint32_t stride)
2402 {
2403 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
2404 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
2405 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
2406 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
2407
2408 if (anv_batch_has_error(&cmd_buffer->batch))
2409 return;
2410
2411 genX(cmd_buffer_flush_state)(cmd_buffer);
2412
2413 for (uint32_t i = 0; i < drawCount; i++) {
2414 struct anv_bo *bo = buffer->bo;
2415 uint32_t bo_offset = buffer->offset + offset;
2416
2417 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
2418 emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);
2419 if (vs_prog_data->uses_drawid)
2420 emit_draw_index(cmd_buffer, i);
2421
2422 load_indirect_parameters(cmd_buffer, buffer, offset, false);
2423
2424 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
2425 prim.IndirectParameterEnable = true;
2426 prim.VertexAccessType = SEQUENTIAL;
2427 prim.PrimitiveTopologyType = pipeline->topology;
2428 }
2429
2430 offset += stride;
2431 }
2432 }
2433
2434 void genX(CmdDrawIndexedIndirect)(
2435 VkCommandBuffer commandBuffer,
2436 VkBuffer _buffer,
2437 VkDeviceSize offset,
2438 uint32_t drawCount,
2439 uint32_t stride)
2440 {
2441 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
2442 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
2443 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
2444 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
2445
2446 if (anv_batch_has_error(&cmd_buffer->batch))
2447 return;
2448
2449 genX(cmd_buffer_flush_state)(cmd_buffer);
2450
2451 for (uint32_t i = 0; i < drawCount; i++) {
2452 struct anv_bo *bo = buffer->bo;
2453 uint32_t bo_offset = buffer->offset + offset;
2454
2455 /* TODO: We need to stomp base vertex to 0 somehow */
2456 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
2457 emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);
2458 if (vs_prog_data->uses_drawid)
2459 emit_draw_index(cmd_buffer, i);
2460
2461 load_indirect_parameters(cmd_buffer, buffer, offset, true);
2462
2463 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
2464 prim.IndirectParameterEnable = true;
2465 prim.VertexAccessType = RANDOM;
2466 prim.PrimitiveTopologyType = pipeline->topology;
2467 }
2468
2469 offset += stride;
2470 }
2471 }
2472
2473 static VkResult
2474 flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
2475 {
2476 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
2477 struct anv_state surfaces = { 0, }, samplers = { 0, };
2478 VkResult result;
2479
2480 result = emit_binding_table(cmd_buffer, MESA_SHADER_COMPUTE, &surfaces);
2481 if (result != VK_SUCCESS) {
2482 assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);
2483
2484 result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
2485 if (result != VK_SUCCESS)
2486 return result;
2487
2488 /* Re-emit state base addresses so we get the new surface state base
2489 * address before we start emitting binding tables etc.
2490 */
2491 genX(cmd_buffer_emit_state_base_address)(cmd_buffer);
2492
2493 result = emit_binding_table(cmd_buffer, MESA_SHADER_COMPUTE, &surfaces);
2494 if (result != VK_SUCCESS) {
2495 anv_batch_set_error(&cmd_buffer->batch, result);
2496 return result;
2497 }
2498 }
2499
2500 result = emit_samplers(cmd_buffer, MESA_SHADER_COMPUTE, &samplers);
2501 if (result != VK_SUCCESS) {
2502 anv_batch_set_error(&cmd_buffer->batch, result);
2503 return result;
2504 }
2505
2506 uint32_t iface_desc_data_dw[GENX(INTERFACE_DESCRIPTOR_DATA_length)];
2507 struct GENX(INTERFACE_DESCRIPTOR_DATA) desc = {
2508 .BindingTablePointer = surfaces.offset,
2509 .SamplerStatePointer = samplers.offset,
2510 };
2511 GENX(INTERFACE_DESCRIPTOR_DATA_pack)(NULL, iface_desc_data_dw, &desc);
2512
2513 struct anv_state state =
2514 anv_cmd_buffer_merge_dynamic(cmd_buffer, iface_desc_data_dw,
2515 pipeline->interface_descriptor_data,
2516 GENX(INTERFACE_DESCRIPTOR_DATA_length),
2517 64);
2518
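/* anv_cmd_buffer_merge_dynamic effectively ORs our freshly packed dwords
 * (carrying only the binding table and sampler pointers) with the
 * pipeline's pre-packed INTERFACE_DESCRIPTOR_DATA, so each side must
 * leave the other's fields zeroed.
 */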
2519 uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
2520 anv_batch_emit(&cmd_buffer->batch,
2521 GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), mid) {
2522 mid.InterfaceDescriptorTotalLength = size;
2523 mid.InterfaceDescriptorDataStartAddress = state.offset;
2524 }
2525
2526 return VK_SUCCESS;
2527 }
2528
2529 void
2530 genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
2531 {
2532 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
2533 MAYBE_UNUSED VkResult result;
2534
2535 assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);
2536
2537 genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->urb.l3_config);
2538
2539 genX(flush_pipeline_select_gpgpu)(cmd_buffer);
2540
2541 if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE) {
2542 /* From the Sky Lake PRM Vol 2a, MEDIA_VFE_STATE:
2543 *
2544 * "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
2545 * the only bits that are changed are scoreboard related: Scoreboard
2546 * Enable, Scoreboard Type, Scoreboard Mask, Scoreboard * Delta. For
2547 * these scoreboard related states, a MEDIA_STATE_FLUSH is
2548 * sufficient."
2549 */
2550 cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_CS_STALL_BIT;
2551 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
2552
2553 anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
2554 }
2555
2556 if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
2557 (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
2558 /* FIXME: figure out descriptors for gen7 */
2559 result = flush_compute_descriptor_set(cmd_buffer);
2560 if (result != VK_SUCCESS)
2561 return;
2562
2563 cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
2564 }
2565
2566 if (cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_COMPUTE_BIT) {
2567 struct anv_state push_state =
2568 anv_cmd_buffer_cs_push_constants(cmd_buffer);
2569
2570 if (push_state.alloc_size) {
2571 anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) {
2572 curbe.CURBETotalDataLength = push_state.alloc_size;
2573 curbe.CURBEDataStartAddress = push_state.offset;
2574 }
2575 }
2576 }
2577
2578 cmd_buffer->state.compute_dirty = 0;
2579
2580 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
2581 }
2582
2583 #if GEN_GEN == 7
2584
2585 static VkResult
2586 verify_cmd_parser(const struct anv_device *device,
2587 int required_version,
2588 const char *function)
2589 {
2590 if (device->instance->physicalDevice.cmd_parser_version < required_version) {
2591 return vk_errorf(device->instance, device->instance,
2592 VK_ERROR_FEATURE_NOT_PRESENT,
2593 "cmd parser version %d is required for %s",
2594 required_version, function);
2595 } else {
2596 return VK_SUCCESS;
2597 }
2598 }
2599
2600 #endif
2601
2602 void genX(CmdDispatch)(
2603 VkCommandBuffer commandBuffer,
2604 uint32_t x,
2605 uint32_t y,
2606 uint32_t z)
2607 {
2608 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
2609 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
2610 const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
2611
2612 if (anv_batch_has_error(&cmd_buffer->batch))
2613 return;
2614
2615 if (prog_data->uses_num_work_groups) {
2616 struct anv_state state =
2617 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 12, 4);
2618 uint32_t *sizes = state.map;
2619 sizes[0] = x;
2620 sizes[1] = y;
2621 sizes[2] = z;
2622 anv_state_flush(cmd_buffer->device, state);
2623 cmd_buffer->state.num_workgroups_offset = state.offset;
2624 cmd_buffer->state.num_workgroups_bo =
2625 &cmd_buffer->device->dynamic_state_pool.block_pool.bo;
2626 }
2627
2628 genX(cmd_buffer_flush_compute_state)(cmd_buffer);
2629
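/* Field encodings below (per the GPGPU_WALKER description in the PRMs):
 * SIMDSize takes 0/1/2 for SIMD8/SIMD16/SIMD32, so simd_size / 16 maps
 * 8 -> 0, 16 -> 1, 32 -> 2, and ThreadWidthCounterMaximum is the number
 * of threads per thread group minus one.
 */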
2630 anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER), ggw) {
2631 ggw.SIMDSize = prog_data->simd_size / 16;
2632 ggw.ThreadDepthCounterMaximum = 0;
2633 ggw.ThreadHeightCounterMaximum = 0;
2634 ggw.ThreadWidthCounterMaximum = prog_data->threads - 1;
2635 ggw.ThreadGroupIDXDimension = x;
2636 ggw.ThreadGroupIDYDimension = y;
2637 ggw.ThreadGroupIDZDimension = z;
2638 ggw.RightExecutionMask = pipeline->cs_right_mask;
2639 ggw.BottomExecutionMask = 0xffffffff;
2640 }
2641
2642 anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH), msf);
2643 }
2644
2645 #define GPGPU_DISPATCHDIMX 0x2500
2646 #define GPGPU_DISPATCHDIMY 0x2504
2647 #define GPGPU_DISPATCHDIMZ 0x2508
2648
2649 void genX(CmdDispatchIndirect)(
2650 VkCommandBuffer commandBuffer,
2651 VkBuffer _buffer,
2652 VkDeviceSize offset)
2653 {
2654 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
2655 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
2656 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
2657 const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
2658 struct anv_bo *bo = buffer->bo;
2659 uint32_t bo_offset = buffer->offset + offset;
2660 struct anv_batch *batch = &cmd_buffer->batch;
2661
2662 #if GEN_GEN == 7
2663 /* Linux 4.4 added command parser version 5 which allows the GPGPU
2664 * indirect dispatch registers to be written.
2665 */
2666 if (verify_cmd_parser(cmd_buffer->device, 5,
2667 "vkCmdDispatchIndirect") != VK_SUCCESS)
2668 return;
2669 #endif
2670
2671 if (prog_data->uses_num_work_groups) {
2672 cmd_buffer->state.num_workgroups_offset = bo_offset;
2673 cmd_buffer->state.num_workgroups_bo = bo;
2674 }
2675
2676 genX(cmd_buffer_flush_compute_state)(cmd_buffer);
2677
2678 emit_lrm(batch, GPGPU_DISPATCHDIMX, bo, bo_offset);
2679 emit_lrm(batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4);
2680 emit_lrm(batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8);
2681
2682 #if GEN_GEN <= 7
2683 /* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */
2684 emit_lri(batch, MI_PREDICATE_SRC0 + 4, 0);
2685 emit_lri(batch, MI_PREDICATE_SRC1 + 0, 0);
2686 emit_lri(batch, MI_PREDICATE_SRC1 + 4, 0);
2687
2688 /* Load compute_dispatch_indirect_x_size into SRC0 */
2689 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 0);
2690
2691 /* predicate = (compute_dispatch_indirect_x_size == 0); */
2692 anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
2693 mip.LoadOperation = LOAD_LOAD;
2694 mip.CombineOperation = COMBINE_SET;
2695 mip.CompareOperation = COMPARE_SRCS_EQUAL;
2696 }
2697
2698 /* Load compute_dispatch_indirect_y_size into SRC0 */
2699 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 4);
2700
2701 /* predicate |= (compute_dispatch_indirect_y_size == 0); */
2702 anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
2703 mip.LoadOperation = LOAD_LOAD;
2704 mip.CombineOperation = COMBINE_OR;
2705 mip.CompareOperation = COMPARE_SRCS_EQUAL;
2706 }
2707
2708 /* Load compute_dispatch_indirect_z_size into SRC0 */
2709 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 8);
2710
2711 /* predicate |= (compute_dispatch_indirect_z_size == 0); */
2712 anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
2713 mip.LoadOperation = LOAD_LOAD;
2714 mip.CombineOperation = COMBINE_OR;
2715 mip.CompareOperation = COMPARE_SRCS_EQUAL;
2716 }
2717
2718 /* predicate = !predicate; */
2719 #define COMPARE_FALSE 1
2720 anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
2721 mip.LoadOperation = LOAD_LOADINV;
2722 mip.CombineOperation = COMBINE_OR;
2723 mip.CompareOperation = COMPARE_FALSE;
2724 }
2725 #endif
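/* Net effect of the predicate sequence above:
 *   predicate = !((x == 0) || (y == 0) || (z == 0))
 * so with PredicateEnable set, the walker below is skipped whenever any
 * indirect dispatch dimension is zero.
 */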
2726
2727 anv_batch_emit(batch, GENX(GPGPU_WALKER), ggw) {
2728 ggw.IndirectParameterEnable = true;
2729 ggw.PredicateEnable = GEN_GEN <= 7;
2730 ggw.SIMDSize = prog_data->simd_size / 16;
2731 ggw.ThreadDepthCounterMaximum = 0;
2732 ggw.ThreadHeightCounterMaximum = 0;
2733 ggw.ThreadWidthCounterMaximum = prog_data->threads - 1;
2734 ggw.RightExecutionMask = pipeline->cs_right_mask;
2735 ggw.BottomExecutionMask = 0xffffffff;
2736 }
2737
2738 anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH), msf);
2739 }
2740
2741 static void
2742 genX(flush_pipeline_select)(struct anv_cmd_buffer *cmd_buffer,
2743 uint32_t pipeline)
2744 {
2745 UNUSED const struct gen_device_info *devinfo = &cmd_buffer->device->info;
2746
2747 if (cmd_buffer->state.current_pipeline == pipeline)
2748 return;
2749
2750 #if GEN_GEN >= 8 && GEN_GEN < 10
2751 /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
2752 *
2753 * Software must clear the COLOR_CALC_STATE Valid field in
2754 * 3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
2755 * with Pipeline Select set to GPGPU.
2756 *
2757 * The internal hardware docs recommend the same workaround for Gen9
2758 * hardware too.
2759 */
2760 if (pipeline == GPGPU)
2761 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
2762 #endif
2763
2764 /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
2765 * PIPELINE_SELECT [DevBWR+]":
2766 *
2767 * Project: DEVSNB+
2768 *
2769 * Software must ensure all the write caches are flushed through a
2770 * stalling PIPE_CONTROL command followed by another PIPE_CONTROL
2771 * command to invalidate read only caches prior to programming
2772 * MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
2773 */
2774 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
2775 pc.RenderTargetCacheFlushEnable = true;
2776 pc.DepthCacheFlushEnable = true;
2777 pc.DCFlushEnable = true;
2778 pc.PostSyncOperation = NoWrite;
2779 pc.CommandStreamerStallEnable = true;
2780 }
2781
2782 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
2783 pc.TextureCacheInvalidationEnable = true;
2784 pc.ConstantCacheInvalidationEnable = true;
2785 pc.StateCacheInvalidationEnable = true;
2786 pc.InstructionCacheInvalidateEnable = true;
2787 pc.PostSyncOperation = NoWrite;
2788 }
2789
2790 anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
2791 #if GEN_GEN >= 9
2792 ps.MaskBits = 3;
2793 #endif
2794 ps.PipelineSelection = pipeline;
2795 }
2796
2797 #if GEN_GEN == 9
2798 if (devinfo->is_geminilake) {
2799 /* Project: DevGLK
2800 *
2801 * "This chicken bit works around a hardware issue with barrier logic
2802 * encountered when switching between GPGPU and 3D pipelines. To
2803 * workaround the issue, this mode bit should be set after a pipeline
2804 * is selected."
2805 */
2806 uint32_t scec;
2807 anv_pack_struct(&scec, GENX(SLICE_COMMON_ECO_CHICKEN1),
2808 .GLKBarrierMode =
2809 pipeline == GPGPU ? GLK_BARRIER_MODE_GPGPU
2810 : GLK_BARRIER_MODE_3D_HULL,
2811 .GLKBarrierModeMask = 1);
2812 emit_lri(&cmd_buffer->batch, GENX(SLICE_COMMON_ECO_CHICKEN1_num), scec);
2813 }
2814 #endif
2815
2816 cmd_buffer->state.current_pipeline = pipeline;
2817 }
2818
2819 void
2820 genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
2821 {
2822 genX(flush_pipeline_select)(cmd_buffer, _3D);
2823 }
2824
2825 void
2826 genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
2827 {
2828 genX(flush_pipeline_select)(cmd_buffer, GPGPU);
2829 }
2830
2831 void
2832 genX(cmd_buffer_emit_gen7_depth_flush)(struct anv_cmd_buffer *cmd_buffer)
2833 {
2834 if (GEN_GEN >= 8)
2835 return;
2836
2837 /* From the Haswell PRM, documentation for 3DSTATE_DEPTH_BUFFER:
2838 *
2839 * "Restriction: Prior to changing Depth/Stencil Buffer state (i.e., any
2840 * combination of 3DSTATE_DEPTH_BUFFER, 3DSTATE_CLEAR_PARAMS,
2841 * 3DSTATE_STENCIL_BUFFER, 3DSTATE_HIER_DEPTH_BUFFER) SW must first
2842 * issue a pipelined depth stall (PIPE_CONTROL with Depth Stall bit
2843 * set), followed by a pipelined depth cache flush (PIPE_CONTROL with
2844 * Depth Flush Bit set, followed by another pipelined depth stall
2845 * (PIPE_CONTROL with Depth Stall Bit set), unless SW can otherwise
2846 * guarantee that the pipeline from WM onwards is already flushed (e.g.,
2847 * via a preceding MI_FLUSH)."
2848 */
2849 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
2850 pipe.DepthStallEnable = true;
2851 }
2852 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
2853 pipe.DepthCacheFlushEnable = true;
2854 }
2855 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
2856 pipe.DepthStallEnable = true;
2857 }
2858 }
2859
2860 static void
2861 cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
2862 {
2863 struct anv_device *device = cmd_buffer->device;
2864 const struct anv_image_view *iview =
2865 anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
2866 const struct anv_image *image = iview ? iview->image : NULL;
2867
2868 /* FIXME: Width and Height are wrong */
2869
2870 genX(cmd_buffer_emit_gen7_depth_flush)(cmd_buffer);
2871
2872 uint32_t *dw = anv_batch_emit_dwords(&cmd_buffer->batch,
2873 device->isl_dev.ds.size / 4);
2874 if (dw == NULL)
2875 return;
2876
2877 struct isl_depth_stencil_hiz_emit_info info = {
2878 .mocs = device->default_mocs,
2879 };
2880
2881 if (iview)
2882 info.view = &iview->planes[0].isl;
2883
2884 if (image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
2885 uint32_t depth_plane =
2886 anv_image_aspect_to_plane(image->aspects, VK_IMAGE_ASPECT_DEPTH_BIT);
2887 const struct anv_surface *surface = &image->planes[depth_plane].surface;
2888
2889 info.depth_surf = &surface->isl;
2890
2891 info.depth_address =
2892 anv_batch_emit_reloc(&cmd_buffer->batch,
2893 dw + device->isl_dev.ds.depth_offset / 4,
2894 image->planes[depth_plane].bo,
2895 image->planes[depth_plane].bo_offset +
2896 surface->offset);
2897
2898 const uint32_t ds =
2899 cmd_buffer->state.subpass->depth_stencil_attachment.attachment;
2900 info.hiz_usage = cmd_buffer->state.attachments[ds].aux_usage;
2901 if (info.hiz_usage == ISL_AUX_USAGE_HIZ) {
2902 info.hiz_surf = &image->planes[depth_plane].aux_surface.isl;
2903
2904 info.hiz_address =
2905 anv_batch_emit_reloc(&cmd_buffer->batch,
2906 dw + device->isl_dev.ds.hiz_offset / 4,
2907 image->planes[depth_plane].bo,
2908 image->planes[depth_plane].bo_offset +
2909 image->planes[depth_plane].aux_surface.offset);
2910
2911 info.depth_clear_value = ANV_HZ_FC_VAL;
2912 }
2913 }
2914
2915 if (image && (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT)) {
2916 uint32_t stencil_plane =
2917 anv_image_aspect_to_plane(image->aspects, VK_IMAGE_ASPECT_STENCIL_BIT);
2918 const struct anv_surface *surface = &image->planes[stencil_plane].surface;
2919
2920 info.stencil_surf = &surface->isl;
2921
2922 info.stencil_address =
2923 anv_batch_emit_reloc(&cmd_buffer->batch,
2924 dw + device->isl_dev.ds.stencil_offset / 4,
2925 image->planes[stencil_plane].bo,
2926 image->planes[stencil_plane].bo_offset + surface->offset);
2927 }
2928
2929 isl_emit_depth_stencil_hiz_s(&device->isl_dev, dw, &info);
2930
2931 cmd_buffer->state.hiz_enabled = info.hiz_usage == ISL_AUX_USAGE_HIZ;
2932 }
2933
2934
2935 /**
2936 * @brief Perform any layout transitions required at the beginning and/or end
2937 * of the current subpass for the depth and color attachments in use.
2938 *
2939 * TODO: Consider preprocessing the attachment reference array at render pass
2940 * create time to determine if no layout transition is needed at the
2941 * beginning and/or end of each subpass.
2942 *
2943 * @param cmd_buffer The command buffer the transition is happening within.
2944 * @param subpass_end If true, marks that the transition is happening at the
2945 * end of the subpass.
2946 */
2947 static void
2948 cmd_buffer_subpass_transition_layouts(struct anv_cmd_buffer * const cmd_buffer,
2949 const bool subpass_end)
2950 {
2951 /* We need a non-NULL command buffer. */
2952 assert(cmd_buffer);
2953
2954 const struct anv_cmd_state * const cmd_state = &cmd_buffer->state;
2955 const struct anv_subpass * const subpass = cmd_state->subpass;
2956
2957 /* This function must be called within a subpass. */
2958 assert(subpass);
2959
2960 /* If there are attachment references, the array shouldn't be NULL. */
2962 if (subpass->attachment_count > 0)
2963 assert(subpass->attachments);
2964
2965 /* Iterate over the array of attachment references. */
2966 for (const VkAttachmentReference *att_ref = subpass->attachments;
2967 att_ref < subpass->attachments + subpass->attachment_count; att_ref++) {
2968
2969 /* If the attachment is unused, we can't perform a layout transition. */
2970 if (att_ref->attachment == VK_ATTACHMENT_UNUSED)
2971 continue;
2972
2973 /* This attachment index shouldn't go out of bounds. */
2974 assert(att_ref->attachment < cmd_state->pass->attachment_count);
2975
2976 const struct anv_render_pass_attachment * const att_desc =
2977 &cmd_state->pass->attachments[att_ref->attachment];
2978 struct anv_attachment_state * const att_state =
2979 &cmd_buffer->state.attachments[att_ref->attachment];
2980
2981 /* The attachment should not be used in a subpass after its last. */
2982 assert(att_desc->last_subpass_idx >= anv_get_subpass_id(cmd_state));
2983
2984 if (subpass_end && anv_get_subpass_id(cmd_state) <
2985 att_desc->last_subpass_idx) {
2986 /* We're calling this function on a buffer twice in one subpass and
2987 * this is not the last use of the buffer. The layout should not have
2988 * changed from the first call and no transition is necessary.
2989 */
2990 assert(att_state->current_layout == att_ref->layout ||
2991 att_state->current_layout ==
2992 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
2993 continue;
2994 }
2995
2996 /* The attachment index must be less than the number of attachments
2997 * within the framebuffer.
2998 */
2999 assert(att_ref->attachment < cmd_state->framebuffer->attachment_count);
3000
3001 const struct anv_image_view * const iview =
3002 cmd_state->framebuffer->attachments[att_ref->attachment];
3003 const struct anv_image * const image = iview->image;
3004
3005 /* Get the appropriate target layout for this attachment. */
3006 VkImageLayout target_layout;
3007
3008 /* A resolve is necessary before use as an input attachment if the clear
3009 * color or auxiliary buffer usage isn't supported by the sampler.
3010 */
3011 const bool input_needs_resolve =
3012 (att_state->fast_clear && !att_state->clear_color_is_zero_one) ||
3013 att_state->input_aux_usage != att_state->aux_usage;
3014 if (subpass_end) {
3015 target_layout = att_desc->final_layout;
3016 } else if (iview->aspect_mask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV &&
3017 !input_needs_resolve) {
3018 /* Layout transitions before the final one only help to enable sampling
3019 * an input attachment. If the input attachment supports sampling
3020 * using the auxiliary surface, we can skip such transitions by making
3021 * the target layout one that is CCS-aware.
3022 */
3023 target_layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
3024 } else {
3025 target_layout = att_ref->layout;
3026 }
3027
3028 /* Perform the layout transition. */
3029 if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
3030 transition_depth_buffer(cmd_buffer, image,
3031 att_state->current_layout, target_layout);
3032 att_state->aux_usage =
3033 anv_layout_to_aux_usage(&cmd_buffer->device->info, image,
3034 VK_IMAGE_ASPECT_DEPTH_BIT, target_layout);
3035 } else if (image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
3036 assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
3037 transition_color_buffer(cmd_buffer, image, VK_IMAGE_ASPECT_COLOR_BIT,
3038 iview->planes[0].isl.base_level, 1,
3039 iview->planes[0].isl.base_array_layer,
3040 iview->planes[0].isl.array_len,
3041 att_state->current_layout, target_layout);
3042 }
3043
3044 att_state->current_layout = target_layout;
3045 }
3046 }
3047
3048 /* Update the clear value dword(s) in surface state objects or the fast clear
3049 * state buffer entry for the color attachments used in this subpass.
3050 */
3051 static void
3052 cmd_buffer_subpass_sync_fast_clear_values(struct anv_cmd_buffer *cmd_buffer)
3053 {
3054 assert(cmd_buffer && cmd_buffer->state.subpass);
3055
3056 const struct anv_cmd_state *state = &cmd_buffer->state;
3057
3058 /* Iterate through every color attachment used in this subpass. */
3059 for (uint32_t i = 0; i < state->subpass->color_count; ++i) {
3060
3061 /* The attachment should be one of the attachments described in the
3062 * render pass and used in the subpass.
3063 */
3064 const uint32_t a = state->subpass->color_attachments[i].attachment;
3065 if (a == VK_ATTACHMENT_UNUSED)
3066 continue;
3067
3068 assert(a < state->pass->attachment_count);
3069
3070 /* Store some information regarding this attachment. */
3071 const struct anv_attachment_state *att_state = &state->attachments[a];
3072 const struct anv_image_view *iview = state->framebuffer->attachments[a];
3073 const struct anv_render_pass_attachment *rp_att =
3074 &state->pass->attachments[a];
3075
3076 if (att_state->aux_usage == ISL_AUX_USAGE_NONE)
3077 continue;
3078
3079 /* The fast clear state entry must be updated if a fast clear is going to
3080 * happen. The surface state must be updated if the clear value from a
3081 * prior fast clear may be needed.
3082 */
      if (att_state->pending_clear_aspects && att_state->fast_clear) {
         /* Update the fast clear state entry. */
         genX(copy_fast_clear_dwords)(cmd_buffer, att_state->color.state,
                                      iview->image,
                                      VK_IMAGE_ASPECT_COLOR_BIT,
                                      iview->planes[0].isl.base_level,
                                      true /* copy from ss */);

         /* Fast-clears impact whether or not a resolve will be necessary. */
         if (iview->image->planes[0].aux_usage == ISL_AUX_USAGE_CCS_E &&
             att_state->clear_color_is_zero) {
            /* This image always has the auxiliary buffer enabled. We can
             * mark the subresource as not needing a resolve because the
             * clear color will match what's in every RENDER_SURFACE_STATE
             * object when it's being used for sampling.
             */
            genX(set_image_needs_resolve)(cmd_buffer, iview->image,
                                          VK_IMAGE_ASPECT_COLOR_BIT,
                                          iview->planes[0].isl.base_level,
                                          false);
         } else {
            genX(set_image_needs_resolve)(cmd_buffer, iview->image,
                                          VK_IMAGE_ASPECT_COLOR_BIT,
                                          iview->planes[0].isl.base_level,
                                          true);
         }
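
         /* The asymmetry above is worth spelling out (informal note): with
          * CCS_E the auxiliary buffer stays enabled for the image's entire
          * lifetime, and a zero clear color matches the default clear color
          * in every RENDER_SURFACE_STATE, so sampling never sees a stale
          * value; any other clear color must be resolved away before the
          * image is read without CCS awareness.
          */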
      } else if (rp_att->load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
         /* The attachment may have been fast-cleared in a previous render
          * pass and the value is needed now. Update the surface state(s).
          *
          * TODO: Do this only once per render pass instead of every subpass.
          */
         genX(copy_fast_clear_dwords)(cmd_buffer, att_state->color.state,
                                      iview->image,
                                      VK_IMAGE_ASPECT_COLOR_BIT,
                                      iview->planes[0].isl.base_level,
                                      false /* copy to ss */);

         if (need_input_attachment_state(rp_att) &&
             att_state->input_aux_usage != ISL_AUX_USAGE_NONE) {
            genX(copy_fast_clear_dwords)(cmd_buffer, att_state->input.state,
                                         iview->image,
                                         VK_IMAGE_ASPECT_COLOR_BIT,
                                         iview->planes[0].isl.base_level,
                                         false /* copy to ss */);
         }
      }
   }
}

static void
genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_subpass *subpass)
{
   cmd_buffer->state.subpass = subpass;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;

   /* Our implementation of VK_KHR_multiview uses instancing to draw the
    * different views. If the client asks for instancing, we need to use the
    * Instance Data Step Rate to ensure that we repeat the client's
    * per-instance data once for each view. Since this bit is in
    * VERTEX_BUFFER_STATE on gen7, we need to dirty vertex buffers at the
    * top of each subpass.
    */
   if (GEN_GEN == 7)
      cmd_buffer->state.vb_dirty |= ~0;
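
   /* As an illustrative sketch (the numbers are invented for the example):
    * with a view mask of 0b11 and a client draw of 4 instances, we emit 8
    * hardware instances, and an Instance Data Step Rate of 2 keeps each
    * client instance's per-instance data in place for both views.
    */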

   /* Perform transitions to the subpass layout before any writes have
    * occurred.
    */
   cmd_buffer_subpass_transition_layouts(cmd_buffer, false);

   /* Update clear values *after* performing automatic layout transitions.
    * This ensures that transitions from the UNDEFINED layout have had a
    * chance to populate the clear value buffer with the correct values for
    * the LOAD_OP_LOAD loadOp, and that fast clears update the buffer
    * without the aforementioned layout transition overwriting the
    * fast-clear value.
    */
   cmd_buffer_subpass_sync_fast_clear_values(cmd_buffer);

   cmd_buffer_emit_depth_stencil(cmd_buffer);

   anv_cmd_buffer_clear_subpass(cmd_buffer);
}

void genX(CmdBeginRenderPass)(
    VkCommandBuffer                             commandBuffer,
    const VkRenderPassBeginInfo*                pRenderPassBegin,
    VkSubpassContents                           contents)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
   ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);

   cmd_buffer->state.framebuffer = framebuffer;
   cmd_buffer->state.pass = pass;
   cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
   VkResult result =
      genX(cmd_buffer_setup_attachments)(cmd_buffer, pass, pRenderPassBegin);

   /* If we failed to set up the attachments, we should not try to go any
    * further.
    */
   if (result != VK_SUCCESS) {
      assert(anv_batch_has_error(&cmd_buffer->batch));
      return;
   }

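   /* Render passes execute on the 3D pipeline, so make sure PIPELINE_SELECT
    * has chosen it before any subpass state is emitted (an informal
    * description of what this helper does).
    */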
   genX(flush_pipeline_select_3d)(cmd_buffer);

   genX(cmd_buffer_set_subpass)(cmd_buffer, pass->subpasses);

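   /* subpass_flushes[0] holds the pipe flushes the render pass determined
    * were needed before the first subpass begins (an informal reading of
    * how anv sizes this array; the per-subpass and end-of-pass entries are
    * consumed further below).
    */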
   cmd_buffer->state.pending_pipe_bits |=
      cmd_buffer->state.pass->subpass_flushes[0];
}

void genX(CmdNextSubpass)(
    VkCommandBuffer                             commandBuffer,
    VkSubpassContents                           contents)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (anv_batch_has_error(&cmd_buffer->batch))
      return;

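   /* Per the Vulkan spec, a render pass instance begins and ends in the
    * same command buffer, and a secondary command buffer recorded for use
    * inside a render pass executes entirely within a single subpass, so
    * vkCmdNextSubpass is only valid in a primary command buffer.
    */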
   assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);

   anv_cmd_buffer_resolve_subpass(cmd_buffer);

   /* Perform transitions to the final layout after all writes have
    * occurred.
    */
   cmd_buffer_subpass_transition_layouts(cmd_buffer, true);

   genX(cmd_buffer_set_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1);

   uint32_t subpass_id = anv_get_subpass_id(&cmd_buffer->state);
   cmd_buffer->state.pending_pipe_bits |=
      cmd_buffer->state.pass->subpass_flushes[subpass_id];
}

void genX(CmdEndRenderPass)(
    VkCommandBuffer                             commandBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (anv_batch_has_error(&cmd_buffer->batch))
      return;

   anv_cmd_buffer_resolve_subpass(cmd_buffer);

   /* Perform transitions to the final layout after all writes have
    * occurred.
    */
   cmd_buffer_subpass_transition_layouts(cmd_buffer, true);

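   /* Assuming the layout anv gives this array (subpass_count + 1 entries),
    * indexing with subpass_count selects the final entry: the flushes
    * required after the last subpass completes.
    */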
   cmd_buffer->state.pending_pipe_bits |=
      cmd_buffer->state.pass->subpass_flushes[cmd_buffer->state.pass->subpass_count];

   cmd_buffer->state.hiz_enabled = false;

#ifndef NDEBUG
   anv_dump_add_framebuffer(cmd_buffer, cmd_buffer->state.framebuffer);
#endif

   /* Remove references to render-pass-specific state. This enables us to
    * detect whether or not we're inside a render pass.
    */
   cmd_buffer->state.framebuffer = NULL;
   cmd_buffer->state.pass = NULL;
   cmd_buffer->state.subpass = NULL;
}