anv: Separate surface states by layout instead of aux_usage
[mesa.git] / src / intel / vulkan / genX_cmd_buffer.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>

#include "anv_private.h"
#include "vk_format_info.h"
#include "vk_util.h"

#include "common/gen_l3_config.h"
#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

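/* Helpers for loading command streamer registers, either from a buffer
 * object (MI_LOAD_REGISTER_MEM) or with an immediate value
 * (MI_LOAD_REGISTER_IMM).
 */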
static void
emit_lrm(struct anv_batch *batch,
         uint32_t reg, struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress  = reg;
      lrm.MemoryAddress    = (struct anv_address) { bo, offset };
   }
}

static void
emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
      lri.RegisterOffset   = reg;
      lri.DataDWord        = imm;
   }
}

#if GEN_IS_HASWELL || GEN_GEN >= 8
static void
emit_lrr(struct anv_batch *batch, uint32_t dst, uint32_t src)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_REG), lrr) {
      lrr.SourceRegisterAddress        = src;
      lrr.DestinationRegisterAddress   = dst;
   }
}
#endif

void
genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;

   /* Emit a render target cache flush.
    *
    * This isn't documented anywhere in the PRM. However, it seems to be
    * necessary prior to changing the surface state base address. Without
    * this, we get GPU hangs when using multi-level command buffers which
    * clear depth, reset state base address, and then go render stuff.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DCFlushEnable = true;
      pc.RenderTargetCacheFlushEnable = true;
      pc.CommandStreamerStallEnable = true;
   }

   anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) {
      sba.GeneralStateBaseAddress = (struct anv_address) { NULL, 0 };
      sba.GeneralStateMemoryObjectControlState = GENX(MOCS);
      sba.GeneralStateBaseAddressModifyEnable = true;

      sba.SurfaceStateBaseAddress =
         anv_cmd_buffer_surface_base_address(cmd_buffer);
      sba.SurfaceStateMemoryObjectControlState = GENX(MOCS);
      sba.SurfaceStateBaseAddressModifyEnable = true;

      sba.DynamicStateBaseAddress =
         (struct anv_address) { &device->dynamic_state_pool.block_pool.bo, 0 };
      sba.DynamicStateMemoryObjectControlState = GENX(MOCS);
      sba.DynamicStateBaseAddressModifyEnable = true;

      sba.IndirectObjectBaseAddress = (struct anv_address) { NULL, 0 };
      sba.IndirectObjectMemoryObjectControlState = GENX(MOCS);
      sba.IndirectObjectBaseAddressModifyEnable = true;

      sba.InstructionBaseAddress =
         (struct anv_address) { &device->instruction_state_pool.block_pool.bo, 0 };
      sba.InstructionMemoryObjectControlState = GENX(MOCS);
      sba.InstructionBaseAddressModifyEnable = true;

#  if (GEN_GEN >= 8)
      /* Broadwell requires that we specify a buffer size for a bunch of
       * these fields. However, since we will be growing the BOs live, we
       * just set them all to the maximum.
       */
      sba.GeneralStateBufferSize = 0xfffff;
      sba.GeneralStateBufferSizeModifyEnable = true;
      sba.DynamicStateBufferSize = 0xfffff;
      sba.DynamicStateBufferSizeModifyEnable = true;
      sba.IndirectObjectBufferSize = 0xfffff;
      sba.IndirectObjectBufferSizeModifyEnable = true;
      sba.InstructionBufferSize = 0xfffff;
      sba.InstructionBuffersizeModifyEnable = true;
#  endif
   }

   /* After re-setting the surface state base address, we have to do some
    * cache flushing so that the sampler engine will pick up the new
    * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
    * Shared Function > 3D Sampler > State > State Caching (page 96):
    *
    *    Coherency with system memory in the state cache, like the texture
    *    cache is handled partially by software. It is expected that the
    *    command stream or shader will issue Cache Flush operation or
    *    Cache_Flush sampler message to ensure that the L1 cache remains
    *    coherent with system memory.
    *
    *    [...]
    *
    *    Whenever the value of the Dynamic_State_Base_Addr,
    *    Surface_State_Base_Addr are altered, the L1 state cache must be
    *    invalidated to ensure the new surface or sampler state is fetched
    *    from system memory.
    *
    * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
    * which, according to the PIPE_CONTROL instruction documentation in the
    * Broadwell PRM:
    *
    *    Setting this bit is independent of any other bit in this packet.
    *    This bit controls the invalidation of the L1 and L2 state caches
    *    at the top of the pipe i.e. at the parsing time.
    *
    * Unfortunately, experimentation seems to indicate that state cache
    * invalidation through a PIPE_CONTROL does nothing whatsoever in
    * regards to surface state and binding tables. Instead, it seems that
    * invalidating the texture cache is what is actually needed.
    *
    * XXX: As far as we have been able to determine through
    * experimentation, flushing the texture cache appears to be
    * sufficient. The theory here is that all of the sampling/rendering
    * units cache the binding table in the texture cache. However, we have
    * yet to be able to actually confirm this.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.TextureCacheInvalidationEnable = true;
      pc.ConstantCacheInvalidationEnable = true;
      pc.StateCacheInvalidationEnable = true;
   }
}

static void
add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_state state,
                        struct anv_bo *bo, uint32_t offset)
{
   const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;

   VkResult result =
      anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
                         state.offset + isl_dev->ss.addr_offset, bo, offset);
   if (result != VK_SUCCESS)
      anv_batch_set_error(&cmd_buffer->batch, result);
}

static void
add_image_relocs(struct anv_cmd_buffer * const cmd_buffer,
                 const struct anv_image * const image,
                 const VkImageAspectFlags aspect_mask,
                 const enum isl_aux_usage aux_usage,
                 const struct anv_state state)
{
   const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
   const uint32_t surf_offset = image->offset +
      anv_image_get_surface_for_aspect_mask(image, aspect_mask)->offset;

   add_surface_state_reloc(cmd_buffer, state, image->bo, surf_offset);

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      uint32_t aux_offset = image->offset + image->aux_surface.offset;

      /* On gen7 and prior, the bottom 12 bits of the MCS base address are
       * used to store other information. This should be ok, however, because
       * surface buffer addresses are always 4K page aligned.
       */
      assert((aux_offset & 0xfff) == 0);
      uint32_t *aux_addr_dw = state.map + isl_dev->ss.aux_addr_offset;
      aux_offset += *aux_addr_dw & 0xfff;

      VkResult result =
         anv_reloc_list_add(&cmd_buffer->surface_relocs,
                            &cmd_buffer->pool->alloc,
                            state.offset + isl_dev->ss.aux_addr_offset,
                            image->bo, aux_offset);
      if (result != VK_SUCCESS)
         anv_batch_set_error(&cmd_buffer->batch, result);
   }
}

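/* Returns true if every channel of the given clear color is exactly 0 or 1.
 * These are the only fast-clear colors supported on Broadwell and earlier.
 */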
static bool
color_is_zero_one(VkClearColorValue value, enum isl_format format)
{
   if (isl_format_has_int_channel(format)) {
      for (unsigned i = 0; i < 4; i++) {
         if (value.int32[i] != 0 && value.int32[i] != 1)
            return false;
      }
   } else {
      for (unsigned i = 0; i < 4; i++) {
         if (value.float32[i] != 0.0f && value.float32[i] != 1.0f)
            return false;
      }
   }

   return true;
}

static void
color_attachment_compute_aux_usage(struct anv_device * device,
                                   struct anv_cmd_state * cmd_state,
                                   uint32_t att, VkRect2D render_area,
                                   union isl_color_value *fast_clear_color)
{
   struct anv_attachment_state *att_state = &cmd_state->attachments[att];
   struct anv_image_view *iview = cmd_state->framebuffer->attachments[att];

   if (iview->isl.base_array_layer >=
       anv_image_aux_layers(iview->image, iview->isl.base_level)) {
      /* There is no aux buffer which corresponds to the level and layer(s)
       * being accessed.
       */
      att_state->aux_usage = ISL_AUX_USAGE_NONE;
      att_state->input_aux_usage = ISL_AUX_USAGE_NONE;
      att_state->fast_clear = false;
      return;
   } else if (iview->image->aux_usage == ISL_AUX_USAGE_MCS) {
      att_state->aux_usage = ISL_AUX_USAGE_MCS;
      att_state->input_aux_usage = ISL_AUX_USAGE_MCS;
      att_state->fast_clear = false;
      return;
   } else if (iview->image->aux_usage == ISL_AUX_USAGE_CCS_E) {
      att_state->aux_usage = ISL_AUX_USAGE_CCS_E;
      att_state->input_aux_usage = ISL_AUX_USAGE_CCS_E;
   } else {
      att_state->aux_usage = ISL_AUX_USAGE_CCS_D;
      /* From the Sky Lake PRM, RENDER_SURFACE_STATE::AuxiliarySurfaceMode:
       *
       *    "If Number of Multisamples is MULTISAMPLECOUNT_1, AUX_CCS_D
       *    setting is only allowed if Surface Format supported for Fast
       *    Clear. In addition, if the surface is bound to the sampling
       *    engine, Surface Format must be supported for Render Target
       *    Compression for surfaces bound to the sampling engine."
       *
       * In other words, we can only sample from a fast-cleared image if it
       * also supports color compression.
       */
      if (isl_format_supports_ccs_e(&device->info, iview->isl.format)) {
         att_state->input_aux_usage = ISL_AUX_USAGE_CCS_D;

         /* While fast-clear resolves and partial resolves are fairly cheap in the
          * case where you render to most of the pixels, full resolves are not
          * because they potentially involve reading and writing the entire
          * framebuffer. If we can't texture with CCS_E, we should leave it off and
          * limit ourselves to fast clears.
          */
         if (cmd_state->pass->attachments[att].first_subpass_layout ==
             VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
            anv_perf_warn("Not temporarily enabling CCS_E.");
         }
      } else {
         att_state->input_aux_usage = ISL_AUX_USAGE_NONE;
      }
   }

   assert(iview->image->aux_surface.isl.usage & ISL_SURF_USAGE_CCS_BIT);

   att_state->clear_color_is_zero_one =
      color_is_zero_one(att_state->clear_value.color, iview->isl.format);
   att_state->clear_color_is_zero =
      att_state->clear_value.color.uint32[0] == 0 &&
      att_state->clear_value.color.uint32[1] == 0 &&
      att_state->clear_value.color.uint32[2] == 0 &&
      att_state->clear_value.color.uint32[3] == 0;

   if (att_state->pending_clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
      /* Start off assuming fast clears are possible */
      att_state->fast_clear = true;

      /* Potentially, we could do partial fast-clears but doing so has crazy
       * alignment restrictions. It's easier to just restrict to full size
       * fast clears for now.
       */
      if (render_area.offset.x != 0 ||
          render_area.offset.y != 0 ||
          render_area.extent.width != iview->extent.width ||
          render_area.extent.height != iview->extent.height)
         att_state->fast_clear = false;

      /* On Broadwell and earlier, we can only handle 0/1 clear colors */
      if (GEN_GEN <= 8 && !att_state->clear_color_is_zero_one)
         att_state->fast_clear = false;

      /* We allow fast clears when all aux layers of the miplevel are targeted.
       * See add_fast_clear_state_buffer() for more information. Also, because
       * we only either do a fast clear or a normal clear and not both, this
       * complies with the gen7 restriction of not fast-clearing multiple
       * layers.
       */
      if (cmd_state->framebuffer->layers !=
          anv_image_aux_layers(iview->image, iview->isl.base_level)) {
         att_state->fast_clear = false;
         if (GEN_GEN == 7) {
            anv_perf_warn("Not fast-clearing the first layer in "
                          "a multi-layer fast clear.");
         }
      }

      /* We only allow fast clears in the GENERAL layout if the auxiliary
       * buffer is always enabled and the fast-clear value is all 0's. See
       * add_fast_clear_state_buffer() for more information.
       */
      if (cmd_state->pass->attachments[att].first_subpass_layout ==
          VK_IMAGE_LAYOUT_GENERAL &&
          (!att_state->clear_color_is_zero ||
           iview->image->aux_usage == ISL_AUX_USAGE_NONE)) {
         att_state->fast_clear = false;
      }

      if (att_state->fast_clear) {
         memcpy(fast_clear_color->u32, att_state->clear_value.color.uint32,
                sizeof(fast_clear_color->u32));
      }
   } else {
      att_state->fast_clear = false;
   }
}

static bool
need_input_attachment_state(const struct anv_render_pass_attachment *att)
{
   if (!(att->usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT))
      return false;

   /* We only allocate input attachment states for color surfaces. Compression
    * is not yet enabled for depth textures and stencil doesn't allow
    * compression so we can just use the texture surface state from the view.
    */
   return vk_format_is_color(att->format);
}

/* Transitions a HiZ-enabled depth buffer from one layout to another. Unless
 * the initial layout is undefined, the HiZ buffer and depth buffer will
 * represent the same data at the end of this operation.
 */
static void
transition_depth_buffer(struct anv_cmd_buffer *cmd_buffer,
                        const struct anv_image *image,
                        VkImageLayout initial_layout,
                        VkImageLayout final_layout)
{
   assert(image);

   /* A transition is a no-op if HiZ is not enabled, or if the initial and
    * final layouts are equal.
    *
    * The undefined layout indicates that the user doesn't care about the data
    * that's currently in the buffer. Therefore, a data-preserving resolve
    * operation is not needed.
    */
   if (image->aux_usage != ISL_AUX_USAGE_HIZ || initial_layout == final_layout)
      return;

   const bool hiz_enabled = ISL_AUX_USAGE_HIZ ==
      anv_layout_to_aux_usage(&cmd_buffer->device->info, image, image->aspects,
                              initial_layout);
   const bool enable_hiz = ISL_AUX_USAGE_HIZ ==
      anv_layout_to_aux_usage(&cmd_buffer->device->info, image, image->aspects,
                              final_layout);

   enum blorp_hiz_op hiz_op;
   if (hiz_enabled && !enable_hiz) {
      hiz_op = BLORP_HIZ_OP_DEPTH_RESOLVE;
   } else if (!hiz_enabled && enable_hiz) {
      hiz_op = BLORP_HIZ_OP_HIZ_RESOLVE;
   } else {
      assert(hiz_enabled == enable_hiz);
      /* If the same buffer will be used, no resolves are necessary. */
      hiz_op = BLORP_HIZ_OP_NONE;
   }

   if (hiz_op != BLORP_HIZ_OP_NONE)
      anv_gen8_hiz_op_resolve(cmd_buffer, image, hiz_op);
}

enum fast_clear_state_field {
   FAST_CLEAR_STATE_FIELD_CLEAR_COLOR,
   FAST_CLEAR_STATE_FIELD_NEEDS_RESOLVE,
};

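/* The fast clear state buffer lives in the image's BO immediately after the
 * auxiliary surface. It holds one entry per miplevel, each consisting of the
 * clear color dword(s) followed by a needs-resolve flag.
 */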
static inline uint32_t
get_fast_clear_state_offset(const struct anv_device *device,
                            const struct anv_image *image,
                            unsigned level, enum fast_clear_state_field field)
{
   assert(device && image);
   assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
   assert(level < anv_image_aux_levels(image));
   uint32_t offset = image->offset + image->aux_surface.offset +
                     image->aux_surface.isl.size +
                     anv_fast_clear_state_entry_size(device) * level;

   switch (field) {
   case FAST_CLEAR_STATE_FIELD_NEEDS_RESOLVE:
      offset += device->isl_dev.ss.clear_value_size;
      /* Fall-through */
   case FAST_CLEAR_STATE_FIELD_CLEAR_COLOR:
      break;
   }

   assert(offset < image->offset + image->size);
   return offset;
}

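/* MMIO addresses of the two 64-bit source registers consumed by the
 * MI_PREDICATE command.
 */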
#define MI_PREDICATE_SRC0 0x2400
#define MI_PREDICATE_SRC1 0x2408

/* Manages the state of a color image subresource to ensure resolves are
 * performed properly.
 */
static void
genX(set_image_needs_resolve)(struct anv_cmd_buffer *cmd_buffer,
                              const struct anv_image *image,
                              unsigned level, bool needs_resolve)
{
   assert(cmd_buffer && image);
   assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
   assert(level < anv_image_aux_levels(image));

   const uint32_t resolve_flag_offset =
      get_fast_clear_state_offset(cmd_buffer->device, image, level,
                                  FAST_CLEAR_STATE_FIELD_NEEDS_RESOLVE);

   /* The HW docs say that there is no way to guarantee the completion of
    * the following command. We use it nevertheless because it shows no
    * issues in testing and is currently being used in the GL driver.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdi) {
      sdi.Address = (struct anv_address) { image->bo, resolve_flag_offset };
      sdi.ImmediateData = needs_resolve;
   }
}

static void
genX(load_needs_resolve_predicate)(struct anv_cmd_buffer *cmd_buffer,
                                   const struct anv_image *image,
                                   unsigned level)
{
   assert(cmd_buffer && image);
   assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
   assert(level < anv_image_aux_levels(image));

   const uint32_t resolve_flag_offset =
      get_fast_clear_state_offset(cmd_buffer->device, image, level,
                                  FAST_CLEAR_STATE_FIELD_NEEDS_RESOLVE);

   /* Make the pending predicated resolve a no-op if one is not needed.
    * predicate = do_resolve = resolve_flag != 0;
    */
   emit_lri(&cmd_buffer->batch, MI_PREDICATE_SRC1    , 0);
   emit_lri(&cmd_buffer->batch, MI_PREDICATE_SRC1 + 4, 0);
   emit_lri(&cmd_buffer->batch, MI_PREDICATE_SRC0    , 0);
   emit_lrm(&cmd_buffer->batch, MI_PREDICATE_SRC0 + 4,
            image->bo, resolve_flag_offset);
   anv_batch_emit(&cmd_buffer->batch, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOADINV;
      mip.CombineOperation = COMBINE_SET;
      mip.CompareOperation = COMPARE_SRCS_EQUAL;
   }
}

static void
init_fast_clear_state_entry(struct anv_cmd_buffer *cmd_buffer,
                            const struct anv_image *image,
                            unsigned level)
{
   assert(cmd_buffer && image);
   assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
   assert(level < anv_image_aux_levels(image));

   /* The resolve flag should be updated to signify that fast-clear/compression
    * data needs to be removed when leaving the undefined layout. Such data
    * may need to be removed if it would cause accesses to the color buffer
    * to return incorrect data. The fast clear data in CCS_D buffers should
    * be removed because CCS_D isn't enabled all the time.
    */
   genX(set_image_needs_resolve)(cmd_buffer, image, level,
                                 image->aux_usage == ISL_AUX_USAGE_NONE);

   /* The fast clear value dword(s) will be copied into a surface state object.
    * Ensure that the restrictions of the fields in the dword(s) are followed.
    *
    * CCS buffers on SKL+ can have any value set for the clear colors.
    */
   if (image->samples == 1 && GEN_GEN >= 9)
      return;

   /* Other combinations of auxiliary buffers and platforms require specific
    * values in the clear value dword(s).
    */
   unsigned i = 0;
   for (; i < cmd_buffer->device->isl_dev.ss.clear_value_size; i += 4) {
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdi) {
         const uint32_t entry_offset =
            get_fast_clear_state_offset(cmd_buffer->device, image, level,
                                        FAST_CLEAR_STATE_FIELD_CLEAR_COLOR);
         sdi.Address = (struct anv_address) { image->bo, entry_offset + i };

         if (GEN_GEN >= 9) {
            /* MCS buffers on SKL+ can only have 1/0 clear colors. */
            assert(image->aux_usage == ISL_AUX_USAGE_MCS);
            sdi.ImmediateData = 0;
         } else if (GEN_VERSIONx10 >= 75) {
            /* Pre-SKL, the dword containing the clear values also contains
             * other fields, so we need to initialize those fields to match the
             * values that would be in a color attachment.
             */
            assert(i == 0);
            sdi.ImmediateData = ISL_CHANNEL_SELECT_RED   << 25 |
                                ISL_CHANNEL_SELECT_GREEN << 22 |
                                ISL_CHANNEL_SELECT_BLUE  << 19 |
                                ISL_CHANNEL_SELECT_ALPHA << 16;
         } else if (GEN_VERSIONx10 == 70) {
            /* On IVB, the dword containing the clear values also contains
             * other fields that must be zero or can be zero.
             */
            assert(i == 0);
            sdi.ImmediateData = 0;
         }
      }
   }
}

/* Copy the fast-clear value dword(s) between a surface state object and an
 * image's fast clear state buffer.
 */
static void
genX(copy_fast_clear_dwords)(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_state surface_state,
                             const struct anv_image *image,
                             unsigned level,
                             bool copy_from_surface_state)
{
   assert(cmd_buffer && image);
   assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
   assert(level < anv_image_aux_levels(image));

   struct anv_bo *ss_bo =
      &cmd_buffer->device->surface_state_pool.block_pool.bo;
   uint32_t ss_clear_offset = surface_state.offset +
      cmd_buffer->device->isl_dev.ss.clear_value_offset;
   uint32_t entry_offset =
      get_fast_clear_state_offset(cmd_buffer->device, image, level,
                                  FAST_CLEAR_STATE_FIELD_CLEAR_COLOR);
   unsigned copy_size = cmd_buffer->device->isl_dev.ss.clear_value_size;

   if (copy_from_surface_state) {
      genX(cmd_buffer_mi_memcpy)(cmd_buffer, image->bo, entry_offset,
                                 ss_bo, ss_clear_offset, copy_size);
   } else {
      genX(cmd_buffer_mi_memcpy)(cmd_buffer, ss_bo, ss_clear_offset,
                                 image->bo, entry_offset, copy_size);

      /* Updating a surface state object may require that the state cache be
       * invalidated. From the SKL PRM, Shared Functions -> State -> State
       * Caching:
       *
       *    Whenever the RENDER_SURFACE_STATE object in memory pointed to by
       *    the Binding Table Pointer (BTP) and Binding Table Index (BTI) is
       *    modified [...], the L1 state cache must be invalidated to ensure
       *    the new surface or sampler state is fetched from system memory.
       *
       * In testing, SKL doesn't actually seem to need this, but HSW does.
       */
      cmd_buffer->state.pending_pipe_bits |=
         ANV_PIPE_STATE_CACHE_INVALIDATE_BIT;
   }
}

/**
 * @brief Transitions a color buffer from one layout to another.
 *
 * See section 6.1.1. Image Layout Transitions of the Vulkan 1.0.50 spec for
 * more information.
 *
 * @param level_count VK_REMAINING_MIP_LEVELS isn't supported.
 * @param layer_count VK_REMAINING_ARRAY_LAYERS isn't supported. For 3D images,
 *                    this represents the maximum layers to transition at each
 *                    specified miplevel.
 */
static void
transition_color_buffer(struct anv_cmd_buffer *cmd_buffer,
                        const struct anv_image *image,
                        const uint32_t base_level, uint32_t level_count,
                        uint32_t base_layer, uint32_t layer_count,
                        VkImageLayout initial_layout,
                        VkImageLayout final_layout)
{
   /* Validate the inputs. */
   assert(cmd_buffer);
   assert(image && image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
   /* These values aren't supported for simplicity's sake. */
   assert(level_count != VK_REMAINING_MIP_LEVELS &&
          layer_count != VK_REMAINING_ARRAY_LAYERS);
   /* Ensure the subresource range is valid. */
   uint64_t last_level_num = base_level + level_count;
   const uint32_t max_depth = anv_minify(image->extent.depth, base_level);
   const uint32_t image_layers = MAX2(image->array_size, max_depth);
   assert((uint64_t)base_layer + layer_count <= image_layers);
   assert(last_level_num <= image->levels);
   /* The spec disallows these final layouts. */
   assert(final_layout != VK_IMAGE_LAYOUT_UNDEFINED &&
          final_layout != VK_IMAGE_LAYOUT_PREINITIALIZED);

   /* No work is necessary if the layout stays the same or if this subresource
    * range lacks auxiliary data.
    */
   if (initial_layout == final_layout ||
       base_layer >= anv_image_aux_layers(image, base_level))
      return;

   /* A transition of a 3D subresource works on all slices at a time. */
   if (image->type == VK_IMAGE_TYPE_3D) {
      base_layer = 0;
      layer_count = anv_minify(image->extent.depth, base_level);
   }

   /* We're interested in the subresource range subset that has aux data. */
   level_count = MIN2(level_count, anv_image_aux_levels(image) - base_level);
   layer_count = MIN2(layer_count,
                      anv_image_aux_layers(image, base_level) - base_layer);
   last_level_num = base_level + level_count;

   /* Record whether or not the layout is undefined. Pre-initialized images
    * with auxiliary buffers have a non-linear layout and are thus undefined.
    */
   assert(image->tiling == VK_IMAGE_TILING_OPTIMAL);
   const bool undef_layout = initial_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
                             initial_layout == VK_IMAGE_LAYOUT_PREINITIALIZED;

   /* Do preparatory work before the resolve operation or return early if no
    * resolve is actually needed.
    */
   if (undef_layout) {
      /* A subresource in the undefined layout may have been aliased and
       * populated with any arrangement of bits. Therefore, we must initialize
       * the related aux buffer and clear buffer entry with desirable values.
       *
       * Initialize the relevant clear buffer entries.
       */
      for (unsigned level = base_level; level < last_level_num; level++)
         init_fast_clear_state_entry(cmd_buffer, image, level);

      /* Initialize the aux buffers to enable correct rendering. This operation
       * requires up to two steps: one to rid the aux buffer of data that may
       * cause GPU hangs, and another to ensure that writes done without aux
       * will be visible to reads done with aux.
       *
       * Having an aux buffer with invalid data is possible for CCS buffers on
       * SKL+ and for MCS buffers with certain sample counts (2x and 8x). One
       * easy way to get to a valid state is to fast-clear the specified range.
       *
       * Even for MCS buffers that have sample counts that don't require
       * certain bits to be reserved (4x and 16x), we're unsure if the hardware
       * will be okay with the sample mappings given by the undefined buffer.
       * We don't have any data to show that this is a problem, but we want to
       * avoid causing difficult-to-debug problems.
       */
      if ((GEN_GEN >= 9 && image->samples == 1) || image->samples > 1) {
         if (image->samples == 4 || image->samples == 16) {
            anv_perf_warn("Doing a potentially unnecessary fast-clear to "
                          "define an MCS buffer.");
         }

         anv_image_fast_clear(cmd_buffer, image, base_level, level_count,
                              base_layer, layer_count);
      }
      /* At this point, some elements of the CCS buffer may have the fast-clear
       * bit-arrangement. As the user writes to a subresource, we need to have
       * the associated CCS elements enter the ambiguated state. This enables
       * reads (implicit or explicit) to reflect the user-written data instead
       * of the clear color. The only time such elements will not change their
       * state as described above, is in a final layout that doesn't have CCS
       * enabled. In this case, we must force the associated CCS buffers of the
       * specified range to enter the ambiguated state in advance.
       */
      if (image->samples == 1 && image->aux_usage != ISL_AUX_USAGE_CCS_E &&
          final_layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
         /* The CCS_D buffer may not be enabled in the final layout. Continue
          * executing this function to perform a resolve.
          */
         anv_perf_warn("Performing an additional resolve for CCS_D layout "
                       "transition. Consider always leaving it on or "
                       "performing an ambiguation pass.");
      } else {
         /* Writes in the final layout will be aware of the auxiliary buffer.
          * In addition, the clear buffer entries and the auxiliary buffers
          * have been populated with values that will result in correct
          * rendering.
          */
         return;
      }
   } else if (initial_layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
      /* Resolves are only necessary if the subresource may contain blocks
       * fast-cleared to values unsupported in other layouts. This only occurs
       * if the initial layout is COLOR_ATTACHMENT_OPTIMAL.
       */
      return;
   } else if (image->samples > 1) {
      /* MCS buffers don't need resolving. */
      return;
   }

   /* Perform a resolve to synchronize data between the main and aux buffer.
    * Before we begin, we must satisfy the cache flushing requirement specified
    * in the Sky Lake PRM Vol. 7, "MCS Buffer for Render Target(s)":
    *
    *    Any transition from any value in {Clear, Render, Resolve} to a
    *    different value in {Clear, Render, Resolve} requires end of pipe
    *    synchronization.
    *
    * We perform a flush of the write cache before and after the clear and
    * resolve operations to meet this requirement.
    *
    * Unlike other drawing, fast clear operations are not properly
    * synchronized. The first PIPE_CONTROL here likely ensures that the
    * contents of the previous render or clear hit the render target before we
    * resolve and the second likely ensures that the resolve is complete before
    * we do any more rendering or clearing.
    */
   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   for (uint32_t level = base_level; level < last_level_num; level++) {

      /* The number of layers changes at each 3D miplevel. */
      if (image->type == VK_IMAGE_TYPE_3D) {
         layer_count = MIN2(layer_count, anv_image_aux_layers(image, level));
      }

      genX(load_needs_resolve_predicate)(cmd_buffer, image, level);

      /* Create a surface state with the right clear color and perform the
       * resolve.
       */
      struct anv_state surface_state =
         anv_cmd_buffer_alloc_surface_state(cmd_buffer);
      isl_surf_fill_state(&cmd_buffer->device->isl_dev, surface_state.map,
                          .surf = &image->color_surface.isl,
                          .view = &(struct isl_view) {
                             .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
                             .format = image->color_surface.isl.format,
                             .swizzle = ISL_SWIZZLE_IDENTITY,
                             .base_level = level,
                             .levels = 1,
                             .base_array_layer = base_layer,
                             .array_len = layer_count,
                          },
                          .aux_surf = &image->aux_surface.isl,
                          .aux_usage = image->aux_usage == ISL_AUX_USAGE_NONE ?
                                       ISL_AUX_USAGE_CCS_D : image->aux_usage,
                          .mocs = cmd_buffer->device->default_mocs);
      add_image_relocs(cmd_buffer, image, VK_IMAGE_ASPECT_COLOR_BIT,
                       image->aux_usage == ISL_AUX_USAGE_CCS_E ?
                       ISL_AUX_USAGE_CCS_E : ISL_AUX_USAGE_CCS_D,
                       surface_state);
      anv_state_flush(cmd_buffer->device, surface_state);
      genX(copy_fast_clear_dwords)(cmd_buffer, surface_state, image, level,
                                   false /* copy to ss */);
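      /* A partial resolve should suffice for CCS_E: only blocks in the
       * fast-cleared state need to be rewritten with the clear color. CCS_D
       * surfaces get a full resolve since their aux data may not be used at
       * all in the final layout.
       */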
      anv_ccs_resolve(cmd_buffer, surface_state, image, level, layer_count,
                      image->aux_usage == ISL_AUX_USAGE_CCS_E ?
                      BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL :
                      BLORP_FAST_CLEAR_OP_RESOLVE_FULL);

      genX(set_image_needs_resolve)(cmd_buffer, image, level, false);
   }

   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
}

/**
 * Setup anv_cmd_state::attachments for vkCmdBeginRenderPass.
 */
static VkResult
genX(cmd_buffer_setup_attachments)(struct anv_cmd_buffer *cmd_buffer,
                                   struct anv_render_pass *pass,
                                   const VkRenderPassBeginInfo *begin)
{
   const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
   struct anv_cmd_state *state = &cmd_buffer->state;

   vk_free(&cmd_buffer->pool->alloc, state->attachments);

   if (pass->attachment_count > 0) {
      state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
                                    pass->attachment_count *
                                    sizeof(state->attachments[0]),
                                    8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (state->attachments == NULL) {
         /* Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
         return anv_batch_set_error(&cmd_buffer->batch,
                                    VK_ERROR_OUT_OF_HOST_MEMORY);
      }
   } else {
      state->attachments = NULL;
   }

   /* Reserve one for the NULL state. */
   unsigned num_states = 1;
   for (uint32_t i = 0; i < pass->attachment_count; ++i) {
      if (vk_format_is_color(pass->attachments[i].format))
         num_states++;

      if (need_input_attachment_state(&pass->attachments[i]))
         num_states++;
   }

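   /* Surface states are allocated back-to-back from a single stream
    * allocation, so the stride between them is the state size rounded up to
    * the required alignment.
    */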
   const uint32_t ss_stride = align_u32(isl_dev->ss.size, isl_dev->ss.align);
   state->render_pass_states =
      anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
                             num_states * ss_stride, isl_dev->ss.align);

   struct anv_state next_state = state->render_pass_states;
   next_state.alloc_size = isl_dev->ss.size;

   state->null_surface_state = next_state;
   next_state.offset += ss_stride;
   next_state.map += ss_stride;

   for (uint32_t i = 0; i < pass->attachment_count; ++i) {
      if (vk_format_is_color(pass->attachments[i].format)) {
         state->attachments[i].color_rt_state = next_state;
         next_state.offset += ss_stride;
         next_state.map += ss_stride;
      }

      if (need_input_attachment_state(&pass->attachments[i])) {
         state->attachments[i].input_att_state = next_state;
         next_state.offset += ss_stride;
         next_state.map += ss_stride;
      }
   }
   assert(next_state.offset == state->render_pass_states.offset +
                               state->render_pass_states.alloc_size);

   if (begin) {
      ANV_FROM_HANDLE(anv_framebuffer, framebuffer, begin->framebuffer);
      assert(pass->attachment_count == framebuffer->attachment_count);

      struct GENX(RENDER_SURFACE_STATE) null_ss = {
         .SurfaceType = SURFTYPE_NULL,
         .SurfaceArray = framebuffer->layers > 0,
         .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM,
#if GEN_GEN >= 8
         .TileMode = YMAJOR,
#else
         .TiledSurface = true,
#endif
         .Width = framebuffer->width - 1,
         .Height = framebuffer->height - 1,
         .Depth = framebuffer->layers - 1,
         .RenderTargetViewExtent = framebuffer->layers - 1,
      };
      GENX(RENDER_SURFACE_STATE_pack)(NULL, state->null_surface_state.map,
                                      &null_ss);

      for (uint32_t i = 0; i < pass->attachment_count; ++i) {
         struct anv_render_pass_attachment *att = &pass->attachments[i];
         VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
         VkImageAspectFlags clear_aspects = 0;

         if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
            /* color attachment */
            if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
               clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
            }
         } else {
            /* depthstencil attachment */
            if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
                att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
               clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
            }
            if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
                att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
               clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
            }
         }

         state->attachments[i].current_layout = att->initial_layout;
         state->attachments[i].pending_clear_aspects = clear_aspects;
         if (clear_aspects)
            state->attachments[i].clear_value = begin->pClearValues[i];

         struct anv_image_view *iview = framebuffer->attachments[i];
         anv_assert(iview->vk_format == att->format);

         union isl_color_value clear_color = { .u32 = { 0, } };
         if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
            color_attachment_compute_aux_usage(cmd_buffer->device,
                                               state, i, begin->renderArea,
                                               &clear_color);

            struct isl_view view = iview->isl;
            view.usage |= ISL_SURF_USAGE_RENDER_TARGET_BIT;
            view.swizzle = anv_swizzle_for_render(view.swizzle);
            isl_surf_fill_state(isl_dev,
                                state->attachments[i].color_rt_state.map,
                                .surf = &iview->image->color_surface.isl,
                                .view = &view,
                                .aux_surf = &iview->image->aux_surface.isl,
                                .aux_usage = state->attachments[i].aux_usage,
                                .clear_color = clear_color,
                                .mocs = cmd_buffer->device->default_mocs);

            add_image_relocs(cmd_buffer, iview->image, iview->aspect_mask,
                             state->attachments[i].aux_usage,
                             state->attachments[i].color_rt_state);
         } else {
            /* This field will be initialized after the first subpass
             * transition.
             */
            state->attachments[i].aux_usage = ISL_AUX_USAGE_NONE;

            state->attachments[i].input_aux_usage = ISL_AUX_USAGE_NONE;
         }

         if (need_input_attachment_state(&pass->attachments[i])) {
            struct isl_view view = iview->isl;
            view.usage |= ISL_SURF_USAGE_TEXTURE_BIT;
            isl_surf_fill_state(isl_dev,
                                state->attachments[i].input_att_state.map,
                                .surf = &iview->image->color_surface.isl,
                                .view = &view,
                                .aux_surf = &iview->image->aux_surface.isl,
                                .aux_usage = state->attachments[i].input_aux_usage,
                                .clear_color = clear_color,
                                .mocs = cmd_buffer->device->default_mocs);

            add_image_relocs(cmd_buffer, iview->image, iview->aspect_mask,
                             state->attachments[i].input_aux_usage,
                             state->attachments[i].input_att_state);
         }
      }

      anv_state_flush(cmd_buffer->device, state->render_pass_states);
   }

   return VK_SUCCESS;
}

VkResult
genX(BeginCommandBuffer)(
    VkCommandBuffer                             commandBuffer,
    const VkCommandBufferBeginInfo*             pBeginInfo)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* If this is the first vkBeginCommandBuffer, we must *initialize* the
    * command buffer's state. Otherwise, we must *reset* its state. In both
    * cases we reset it.
    *
    * From the Vulkan 1.0 spec:
    *
    *    If a command buffer is in the executable state and the command buffer
    *    was allocated from a command pool with the
    *    VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT flag set, then
    *    vkBeginCommandBuffer implicitly resets the command buffer, behaving
    *    as if vkResetCommandBuffer had been called with
    *    VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT not set. It then puts
    *    the command buffer in the recording state.
    */
   anv_cmd_buffer_reset(cmd_buffer);

   cmd_buffer->usage_flags = pBeginInfo->flags;

   assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
          !(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT));

   genX(cmd_buffer_emit_state_base_address)(cmd_buffer);

   /* We sometimes store vertex data in the dynamic state buffer for blorp
    * operations and our dynamic state stream may re-use data from previous
    * command buffers. In order to prevent stale cache data, we flush the VF
    * cache. We could do this on every blorp call but that's not really
    * needed as all of the data will get written by the CPU prior to the GPU
    * executing anything. The chances are fairly high that they will use
    * blorp at least once per primary command buffer so it shouldn't be
    * wasted.
    */
   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
      cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;

   VkResult result = VK_SUCCESS;
   if (cmd_buffer->usage_flags &
       VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
      cmd_buffer->state.pass =
         anv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
      cmd_buffer->state.subpass =
         &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
      cmd_buffer->state.framebuffer = NULL;

      result = genX(cmd_buffer_setup_attachments)(cmd_buffer,
                                                  cmd_buffer->state.pass, NULL);

      cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
   }

   return result;
}

VkResult
genX(EndCommandBuffer)(
    VkCommandBuffer                             commandBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (anv_batch_has_error(&cmd_buffer->batch))
      return cmd_buffer->batch.status;

   /* We want every command buffer to start with the PMA fix in a known state,
    * so we disable it at the end of the command buffer.
    */
   genX(cmd_buffer_enable_pma_fix)(cmd_buffer, false);

   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

   anv_cmd_buffer_end_batch_buffer(cmd_buffer);

   return VK_SUCCESS;
}

void
genX(CmdExecuteCommands)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCmdBuffers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);

   assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);

   if (anv_batch_has_error(&primary->batch))
      return;

   /* The secondary command buffers will assume that the PMA fix is disabled
    * when they begin executing. Make sure this is true.
    */
   genX(cmd_buffer_enable_pma_fix)(primary, false);

   /* The secondary command buffer doesn't know which textures etc. have been
    * flushed prior to their execution. Apply those flushes now.
    */
   genX(cmd_buffer_apply_pipe_flushes)(primary);

   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);

      assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
      assert(!anv_batch_has_error(&secondary->batch));

      if (secondary->usage_flags &
          VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
         /* If we're continuing a render pass from the primary, we need to
          * copy the surface states for the current subpass into the storage
          * we allocated for them in BeginCommandBuffer.
          */
         struct anv_bo *ss_bo =
            &primary->device->surface_state_pool.block_pool.bo;
         struct anv_state src_state = primary->state.render_pass_states;
         struct anv_state dst_state = secondary->state.render_pass_states;
         assert(src_state.alloc_size == dst_state.alloc_size);

         genX(cmd_buffer_so_memcpy)(primary, ss_bo, dst_state.offset,
                                    ss_bo, src_state.offset,
                                    src_state.alloc_size);
      }

      anv_cmd_buffer_add_secondary(primary, secondary);
   }

   /* Each of the secondary command buffers will use its own state base
    * address. We need to re-emit state base address for the primary after
    * all of the secondaries are done.
    *
    * TODO: Maybe we want to make this a dirty bit to avoid extra state base
    * address calls?
    */
   genX(cmd_buffer_emit_state_base_address)(primary);
}

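/* Platform-specific default values OR'ed into L3SQCREG1 below to initialize
 * the fields that anv_pack_struct() does not cover.
 */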
#define IVB_L3SQCREG1_SQGHPCI_DEFAULT     0x00730000
#define VLV_L3SQCREG1_SQGHPCI_DEFAULT     0x00d30000
#define HSW_L3SQCREG1_SQGHPCI_DEFAULT     0x00610000

/**
 * Program the hardware to use the specified L3 configuration.
 */
void
genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer,
                           const struct gen_l3_config *cfg)
{
   assert(cfg);
   if (cfg == cmd_buffer->state.current_l3_config)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_L3)) {
      fprintf(stderr, "L3 config transition: ");
      gen_dump_l3_config(cfg, stderr);
   }

   const bool has_slm = cfg->n[GEN_L3P_SLM];

   /* According to the hardware docs, the L3 partitioning can only be changed
    * while the pipeline is completely drained and the caches are flushed,
    * which involves a first PIPE_CONTROL flush which stalls the pipeline...
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DCFlushEnable = true;
      pc.PostSyncOperation = NoWrite;
      pc.CommandStreamerStallEnable = true;
   }

   /* ...followed by a second pipelined PIPE_CONTROL that initiates
    * invalidation of the relevant caches. Note that because RO invalidation
    * happens at the top of the pipeline (i.e. right away as the PIPE_CONTROL
    * command is processed by the CS) we cannot combine it with the previous
    * stalling flush as the hardware documentation suggests, because that
    * would cause the CS to stall on previous rendering *after* RO
    * invalidation and wouldn't prevent the RO caches from being polluted by
    * concurrent rendering before the stall completes. This intentionally
    * doesn't implement the SKL+ hardware workaround suggesting to enable CS
    * stall on PIPE_CONTROLs with the texture cache invalidation bit set for
    * GPGPU workloads because the previous and subsequent PIPE_CONTROLs
    * already guarantee that there is no concurrent GPGPU kernel execution
    * (see SKL HSD 2132585).
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.TextureCacheInvalidationEnable = true;
      pc.ConstantCacheInvalidationEnable = true;
      pc.InstructionCacheInvalidateEnable = true;
      pc.StateCacheInvalidationEnable = true;
      pc.PostSyncOperation = NoWrite;
   }

   /* Now send a third stalling flush to make sure that invalidation is
    * complete when the L3 configuration registers are modified.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DCFlushEnable = true;
      pc.PostSyncOperation = NoWrite;
      pc.CommandStreamerStallEnable = true;
   }

#if GEN_GEN >= 8

   assert(!cfg->n[GEN_L3P_IS] && !cfg->n[GEN_L3P_C] && !cfg->n[GEN_L3P_T]);

   uint32_t l3cr;
   anv_pack_struct(&l3cr, GENX(L3CNTLREG),
                   .SLMEnable = has_slm,
                   .URBAllocation = cfg->n[GEN_L3P_URB],
                   .ROAllocation = cfg->n[GEN_L3P_RO],
                   .DCAllocation = cfg->n[GEN_L3P_DC],
                   .AllAllocation = cfg->n[GEN_L3P_ALL]);

   /* Set up the L3 partitioning. */
   emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG_num), l3cr);

#else

   const bool has_dc = cfg->n[GEN_L3P_DC] || cfg->n[GEN_L3P_ALL];
   const bool has_is = cfg->n[GEN_L3P_IS] || cfg->n[GEN_L3P_RO] ||
                       cfg->n[GEN_L3P_ALL];
   const bool has_c = cfg->n[GEN_L3P_C] || cfg->n[GEN_L3P_RO] ||
                      cfg->n[GEN_L3P_ALL];
   const bool has_t = cfg->n[GEN_L3P_T] || cfg->n[GEN_L3P_RO] ||
                      cfg->n[GEN_L3P_ALL];

   assert(!cfg->n[GEN_L3P_ALL]);

   /* When enabled, SLM only uses a portion of the L3 on half of the banks;
    * the matching space on the remaining banks has to be allocated to a
    * client (URB for all validated configurations) set to the
    * lower-bandwidth 2-bank address hashing mode.
    */
   const struct gen_device_info *devinfo = &cmd_buffer->device->info;
   const bool urb_low_bw = has_slm && !devinfo->is_baytrail;
   assert(!urb_low_bw || cfg->n[GEN_L3P_URB] == cfg->n[GEN_L3P_SLM]);

   /* Minimum number of ways that can be allocated to the URB. */
   MAYBE_UNUSED const unsigned n0_urb = devinfo->is_baytrail ? 32 : 0;
   assert(cfg->n[GEN_L3P_URB] >= n0_urb);

   uint32_t l3sqcr1, l3cr2, l3cr3;
   anv_pack_struct(&l3sqcr1, GENX(L3SQCREG1),
                   .ConvertDC_UC = !has_dc,
                   .ConvertIS_UC = !has_is,
                   .ConvertC_UC = !has_c,
                   .ConvertT_UC = !has_t);
   l3sqcr1 |=
      GEN_IS_HASWELL ? HSW_L3SQCREG1_SQGHPCI_DEFAULT :
      devinfo->is_baytrail ? VLV_L3SQCREG1_SQGHPCI_DEFAULT :
      IVB_L3SQCREG1_SQGHPCI_DEFAULT;

   anv_pack_struct(&l3cr2, GENX(L3CNTLREG2),
                   .SLMEnable = has_slm,
                   .URBLowBandwidth = urb_low_bw,
                   .URBAllocation = cfg->n[GEN_L3P_URB] - n0_urb,
#if !GEN_IS_HASWELL
                   .ALLAllocation = cfg->n[GEN_L3P_ALL],
#endif
                   .ROAllocation = cfg->n[GEN_L3P_RO],
                   .DCAllocation = cfg->n[GEN_L3P_DC]);

   anv_pack_struct(&l3cr3, GENX(L3CNTLREG3),
                   .ISAllocation = cfg->n[GEN_L3P_IS],
                   .ISLowBandwidth = 0,
                   .CAllocation = cfg->n[GEN_L3P_C],
                   .CLowBandwidth = 0,
                   .TAllocation = cfg->n[GEN_L3P_T],
                   .TLowBandwidth = 0);

   /* Set up the L3 partitioning. */
   emit_lri(&cmd_buffer->batch, GENX(L3SQCREG1_num), l3sqcr1);
   emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG2_num), l3cr2);
   emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG3_num), l3cr3);

#if GEN_IS_HASWELL
   if (cmd_buffer->device->instance->physicalDevice.cmd_parser_version >= 4) {
      /* Enable L3 atomics on HSW if we have a DC partition, otherwise keep
       * them disabled to avoid crashing the system hard.
       */
      uint32_t scratch1, chicken3;
      anv_pack_struct(&scratch1, GENX(SCRATCH1),
                      .L3AtomicDisable = !has_dc);
      anv_pack_struct(&chicken3, GENX(CHICKEN3),
                      .L3AtomicDisableMask = true,
                      .L3AtomicDisable = !has_dc);
      emit_lri(&cmd_buffer->batch, GENX(SCRATCH1_num), scratch1);
      emit_lri(&cmd_buffer->batch, GENX(CHICKEN3_num), chicken3);
   }
#endif

#endif

   cmd_buffer->state.current_l3_config = cfg;
}

void
genX(cmd_buffer_apply_pipe_flushes)(struct anv_cmd_buffer *cmd_buffer)
{
   enum anv_pipe_bits bits = cmd_buffer->state.pending_pipe_bits;

   /* Flushes are pipelined while invalidations are handled immediately.
    * Therefore, if we're flushing anything then we need to schedule a stall
    * before any invalidations can happen.
    */
   if (bits & ANV_PIPE_FLUSH_BITS)
      bits |= ANV_PIPE_NEEDS_CS_STALL_BIT;

   /* If we're going to do an invalidate and we have a pending CS stall that
    * has yet to be resolved, we do the CS stall now.
    */
   if ((bits & ANV_PIPE_INVALIDATE_BITS) &&
       (bits & ANV_PIPE_NEEDS_CS_STALL_BIT)) {
      bits |= ANV_PIPE_CS_STALL_BIT;
      bits &= ~ANV_PIPE_NEEDS_CS_STALL_BIT;
   }

   if (bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT)) {
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
         pipe.DepthCacheFlushEnable = bits & ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
         pipe.DCFlushEnable = bits & ANV_PIPE_DATA_CACHE_FLUSH_BIT;
         pipe.RenderTargetCacheFlushEnable =
            bits & ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;

         pipe.DepthStallEnable = bits & ANV_PIPE_DEPTH_STALL_BIT;
         pipe.CommandStreamerStallEnable = bits & ANV_PIPE_CS_STALL_BIT;
         pipe.StallAtPixelScoreboard = bits & ANV_PIPE_STALL_AT_SCOREBOARD_BIT;

         /*
          * According to the Broadwell documentation, any PIPE_CONTROL with the
          * "Command Streamer Stall" bit set must also have another bit set,
          * with six different options:
          *
          *  - Render Target Cache Flush
          *  - Depth Cache Flush
          *  - Stall at Pixel Scoreboard
          *  - Post-Sync Operation
          *  - Depth Stall
          *  - DC Flush Enable
          *
          * I chose "Stall at Pixel Scoreboard" since that's what we use in
          * mesa and it seems to work fine. The choice is fairly arbitrary.
          */
         if ((bits & ANV_PIPE_CS_STALL_BIT) &&
             !(bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_DEPTH_STALL_BIT |
                       ANV_PIPE_STALL_AT_SCOREBOARD_BIT)))
            pipe.StallAtPixelScoreboard = true;
      }

      bits &= ~(ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT);
   }

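   /* Invalidations are emitted in a separate, second PIPE_CONTROL so that
    * they only take effect after any flushes requested above have completed.
    */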
   if (bits & ANV_PIPE_INVALIDATE_BITS) {
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
         pipe.StateCacheInvalidationEnable =
            bits & ANV_PIPE_STATE_CACHE_INVALIDATE_BIT;
         pipe.ConstantCacheInvalidationEnable =
            bits & ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
         pipe.VFCacheInvalidationEnable =
            bits & ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
         pipe.TextureCacheInvalidationEnable =
            bits & ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
         pipe.InstructionCacheInvalidateEnable =
            bits & ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT;
      }

      bits &= ~ANV_PIPE_INVALIDATE_BITS;
   }

   cmd_buffer->state.pending_pipe_bits = bits;
}

void genX(CmdPipelineBarrier)(
    VkCommandBuffer                             commandBuffer,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    VkBool32                                    byRegion,
    uint32_t                                    memoryBarrierCount,
    const VkMemoryBarrier*                      pMemoryBarriers,
    uint32_t                                    bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
    uint32_t                                    imageMemoryBarrierCount,
    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* XXX: Right now, we're really dumb and just flush whatever categories
    * the app asks for. One of these days we may make this a bit better
    * but right now that's all the hardware allows for in most areas.
    */
   VkAccessFlags src_flags = 0;
   VkAccessFlags dst_flags = 0;

   for (uint32_t i = 0; i < memoryBarrierCount; i++) {
      src_flags |= pMemoryBarriers[i].srcAccessMask;
      dst_flags |= pMemoryBarriers[i].dstAccessMask;
   }

   for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
      src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
      dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
   }

   for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
      src_flags |= pImageMemoryBarriers[i].srcAccessMask;
      dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
      ANV_FROM_HANDLE(anv_image, image, pImageMemoryBarriers[i].image);
      const VkImageSubresourceRange *range =
         &pImageMemoryBarriers[i].subresourceRange;

      if (range->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
         transition_depth_buffer(cmd_buffer, image,
                                 pImageMemoryBarriers[i].oldLayout,
                                 pImageMemoryBarriers[i].newLayout);
      } else if (range->aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
         transition_color_buffer(cmd_buffer, image,
                                 range->baseMipLevel,
                                 anv_get_levelCount(image, range),
                                 range->baseArrayLayer,
                                 anv_get_layerCount(image, range),
                                 pImageMemoryBarriers[i].oldLayout,
                                 pImageMemoryBarriers[i].newLayout);
      }
   }

   cmd_buffer->state.pending_pipe_bits |=
      anv_pipe_flush_bits_for_access_flags(src_flags) |
      anv_pipe_invalidate_bits_for_access_flags(dst_flags);
}

static void
cmd_buffer_alloc_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   VkShaderStageFlags stages = cmd_buffer->state.pipeline->active_stages;

   /* In order to avoid thrash, we assume that vertex and fragment stages
    * always exist. In the rare case where one is missing *and* the other
    * uses push constants, this may be suboptimal. However, avoiding stalls
    * seems more important.
    */
   stages |= VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_VERTEX_BIT;

   if (stages == cmd_buffer->state.push_constant_stages)
      return;

#if GEN_GEN >= 8
   const unsigned push_constant_kb = 32;
#elif GEN_IS_HASWELL
   const unsigned push_constant_kb = cmd_buffer->device->info.gt == 3 ? 32 : 16;
#else
   const unsigned push_constant_kb = 16;
#endif

   const unsigned num_stages =
      _mesa_bitcount(stages & VK_SHADER_STAGE_ALL_GRAPHICS);
   unsigned size_per_stage = push_constant_kb / num_stages;

   /* Broadwell+ and Haswell gt3 require that the push constant sizes be in
    * units of 2KB. Incidentally, these are the same platforms that have
    * 32KB worth of push constant space.
    */
   if (push_constant_kb == 32)
      size_per_stage &= ~1u;

   uint32_t kb_used = 0;
   for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_FRAGMENT; i++) {
      unsigned push_size = (stages & (1 << i)) ? size_per_stage : 0;
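      /* The 3DSTATE_PUSH_CONSTANT_ALLOC_* sub-opcodes for the graphics
       * stages are sequential starting at 18 (VS), so patching the
       * sub-opcode lets us reuse the VS packet for each stage.
       */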
      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
         alloc._3DCommandSubOpcode  = 18 + i;
         alloc.ConstantBufferOffset = (push_size > 0) ? kb_used : 0;
         alloc.ConstantBufferSize   = push_size;
      }
      kb_used += push_size;
   }

   anv_batch_emit(&cmd_buffer->batch,
                  GENX(3DSTATE_PUSH_CONSTANT_ALLOC_PS), alloc) {
      alloc.ConstantBufferOffset = kb_used;
      alloc.ConstantBufferSize = push_constant_kb - kb_used;
   }

   cmd_buffer->state.push_constant_stages = stages;

   /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
    *
    *    "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
    *    the next 3DPRIMITIVE command after programming the
    *    3DSTATE_PUSH_CONSTANT_ALLOC_VS"
    *
    * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
    * pipeline setup, we need to dirty push constants.
    */
   cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
}

static VkResult
emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
                   gl_shader_stage stage,
                   struct anv_state *bt_state)
{
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   struct anv_pipeline *pipeline;
   uint32_t bias, state_offset;

   switch (stage) {
   case MESA_SHADER_COMPUTE:
      pipeline = cmd_buffer->state.compute_pipeline;
      bias = 1;
      break;
   default:
      pipeline = cmd_buffer->state.pipeline;
      bias = 0;
      break;
   }

   if (!anv_pipeline_has_stage(pipeline, stage)) {
      *bt_state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map;
   if (bias + map->surface_count == 0) {
      *bt_state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
                                                  bias + map->surface_count,
                                                  &state_offset);
   uint32_t *bt_map = bt_state->map;

   if (bt_state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   if (stage == MESA_SHADER_COMPUTE &&
       get_cs_prog_data(cmd_buffer->state.compute_pipeline)->uses_num_work_groups) {
      struct anv_bo *bo = cmd_buffer->state.num_workgroups_bo;
      uint32_t bo_offset = cmd_buffer->state.num_workgroups_offset;

      struct anv_state surface_state;
      surface_state =
         anv_cmd_buffer_alloc_surface_state(cmd_buffer);

1522 const enum isl_format format =
1523 anv_isl_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
1524 anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
1525 format, bo_offset, 12, 1);
1526
1527 bt_map[0] = surface_state.offset + state_offset;
1528 add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
1529 }
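/* Note on the "bias" above: compute reserves binding-table slot 0 so that
 * the gl_NumWorkGroups buffer (three uint32_t dispatch dimensions, hence
 * the 12-byte surface range) always lives at a fixed index; the shader's
 * own surfaces then start at bt_map[bias + s] below.
 */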
1530
1531 if (map->surface_count == 0)
1532 goto out;
1533
1534 if (map->image_count > 0) {
1535 VkResult result =
1536 anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images);
1537 if (result != VK_SUCCESS)
1538 return result;
1539
1540 cmd_buffer->state.push_constants_dirty |= 1 << stage;
1541 }
1542
1543 uint32_t image = 0;
1544 for (uint32_t s = 0; s < map->surface_count; s++) {
1545 struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];
1546
1547 struct anv_state surface_state;
1548
1549 if (binding->set == ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS) {
1550 /* Color attachment binding */
1551 assert(stage == MESA_SHADER_FRAGMENT);
1552 assert(binding->binding == 0);
1553 if (binding->index < subpass->color_count) {
1554 const unsigned att =
1555 subpass->color_attachments[binding->index].attachment;
1556
1557 /* From the Vulkan 1.0.46 spec:
1558 *
1559 * "If any color or depth/stencil attachments are
1560 * VK_ATTACHMENT_UNUSED, then no writes occur for those
1561 * attachments."
1562 */
1563 if (att == VK_ATTACHMENT_UNUSED) {
1564 surface_state = cmd_buffer->state.null_surface_state;
1565 } else {
1566 surface_state = cmd_buffer->state.attachments[att].color_rt_state;
1567 }
1568 } else {
1569 surface_state = cmd_buffer->state.null_surface_state;
1570 }
1571
1572 bt_map[bias + s] = surface_state.offset + state_offset;
1573 continue;
1574 }
1575
1576 struct anv_descriptor_set *set =
1577 cmd_buffer->state.descriptors[binding->set];
1578 uint32_t offset = set->layout->binding[binding->binding].descriptor_index;
1579 struct anv_descriptor *desc = &set->descriptors[offset + binding->index];
1580
1581 switch (desc->type) {
1582 case VK_DESCRIPTOR_TYPE_SAMPLER:
1583 /* Nothing for us to do here */
1584 continue;
1585
1586 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1587 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: {
1588 enum isl_aux_usage aux_usage;
1589 if (desc->layout == VK_IMAGE_LAYOUT_GENERAL) {
1590 surface_state = desc->image_view->general_sampler_surface_state;
1591 aux_usage = desc->image_view->general_sampler_aux_usage;
1592 } else {
1593 surface_state = desc->image_view->optimal_sampler_surface_state;
1594 aux_usage = desc->image_view->optimal_sampler_aux_usage;
1595 }
1596 assert(surface_state.alloc_size);
1597 add_image_relocs(cmd_buffer, desc->image_view->image,
1598 desc->image_view->aspect_mask,
1599 aux_usage, surface_state);
1600 break;
1601 }
1602 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1603 assert(stage == MESA_SHADER_FRAGMENT);
1604 if (desc->image_view->aspect_mask != VK_IMAGE_ASPECT_COLOR_BIT) {
1605 /* For depth and stencil input attachments, we treat them like any
1606 * other texture that the user may have bound.
1607 */
1608 enum isl_aux_usage aux_usage;
1609 if (desc->layout == VK_IMAGE_LAYOUT_GENERAL) {
1610 surface_state = desc->image_view->general_sampler_surface_state;
1611 aux_usage = desc->image_view->general_sampler_aux_usage;
1612 } else {
1613 surface_state = desc->image_view->optimal_sampler_surface_state;
1614 aux_usage = desc->image_view->optimal_sampler_aux_usage;
1615 }
1616 assert(surface_state.alloc_size);
1617 add_image_relocs(cmd_buffer, desc->image_view->image,
1618 desc->image_view->aspect_mask,
1619 aux_usage, surface_state);
1620 } else {
1621 /* For color input attachments, we create the surface state at
1622 * vkBeginRenderPass time so that we can include aux and clear
1623 * color information.
1624 */
1625 assert(binding->input_attachment_index < subpass->input_count);
1626 const unsigned subpass_att = binding->input_attachment_index;
1627 const unsigned att = subpass->input_attachments[subpass_att].attachment;
1628 surface_state = cmd_buffer->state.attachments[att].input_att_state;
1629 }
1630 break;
1631
1632 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
1633 surface_state = (binding->write_only)
1634 ? desc->image_view->writeonly_storage_surface_state
1635 : desc->image_view->storage_surface_state;
1636 assert(surface_state.alloc_size);
1637 add_image_relocs(cmd_buffer, desc->image_view->image,
1638 desc->image_view->aspect_mask,
1639 desc->image_view->image->aux_usage, surface_state);
1640
1641 struct brw_image_param *image_param =
1642 &cmd_buffer->state.push_constants[stage]->images[image++];
1643
1644 *image_param = desc->image_view->storage_image_param;
1645 image_param->surface_idx = bias + s;
1646 break;
1647 }
1648
1649 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1650 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1651 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1652 surface_state = desc->buffer_view->surface_state;
1653 assert(surface_state.alloc_size);
1654 add_surface_state_reloc(cmd_buffer, surface_state,
1655 desc->buffer_view->bo,
1656 desc->buffer_view->offset);
1657 break;
1658
1659 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1660 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
1661 uint32_t dynamic_offset_idx =
1662 pipeline->layout->set[binding->set].dynamic_offset_start +
1663 set->layout->binding[binding->binding].dynamic_offset_index +
1664 binding->index;
1665
1666 /* Compute the offset within the buffer */
1667 uint64_t offset = desc->offset +
1668 cmd_buffer->state.dynamic_offsets[dynamic_offset_idx];
1669 /* Clamp to the buffer size */
1670 offset = MIN2(offset, desc->buffer->size);
1671 /* Clamp the range to the buffer size */
1672 uint32_t range = MIN2(desc->range, desc->buffer->size - offset);
1673
1674 surface_state =
1675 anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
1676 enum isl_format format =
1677 anv_isl_format_for_descriptor_type(desc->type);
1678
1679 anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
1680 format, offset, range, 1);
1681 add_surface_state_reloc(cmd_buffer, surface_state,
1682 desc->buffer->bo,
1683 desc->buffer->offset + offset);
1684 break;
1685 }
1686
1687 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1688 surface_state = (binding->write_only)
1689 ? desc->buffer_view->writeonly_storage_surface_state
1690 : desc->buffer_view->storage_surface_state;
1691 assert(surface_state.alloc_size);
1692 add_surface_state_reloc(cmd_buffer, surface_state,
1693 desc->buffer_view->bo,
1694 desc->buffer_view->offset);
1695
1696 struct brw_image_param *image_param =
1697 &cmd_buffer->state.push_constants[stage]->images[image++];
1698
1699 *image_param = desc->buffer_view->storage_image_param;
1700 image_param->surface_idx = bias + s;
1701 break;
1702
1703 default:
1704 assert(!"Invalid descriptor type");
1705 continue;
1706 }
1707
1708 bt_map[bias + s] = surface_state.offset + state_offset;
1709 }
1710 assert(image == map->image_count);
1711
1712 out:
1713 anv_state_flush(cmd_buffer->device, *bt_state);
1714
1715 return VK_SUCCESS;
1716 }
1717
1718 static VkResult
1719 emit_samplers(struct anv_cmd_buffer *cmd_buffer,
1720 gl_shader_stage stage,
1721 struct anv_state *state)
1722 {
1723 struct anv_pipeline *pipeline;
1724
1725 if (stage == MESA_SHADER_COMPUTE)
1726 pipeline = cmd_buffer->state.compute_pipeline;
1727 else
1728 pipeline = cmd_buffer->state.pipeline;
1729
1730 if (!anv_pipeline_has_stage(pipeline, stage)) {
1731 *state = (struct anv_state) { 0, };
1732 return VK_SUCCESS;
1733 }
1734
1735 struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map;
1736 if (map->sampler_count == 0) {
1737 *state = (struct anv_state) { 0, };
1738 return VK_SUCCESS;
1739 }
1740
1741 uint32_t size = map->sampler_count * 16;
1742 *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);
1743
1744 if (state->map == NULL)
1745 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
1746
1747 for (uint32_t s = 0; s < map->sampler_count; s++) {
1748 struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s];
1749 struct anv_descriptor_set *set =
1750 cmd_buffer->state.descriptors[binding->set];
1751 uint32_t offset = set->layout->binding[binding->binding].descriptor_index;
1752 struct anv_descriptor *desc = &set->descriptors[offset + binding->index];
1753
1754 if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
1755 desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
1756 continue;
1757
1758 struct anv_sampler *sampler = desc->sampler;
1759
1760 /* An unfilled slot can look like a sampler descriptor because
1761 * VK_DESCRIPTOR_TYPE_SAMPLER happens to be zero; skip those.
1762 */
1763 if (sampler == NULL)
1764 continue;
1765
1766 memcpy(state->map + (s * 16),
1767 sampler->state, sizeof(sampler->state));
1768 }
1769
1770 anv_state_flush(cmd_buffer->device, *state);
1771
1772 return VK_SUCCESS;
1773 }
1774
1775 static uint32_t
1776 flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
1777 {
1778 VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
1779 cmd_buffer->state.pipeline->active_stages;
1780
1781 VkResult result = VK_SUCCESS;
1782 anv_foreach_stage(s, dirty) {
1783 result = emit_samplers(cmd_buffer, s, &cmd_buffer->state.samplers[s]);
1784 if (result != VK_SUCCESS)
1785 break;
1786 result = emit_binding_table(cmd_buffer, s,
1787 &cmd_buffer->state.binding_tables[s]);
1788 if (result != VK_SUCCESS)
1789 break;
1790 }
1791
1792 if (result != VK_SUCCESS) {
1793 assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);
1794
1795 result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
1796 if (result != VK_SUCCESS)
1797 return 0;
1798
1799 /* Re-emit state base addresses so we get the new surface state base
1800 * address before we start emitting binding tables etc.
1801 */
1802 genX(cmd_buffer_emit_state_base_address)(cmd_buffer);
1803
1804 /* Re-emit all active binding tables */
1805 dirty |= cmd_buffer->state.pipeline->active_stages;
1806 anv_foreach_stage(s, dirty) {
1807 result = emit_samplers(cmd_buffer, s, &cmd_buffer->state.samplers[s]);
1808 if (result != VK_SUCCESS) {
1809 anv_batch_set_error(&cmd_buffer->batch, result);
1810 return 0;
1811 }
1812 result = emit_binding_table(cmd_buffer, s,
1813 &cmd_buffer->state.binding_tables[s]);
1814 if (result != VK_SUCCESS) {
1815 anv_batch_set_error(&cmd_buffer->batch, result);
1816 return 0;
1817 }
1818 }
1819 }
1820
1821 cmd_buffer->state.descriptors_dirty &= ~dirty;
1822
1823 return dirty;
1824 }
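/* The out-of-memory recovery above amounts to (sketch):
 *
 *    if (any emit fails with VK_ERROR_OUT_OF_DEVICE_MEMORY) {
 *       grab a fresh binding table block;
 *       re-emit STATE_BASE_ADDRESS;      // pick up the new surface base
 *       re-emit samplers and binding tables for all active stages;
 *    }
 *
 * A failure on the retry is treated as fatal and poisons the batch via
 * anv_batch_set_error().
 */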
1825
1826 static void
1827 cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,
1828 uint32_t stages)
1829 {
1830 static const uint32_t sampler_state_opcodes[] = {
1831 [MESA_SHADER_VERTEX] = 43,
1832 [MESA_SHADER_TESS_CTRL] = 44, /* HS */
1833 [MESA_SHADER_TESS_EVAL] = 45, /* DS */
1834 [MESA_SHADER_GEOMETRY] = 46,
1835 [MESA_SHADER_FRAGMENT] = 47,
1836 [MESA_SHADER_COMPUTE] = 0,
1837 };
1838
1839 static const uint32_t binding_table_opcodes[] = {
1840 [MESA_SHADER_VERTEX] = 38,
1841 [MESA_SHADER_TESS_CTRL] = 39,
1842 [MESA_SHADER_TESS_EVAL] = 40,
1843 [MESA_SHADER_GEOMETRY] = 41,
1844 [MESA_SHADER_FRAGMENT] = 42,
1845 [MESA_SHADER_COMPUTE] = 0,
1846 };
1847
1848 anv_foreach_stage(s, stages) {
1849 if (cmd_buffer->state.samplers[s].alloc_size > 0) {
1850 anv_batch_emit(&cmd_buffer->batch,
1851 GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ssp) {
1852 ssp._3DCommandSubOpcode = sampler_state_opcodes[s];
1853 ssp.PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset;
1854 }
1855 }
1856
1857 /* Always emit binding table pointers if we're asked to, since on SKL
1858 * this is what flushes push constants. */
1859 anv_batch_emit(&cmd_buffer->batch,
1860 GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), btp) {
1861 btp._3DCommandSubOpcode = binding_table_opcodes[s];
1862 btp.PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset;
1863 }
1864 }
1865 }
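/* The *_VS packet forms are reused for every stage above: the per-stage
 * variants share one layout and differ only in _3DCommandSubOpcode, so a
 * single pack function plus the opcode tables covers VS/HS/DS/GS/PS.
 */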
1866
1867 static uint32_t
1868 cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
1869 {
1870 static const uint32_t push_constant_opcodes[] = {
1871 [MESA_SHADER_VERTEX] = 21,
1872 [MESA_SHADER_TESS_CTRL] = 25, /* HS */
1873 [MESA_SHADER_TESS_EVAL] = 26, /* DS */
1874 [MESA_SHADER_GEOMETRY] = 22,
1875 [MESA_SHADER_FRAGMENT] = 23,
1876 [MESA_SHADER_COMPUTE] = 0,
1877 };
1878
1879 VkShaderStageFlags flushed = 0;
1880
1881 anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
1882 if (stage == MESA_SHADER_COMPUTE)
1883 continue;
1884
1885 struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);
1886
1887 if (state.offset == 0) {
1888 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c)
1889 c._3DCommandSubOpcode = push_constant_opcodes[stage];
1890 } else {
1891 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c) {
1892 c._3DCommandSubOpcode = push_constant_opcodes[stage];
1893 c.ConstantBody = (struct GENX(3DSTATE_CONSTANT_BODY)) {
1894 #if GEN_GEN >= 9
1895 .Buffer[2] = { &cmd_buffer->device->dynamic_state_pool.block_pool.bo, state.offset },
1896 .ReadLength[2] = DIV_ROUND_UP(state.alloc_size, 32),
1897 #else
1898 .Buffer[0] = { .offset = state.offset },
1899 .ReadLength[0] = DIV_ROUND_UP(state.alloc_size, 32),
1900 #endif
1901 };
1902 }
1903 }
1904
1905 flushed |= mesa_to_vk_shader_stage(stage);
1906 }
1907
1908 cmd_buffer->state.push_constants_dirty &= ~VK_SHADER_STAGE_ALL_GRAPHICS;
1909
1910 return flushed;
1911 }
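/* Note: the ConstantBody read lengths above are in 256-bit (32-byte)
 * units, hence DIV_ROUND_UP(state.alloc_size, 32).  On gen9+ the push
 * range is passed as a full address in Buffer[2]; earlier gens take a
 * dynamic-state-relative offset in Buffer[0], which is why no BO is
 * given there.
 */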
1912
1913 void
1914 genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
1915 {
1916 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
1917 uint32_t *p;
1918
1919 uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;
1920
1921 assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);
1922
1923 genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->urb.l3_config);
1924
1925 genX(flush_pipeline_select_3d)(cmd_buffer);
1926
1927 if (vb_emit) {
1928 const uint32_t num_buffers = __builtin_popcount(vb_emit);
1929 const uint32_t num_dwords = 1 + num_buffers * 4;
1930
1931 p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
1932 GENX(3DSTATE_VERTEX_BUFFERS));
1933 uint32_t vb, i = 0;
1934 for_each_bit(vb, vb_emit) {
1935 struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
1936 uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;
1937
1938 struct GENX(VERTEX_BUFFER_STATE) state = {
1939 .VertexBufferIndex = vb,
1940
1941 #if GEN_GEN >= 8
1942 .MemoryObjectControlState = GENX(MOCS),
1943 #else
1944 .BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA,
1945 /* Our implementation of VK_KHR_multiview uses instancing to draw
1946 * the different views. If the client asks for instancing, we
1947 * need to use the Instance Data Step Rate to ensure that we
1948 * repeat the client's per-instance data once for each view.
1949 */
1950 .InstanceDataStepRate = anv_subpass_view_count(pipeline->subpass),
1951 .VertexBufferMemoryObjectControlState = GENX(MOCS),
1952 #endif
1953
1954 .AddressModifyEnable = true,
1955 .BufferPitch = pipeline->binding_stride[vb],
1956 .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
1957
1958 #if GEN_GEN >= 8
1959 .BufferSize = buffer->size - offset
1960 #else
1961 .EndAddress = { buffer->bo, buffer->offset + buffer->size - 1},
1962 #endif
1963 };
1964
1965 GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
1966 i++;
1967 }
1968 }
1969
1970 cmd_buffer->state.vb_dirty &= ~vb_emit;
1971
1972 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
1973 anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
1974
1975 /* The exact descriptor layout is pulled from the pipeline, so we need
1976 * to re-emit binding tables on every pipeline change.
1977 */
1978 cmd_buffer->state.descriptors_dirty |=
1979 cmd_buffer->state.pipeline->active_stages;
1980
1981 /* If the pipeline changed, we may need to re-allocate push constant
1982 * space in the URB.
1983 */
1984 cmd_buffer_alloc_push_constants(cmd_buffer);
1985 }
1986
1987 #if GEN_GEN <= 7
1988 if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
1989 cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
1990 /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
1991 *
1992 * "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
1993 * stall needs to be sent just prior to any 3DSTATE_VS,
1994 * 3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
1995 * 3DSTATE_BINDING_TABLE_POINTER_VS,
1996 * 3DSTATE_SAMPLER_STATE_POINTER_VS command. Only one
1997 * PIPE_CONTROL needs to be sent before any combination of VS
1998 * associated 3DSTATE."
1999 */
2000 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
2001 pc.DepthStallEnable = true;
2002 pc.PostSyncOperation = WriteImmediateData;
2003 pc.Address =
2004 (struct anv_address) { &cmd_buffer->device->workaround_bo, 0 };
2005 }
2006 }
2007 #endif
2008
2009 /* Render targets live in the same binding table as fragment descriptors */
2010 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_RENDER_TARGETS)
2011 cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
2012
2013 /* We emit the binding tables and sampler tables first, then emit push
2014 * constants and then finally emit binding table and sampler table
2015 * pointers. It has to happen in this order, since emitting the binding
2016 * tables may change the push constants (in case of storage images). After
2017 * emitting push constants, on SKL+ we have to emit the corresponding
2018 * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
2019 */
2020 uint32_t dirty = 0;
2021 if (cmd_buffer->state.descriptors_dirty)
2022 dirty = flush_descriptor_sets(cmd_buffer);
2023
2024 if (cmd_buffer->state.push_constants_dirty) {
2025 #if GEN_GEN >= 9
2026 /* On Sky Lake and later, the binding table pointers commands are
2027 * what actually flush the changes to push constant state so we need
2028 * to dirty them so they get re-emitted below.
2029 */
2030 dirty |= cmd_buffer_flush_push_constants(cmd_buffer);
2031 #else
2032 cmd_buffer_flush_push_constants(cmd_buffer);
2033 #endif
2034 }
2035
2036 if (dirty)
2037 cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);
2038
2039 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
2040 gen8_cmd_buffer_emit_viewport(cmd_buffer);
2041
2042 if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_VIEWPORT |
2043 ANV_CMD_DIRTY_PIPELINE)) {
2044 gen8_cmd_buffer_emit_depth_viewport(cmd_buffer,
2045 pipeline->depth_clamp_enable);
2046 }
2047
2048 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
2049 gen7_cmd_buffer_emit_scissor(cmd_buffer);
2050
2051 genX(cmd_buffer_flush_dynamic_state)(cmd_buffer);
2052
2053 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
2054 }
2055
2056 static void
2057 emit_vertex_bo(struct anv_cmd_buffer *cmd_buffer,
2058 struct anv_bo *bo, uint32_t offset,
2059 uint32_t size, uint32_t index)
2060 {
2061 uint32_t *p = anv_batch_emitn(&cmd_buffer->batch, 5,
2062 GENX(3DSTATE_VERTEX_BUFFERS));
2063
2064 GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1,
2065 &(struct GENX(VERTEX_BUFFER_STATE)) {
2066 .VertexBufferIndex = index,
2067 .AddressModifyEnable = true,
2068 .BufferPitch = 0,
2069 #if (GEN_GEN >= 8)
2070 .MemoryObjectControlState = GENX(MOCS),
2071 .BufferStartingAddress = { bo, offset },
2072 .BufferSize = size
2073 #else
2074 .VertexBufferMemoryObjectControlState = GENX(MOCS),
2075 .BufferStartingAddress = { bo, offset },
2076 .EndAddress = { bo, offset + size },
2077 #endif
2078 });
2079 }
2080
2081 static void
2082 emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
2083 struct anv_bo *bo, uint32_t offset)
2084 {
2085 emit_vertex_bo(cmd_buffer, bo, offset, 8, ANV_SVGS_VB_INDEX);
2086 }
2087
2088 static void
2089 emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer,
2090 uint32_t base_vertex, uint32_t base_instance)
2091 {
2092 struct anv_state id_state =
2093 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 8, 4);
2094
2095 ((uint32_t *)id_state.map)[0] = base_vertex;
2096 ((uint32_t *)id_state.map)[1] = base_instance;
2097
2098 anv_state_flush(cmd_buffer->device, id_state);
2099
2100 emit_base_vertex_instance_bo(cmd_buffer,
2101 &cmd_buffer->device->dynamic_state_pool.block_pool.bo, id_state.offset);
2102 }
2103
2104 static void
2105 emit_draw_index(struct anv_cmd_buffer *cmd_buffer, uint32_t draw_index)
2106 {
2107 struct anv_state state =
2108 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 4, 4);
2109
2110 ((uint32_t *)state.map)[0] = draw_index;
2111
2112 anv_state_flush(cmd_buffer->device, state);
2113
2114 emit_vertex_bo(cmd_buffer,
2115 &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
2116 state.offset, 4, ANV_DRAWID_VB_INDEX);
2117 }
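/* Both helpers above feed shader system values through hidden vertex
 * buffers: ANV_SVGS_VB_INDEX carries { base_vertex, base_instance } (8
 * bytes) and ANV_DRAWID_VB_INDEX carries the 4-byte draw index.  The
 * compiled VS reads these when vs_prog_data->uses_basevertex /
 * uses_baseinstance / uses_drawid are set.
 */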
2118
2119 void genX(CmdDraw)(
2120 VkCommandBuffer commandBuffer,
2121 uint32_t vertexCount,
2122 uint32_t instanceCount,
2123 uint32_t firstVertex,
2124 uint32_t firstInstance)
2125 {
2126 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
2127 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
2128 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
2129
2130 if (anv_batch_has_error(&cmd_buffer->batch))
2131 return;
2132
2133 genX(cmd_buffer_flush_state)(cmd_buffer);
2134
2135 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
2136 emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);
2137 if (vs_prog_data->uses_drawid)
2138 emit_draw_index(cmd_buffer, 0);
2139
2140 /* Our implementation of VK_KHR_multiview uses instancing to draw the
2141 * different views. We need to multiply instanceCount by the view count.
2142 */
2143 instanceCount *= anv_subpass_view_count(cmd_buffer->state.subpass);
2144
2145 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
2146 prim.VertexAccessType = SEQUENTIAL;
2147 prim.PrimitiveTopologyType = pipeline->topology;
2148 prim.VertexCountPerInstance = vertexCount;
2149 prim.StartVertexLocation = firstVertex;
2150 prim.InstanceCount = instanceCount;
2151 prim.StartInstanceLocation = firstInstance;
2152 prim.BaseVertexLocation = 0;
2153 }
2154 }
2155
2156 void genX(CmdDrawIndexed)(
2157 VkCommandBuffer commandBuffer,
2158 uint32_t indexCount,
2159 uint32_t instanceCount,
2160 uint32_t firstIndex,
2161 int32_t vertexOffset,
2162 uint32_t firstInstance)
2163 {
2164 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
2165 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
2166 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
2167
2168 if (anv_batch_has_error(&cmd_buffer->batch))
2169 return;
2170
2171 genX(cmd_buffer_flush_state)(cmd_buffer);
2172
2173 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
2174 emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance);
2175 if (vs_prog_data->uses_drawid)
2176 emit_draw_index(cmd_buffer, 0);
2177
2178 /* Our implementation of VK_KHR_multiview uses instancing to draw the
2179 * different views. We need to multiply instanceCount by the view count.
2180 */
2181 instanceCount *= anv_subpass_view_count(cmd_buffer->state.subpass);
2182
2183 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
2184 prim.VertexAccessType = RANDOM;
2185 prim.PrimitiveTopologyType = pipeline->topology;
2186 prim.VertexCountPerInstance = indexCount;
2187 prim.StartVertexLocation = firstIndex;
2188 prim.InstanceCount = instanceCount;
2189 prim.StartInstanceLocation = firstInstance;
2190 prim.BaseVertexLocation = vertexOffset;
2191 }
2192 }
2193
2194 /* Auto-Draw / Indirect Registers */
2195 #define GEN7_3DPRIM_END_OFFSET 0x2420
2196 #define GEN7_3DPRIM_START_VERTEX 0x2430
2197 #define GEN7_3DPRIM_VERTEX_COUNT 0x2434
2198 #define GEN7_3DPRIM_INSTANCE_COUNT 0x2438
2199 #define GEN7_3DPRIM_START_INSTANCE 0x243C
2200 #define GEN7_3DPRIM_BASE_VERTEX 0x2440
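/* These are MMIO offsets of the command streamer's auto-draw registers.
 * With IndirectParameterEnable set, 3DPRIMITIVE sources its parameters
 * from them, so indirect draws reduce to MI_LOAD_REGISTER_MEM-ing the
 * VkDrawIndirectCommand fields into the right registers (see
 * load_indirect_parameters() below).
 */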
2201
2202 /* MI_MATH only exists on Haswell+ */
2203 #if GEN_IS_HASWELL || GEN_GEN >= 8
2204
2205 static uint32_t
2206 mi_alu(uint32_t opcode, uint32_t op1, uint32_t op2)
2207 {
2208 struct GENX(MI_MATH_ALU_INSTRUCTION) instr = {
2209 .ALUOpcode = opcode,
2210 .Operand1 = op1,
2211 .Operand2 = op2,
2212 };
2213
2214 uint32_t dw;
2215 GENX(MI_MATH_ALU_INSTRUCTION_pack)(NULL, &dw, &instr);
2216
2217 return dw;
2218 }
2219
2220 #define CS_GPR(n) (0x2600 + (n) * 8)
2221
2222 /* Emit dwords to multiply GPR0 by N */
2223 static void
2224 build_alu_multiply_gpr0(uint32_t *dw, unsigned *dw_count, uint32_t N)
2225 {
2226 VK_OUTARRAY_MAKE(out, dw, dw_count);
2227
2228 #define append_alu(opcode, operand1, operand2) \
2229 vk_outarray_append(&out, alu_dw) *alu_dw = mi_alu(opcode, operand1, operand2)
2230
2231 assert(N > 0);
2232 unsigned top_bit = 31 - __builtin_clz(N);
2233 for (int i = top_bit - 1; i >= 0; i--) {
2234 /* We get our initial data in GPR0 and we write the final data out to
2235 * GPR0 but we use GPR1 as our scratch register.
2236 */
2237 unsigned src_reg = i == top_bit - 1 ? MI_ALU_REG0 : MI_ALU_REG1;
2238 unsigned dst_reg = i == 0 ? MI_ALU_REG0 : MI_ALU_REG1;
2239
2240 /* Shift the current value left by 1 */
2241 append_alu(MI_ALU_LOAD, MI_ALU_SRCA, src_reg);
2242 append_alu(MI_ALU_LOAD, MI_ALU_SRCB, src_reg);
2243 append_alu(MI_ALU_ADD, 0, 0);
2244
2245 if (N & (1 << i)) {
2246 /* Store ACCU to R1 and add R0 to R1 */
2247 append_alu(MI_ALU_STORE, MI_ALU_REG1, MI_ALU_ACCU);
2248 append_alu(MI_ALU_LOAD, MI_ALU_SRCA, MI_ALU_REG0);
2249 append_alu(MI_ALU_LOAD, MI_ALU_SRCB, MI_ALU_REG1);
2250 append_alu(MI_ALU_ADD, 0, 0);
2251 }
2252
2253 append_alu(MI_ALU_STORE, dst_reg, MI_ALU_ACCU);
2254 }
2255
2256 #undef append_alu
2257 }
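/* Worked example (a sketch of the emitted ALU program): N = 6 = 0b110,
 * so top_bit = 2 and the loop runs i = 1, 0:
 *
 *    i = 1:  ACCU = R0 + R0              // 2x (shift left by 1)
 *            bit 1 set:  R1 = ACCU       // 2x
 *                        ACCU = R0 + R1  // 3x
 *            R1 = ACCU                   // R1 = 3x
 *    i = 0:  ACCU = R1 + R1              // 6x
 *            bit 0 clear: no add
 *            R0 = ACCU                   // result: GPR0 = 6 * input
 *
 * i.e. standard binary double-and-add, evaluated from the second-highest
 * set bit down.
 */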
2258
2259 static void
2260 emit_mul_gpr0(struct anv_batch *batch, uint32_t N)
2261 {
2262 uint32_t num_dwords;
2263 build_alu_multiply_gpr0(NULL, &num_dwords, N);
2264
2265 uint32_t *dw = anv_batch_emitn(batch, 1 + num_dwords, GENX(MI_MATH));
2266 build_alu_multiply_gpr0(dw + 1, &num_dwords, N);
2267 }
2268
2269 #endif /* GEN_IS_HASWELL || GEN_GEN >= 8 */
2270
2271 static void
2272 load_indirect_parameters(struct anv_cmd_buffer *cmd_buffer,
2273 struct anv_buffer *buffer, uint64_t offset,
2274 bool indexed)
2275 {
2276 struct anv_batch *batch = &cmd_buffer->batch;
2277 struct anv_bo *bo = buffer->bo;
2278 uint32_t bo_offset = buffer->offset + offset;
2279
2280 emit_lrm(batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
2281
2282 unsigned view_count = anv_subpass_view_count(cmd_buffer->state.subpass);
2283 if (view_count > 1) {
2284 #if GEN_IS_HASWELL || GEN_GEN >= 8
2285 emit_lrm(batch, CS_GPR(0), bo, bo_offset + 4);
2286 emit_mul_gpr0(batch, view_count);
2287 emit_lrr(batch, GEN7_3DPRIM_INSTANCE_COUNT, CS_GPR(0));
2288 #else
2289 anv_finishme("Multiview + indirect draw requires MI_MATH\n"
2290 "MI_MATH is not supported on Ivy Bridge");
2291 emit_lrm(batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
2292 #endif
2293 } else {
2294 emit_lrm(batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
2295 }
2296
2297 emit_lrm(batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
2298
2299 if (indexed) {
2300 emit_lrm(batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
2301 emit_lrm(batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
2302 } else {
2303 emit_lrm(batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
2304 emit_lri(batch, GEN7_3DPRIM_BASE_VERTEX, 0);
2305 }
2306 }
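/* For reference, the offsets above follow the indirect command layouts
 * from the Vulkan spec:
 *
 *    typedef struct VkDrawIndirectCommand {
 *       uint32_t vertexCount;     // +0  -> 3DPRIM_VERTEX_COUNT
 *       uint32_t instanceCount;   // +4  -> 3DPRIM_INSTANCE_COUNT
 *       uint32_t firstVertex;     // +8  -> 3DPRIM_START_VERTEX
 *       uint32_t firstInstance;   // +12 -> 3DPRIM_START_INSTANCE
 *    } VkDrawIndirectCommand;
 *
 *    typedef struct VkDrawIndexedIndirectCommand {
 *       uint32_t indexCount;      // +0  -> 3DPRIM_VERTEX_COUNT
 *       uint32_t instanceCount;   // +4  -> 3DPRIM_INSTANCE_COUNT
 *       uint32_t firstIndex;      // +8  -> 3DPRIM_START_VERTEX
 *       int32_t  vertexOffset;    // +12 -> 3DPRIM_BASE_VERTEX
 *       uint32_t firstInstance;   // +16 -> 3DPRIM_START_INSTANCE
 *    } VkDrawIndexedIndirectCommand;
 */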
2307
2308 void genX(CmdDrawIndirect)(
2309 VkCommandBuffer commandBuffer,
2310 VkBuffer _buffer,
2311 VkDeviceSize offset,
2312 uint32_t drawCount,
2313 uint32_t stride)
2314 {
2315 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
2316 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
2317 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
2318 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
2319
2320 if (anv_batch_has_error(&cmd_buffer->batch))
2321 return;
2322
2323 genX(cmd_buffer_flush_state)(cmd_buffer);
2324
2325 for (uint32_t i = 0; i < drawCount; i++) {
2326 struct anv_bo *bo = buffer->bo;
2327 uint32_t bo_offset = buffer->offset + offset;
2328
2329 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
2330 emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);
2331 if (vs_prog_data->uses_drawid)
2332 emit_draw_index(cmd_buffer, i);
2333
2334 load_indirect_parameters(cmd_buffer, buffer, offset, false);
2335
2336 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
2337 prim.IndirectParameterEnable = true;
2338 prim.VertexAccessType = SEQUENTIAL;
2339 prim.PrimitiveTopologyType = pipeline->topology;
2340 }
2341
2342 offset += stride;
2343 }
2344 }
2345
2346 void genX(CmdDrawIndexedIndirect)(
2347 VkCommandBuffer commandBuffer,
2348 VkBuffer _buffer,
2349 VkDeviceSize offset,
2350 uint32_t drawCount,
2351 uint32_t stride)
2352 {
2353 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
2354 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
2355 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
2356 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
2357
2358 if (anv_batch_has_error(&cmd_buffer->batch))
2359 return;
2360
2361 genX(cmd_buffer_flush_state)(cmd_buffer);
2362
2363 for (uint32_t i = 0; i < drawCount; i++) {
2364 struct anv_bo *bo = buffer->bo;
2365 uint32_t bo_offset = buffer->offset + offset;
2366
2367 /* TODO: We need to stomp base vertex to 0 somehow */
2368 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
2369 emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);
2370 if (vs_prog_data->uses_drawid)
2371 emit_draw_index(cmd_buffer, i);
2372
2373 load_indirect_parameters(cmd_buffer, buffer, offset, true);
2374
2375 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
2376 prim.IndirectParameterEnable = true;
2377 prim.VertexAccessType = RANDOM;
2378 prim.PrimitiveTopologyType = pipeline->topology;
2379 }
2380
2381 offset += stride;
2382 }
2383 }
2384
2385 static VkResult
2386 flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
2387 {
2388 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
2389 struct anv_state surfaces = { 0, }, samplers = { 0, };
2390 VkResult result;
2391
2392 result = emit_binding_table(cmd_buffer, MESA_SHADER_COMPUTE, &surfaces);
2393 if (result != VK_SUCCESS) {
2394 assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);
2395
2396 result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
2397 if (result != VK_SUCCESS)
2398 return result;
2399
2400 /* Re-emit state base addresses so we get the new surface state base
2401 * address before we start emitting binding tables etc.
2402 */
2403 genX(cmd_buffer_emit_state_base_address)(cmd_buffer);
2404
2405 result = emit_binding_table(cmd_buffer, MESA_SHADER_COMPUTE, &surfaces);
2406 if (result != VK_SUCCESS) {
2407 anv_batch_set_error(&cmd_buffer->batch, result);
2408 return result;
2409 }
2410 }
2411
2412 result = emit_samplers(cmd_buffer, MESA_SHADER_COMPUTE, &samplers);
2413 if (result != VK_SUCCESS) {
2414 anv_batch_set_error(&cmd_buffer->batch, result);
2415 return result;
2416 }
2417
2418 uint32_t iface_desc_data_dw[GENX(INTERFACE_DESCRIPTOR_DATA_length)];
2419 struct GENX(INTERFACE_DESCRIPTOR_DATA) desc = {
2420 .BindingTablePointer = surfaces.offset,
2421 .SamplerStatePointer = samplers.offset,
2422 };
2423 GENX(INTERFACE_DESCRIPTOR_DATA_pack)(NULL, iface_desc_data_dw, &desc);
2424
2425 struct anv_state state =
2426 anv_cmd_buffer_merge_dynamic(cmd_buffer, iface_desc_data_dw,
2427 pipeline->interface_descriptor_data,
2428 GENX(INTERFACE_DESCRIPTOR_DATA_length),
2429 64);
2430
2431 uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
2432 anv_batch_emit(&cmd_buffer->batch,
2433 GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), mid) {
2434 mid.InterfaceDescriptorTotalLength = size;
2435 mid.InterfaceDescriptorDataStartAddress = state.offset;
2436 }
2437
2438 return VK_SUCCESS;
2439 }
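/* A sketch of the merge above: the pipeline pre-packs most of
 * INTERFACE_DESCRIPTOR_DATA at pipeline-creation time, and
 * anv_cmd_buffer_merge_dynamic() ORs those dwords with the
 * BindingTablePointer/SamplerStatePointer dwords packed here, producing
 * the final descriptor in dynamic state memory.
 */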
2440
2441 void
2442 genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
2443 {
2444 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
2445 MAYBE_UNUSED VkResult result;
2446
2447 assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);
2448
2449 genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->urb.l3_config);
2450
2451 genX(flush_pipeline_select_gpgpu)(cmd_buffer);
2452
2453 if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE) {
2454 /* From the Sky Lake PRM Vol 2a, MEDIA_VFE_STATE:
2455 *
2456 * "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
2457 * the only bits that are changed are scoreboard related: Scoreboard
2458 * Enable, Scoreboard Type, Scoreboard Mask, Scoreboard Delta. For
2459 * these scoreboard related states, a MEDIA_STATE_FLUSH is
2460 * sufficient."
2461 */
2462 cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_CS_STALL_BIT;
2463 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
2464
2465 anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
2466 }
2467
2468 if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
2469 (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
2470 /* FIXME: figure out descriptors for gen7 */
2471 result = flush_compute_descriptor_set(cmd_buffer);
2472 if (result != VK_SUCCESS)
2473 return;
2474
2475 cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
2476 }
2477
2478 if (cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_COMPUTE_BIT) {
2479 struct anv_state push_state =
2480 anv_cmd_buffer_cs_push_constants(cmd_buffer);
2481
2482 if (push_state.alloc_size) {
2483 anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) {
2484 curbe.CURBETotalDataLength = push_state.alloc_size;
2485 curbe.CURBEDataStartAddress = push_state.offset;
2486 }
2487 }
2488 }
2489
2490 cmd_buffer->state.compute_dirty = 0;
2491
2492 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
2493 }
2494
2495 #if GEN_GEN == 7
2496
2497 static VkResult
2498 verify_cmd_parser(const struct anv_device *device,
2499 int required_version,
2500 const char *function)
2501 {
2502 if (device->instance->physicalDevice.cmd_parser_version < required_version) {
2503 return vk_errorf(VK_ERROR_FEATURE_NOT_PRESENT,
2504 "cmd parser version %d is required for %s",
2505 required_version, function);
2506 } else {
2507 return VK_SUCCESS;
2508 }
2509 }
2510
2511 #endif
2512
2513 void genX(CmdDispatch)(
2514 VkCommandBuffer commandBuffer,
2515 uint32_t x,
2516 uint32_t y,
2517 uint32_t z)
2518 {
2519 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
2520 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
2521 const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
2522
2523 if (anv_batch_has_error(&cmd_buffer->batch))
2524 return;
2525
2526 if (prog_data->uses_num_work_groups) {
2527 struct anv_state state =
2528 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 12, 4);
2529 uint32_t *sizes = state.map;
2530 sizes[0] = x;
2531 sizes[1] = y;
2532 sizes[2] = z;
2533 anv_state_flush(cmd_buffer->device, state);
2534 cmd_buffer->state.num_workgroups_offset = state.offset;
2535 cmd_buffer->state.num_workgroups_bo =
2536 &cmd_buffer->device->dynamic_state_pool.block_pool.bo;
2537 }
2538
2539 genX(cmd_buffer_flush_compute_state)(cmd_buffer);
2540
2541 anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER), ggw) {
2542 ggw.SIMDSize = prog_data->simd_size / 16;
2543 ggw.ThreadDepthCounterMaximum = 0;
2544 ggw.ThreadHeightCounterMaximum = 0;
2545 ggw.ThreadWidthCounterMaximum = prog_data->threads - 1;
2546 ggw.ThreadGroupIDXDimension = x;
2547 ggw.ThreadGroupIDYDimension = y;
2548 ggw.ThreadGroupIDZDimension = z;
2549 ggw.RightExecutionMask = pipeline->cs_right_mask;
2550 ggw.BottomExecutionMask = 0xffffffff;
2551 }
2552
2553 anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH), msf);
2554 }
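/* Note: GPGPU_WALKER's SIMDSize field is an enum (0 = SIMD8, 1 = SIMD16,
 * 2 = SIMD32), which is why prog_data->simd_size (8, 16 or 32) is divided
 * by 16 above: 8/16 truncates to 0, 16/16 = 1 and 32/16 = 2.
 */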
2555
2556 #define GPGPU_DISPATCHDIMX 0x2500
2557 #define GPGPU_DISPATCHDIMY 0x2504
2558 #define GPGPU_DISPATCHDIMZ 0x2508
2559
2560 void genX(CmdDispatchIndirect)(
2561 VkCommandBuffer commandBuffer,
2562 VkBuffer _buffer,
2563 VkDeviceSize offset)
2564 {
2565 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
2566 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
2567 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
2568 const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
2569 struct anv_bo *bo = buffer->bo;
2570 uint32_t bo_offset = buffer->offset + offset;
2571 struct anv_batch *batch = &cmd_buffer->batch;
2572
2573 #if GEN_GEN == 7
2574 /* Linux 4.4 added command parser version 5 which allows the GPGPU
2575 * indirect dispatch registers to be written.
2576 */
2577 if (verify_cmd_parser(cmd_buffer->device, 5,
2578 "vkCmdDispatchIndirect") != VK_SUCCESS)
2579 return;
2580 #endif
2581
2582 if (prog_data->uses_num_work_groups) {
2583 cmd_buffer->state.num_workgroups_offset = bo_offset;
2584 cmd_buffer->state.num_workgroups_bo = bo;
2585 }
2586
2587 genX(cmd_buffer_flush_compute_state)(cmd_buffer);
2588
2589 emit_lrm(batch, GPGPU_DISPATCHDIMX, bo, bo_offset);
2590 emit_lrm(batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4);
2591 emit_lrm(batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8);
2592
2593 #if GEN_GEN <= 7
2594 /* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */
2595 emit_lri(batch, MI_PREDICATE_SRC0 + 4, 0);
2596 emit_lri(batch, MI_PREDICATE_SRC1 + 0, 0);
2597 emit_lri(batch, MI_PREDICATE_SRC1 + 4, 0);
2598
2599 /* Load compute_dispatch_indirect_x_size into SRC0 */
2600 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 0);
2601
2602 /* predicate = (compute_dispatch_indirect_x_size == 0); */
2603 anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
2604 mip.LoadOperation = LOAD_LOAD;
2605 mip.CombineOperation = COMBINE_SET;
2606 mip.CompareOperation = COMPARE_SRCS_EQUAL;
2607 }
2608
2609 /* Load compute_dispatch_indirect_y_size into SRC0 */
2610 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 4);
2611
2612 /* predicate |= (compute_dispatch_indirect_y_size == 0); */
2613 anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
2614 mip.LoadOperation = LOAD_LOAD;
2615 mip.CombineOperation = COMBINE_OR;
2616 mip.CompareOperation = COMPARE_SRCS_EQUAL;
2617 }
2618
2619 /* Load compute_dispatch_indirect_z_size into SRC0 */
2620 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 8);
2621
2622 /* predicate |= (compute_dispatch_indirect_z_size == 0); */
2623 anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
2624 mip.LoadOperation = LOAD_LOAD;
2625 mip.CombineOperation = COMBINE_OR;
2626 mip.CompareOperation = COMPARE_SRCS_EQUAL;
2627 }
2628
2629 /* predicate = !predicate; */
2630 #define COMPARE_FALSE 1
2631 anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
2632 mip.LoadOperation = LOAD_LOADINV;
2633 mip.CombineOperation = COMBINE_OR;
2634 mip.CompareOperation = COMPARE_FALSE;
2635 }
2636 #endif
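/* Net effect of the MI_PREDICATE sequence above (sketch):
 *
 *    predicate = !((x == 0) || (y == 0) || (z == 0));
 *
 * so the predicated GPGPU_WALKER below only runs when all three indirect
 * dispatch dimensions are non-zero.  Gen8+ skips this (note the
 * GEN_GEN <= 7 guard here and on ggw.PredicateEnable below), presumably
 * because later hardware copes with zero-sized dispatches on its own.
 */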
2637
2638 anv_batch_emit(batch, GENX(GPGPU_WALKER), ggw) {
2639 ggw.IndirectParameterEnable = true;
2640 ggw.PredicateEnable = GEN_GEN <= 7;
2641 ggw.SIMDSize = prog_data->simd_size / 16;
2642 ggw.ThreadDepthCounterMaximum = 0;
2643 ggw.ThreadHeightCounterMaximum = 0;
2644 ggw.ThreadWidthCounterMaximum = prog_data->threads - 1;
2645 ggw.RightExecutionMask = pipeline->cs_right_mask;
2646 ggw.BottomExecutionMask = 0xffffffff;
2647 }
2648
2649 anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH), msf);
2650 }
2651
2652 static void
2653 genX(flush_pipeline_select)(struct anv_cmd_buffer *cmd_buffer,
2654 uint32_t pipeline)
2655 {
2656 if (cmd_buffer->state.current_pipeline == pipeline)
2657 return;
2658
2659 #if GEN_GEN >= 8 && GEN_GEN < 10
2660 /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
2661 *
2662 * Software must clear the COLOR_CALC_STATE Valid field in
2663 * 3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
2664 * with Pipeline Select set to GPGPU.
2665 *
2666 * The internal hardware docs recommend the same workaround for Gen9
2667 * hardware too.
2668 */
2669 if (pipeline == GPGPU)
2670 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
2671 #endif
2672
2673 /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
2674 * PIPELINE_SELECT [DevBWR+]":
2675 *
2676 * Project: DEVSNB+
2677 *
2678 * Software must ensure all the write caches are flushed through a
2679 * stalling PIPE_CONTROL command followed by another PIPE_CONTROL
2680 * command to invalidate read only caches prior to programming
2681 * MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
2682 */
2683 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
2684 pc.RenderTargetCacheFlushEnable = true;
2685 pc.DepthCacheFlushEnable = true;
2686 pc.DCFlushEnable = true;
2687 pc.PostSyncOperation = NoWrite;
2688 pc.CommandStreamerStallEnable = true;
2689 }
2690
2691 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
2692 pc.TextureCacheInvalidationEnable = true;
2693 pc.ConstantCacheInvalidationEnable = true;
2694 pc.StateCacheInvalidationEnable = true;
2695 pc.InstructionCacheInvalidateEnable = true;
2696 pc.PostSyncOperation = NoWrite;
2697 }
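/* Two PIPE_CONTROLs are used to satisfy the quoted requirement: the
 * first (with a CS stall) drains the write caches, and only then does
 * the second invalidate the read-only caches, before PIPELINE_SELECT is
 * programmed.
 */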
2698
2699 anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
2700 #if GEN_GEN >= 9
2701 ps.MaskBits = 3;
2702 #endif
2703 ps.PipelineSelection = pipeline;
2704 }
2705
2706 cmd_buffer->state.current_pipeline = pipeline;
2707 }
2708
2709 void
2710 genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
2711 {
2712 genX(flush_pipeline_select)(cmd_buffer, _3D);
2713 }
2714
2715 void
2716 genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
2717 {
2718 genX(flush_pipeline_select)(cmd_buffer, GPGPU);
2719 }
2720
2721 void
2722 genX(cmd_buffer_emit_gen7_depth_flush)(struct anv_cmd_buffer *cmd_buffer)
2723 {
2724 if (GEN_GEN >= 8)
2725 return;
2726
2727 /* From the Haswell PRM, documentation for 3DSTATE_DEPTH_BUFFER:
2728 *
2729 * "Restriction: Prior to changing Depth/Stencil Buffer state (i.e., any
2730 * combination of 3DSTATE_DEPTH_BUFFER, 3DSTATE_CLEAR_PARAMS,
2731 * 3DSTATE_STENCIL_BUFFER, 3DSTATE_HIER_DEPTH_BUFFER) SW must first
2732 * issue a pipelined depth stall (PIPE_CONTROL with Depth Stall bit
2733 * set), followed by a pipelined depth cache flush (PIPE_CONTROL with
2734 * Depth Flush Bit set, followed by another pipelined depth stall
2735 * (PIPE_CONTROL with Depth Stall Bit set), unless SW can otherwise
2736 * guarantee that the pipeline from WM onwards is already flushed (e.g.,
2737 * via a preceding MI_FLUSH)."
2738 */
2739 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
2740 pipe.DepthStallEnable = true;
2741 }
2742 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
2743 pipe.DepthCacheFlushEnable = true;
2744 }
2745 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
2746 pipe.DepthStallEnable = true;
2747 }
2748 }
2749
2750 static void
2751 cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
2752 {
2753 struct anv_device *device = cmd_buffer->device;
2754 const struct anv_image_view *iview =
2755 anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
2756 const struct anv_image *image = iview ? iview->image : NULL;
2757
2758 /* FIXME: Width and Height are wrong */
2759
2760 genX(cmd_buffer_emit_gen7_depth_flush)(cmd_buffer);
2761
2762 uint32_t *dw = anv_batch_emit_dwords(&cmd_buffer->batch,
2763 device->isl_dev.ds.size / 4);
2764 if (dw == NULL)
2765 return;
2766
2767 struct isl_depth_stencil_hiz_emit_info info = {
2768 .mocs = device->default_mocs,
2769 };
2770
2771 if (iview)
2772 info.view = &iview->isl;
2773
2774 if (image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
2775 info.depth_surf = &image->depth_surface.isl;
2776
2777 info.depth_address =
2778 anv_batch_emit_reloc(&cmd_buffer->batch,
2779 dw + device->isl_dev.ds.depth_offset / 4,
2780 image->bo,
2781 image->offset + image->depth_surface.offset);
2782
2783 const uint32_t ds =
2784 cmd_buffer->state.subpass->depth_stencil_attachment.attachment;
2785 info.hiz_usage = cmd_buffer->state.attachments[ds].aux_usage;
2786 if (info.hiz_usage == ISL_AUX_USAGE_HIZ) {
2787 info.hiz_surf = &image->aux_surface.isl;
2788
2789 info.hiz_address =
2790 anv_batch_emit_reloc(&cmd_buffer->batch,
2791 dw + device->isl_dev.ds.hiz_offset / 4,
2792 image->bo,
2793 image->offset + image->aux_surface.offset);
2794
2795 info.depth_clear_value = ANV_HZ_FC_VAL;
2796 }
2797 }
2798
2799 if (image && (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT)) {
2800 info.stencil_surf = &image->stencil_surface.isl;
2801
2802 info.stencil_address =
2803 anv_batch_emit_reloc(&cmd_buffer->batch,
2804 dw + device->isl_dev.ds.stencil_offset / 4,
2805 image->bo,
2806 image->offset + image->stencil_surface.offset);
2807 }
2808
2809 isl_emit_depth_stencil_hiz_s(&device->isl_dev, dw, &info);
2810
2811 cmd_buffer->state.hiz_enabled = info.hiz_usage == ISL_AUX_USAGE_HIZ;
2812 }
2813
2814
2815 /**
2816 * @brief Perform any layout transitions required at the beginning and/or end
2817 * of the current subpass for depth and color attachments.
2818 *
2819 * TODO: Consider preprocessing the attachment reference array at render pass
2820 * create time to determine if no layout transition is needed at the
2821 * beginning and/or end of each subpass.
2822 *
2823 * @param cmd_buffer The command buffer the transition is happening within.
2824 * @param subpass_end If true, marks that the transition is happening at the
2825 * end of the subpass.
2826 */
2827 static void
2828 cmd_buffer_subpass_transition_layouts(struct anv_cmd_buffer * const cmd_buffer,
2829 const bool subpass_end)
2830 {
2831 /* We need a non-NULL command buffer. */
2832 assert(cmd_buffer);
2833
2834 const struct anv_cmd_state * const cmd_state = &cmd_buffer->state;
2835 const struct anv_subpass * const subpass = cmd_state->subpass;
2836
2837 /* This function must be called within a subpass. */
2838 assert(subpass);
2839
2840 /* If there are attachment references, the array shouldn't be NULL.
2841 */
2842 if (subpass->attachment_count > 0)
2843 assert(subpass->attachments);
2844
2845 /* Iterate over the array of attachment references. */
2846 for (const VkAttachmentReference *att_ref = subpass->attachments;
2847 att_ref < subpass->attachments + subpass->attachment_count; att_ref++) {
2848
2849 /* If the attachment is unused, we can't perform a layout transition. */
2850 if (att_ref->attachment == VK_ATTACHMENT_UNUSED)
2851 continue;
2852
2853 /* This attachment index shouldn't go out of bounds. */
2854 assert(att_ref->attachment < cmd_state->pass->attachment_count);
2855
2856 const struct anv_render_pass_attachment * const att_desc =
2857 &cmd_state->pass->attachments[att_ref->attachment];
2858 struct anv_attachment_state * const att_state =
2859 &cmd_buffer->state.attachments[att_ref->attachment];
2860
2861 /* The attachment should not be used in a subpass after its last. */
2862 assert(att_desc->last_subpass_idx >= anv_get_subpass_id(cmd_state));
2863
2864 if (subpass_end && anv_get_subpass_id(cmd_state) <
2865 att_desc->last_subpass_idx) {
2866 /* We're calling this function on a buffer twice in one subpass and
2867 * this is not the last use of the buffer. The layout should not have
2868 * changed from the first call and no transition is necessary.
2869 */
2870 assert(att_state->current_layout == att_ref->layout ||
2871 att_state->current_layout ==
2872 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
2873 continue;
2874 }
2875
2876 /* The attachment index must be less than the number of attachments
2877 * within the framebuffer.
2878 */
2879 assert(att_ref->attachment < cmd_state->framebuffer->attachment_count);
2880
2881 const struct anv_image_view * const iview =
2882 cmd_state->framebuffer->attachments[att_ref->attachment];
2883 const struct anv_image * const image = iview->image;
2884
2885 /* Get the appropriate target layout for this attachment. */
2886 VkImageLayout target_layout;
2887
2888 /* A resolve is necessary before use as an input attachment if the clear
2889 * color or auxiliary buffer usage isn't supported by the sampler.
2890 */
2891 const bool input_needs_resolve =
2892 (att_state->fast_clear && !att_state->clear_color_is_zero_one) ||
2893 att_state->input_aux_usage != att_state->aux_usage;
2894 if (subpass_end) {
2895 target_layout = att_desc->final_layout;
2896 } else if (iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT &&
2897 !input_needs_resolve) {
2898 /* Layout transitions before the final layout only serve to enable
2899 * sampling as an input attachment. If the input attachment supports
2900 * sampling using the auxiliary surface, we can skip such transitions
2901 * by making the target layout one that is CCS-aware.
2902 */
2903 target_layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
2904 } else {
2905 target_layout = att_ref->layout;
2906 }
2907
2908 /* Perform the layout transition. */
2909 if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
2910 transition_depth_buffer(cmd_buffer, image,
2911 att_state->current_layout, target_layout);
2912 att_state->aux_usage =
2913 anv_layout_to_aux_usage(&cmd_buffer->device->info, image,
2914 image->aspects, target_layout);
2915 } else if (image->aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
2916 transition_color_buffer(cmd_buffer, image,
2917 iview->isl.base_level, 1,
2918 iview->isl.base_array_layer,
2919 iview->isl.array_len,
2920 att_state->current_layout, target_layout);
2921 }
2922
2923 att_state->current_layout = target_layout;
2924 }
2925 }
2926
2927 /* Update the clear value dword(s) in surface state objects or the fast clear
2928 * state buffer entry for the color attachments used in this subpass.
2929 */
2930 static void
2931 cmd_buffer_subpass_sync_fast_clear_values(struct anv_cmd_buffer *cmd_buffer)
2932 {
2933 assert(cmd_buffer && cmd_buffer->state.subpass);
2934
2935 const struct anv_cmd_state *state = &cmd_buffer->state;
2936
2937 /* Iterate through every color attachment used in this subpass. */
2938 for (uint32_t i = 0; i < state->subpass->color_count; ++i) {
2939
2940 /* The attachment should be one of the attachments described in the
2941 * render pass and used in the subpass.
2942 */
2943 const uint32_t a = state->subpass->color_attachments[i].attachment;
2944 assert(a < state->pass->attachment_count);
2945 if (a == VK_ATTACHMENT_UNUSED)
2946 continue;
2947
2948 /* Store some information regarding this attachment. */
2949 const struct anv_attachment_state *att_state = &state->attachments[a];
2950 const struct anv_image_view *iview = state->framebuffer->attachments[a];
2951 const struct anv_render_pass_attachment *rp_att =
2952 &state->pass->attachments[a];
2953
2954 if (att_state->aux_usage == ISL_AUX_USAGE_NONE)
2955 continue;
2956
2957 /* The fast clear state entry must be updated if a fast clear is going to
2958 * happen. The surface state must be updated if the clear value from a
2959 * prior fast clear may be needed.
2960 */
2961 if (att_state->pending_clear_aspects && att_state->fast_clear) {
2962 /* Update the fast clear state entry. */
2963 genX(copy_fast_clear_dwords)(cmd_buffer, att_state->color_rt_state,
2964 iview->image, iview->isl.base_level,
2965 true /* copy from ss */);
2966
2967 /* Fast-clears impact whether or not a resolve will be necessary. */
2968 if (iview->image->aux_usage == ISL_AUX_USAGE_CCS_E &&
2969 att_state->clear_color_is_zero) {
2970 /* This image always has the auxiliary buffer enabled. We can mark
2971 * the subresource as not needing a resolve because the clear color
2972 * will match what's in every RENDER_SURFACE_STATE object when it's
2973 * being used for sampling.
2974 */
2975 genX(set_image_needs_resolve)(cmd_buffer, iview->image,
2976 iview->isl.base_level, false);
2977 } else {
2978 genX(set_image_needs_resolve)(cmd_buffer, iview->image,
2979 iview->isl.base_level, true);
2980 }
2981 } else if (rp_att->load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
2982 /* The attachment may have been fast-cleared in a previous render
2983 * pass and the value is needed now. Update the surface state(s).
2984 *
2985 * TODO: Do this only once per render pass instead of every subpass.
2986 */
2987 genX(copy_fast_clear_dwords)(cmd_buffer, att_state->color_rt_state,
2988 iview->image, iview->isl.base_level,
2989 false /* copy to ss */);
2990
2991 if (need_input_attachment_state(rp_att) &&
2992 att_state->input_aux_usage != ISL_AUX_USAGE_NONE) {
2993 genX(copy_fast_clear_dwords)(cmd_buffer, att_state->input_att_state,
2994 iview->image, iview->isl.base_level,
2995 false /* copy to ss */);
2996 }
2997 }
2998 }
2999 }
3000
3001
3002 static void
3003 genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer,
3004 struct anv_subpass *subpass)
3005 {
3006 cmd_buffer->state.subpass = subpass;
3007
3008 cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
3009
3010 /* Our implementation of VK_KHR_multiview uses instancing to draw the
3011 * different views. If the client asks for instancing, we need to use the
3012 * Instance Data Step Rate to ensure that we repeat the client's
3013 * per-instance data once for each view. Since this bit is in
3014 * VERTEX_BUFFER_STATE on gen7, we need to dirty vertex buffers at the top
3015 * of each subpass.
3016 */
3017 if (GEN_GEN == 7)
3018 cmd_buffer->state.vb_dirty |= ~0;
3019
3020 /* Perform transitions to the subpass layout before any writes have
3021 * occurred.
3022 */
3023 cmd_buffer_subpass_transition_layouts(cmd_buffer, false);
3024
3025 /* Update clear values *after* performing automatic layout transitions.
3026 * This ensures that transitions from the UNDEFINED layout have had a chance
3027 * to populate the clear value buffer with the correct values for the
3028 * LOAD_OP_LOAD loadOp and that the fast-clears will update the buffer
3029 * without the aforementioned layout transition overwriting the fast-clear
3030 * value.
3031 */
3032 cmd_buffer_subpass_sync_fast_clear_values(cmd_buffer);
3033
3034 cmd_buffer_emit_depth_stencil(cmd_buffer);
3035
3036 anv_cmd_buffer_clear_subpass(cmd_buffer);
3037 }
3038
3039 void genX(CmdBeginRenderPass)(
3040 VkCommandBuffer commandBuffer,
3041 const VkRenderPassBeginInfo* pRenderPassBegin,
3042 VkSubpassContents contents)
3043 {
3044 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
3045 ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
3046 ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
3047
3048 cmd_buffer->state.framebuffer = framebuffer;
3049 cmd_buffer->state.pass = pass;
3050 cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
3051 VkResult result =
3052 genX(cmd_buffer_setup_attachments)(cmd_buffer, pass, pRenderPassBegin);
3053
3054 /* If we failed to set up the attachments, we should not try to go further */
3055 if (result != VK_SUCCESS) {
3056 assert(anv_batch_has_error(&cmd_buffer->batch));
3057 return;
3058 }
3059
3060 genX(flush_pipeline_select_3d)(cmd_buffer);
3061
3062 genX(cmd_buffer_set_subpass)(cmd_buffer, pass->subpasses);
3063
3064 cmd_buffer->state.pending_pipe_bits |=
3065 cmd_buffer->state.pass->subpass_flushes[0];
3066 }
3067
3068 void genX(CmdNextSubpass)(
3069 VkCommandBuffer commandBuffer,
3070 VkSubpassContents contents)
3071 {
3072 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
3073
3074 if (anv_batch_has_error(&cmd_buffer->batch))
3075 return;
3076
3077 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
3078
3079 anv_cmd_buffer_resolve_subpass(cmd_buffer);
3080
3081 /* Perform transitions to the final layout after all writes have occurred.
3082 */
3083 cmd_buffer_subpass_transition_layouts(cmd_buffer, true);
3084
3085 genX(cmd_buffer_set_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1);
3086
3087 uint32_t subpass_id = anv_get_subpass_id(&cmd_buffer->state);
3088 cmd_buffer->state.pending_pipe_bits |=
3089 cmd_buffer->state.pass->subpass_flushes[subpass_id];
3090 }
3091
3092 void genX(CmdEndRenderPass)(
3093 VkCommandBuffer commandBuffer)
3094 {
3095 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
3096
3097 if (anv_batch_has_error(&cmd_buffer->batch))
3098 return;
3099
3100 anv_cmd_buffer_resolve_subpass(cmd_buffer);
3101
3102 /* Perform transitions to the final layout after all writes have occurred.
3103 */
3104 cmd_buffer_subpass_transition_layouts(cmd_buffer, true);
3105
3106 cmd_buffer->state.pending_pipe_bits |=
3107 cmd_buffer->state.pass->subpass_flushes[cmd_buffer->state.pass->subpass_count];
3108
3109 cmd_buffer->state.hiz_enabled = false;
3110
3111 #ifndef NDEBUG
3112 anv_dump_add_framebuffer(cmd_buffer, cmd_buffer->state.framebuffer);
3113 #endif
3114
3115 /* Remove references to render pass specific state. This enables us to
3116 * detect whether or not we're in a renderpass.
3117 */
3118 cmd_buffer->state.framebuffer = NULL;
3119 cmd_buffer->state.pass = NULL;
3120 cmd_buffer->state.subpass = NULL;
3121 }