radv: Always enable PERFECT_ZPASS_COUNTS.
[mesa.git] src/amd/vulkan/radv_cmd_buffer.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 #include "radv_private.h"
29 #include "radv_radeon_winsys.h"
30 #include "radv_shader.h"
31 #include "radv_cs.h"
32 #include "sid.h"
33 #include "vk_format.h"
34 #include "vk_util.h"
35 #include "radv_debug.h"
36 #include "radv_meta.h"
37
38 #include "ac_debug.h"
39
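/* Prefetch flags tracked in cmd_buffer->state.prefetch_L2_mask.  A bit is
 * set while the corresponding shader (or the vertex buffer descriptors)
 * still needs to be prefetched into L2; radv_emit_prefetch_L2() clears the
 * bits it has handled.
 */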
40 enum {
41 RADV_PREFETCH_VBO_DESCRIPTORS = (1 << 0),
42 RADV_PREFETCH_VS = (1 << 1),
43 RADV_PREFETCH_TCS = (1 << 2),
44 RADV_PREFETCH_TES = (1 << 3),
45 RADV_PREFETCH_GS = (1 << 4),
46 RADV_PREFETCH_PS = (1 << 5),
47 RADV_PREFETCH_SHADERS = (RADV_PREFETCH_VS |
48 RADV_PREFETCH_TCS |
49 RADV_PREFETCH_TES |
50 RADV_PREFETCH_GS |
51 RADV_PREFETCH_PS)
52 };
53
54 static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
55 struct radv_image *image,
56 VkImageLayout src_layout,
57 bool src_render_loop,
58 VkImageLayout dst_layout,
59 bool dst_render_loop,
60 uint32_t src_family,
61 uint32_t dst_family,
62 const VkImageSubresourceRange *range,
63 struct radv_sample_locations_state *sample_locs);
64
65 const struct radv_dynamic_state default_dynamic_state = {
66 .viewport = {
67 .count = 0,
68 },
69 .scissor = {
70 .count = 0,
71 },
72 .line_width = 1.0f,
73 .depth_bias = {
74 .bias = 0.0f,
75 .clamp = 0.0f,
76 .slope = 0.0f,
77 },
78 .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
79 .depth_bounds = {
80 .min = 0.0f,
81 .max = 1.0f,
82 },
83 .stencil_compare_mask = {
84 .front = ~0u,
85 .back = ~0u,
86 },
87 .stencil_write_mask = {
88 .front = ~0u,
89 .back = ~0u,
90 },
91 .stencil_reference = {
92 .front = 0u,
93 .back = 0u,
94 },
95 .line_stipple = {
96 .factor = 0u,
97 .pattern = 0u,
98 },
99 };
100
101 static void
102 radv_bind_dynamic_state(struct radv_cmd_buffer *cmd_buffer,
103 const struct radv_dynamic_state *src)
104 {
105 struct radv_dynamic_state *dest = &cmd_buffer->state.dynamic;
106 uint32_t copy_mask = src->mask;
107 uint32_t dest_mask = 0;
108
109 /* Make sure to copy the number of viewports/scissors because they can
110 * only be specified at pipeline creation time.
111 */
112 dest->viewport.count = src->viewport.count;
113 dest->scissor.count = src->scissor.count;
114 dest->discard_rectangle.count = src->discard_rectangle.count;
115 dest->sample_location.count = src->sample_location.count;
116
117 if (copy_mask & RADV_DYNAMIC_VIEWPORT) {
118 if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
119 src->viewport.count * sizeof(VkViewport))) {
120 typed_memcpy(dest->viewport.viewports,
121 src->viewport.viewports,
122 src->viewport.count);
123 dest_mask |= RADV_DYNAMIC_VIEWPORT;
124 }
125 }
126
127 if (copy_mask & RADV_DYNAMIC_SCISSOR) {
128 if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
129 src->scissor.count * sizeof(VkRect2D))) {
130 typed_memcpy(dest->scissor.scissors,
131 src->scissor.scissors, src->scissor.count);
132 dest_mask |= RADV_DYNAMIC_SCISSOR;
133 }
134 }
135
136 if (copy_mask & RADV_DYNAMIC_LINE_WIDTH) {
137 if (dest->line_width != src->line_width) {
138 dest->line_width = src->line_width;
139 dest_mask |= RADV_DYNAMIC_LINE_WIDTH;
140 }
141 }
142
143 if (copy_mask & RADV_DYNAMIC_DEPTH_BIAS) {
144 if (memcmp(&dest->depth_bias, &src->depth_bias,
145 sizeof(src->depth_bias))) {
146 dest->depth_bias = src->depth_bias;
147 dest_mask |= RADV_DYNAMIC_DEPTH_BIAS;
148 }
149 }
150
151 if (copy_mask & RADV_DYNAMIC_BLEND_CONSTANTS) {
152 if (memcmp(&dest->blend_constants, &src->blend_constants,
153 sizeof(src->blend_constants))) {
154 typed_memcpy(dest->blend_constants,
155 src->blend_constants, 4);
156 dest_mask |= RADV_DYNAMIC_BLEND_CONSTANTS;
157 }
158 }
159
160 if (copy_mask & RADV_DYNAMIC_DEPTH_BOUNDS) {
161 if (memcmp(&dest->depth_bounds, &src->depth_bounds,
162 sizeof(src->depth_bounds))) {
163 dest->depth_bounds = src->depth_bounds;
164 dest_mask |= RADV_DYNAMIC_DEPTH_BOUNDS;
165 }
166 }
167
168 if (copy_mask & RADV_DYNAMIC_STENCIL_COMPARE_MASK) {
169 if (memcmp(&dest->stencil_compare_mask,
170 &src->stencil_compare_mask,
171 sizeof(src->stencil_compare_mask))) {
172 dest->stencil_compare_mask = src->stencil_compare_mask;
173 dest_mask |= RADV_DYNAMIC_STENCIL_COMPARE_MASK;
174 }
175 }
176
177 if (copy_mask & RADV_DYNAMIC_STENCIL_WRITE_MASK) {
178 if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
179 sizeof(src->stencil_write_mask))) {
180 dest->stencil_write_mask = src->stencil_write_mask;
181 dest_mask |= RADV_DYNAMIC_STENCIL_WRITE_MASK;
182 }
183 }
184
185 if (copy_mask & RADV_DYNAMIC_STENCIL_REFERENCE) {
186 if (memcmp(&dest->stencil_reference, &src->stencil_reference,
187 sizeof(src->stencil_reference))) {
188 dest->stencil_reference = src->stencil_reference;
189 dest_mask |= RADV_DYNAMIC_STENCIL_REFERENCE;
190 }
191 }
192
193 if (copy_mask & RADV_DYNAMIC_DISCARD_RECTANGLE) {
194 if (memcmp(&dest->discard_rectangle.rectangles, &src->discard_rectangle.rectangles,
195 src->discard_rectangle.count * sizeof(VkRect2D))) {
196 typed_memcpy(dest->discard_rectangle.rectangles,
197 src->discard_rectangle.rectangles,
198 src->discard_rectangle.count);
199 dest_mask |= RADV_DYNAMIC_DISCARD_RECTANGLE;
200 }
201 }
202
203 if (copy_mask & RADV_DYNAMIC_SAMPLE_LOCATIONS) {
204 if (dest->sample_location.per_pixel != src->sample_location.per_pixel ||
205 dest->sample_location.grid_size.width != src->sample_location.grid_size.width ||
206 dest->sample_location.grid_size.height != src->sample_location.grid_size.height ||
207 memcmp(&dest->sample_location.locations,
208 &src->sample_location.locations,
209 src->sample_location.count * sizeof(VkSampleLocationEXT))) {
210 dest->sample_location.per_pixel = src->sample_location.per_pixel;
211 dest->sample_location.grid_size = src->sample_location.grid_size;
212 typed_memcpy(dest->sample_location.locations,
213 src->sample_location.locations,
214 src->sample_location.count);
215 dest_mask |= RADV_DYNAMIC_SAMPLE_LOCATIONS;
216 }
217 }
218
219 if (copy_mask & RADV_DYNAMIC_LINE_STIPPLE) {
220 if (memcmp(&dest->line_stipple, &src->line_stipple,
221 sizeof(src->line_stipple))) {
222 dest->line_stipple = src->line_stipple;
223 dest_mask |= RADV_DYNAMIC_LINE_STIPPLE;
224 }
225 }
226
227 cmd_buffer->state.dirty |= dest_mask;
228 }
229
230 static void
231 radv_bind_streamout_state(struct radv_cmd_buffer *cmd_buffer,
232 struct radv_pipeline *pipeline)
233 {
234 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
235 struct radv_shader_info *info;
236
237 if (!pipeline->streamout_shader ||
238 cmd_buffer->device->physical_device->use_ngg_streamout)
239 return;
240
241 info = &pipeline->streamout_shader->info;
242 for (int i = 0; i < MAX_SO_BUFFERS; i++)
243 so->stride_in_dw[i] = info->so.strides[i];
244
245 so->enabled_stream_buffers_mask = info->so.enabled_stream_buffers_mask;
246 }
247
248 bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer)
249 {
250 return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
251 cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7;
252 }
253
254 enum ring_type radv_queue_family_to_ring(int f) {
255 switch (f) {
256 case RADV_QUEUE_GENERAL:
257 return RING_GFX;
258 case RADV_QUEUE_COMPUTE:
259 return RING_COMPUTE;
260 case RADV_QUEUE_TRANSFER:
261 return RING_DMA;
262 default:
263 unreachable("Unknown queue family");
264 }
265 }
266
267 static VkResult radv_create_cmd_buffer(
268 struct radv_device * device,
269 struct radv_cmd_pool * pool,
270 VkCommandBufferLevel level,
271 VkCommandBuffer* pCommandBuffer)
272 {
273 struct radv_cmd_buffer *cmd_buffer;
274 unsigned ring;
275 cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
276 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
277 if (cmd_buffer == NULL)
278 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
279
280 vk_object_base_init(&device->vk, &cmd_buffer->base,
281 VK_OBJECT_TYPE_COMMAND_BUFFER);
282
283 cmd_buffer->device = device;
284 cmd_buffer->pool = pool;
285 cmd_buffer->level = level;
286
287 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
288 cmd_buffer->queue_family_index = pool->queue_family_index;
289
290 ring = radv_queue_family_to_ring(cmd_buffer->queue_family_index);
291
292 cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
293 if (!cmd_buffer->cs) {
294 vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
295 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
296 }
297
298 *pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);
299
300 list_inithead(&cmd_buffer->upload.list);
301
302 return VK_SUCCESS;
303 }
304
305 static void
306 radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer)
307 {
308 list_del(&cmd_buffer->pool_link);
309
310 list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
311 &cmd_buffer->upload.list, list) {
312 cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
313 list_del(&up->list);
314 free(up);
315 }
316
317 if (cmd_buffer->upload.upload_bo)
318 cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
319 cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
320
321 for (unsigned i = 0; i < MAX_BIND_POINTS; i++)
322 free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
323
324 vk_object_base_finish(&cmd_buffer->base);
325
326 vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
327 }
328
329 static VkResult
330 radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
331 {
332 cmd_buffer->device->ws->cs_reset(cmd_buffer->cs);
333
334 list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
335 &cmd_buffer->upload.list, list) {
336 cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
337 list_del(&up->list);
338 free(up);
339 }
340
341 cmd_buffer->push_constant_stages = 0;
342 cmd_buffer->scratch_size_per_wave_needed = 0;
343 cmd_buffer->scratch_waves_wanted = 0;
344 cmd_buffer->compute_scratch_size_per_wave_needed = 0;
345 cmd_buffer->compute_scratch_waves_wanted = 0;
346 cmd_buffer->esgs_ring_size_needed = 0;
347 cmd_buffer->gsvs_ring_size_needed = 0;
348 cmd_buffer->tess_rings_needed = false;
349 cmd_buffer->gds_needed = false;
350 cmd_buffer->gds_oa_needed = false;
351 cmd_buffer->sample_positions_needed = false;
352
353 if (cmd_buffer->upload.upload_bo)
354 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
355 cmd_buffer->upload.upload_bo);
356 cmd_buffer->upload.offset = 0;
357
358 cmd_buffer->record_result = VK_SUCCESS;
359
360 memset(cmd_buffer->vertex_bindings, 0, sizeof(cmd_buffer->vertex_bindings));
361
362 for (unsigned i = 0; i < MAX_BIND_POINTS; i++) {
363 cmd_buffer->descriptors[i].dirty = 0;
364 cmd_buffer->descriptors[i].valid = 0;
365 cmd_buffer->descriptors[i].push_dirty = false;
366 }
367
368 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 &&
369 cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) {
370 unsigned num_db = cmd_buffer->device->physical_device->rad_info.num_render_backends;
371 unsigned fence_offset, eop_bug_offset;
372 void *fence_ptr;
373
374 radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 8, &fence_offset,
375 &fence_ptr);
376
377 cmd_buffer->gfx9_fence_va =
378 radv_buffer_get_va(cmd_buffer->upload.upload_bo);
379 cmd_buffer->gfx9_fence_va += fence_offset;
380
381 if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
382 /* Allocate a buffer for the EOP bug on GFX9. */
383 radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 8,
384 &eop_bug_offset, &fence_ptr);
385 cmd_buffer->gfx9_eop_bug_va =
386 radv_buffer_get_va(cmd_buffer->upload.upload_bo);
387 cmd_buffer->gfx9_eop_bug_va += eop_bug_offset;
388 }
389 }
390
391 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_INITIAL;
392
393 return cmd_buffer->record_result;
394 }
395
396 static bool
397 radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer,
398 uint64_t min_needed)
399 {
400 uint64_t new_size;
401 struct radeon_winsys_bo *bo;
402 struct radv_cmd_buffer_upload *upload;
403 struct radv_device *device = cmd_buffer->device;
404
405 new_size = MAX2(min_needed, 16 * 1024);
406 new_size = MAX2(new_size, 2 * cmd_buffer->upload.size);
407
408 bo = device->ws->buffer_create(device->ws,
409 new_size, 4096,
410 RADEON_DOMAIN_GTT,
411 RADEON_FLAG_CPU_ACCESS|
412 RADEON_FLAG_NO_INTERPROCESS_SHARING |
413 RADEON_FLAG_32BIT,
414 RADV_BO_PRIORITY_UPLOAD_BUFFER);
415
416 if (!bo) {
417 cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
418 return false;
419 }
420
421 radv_cs_add_buffer(device->ws, cmd_buffer->cs, bo);
422 if (cmd_buffer->upload.upload_bo) {
423 upload = malloc(sizeof(*upload));
424
425 if (!upload) {
426 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
427 device->ws->buffer_destroy(bo);
428 return false;
429 }
430
431 memcpy(upload, &cmd_buffer->upload, sizeof(*upload));
432 list_add(&upload->list, &cmd_buffer->upload.list);
433 }
434
435 cmd_buffer->upload.upload_bo = bo;
436 cmd_buffer->upload.size = new_size;
437 cmd_buffer->upload.offset = 0;
438 cmd_buffer->upload.map = device->ws->buffer_map(cmd_buffer->upload.upload_bo);
439
440 if (!cmd_buffer->upload.map) {
441 cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
442 return false;
443 }
444
445 return true;
446 }
447
448 bool
449 radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer,
450 unsigned size,
451 unsigned alignment,
452 unsigned *out_offset,
453 void **ptr)
454 {
455 assert(util_is_power_of_two_nonzero(alignment));
456
457 uint64_t offset = align(cmd_buffer->upload.offset, alignment);
458 if (offset + size > cmd_buffer->upload.size) {
459 if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size))
460 return false;
461 offset = 0;
462 }
463
464 *out_offset = offset;
465 *ptr = cmd_buffer->upload.map + offset;
466
467 cmd_buffer->upload.offset = offset + size;
468 return true;
469 }
470
471 bool
472 radv_cmd_buffer_upload_data(struct radv_cmd_buffer *cmd_buffer,
473 unsigned size, unsigned alignment,
474 const void *data, unsigned *out_offset)
475 {
476 uint8_t *ptr;
477
478 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size, alignment,
479 out_offset, (void **)&ptr))
480 return false;
481
482 if (ptr)
483 memcpy(ptr, data, size);
484
485 return true;
486 }
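
/* A typical use of radv_cmd_buffer_upload_data()/_alloc() (illustrative
 * sketch, not an actual caller in this file): copy CPU data into the
 * per-command-buffer upload BO and point the GPU at it via the BO's
 * virtual address plus the returned offset, e.g.
 *
 *    unsigned offset;
 *    if (radv_cmd_buffer_upload_data(cmd_buffer, size, 256, data, &offset)) {
 *            uint64_t va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
 *            va += offset;
 *            // emit 'va' through a descriptor or user SGPR
 *    }
 *
 * Earlier uploads stay valid for the whole recording: when the buffer is
 * resized, the old BO is kept on upload.list and only destroyed when the
 * command buffer is reset or destroyed.
 */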
487
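/* Emit a PKT3_WRITE_DATA packet that makes the ME write 'count' dwords of
 * 'data' to GPU memory at 'va'.  The packet is 4 + count dwords: header,
 * control word (DST_SEL/WR_CONFIRM/ENGINE_SEL), the destination address
 * split into low/high dwords, and the payload itself.
 */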
488 static void
489 radv_emit_write_data_packet(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
490 unsigned count, const uint32_t *data)
491 {
492 struct radeon_cmdbuf *cs = cmd_buffer->cs;
493
494 radeon_check_space(cmd_buffer->device->ws, cs, 4 + count);
495
496 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
497 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
498 S_370_WR_CONFIRM(1) |
499 S_370_ENGINE_SEL(V_370_ME));
500 radeon_emit(cs, va);
501 radeon_emit(cs, va >> 32);
502 radeon_emit_array(cs, data, count);
503 }
504
505 void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer)
506 {
507 struct radv_device *device = cmd_buffer->device;
508 struct radeon_cmdbuf *cs = cmd_buffer->cs;
509 uint64_t va;
510
511 va = radv_buffer_get_va(device->trace_bo);
512 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
513 va += 4;
514
515 ++cmd_buffer->state.trace_id;
516 radv_emit_write_data_packet(cmd_buffer, va, 1,
517 &cmd_buffer->state.trace_id);
518
519 radeon_check_space(cmd_buffer->device->ws, cs, 2);
520
521 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
522 radeon_emit(cs, AC_ENCODE_TRACE_POINT(cmd_buffer->state.trace_id));
523 }
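
/* Offsets used within the trace BO by this file: dword 0 is the primary
 * trace id, dword 1 the secondary trace id, offset 8 the last bound GFX
 * pipeline pointer, offset 16 the compute pipeline pointer and offset 24
 * the descriptor set pointers (see radv_save_pipeline() and
 * radv_save_descriptors() below).  The trailing NOP embeds the trace id in
 * the IB itself so it can be matched up when the command stream is dumped.
 */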
524
525 static void
526 radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer,
527 enum radv_cmd_flush_bits flags)
528 {
529 if (unlikely(cmd_buffer->device->thread_trace_bo)) {
530 radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
531 radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_THREAD_TRACE_MARKER) | EVENT_INDEX(0));
532 }
533
534 if (cmd_buffer->device->instance->debug_flags & RADV_DEBUG_SYNC_SHADERS) {
535 assert(flags & (RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
536 RADV_CMD_FLAG_CS_PARTIAL_FLUSH));
537
538 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 4);
539
540 /* Force wait for graphics or compute engines to be idle. */
541 si_cs_emit_cache_flush(cmd_buffer->cs,
542 cmd_buffer->device->physical_device->rad_info.chip_class,
543 &cmd_buffer->gfx9_fence_idx,
544 cmd_buffer->gfx9_fence_va,
545 radv_cmd_buffer_uses_mec(cmd_buffer),
546 flags, cmd_buffer->gfx9_eop_bug_va);
547 }
548
549 if (unlikely(cmd_buffer->device->trace_bo))
550 radv_cmd_buffer_trace_emit(cmd_buffer);
551 }
552
553 static void
554 radv_save_pipeline(struct radv_cmd_buffer *cmd_buffer,
555 struct radv_pipeline *pipeline, enum ring_type ring)
556 {
557 struct radv_device *device = cmd_buffer->device;
558 uint32_t data[2];
559 uint64_t va;
560
561 va = radv_buffer_get_va(device->trace_bo);
562
563 switch (ring) {
564 case RING_GFX:
565 va += 8;
566 break;
567 case RING_COMPUTE:
568 va += 16;
569 break;
570 default:
571 assert(!"invalid ring type");
572 }
573
574 uint64_t pipeline_address = (uintptr_t)pipeline;
575 data[0] = pipeline_address;
576 data[1] = pipeline_address >> 32;
577
578 radv_emit_write_data_packet(cmd_buffer, va, 2, data);
579 }
580
581 void radv_set_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
582 VkPipelineBindPoint bind_point,
583 struct radv_descriptor_set *set,
584 unsigned idx)
585 {
586 struct radv_descriptor_state *descriptors_state =
587 radv_get_descriptors_state(cmd_buffer, bind_point);
588
589 descriptors_state->sets[idx] = set;
590
591 descriptors_state->valid |= (1u << idx); /* active descriptors */
592 descriptors_state->dirty |= (1u << idx);
593 }
594
595 static void
596 radv_save_descriptors(struct radv_cmd_buffer *cmd_buffer,
597 VkPipelineBindPoint bind_point)
598 {
599 struct radv_descriptor_state *descriptors_state =
600 radv_get_descriptors_state(cmd_buffer, bind_point);
601 struct radv_device *device = cmd_buffer->device;
602 uint32_t data[MAX_SETS * 2] = {};
603 uint64_t va;
604 unsigned i;
605 va = radv_buffer_get_va(device->trace_bo) + 24;
606
607 for_each_bit(i, descriptors_state->valid) {
608 struct radv_descriptor_set *set = descriptors_state->sets[i];
609 data[i * 2] = (uint64_t)(uintptr_t)set;
610 data[i * 2 + 1] = (uint64_t)(uintptr_t)set >> 32;
611 }
612
613 radv_emit_write_data_packet(cmd_buffer, va, MAX_SETS * 2, data);
614 }
615
616 struct radv_userdata_info *
617 radv_lookup_user_sgpr(struct radv_pipeline *pipeline,
618 gl_shader_stage stage,
619 int idx)
620 {
621 struct radv_shader_variant *shader = radv_get_shader(pipeline, stage);
622 return &shader->info.user_sgprs_locs.shader_data[idx];
623 }
624
625 static void
626 radv_emit_userdata_address(struct radv_cmd_buffer *cmd_buffer,
627 struct radv_pipeline *pipeline,
628 gl_shader_stage stage,
629 int idx, uint64_t va)
630 {
631 struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
632 uint32_t base_reg = pipeline->user_data_0[stage];
633 if (loc->sgpr_idx == -1)
634 return;
635
636 assert(loc->num_sgprs == 1);
637
638 radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
639 base_reg + loc->sgpr_idx * 4, va, false);
640 }
641
642 static void
643 radv_emit_descriptor_pointers(struct radv_cmd_buffer *cmd_buffer,
644 struct radv_pipeline *pipeline,
645 struct radv_descriptor_state *descriptors_state,
646 gl_shader_stage stage)
647 {
648 struct radv_device *device = cmd_buffer->device;
649 struct radeon_cmdbuf *cs = cmd_buffer->cs;
650 uint32_t sh_base = pipeline->user_data_0[stage];
651 struct radv_userdata_locations *locs =
652 &pipeline->shaders[stage]->info.user_sgprs_locs;
653 unsigned mask = locs->descriptor_sets_enabled;
654
655 mask &= descriptors_state->dirty & descriptors_state->valid;
656
657 while (mask) {
658 int start, count;
659
660 u_bit_scan_consecutive_range(&mask, &start, &count);
661
662 struct radv_userdata_info *loc = &locs->descriptor_sets[start];
663 unsigned sh_offset = sh_base + loc->sgpr_idx * 4;
664
665 radv_emit_shader_pointer_head(cs, sh_offset, count, true);
666 for (int i = 0; i < count; i++) {
667 struct radv_descriptor_set *set =
668 descriptors_state->sets[start + i];
669
670 radv_emit_shader_pointer_body(device, cs, set->va, true);
671 }
672 }
673 }
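
/* Note that consecutive dirty descriptor sets are written with a single
 * packet: u_bit_scan_consecutive_range() finds a run of set bits,
 * radv_emit_shader_pointer_head() emits one SET_SH_REG header covering the
 * whole run, and then one set address is emitted per entry in the run.
 */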
674
675 /**
676 * Convert the user sample locations to hardware sample locations (the values
677 * that will be emitted by PA_SC_AA_SAMPLE_LOCS_PIXEL_*).
678 */
679 static void
680 radv_convert_user_sample_locs(struct radv_sample_locations_state *state,
681 uint32_t x, uint32_t y, VkOffset2D *sample_locs)
682 {
683 uint32_t x_offset = x % state->grid_size.width;
684 uint32_t y_offset = y % state->grid_size.height;
685 uint32_t num_samples = (uint32_t)state->per_pixel;
686 VkSampleLocationEXT *user_locs;
687 uint32_t pixel_offset;
688
689 pixel_offset = (x_offset + y_offset * state->grid_size.width) * num_samples;
690
691 assert(pixel_offset <= MAX_SAMPLE_LOCATIONS);
692 user_locs = &state->locations[pixel_offset];
693
694 for (uint32_t i = 0; i < num_samples; i++) {
695 float shifted_pos_x = user_locs[i].x - 0.5;
696 float shifted_pos_y = user_locs[i].y - 0.5;
697
698 int32_t scaled_pos_x = floorf(shifted_pos_x * 16);
699 int32_t scaled_pos_y = floorf(shifted_pos_y * 16);
700
701 sample_locs[i].x = CLAMP(scaled_pos_x, -8, 7);
702 sample_locs[i].y = CLAMP(scaled_pos_y, -8, 7);
703 }
704 }
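
/* Worked example of the conversion above: a user location of (0.5, 0.5)
 * (the pixel center) maps to (0, 0), (0.0, 0.0) maps to (-8, -8) and
 * (0.9375, 0.9375) maps to (7, 7); i.e. locations are expressed on a
 * signed 1/16th-pixel grid and clamped to the 4-bit range [-8, 7] that the
 * PA_SC_AA_SAMPLE_LOCS_PIXEL_* fields can hold.
 */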
705
706 /**
707 * Compute the PA_SC_AA_SAMPLE_LOCS_PIXEL_* mask based on hardware sample
708 * locations.
709 */
710 static void
711 radv_compute_sample_locs_pixel(uint32_t num_samples, VkOffset2D *sample_locs,
712 uint32_t *sample_locs_pixel)
713 {
714 for (uint32_t i = 0; i < num_samples; i++) {
715 uint32_t sample_reg_idx = i / 4;
716 uint32_t sample_loc_idx = i % 4;
717 int32_t pos_x = sample_locs[i].x;
718 int32_t pos_y = sample_locs[i].y;
719
720 uint32_t shift_x = 8 * sample_loc_idx;
721 uint32_t shift_y = shift_x + 4;
722
723 sample_locs_pixel[sample_reg_idx] |= (pos_x & 0xf) << shift_x;
724 sample_locs_pixel[sample_reg_idx] |= (pos_y & 0xf) << shift_y;
725 }
726 }
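
/* Each sample thus occupies one byte of a PA_SC_AA_SAMPLE_LOCS_PIXEL_*
 * register: the low nibble is X and the high nibble is Y, with four samples
 * per 32-bit register, so 8xMSAA also needs the second (_1) register.
 */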
727
728 /**
729 * Compute the PA_SC_CENTROID_PRIORITY_* mask based on the top left hardware
730 * sample locations.
731 */
732 static uint64_t
733 radv_compute_centroid_priority(struct radv_cmd_buffer *cmd_buffer,
734 VkOffset2D *sample_locs,
735 uint32_t num_samples)
736 {
737 uint32_t centroid_priorities[num_samples];
738 uint32_t sample_mask = num_samples - 1;
739 uint32_t distances[num_samples];
740 uint64_t centroid_priority = 0;
741
742 /* Compute the distances from center for each sample. */
743 for (int i = 0; i < num_samples; i++) {
744 distances[i] = (sample_locs[i].x * sample_locs[i].x) +
745 (sample_locs[i].y * sample_locs[i].y);
746 }
747
748 /* Compute the centroid priorities by looking at the distances array. */
749 for (int i = 0; i < num_samples; i++) {
750 uint32_t min_idx = 0;
751
752 for (int j = 1; j < num_samples; j++) {
753 if (distances[j] < distances[min_idx])
754 min_idx = j;
755 }
756
757 centroid_priorities[i] = min_idx;
758 distances[min_idx] = 0xffffffff;
759 }
760
761 /* Compute the final centroid priority. */
762 for (int i = 0; i < 8; i++) {
763 centroid_priority |=
764 centroid_priorities[i & sample_mask] << (i * 4);
765 }
766
767 return centroid_priority << 32 | centroid_priority;
768 }
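
/* In other words, centroid_priorities[] ends up listing the sample indices
 * from closest to farthest from the pixel center, which is the order
 * PA_SC_CENTROID_PRIORITY_* expects; with fewer than 8 samples the order is
 * simply repeated (i & sample_mask) to fill all nibble slots, and the low
 * 32 bits are mirrored into the high 32 bits for the second register.
 */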
769
770 /**
771 * Emit the sample locations that are specified with VK_EXT_sample_locations.
772 */
773 static void
774 radv_emit_sample_locations(struct radv_cmd_buffer *cmd_buffer)
775 {
776 struct radv_sample_locations_state *sample_location =
777 &cmd_buffer->state.dynamic.sample_location;
778 uint32_t num_samples = (uint32_t)sample_location->per_pixel;
779 struct radeon_cmdbuf *cs = cmd_buffer->cs;
780 uint32_t sample_locs_pixel[4][2] = {};
781 VkOffset2D sample_locs[4][8]; /* 8 is the max. sample count supported */
782 uint32_t max_sample_dist = 0;
783 uint64_t centroid_priority;
784
785 if (!cmd_buffer->state.dynamic.sample_location.count)
786 return;
787
788 /* Convert the user sample locations to hardware sample locations. */
789 radv_convert_user_sample_locs(sample_location, 0, 0, sample_locs[0]);
790 radv_convert_user_sample_locs(sample_location, 1, 0, sample_locs[1]);
791 radv_convert_user_sample_locs(sample_location, 0, 1, sample_locs[2]);
792 radv_convert_user_sample_locs(sample_location, 1, 1, sample_locs[3]);
793
794 /* Compute the PA_SC_AA_SAMPLE_LOCS_PIXEL_* mask. */
795 for (uint32_t i = 0; i < 4; i++) {
796 radv_compute_sample_locs_pixel(num_samples, sample_locs[i],
797 sample_locs_pixel[i]);
798 }
799
800 /* Compute the PA_SC_CENTROID_PRIORITY_* mask. */
801 centroid_priority =
802 radv_compute_centroid_priority(cmd_buffer, sample_locs[0],
803 num_samples);
804
805 /* Compute the maximum sample distance from the specified locations. */
806 for (unsigned i = 0; i < 4; ++i) {
807 for (uint32_t j = 0; j < num_samples; j++) {
808 VkOffset2D offset = sample_locs[i][j];
809 max_sample_dist = MAX2(max_sample_dist,
810 MAX2(abs(offset.x), abs(offset.y)));
811 }
812 }
813
814 /* Emit the specified user sample locations. */
815 switch (num_samples) {
816 case 2:
817 case 4:
818 radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_pixel[0][0]);
819 radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_pixel[1][0]);
820 radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_pixel[2][0]);
821 radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_pixel[3][0]);
822 break;
823 case 8:
824 radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_pixel[0][0]);
825 radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_pixel[1][0]);
826 radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_pixel[2][0]);
827 radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_pixel[3][0]);
828 radeon_set_context_reg(cs, R_028BFC_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1, sample_locs_pixel[0][1]);
829 radeon_set_context_reg(cs, R_028C0C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1, sample_locs_pixel[1][1]);
830 radeon_set_context_reg(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1, sample_locs_pixel[2][1]);
831 radeon_set_context_reg(cs, R_028C2C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1, sample_locs_pixel[3][1]);
832 break;
833 default:
834 unreachable("invalid number of samples");
835 }
836
837 /* Emit the maximum sample distance and the centroid priority. */
838 radeon_set_context_reg_rmw(cs, R_028BE0_PA_SC_AA_CONFIG,
839 S_028BE0_MAX_SAMPLE_DIST(max_sample_dist),
840 ~C_028BE0_MAX_SAMPLE_DIST);
841
842 radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
843 radeon_emit(cs, centroid_priority);
844 radeon_emit(cs, centroid_priority >> 32);
845
846 /* GFX9: Flush DFSM when the AA mode changes. */
847 if (cmd_buffer->device->dfsm_allowed) {
848 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
849 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
850 }
851
852 cmd_buffer->state.context_roll_without_scissor_emitted = true;
853 }
854
855 static void
856 radv_emit_inline_push_consts(struct radv_cmd_buffer *cmd_buffer,
857 struct radv_pipeline *pipeline,
858 gl_shader_stage stage,
859 int idx, int count, uint32_t *values)
860 {
861 struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
862 uint32_t base_reg = pipeline->user_data_0[stage];
863 if (loc->sgpr_idx == -1)
864 return;
865
866 assert(loc->num_sgprs == count);
867
868 radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, count);
869 radeon_emit_array(cmd_buffer->cs, values, count);
870 }
871
872 static void
873 radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer,
874 struct radv_pipeline *pipeline)
875 {
876 int num_samples = pipeline->graphics.ms.num_samples;
877 struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;
878
879 if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.needs_sample_positions)
880 cmd_buffer->sample_positions_needed = true;
881
882 if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples)
883 return;
884
885 radv_emit_default_sample_locations(cmd_buffer->cs, num_samples);
886
887 cmd_buffer->state.context_roll_without_scissor_emitted = true;
888 }
889
890 static void
891 radv_update_binning_state(struct radv_cmd_buffer *cmd_buffer,
892 struct radv_pipeline *pipeline)
893 {
894 const struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;
895
896
897 if (pipeline->device->physical_device->rad_info.chip_class < GFX9)
898 return;
899
900 if (old_pipeline &&
901 old_pipeline->graphics.binning.pa_sc_binner_cntl_0 == pipeline->graphics.binning.pa_sc_binner_cntl_0 &&
902 old_pipeline->graphics.binning.db_dfsm_control == pipeline->graphics.binning.db_dfsm_control)
903 return;
904
905 bool binning_flush = false;
906 if (cmd_buffer->device->physical_device->rad_info.family == CHIP_VEGA12 ||
907 cmd_buffer->device->physical_device->rad_info.family == CHIP_VEGA20 ||
908 cmd_buffer->device->physical_device->rad_info.family == CHIP_RAVEN2 ||
909 cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
910 binning_flush = !old_pipeline ||
911 G_028C44_BINNING_MODE(old_pipeline->graphics.binning.pa_sc_binner_cntl_0) !=
912 G_028C44_BINNING_MODE(pipeline->graphics.binning.pa_sc_binner_cntl_0);
913 }
914
915 radeon_set_context_reg(cmd_buffer->cs, R_028C44_PA_SC_BINNER_CNTL_0,
916 pipeline->graphics.binning.pa_sc_binner_cntl_0 |
917 S_028C44_FLUSH_ON_BINNING_TRANSITION(!!binning_flush));
918
919 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
920 radeon_set_context_reg(cmd_buffer->cs, R_028038_DB_DFSM_CONTROL,
921 pipeline->graphics.binning.db_dfsm_control);
922 } else {
923 radeon_set_context_reg(cmd_buffer->cs, R_028060_DB_DFSM_CONTROL,
924 pipeline->graphics.binning.db_dfsm_control);
925 }
926
927 cmd_buffer->state.context_roll_without_scissor_emitted = true;
928 }
929
930
931 static void
932 radv_emit_shader_prefetch(struct radv_cmd_buffer *cmd_buffer,
933 struct radv_shader_variant *shader)
934 {
935 uint64_t va;
936
937 if (!shader)
938 return;
939
940 va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
941
942 si_cp_dma_prefetch(cmd_buffer, va, shader->code_size);
943 }
944
945 static void
946 radv_emit_prefetch_L2(struct radv_cmd_buffer *cmd_buffer,
947 struct radv_pipeline *pipeline,
948 bool vertex_stage_only)
949 {
950 struct radv_cmd_state *state = &cmd_buffer->state;
951 uint32_t mask = state->prefetch_L2_mask;
952
953 if (vertex_stage_only) {
954 /* Fast prefetch path for starting draws as soon as possible.
955 */
956 mask = state->prefetch_L2_mask & (RADV_PREFETCH_VS |
957 RADV_PREFETCH_VBO_DESCRIPTORS);
958 }
959
960 if (mask & RADV_PREFETCH_VS)
961 radv_emit_shader_prefetch(cmd_buffer,
962 pipeline->shaders[MESA_SHADER_VERTEX]);
963
964 if (mask & RADV_PREFETCH_VBO_DESCRIPTORS)
965 si_cp_dma_prefetch(cmd_buffer, state->vb_va, state->vb_size);
966
967 if (mask & RADV_PREFETCH_TCS)
968 radv_emit_shader_prefetch(cmd_buffer,
969 pipeline->shaders[MESA_SHADER_TESS_CTRL]);
970
971 if (mask & RADV_PREFETCH_TES)
972 radv_emit_shader_prefetch(cmd_buffer,
973 pipeline->shaders[MESA_SHADER_TESS_EVAL]);
974
975 if (mask & RADV_PREFETCH_GS) {
976 radv_emit_shader_prefetch(cmd_buffer,
977 pipeline->shaders[MESA_SHADER_GEOMETRY]);
978 if (radv_pipeline_has_gs_copy_shader(pipeline))
979 radv_emit_shader_prefetch(cmd_buffer, pipeline->gs_copy_shader);
980 }
981
982 if (mask & RADV_PREFETCH_PS)
983 radv_emit_shader_prefetch(cmd_buffer,
984 pipeline->shaders[MESA_SHADER_FRAGMENT]);
985
986 state->prefetch_L2_mask &= ~mask;
987 }
988
989 static void
990 radv_emit_rbplus_state(struct radv_cmd_buffer *cmd_buffer)
991 {
992 if (!cmd_buffer->device->physical_device->rad_info.rbplus_allowed)
993 return;
994
995 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
996 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
997
998 unsigned sx_ps_downconvert = 0;
999 unsigned sx_blend_opt_epsilon = 0;
1000 unsigned sx_blend_opt_control = 0;
1001
1002 if (!cmd_buffer->state.attachments || !subpass)
1003 return;
1004
1005 for (unsigned i = 0; i < subpass->color_count; ++i) {
1006 if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
1007 /* We don't set the DISABLE bits, because the HW can't have holes,
1008 * so the SPI color format is set to 32-bit 1-component. */
1009 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_R << (i * 4);
1010 continue;
1011 }
1012
1013 int idx = subpass->color_attachments[i].attachment;
1014 struct radv_color_buffer_info *cb = &cmd_buffer->state.attachments[idx].cb;
1015
1016 unsigned format = G_028C70_FORMAT(cb->cb_color_info);
1017 unsigned swap = G_028C70_COMP_SWAP(cb->cb_color_info);
1018 uint32_t spi_format = (pipeline->graphics.col_format >> (i * 4)) & 0xf;
1019 uint32_t colormask = (pipeline->graphics.cb_target_mask >> (i * 4)) & 0xf;
1020
1021 bool has_alpha, has_rgb;
1022
1023 /* Set if RGB and A are present. */
1024 has_alpha = !G_028C74_FORCE_DST_ALPHA_1(cb->cb_color_attrib);
1025
1026 if (format == V_028C70_COLOR_8 ||
1027 format == V_028C70_COLOR_16 ||
1028 format == V_028C70_COLOR_32)
1029 has_rgb = !has_alpha;
1030 else
1031 has_rgb = true;
1032
1033 /* Check the colormask and export format. */
1034 if (!(colormask & 0x7))
1035 has_rgb = false;
1036 if (!(colormask & 0x8))
1037 has_alpha = false;
1038
1039 if (spi_format == V_028714_SPI_SHADER_ZERO) {
1040 has_rgb = false;
1041 has_alpha = false;
1042 }
1043
1044 /* Disable value checking for disabled channels. */
1045 if (!has_rgb)
1046 sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
1047 if (!has_alpha)
1048 sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
1049
1050 /* Enable down-conversion for 32bpp and smaller formats. */
1051 switch (format) {
1052 case V_028C70_COLOR_8:
1053 case V_028C70_COLOR_8_8:
1054 case V_028C70_COLOR_8_8_8_8:
1055 /* For 1 and 2-channel formats, use the superset thereof. */
1056 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR ||
1057 spi_format == V_028714_SPI_SHADER_UINT16_ABGR ||
1058 spi_format == V_028714_SPI_SHADER_SINT16_ABGR) {
1059 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_8_8_8_8 << (i * 4);
1060 sx_blend_opt_epsilon |= V_028758_8BIT_FORMAT << (i * 4);
1061 }
1062 break;
1063
1064 case V_028C70_COLOR_5_6_5:
1065 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
1066 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_5_6_5 << (i * 4);
1067 sx_blend_opt_epsilon |= V_028758_6BIT_FORMAT << (i * 4);
1068 }
1069 break;
1070
1071 case V_028C70_COLOR_1_5_5_5:
1072 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
1073 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_1_5_5_5 << (i * 4);
1074 sx_blend_opt_epsilon |= V_028758_5BIT_FORMAT << (i * 4);
1075 }
1076 break;
1077
1078 case V_028C70_COLOR_4_4_4_4:
1079 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
1080 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_4_4_4_4 << (i * 4);
1081 sx_blend_opt_epsilon |= V_028758_4BIT_FORMAT << (i * 4);
1082 }
1083 break;
1084
1085 case V_028C70_COLOR_32:
1086 if (swap == V_028C70_SWAP_STD &&
1087 spi_format == V_028714_SPI_SHADER_32_R)
1088 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_R << (i * 4);
1089 else if (swap == V_028C70_SWAP_ALT_REV &&
1090 spi_format == V_028714_SPI_SHADER_32_AR)
1091 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_A << (i * 4);
1092 break;
1093
1094 case V_028C70_COLOR_16:
1095 case V_028C70_COLOR_16_16:
1096 /* For 1-channel formats, use the superset thereof. */
1097 if (spi_format == V_028714_SPI_SHADER_UNORM16_ABGR ||
1098 spi_format == V_028714_SPI_SHADER_SNORM16_ABGR ||
1099 spi_format == V_028714_SPI_SHADER_UINT16_ABGR ||
1100 spi_format == V_028714_SPI_SHADER_SINT16_ABGR) {
1101 if (swap == V_028C70_SWAP_STD ||
1102 swap == V_028C70_SWAP_STD_REV)
1103 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_16_16_GR << (i * 4);
1104 else
1105 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_16_16_AR << (i * 4);
1106 }
1107 break;
1108
1109 case V_028C70_COLOR_10_11_11:
1110 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
1111 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_10_11_11 << (i * 4);
1112 sx_blend_opt_epsilon |= V_028758_11BIT_FORMAT << (i * 4);
1113 }
1114 break;
1115
1116 case V_028C70_COLOR_2_10_10_10:
1117 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
1118 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_2_10_10_10 << (i * 4);
1119 sx_blend_opt_epsilon |= V_028758_10BIT_FORMAT << (i * 4);
1120 }
1121 break;
1122 }
1123 }
1124
1125 /* Do not set the DISABLE bits for the unused attachments, as that
1126 * breaks dual source blending in SkQP and does not seem to improve
1127 * performance. */
1128
1129 if (sx_ps_downconvert == cmd_buffer->state.last_sx_ps_downconvert &&
1130 sx_blend_opt_epsilon == cmd_buffer->state.last_sx_blend_opt_epsilon &&
1131 sx_blend_opt_control == cmd_buffer->state.last_sx_blend_opt_control)
1132 return;
1133
1134 radeon_set_context_reg_seq(cmd_buffer->cs, R_028754_SX_PS_DOWNCONVERT, 3);
1135 radeon_emit(cmd_buffer->cs, sx_ps_downconvert);
1136 radeon_emit(cmd_buffer->cs, sx_blend_opt_epsilon);
1137 radeon_emit(cmd_buffer->cs, sx_blend_opt_control);
1138
1139 cmd_buffer->state.context_roll_without_scissor_emitted = true;
1140
1141 cmd_buffer->state.last_sx_ps_downconvert = sx_ps_downconvert;
1142 cmd_buffer->state.last_sx_blend_opt_epsilon = sx_blend_opt_epsilon;
1143 cmd_buffer->state.last_sx_blend_opt_control = sx_blend_opt_control;
1144 }
1145
1146 static void
1147 radv_emit_batch_break_on_new_ps(struct radv_cmd_buffer *cmd_buffer)
1148 {
1149 if (!cmd_buffer->device->pbb_allowed)
1150 return;
1151
1152 struct radv_binning_settings settings =
1153 radv_get_binning_settings(cmd_buffer->device->physical_device);
1154 bool break_for_new_ps =
1155 (!cmd_buffer->state.emitted_pipeline ||
1156 cmd_buffer->state.emitted_pipeline->shaders[MESA_SHADER_FRAGMENT] !=
1157 cmd_buffer->state.pipeline->shaders[MESA_SHADER_FRAGMENT]) &&
1158 (settings.context_states_per_bin > 1 ||
1159 settings.persistent_states_per_bin > 1);
1160 bool break_for_new_cb_target_mask =
1161 (!cmd_buffer->state.emitted_pipeline ||
1162 cmd_buffer->state.emitted_pipeline->graphics.cb_target_mask !=
1163 cmd_buffer->state.pipeline->graphics.cb_target_mask) &&
1164 settings.context_states_per_bin > 1;
1165
1166 if (!break_for_new_ps && !break_for_new_cb_target_mask)
1167 return;
1168
1169 radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1170 radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
1171 }
1172
1173 static void
1174 radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer)
1175 {
1176 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
1177
1178 if (!pipeline || cmd_buffer->state.emitted_pipeline == pipeline)
1179 return;
1180
1181 radv_update_multisample_state(cmd_buffer, pipeline);
1182 radv_update_binning_state(cmd_buffer, pipeline);
1183
1184 cmd_buffer->scratch_size_per_wave_needed = MAX2(cmd_buffer->scratch_size_per_wave_needed,
1185 pipeline->scratch_bytes_per_wave);
1186 cmd_buffer->scratch_waves_wanted = MAX2(cmd_buffer->scratch_waves_wanted,
1187 pipeline->max_waves);
1188
1189 if (!cmd_buffer->state.emitted_pipeline ||
1190 cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband !=
1191 pipeline->graphics.can_use_guardband)
1192 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
1193
1194 radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);
1195
1196 if (!cmd_buffer->state.emitted_pipeline ||
1197 cmd_buffer->state.emitted_pipeline->ctx_cs.cdw != pipeline->ctx_cs.cdw ||
1198 cmd_buffer->state.emitted_pipeline->ctx_cs_hash != pipeline->ctx_cs_hash ||
1199 memcmp(cmd_buffer->state.emitted_pipeline->ctx_cs.buf,
1200 pipeline->ctx_cs.buf, pipeline->ctx_cs.cdw * 4)) {
1201 radeon_emit_array(cmd_buffer->cs, pipeline->ctx_cs.buf, pipeline->ctx_cs.cdw);
1202 cmd_buffer->state.context_roll_without_scissor_emitted = true;
1203 }
1204
1205 radv_emit_batch_break_on_new_ps(cmd_buffer);
1206
1207 for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) {
1208 if (!pipeline->shaders[i])
1209 continue;
1210
1211 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
1212 pipeline->shaders[i]->bo);
1213 }
1214
1215 if (radv_pipeline_has_gs_copy_shader(pipeline))
1216 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
1217 pipeline->gs_copy_shader->bo);
1218
1219 if (unlikely(cmd_buffer->device->trace_bo))
1220 radv_save_pipeline(cmd_buffer, pipeline, RING_GFX);
1221
1222 cmd_buffer->state.emitted_pipeline = pipeline;
1223
1224 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_PIPELINE;
1225 }
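
/* The pipeline keeps two command streams: pipeline->cs is replayed on
 * every pipeline change above, while pipeline->ctx_cs (context registers)
 * is only replayed when its size, hash or contents differ from the
 * previously emitted pipeline; re-emitting it sets
 * context_roll_without_scissor_emitted because writing context registers
 * rolls the hardware context.
 */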
1226
1227 static void
1228 radv_emit_viewport(struct radv_cmd_buffer *cmd_buffer)
1229 {
1230 si_write_viewport(cmd_buffer->cs, 0, cmd_buffer->state.dynamic.viewport.count,
1231 cmd_buffer->state.dynamic.viewport.viewports);
1232 }
1233
1234 static void
1235 radv_emit_scissor(struct radv_cmd_buffer *cmd_buffer)
1236 {
1237 uint32_t count = cmd_buffer->state.dynamic.scissor.count;
1238
1239 si_write_scissors(cmd_buffer->cs, 0, count,
1240 cmd_buffer->state.dynamic.scissor.scissors,
1241 cmd_buffer->state.dynamic.viewport.viewports,
1242 cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband);
1243
1244 cmd_buffer->state.context_roll_without_scissor_emitted = false;
1245 }
1246
1247 static void
1248 radv_emit_discard_rectangle(struct radv_cmd_buffer *cmd_buffer)
1249 {
1250 if (!cmd_buffer->state.dynamic.discard_rectangle.count)
1251 return;
1252
1253 radeon_set_context_reg_seq(cmd_buffer->cs, R_028210_PA_SC_CLIPRECT_0_TL,
1254 cmd_buffer->state.dynamic.discard_rectangle.count * 2);
1255 for (unsigned i = 0; i < cmd_buffer->state.dynamic.discard_rectangle.count; ++i) {
1256 VkRect2D rect = cmd_buffer->state.dynamic.discard_rectangle.rectangles[i];
1257 radeon_emit(cmd_buffer->cs, S_028210_TL_X(rect.offset.x) | S_028210_TL_Y(rect.offset.y));
1258 radeon_emit(cmd_buffer->cs, S_028214_BR_X(rect.offset.x + rect.extent.width) |
1259 S_028214_BR_Y(rect.offset.y + rect.extent.height));
1260 }
1261 }
1262
1263 static void
1264 radv_emit_line_width(struct radv_cmd_buffer *cmd_buffer)
1265 {
1266 unsigned width = cmd_buffer->state.dynamic.line_width * 8;
1267
1268 radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL,
1269 S_028A08_WIDTH(CLAMP(width, 0, 0xFFFF)));
1270 }
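
/* PA_SU_LINE_CNTL.WIDTH takes the line half-width in 12.4 fixed point
 * (1/16th-pixel units), hence the factor of 8 applied to the full width
 * above.
 */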
1271
1272 static void
1273 radv_emit_blend_constants(struct radv_cmd_buffer *cmd_buffer)
1274 {
1275 struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
1276
1277 radeon_set_context_reg_seq(cmd_buffer->cs, R_028414_CB_BLEND_RED, 4);
1278 radeon_emit_array(cmd_buffer->cs, (uint32_t *)d->blend_constants, 4);
1279 }
1280
1281 static void
1282 radv_emit_stencil(struct radv_cmd_buffer *cmd_buffer)
1283 {
1284 struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
1285
1286 radeon_set_context_reg_seq(cmd_buffer->cs,
1287 R_028430_DB_STENCILREFMASK, 2);
1288 radeon_emit(cmd_buffer->cs,
1289 S_028430_STENCILTESTVAL(d->stencil_reference.front) |
1290 S_028430_STENCILMASK(d->stencil_compare_mask.front) |
1291 S_028430_STENCILWRITEMASK(d->stencil_write_mask.front) |
1292 S_028430_STENCILOPVAL(1));
1293 radeon_emit(cmd_buffer->cs,
1294 S_028434_STENCILTESTVAL_BF(d->stencil_reference.back) |
1295 S_028434_STENCILMASK_BF(d->stencil_compare_mask.back) |
1296 S_028434_STENCILWRITEMASK_BF(d->stencil_write_mask.back) |
1297 S_028434_STENCILOPVAL_BF(1));
1298 }
1299
1300 static void
1301 radv_emit_depth_bounds(struct radv_cmd_buffer *cmd_buffer)
1302 {
1303 struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
1304
1305 radeon_set_context_reg(cmd_buffer->cs, R_028020_DB_DEPTH_BOUNDS_MIN,
1306 fui(d->depth_bounds.min));
1307 radeon_set_context_reg(cmd_buffer->cs, R_028024_DB_DEPTH_BOUNDS_MAX,
1308 fui(d->depth_bounds.max));
1309 }
1310
1311 static void
1312 radv_emit_depth_bias(struct radv_cmd_buffer *cmd_buffer)
1313 {
1314 struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
1315 unsigned slope = fui(d->depth_bias.slope * 16.0f);
1316 unsigned bias = fui(d->depth_bias.bias * cmd_buffer->state.offset_scale);
1317
1318
1319 radeon_set_context_reg_seq(cmd_buffer->cs,
1320 R_028B7C_PA_SU_POLY_OFFSET_CLAMP, 5);
1321 radeon_emit(cmd_buffer->cs, fui(d->depth_bias.clamp)); /* CLAMP */
1322 radeon_emit(cmd_buffer->cs, slope); /* FRONT SCALE */
1323 radeon_emit(cmd_buffer->cs, bias); /* FRONT OFFSET */
1324 radeon_emit(cmd_buffer->cs, slope); /* BACK SCALE */
1325 radeon_emit(cmd_buffer->cs, bias); /* BACK OFFSET */
1326 }
1327
1328 static void
1329 radv_emit_line_stipple(struct radv_cmd_buffer *cmd_buffer)
1330 {
1331 struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
1332 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
1333 uint32_t auto_reset_cntl = 1;
1334
1335 if (pipeline->graphics.topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP)
1336 auto_reset_cntl = 2;
1337
1338 radeon_set_context_reg(cmd_buffer->cs, R_028A0C_PA_SC_LINE_STIPPLE,
1339 S_028A0C_LINE_PATTERN(d->line_stipple.pattern) |
1340 S_028A0C_REPEAT_COUNT(d->line_stipple.factor - 1) |
1341 S_028A0C_AUTO_RESET_CNTL(auto_reset_cntl));
1342 }
1343
1344 static void
1345 radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
1346 int index,
1347 struct radv_color_buffer_info *cb,
1348 struct radv_image_view *iview,
1349 VkImageLayout layout,
1350 bool in_render_loop)
1351 {
1352 bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8;
1353 uint32_t cb_color_info = cb->cb_color_info;
1354 struct radv_image *image = iview->image;
1355
1356 if (!radv_layout_dcc_compressed(cmd_buffer->device, image, layout, in_render_loop,
1357 radv_image_queue_family_mask(image,
1358 cmd_buffer->queue_family_index,
1359 cmd_buffer->queue_family_index))) {
1360 cb_color_info &= C_028C70_DCC_ENABLE;
1361 }
1362
1363 if (!radv_layout_can_fast_clear(image, layout, in_render_loop,
1364 radv_image_queue_family_mask(image,
1365 cmd_buffer->queue_family_index,
1366 cmd_buffer->queue_family_index))) {
1367 cb_color_info &= C_028C70_COMPRESSION;
1368 }
1369
1370 if (radv_image_is_tc_compat_cmask(image) &&
1371 (radv_is_fmask_decompress_pipeline(cmd_buffer) ||
1372 radv_is_dcc_decompress_pipeline(cmd_buffer))) {
1373 /* If this bit is set, the FMASK decompression operation
1374 * doesn't occur (DCC_COMPRESS also implies FMASK_DECOMPRESS).
1375 */
1376 cb_color_info &= C_028C70_FMASK_COMPRESS_1FRAG_ONLY;
1377 }
1378
1379 if (radv_image_has_fmask(image) &&
1380 (radv_is_fmask_decompress_pipeline(cmd_buffer) ||
1381 radv_is_hw_resolve_pipeline(cmd_buffer))) {
1382 /* Make sure FMASK is enabled if it has been cleared because:
1383 *
1384 * 1) it's required for FMASK_DECOMPRESS operations to avoid
1385 * GPU hangs
1386 * 2) it's necessary for CB_RESOLVE which can read compressed
1387 * FMASK data anyways.
1388 */
1389 cb_color_info |= S_028C70_COMPRESSION(1);
1390 }
1391
1392 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
1393 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
1394 radeon_emit(cmd_buffer->cs, cb->cb_color_base);
1395 radeon_emit(cmd_buffer->cs, 0);
1396 radeon_emit(cmd_buffer->cs, 0);
1397 radeon_emit(cmd_buffer->cs, cb->cb_color_view);
1398 radeon_emit(cmd_buffer->cs, cb_color_info);
1399 radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
1400 radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
1401 radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
1402 radeon_emit(cmd_buffer->cs, 0);
1403 radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
1404 radeon_emit(cmd_buffer->cs, 0);
1405
1406 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, 1);
1407 radeon_emit(cmd_buffer->cs, cb->cb_dcc_base);
1408
1409 radeon_set_context_reg(cmd_buffer->cs, R_028E40_CB_COLOR0_BASE_EXT + index * 4,
1410 cb->cb_color_base >> 32);
1411 radeon_set_context_reg(cmd_buffer->cs, R_028E60_CB_COLOR0_CMASK_BASE_EXT + index * 4,
1412 cb->cb_color_cmask >> 32);
1413 radeon_set_context_reg(cmd_buffer->cs, R_028E80_CB_COLOR0_FMASK_BASE_EXT + index * 4,
1414 cb->cb_color_fmask >> 32);
1415 radeon_set_context_reg(cmd_buffer->cs, R_028EA0_CB_COLOR0_DCC_BASE_EXT + index * 4,
1416 cb->cb_dcc_base >> 32);
1417 radeon_set_context_reg(cmd_buffer->cs, R_028EC0_CB_COLOR0_ATTRIB2 + index * 4,
1418 cb->cb_color_attrib2);
1419 radeon_set_context_reg(cmd_buffer->cs, R_028EE0_CB_COLOR0_ATTRIB3 + index * 4,
1420 cb->cb_color_attrib3);
1421 } else if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
1422 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
1423 radeon_emit(cmd_buffer->cs, cb->cb_color_base);
1424 radeon_emit(cmd_buffer->cs, S_028C64_BASE_256B(cb->cb_color_base >> 32));
1425 radeon_emit(cmd_buffer->cs, cb->cb_color_attrib2);
1426 radeon_emit(cmd_buffer->cs, cb->cb_color_view);
1427 radeon_emit(cmd_buffer->cs, cb_color_info);
1428 radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
1429 radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
1430 radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
1431 radeon_emit(cmd_buffer->cs, S_028C80_BASE_256B(cb->cb_color_cmask >> 32));
1432 radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
1433 radeon_emit(cmd_buffer->cs, S_028C88_BASE_256B(cb->cb_color_fmask >> 32));
1434
1435 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, 2);
1436 radeon_emit(cmd_buffer->cs, cb->cb_dcc_base);
1437 radeon_emit(cmd_buffer->cs, S_028C98_BASE_256B(cb->cb_dcc_base >> 32));
1438
1439 radeon_set_context_reg(cmd_buffer->cs, R_0287A0_CB_MRT0_EPITCH + index * 4,
1440 cb->cb_mrt_epitch);
1441 } else {
1442 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
1443 radeon_emit(cmd_buffer->cs, cb->cb_color_base);
1444 radeon_emit(cmd_buffer->cs, cb->cb_color_pitch);
1445 radeon_emit(cmd_buffer->cs, cb->cb_color_slice);
1446 radeon_emit(cmd_buffer->cs, cb->cb_color_view);
1447 radeon_emit(cmd_buffer->cs, cb_color_info);
1448 radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
1449 radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
1450 radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
1451 radeon_emit(cmd_buffer->cs, cb->cb_color_cmask_slice);
1452 radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
1453 radeon_emit(cmd_buffer->cs, cb->cb_color_fmask_slice);
1454
1455 if (is_vi) { /* DCC BASE */
1456 radeon_set_context_reg(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, cb->cb_dcc_base);
1457 }
1458 }
1459
1460 if (radv_dcc_enabled(image, iview->base_mip)) {
1461 /* Drawing with DCC enabled also compresses colorbuffers. */
1462 VkImageSubresourceRange range = {
1463 .aspectMask = iview->aspect_mask,
1464 .baseMipLevel = iview->base_mip,
1465 .levelCount = iview->level_count,
1466 .baseArrayLayer = iview->base_layer,
1467 .layerCount = iview->layer_count,
1468 };
1469
1470 radv_update_dcc_metadata(cmd_buffer, image, &range, true);
1471 }
1472 }
1473
1474 static void
1475 radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer,
1476 struct radv_ds_buffer_info *ds,
1477 const struct radv_image_view *iview,
1478 VkImageLayout layout,
1479 bool in_render_loop, bool requires_cond_exec)
1480 {
1481 const struct radv_image *image = iview->image;
1482 uint32_t db_z_info = ds->db_z_info;
1483 uint32_t db_z_info_reg;
1484
1485 if (!cmd_buffer->device->physical_device->rad_info.has_tc_compat_zrange_bug ||
1486 !radv_image_is_tc_compat_htile(image))
1487 return;
1488
1489 if (!radv_layout_is_htile_compressed(image, layout, in_render_loop,
1490 radv_image_queue_family_mask(image,
1491 cmd_buffer->queue_family_index,
1492 cmd_buffer->queue_family_index))) {
1493 db_z_info &= C_028040_TILE_SURFACE_ENABLE;
1494 }
1495
1496 db_z_info &= C_028040_ZRANGE_PRECISION;
1497
1498 if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
1499 db_z_info_reg = R_028038_DB_Z_INFO;
1500 } else {
1501 db_z_info_reg = R_028040_DB_Z_INFO;
1502 }
1503
1504 /* When we don't know the last fast clear value we need to emit a
1505 * conditional packet that will eventually skip the following
1506 * SET_CONTEXT_REG packet.
1507 */
1508 if (requires_cond_exec) {
1509 uint64_t va = radv_get_tc_compat_zrange_va(image, iview->base_mip);
1510
1511 radeon_emit(cmd_buffer->cs, PKT3(PKT3_COND_EXEC, 3, 0));
1512 radeon_emit(cmd_buffer->cs, va);
1513 radeon_emit(cmd_buffer->cs, va >> 32);
1514 radeon_emit(cmd_buffer->cs, 0);
1515 radeon_emit(cmd_buffer->cs, 3); /* SET_CONTEXT_REG size */
1516 }
1517
1518 radeon_set_context_reg(cmd_buffer->cs, db_z_info_reg, db_z_info);
1519 }
1520
1521 static void
1522 radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer,
1523 struct radv_ds_buffer_info *ds,
1524 struct radv_image_view *iview,
1525 VkImageLayout layout,
1526 bool in_render_loop)
1527 {
1528 const struct radv_image *image = iview->image;
1529 uint32_t db_z_info = ds->db_z_info;
1530 uint32_t db_stencil_info = ds->db_stencil_info;
1531
1532 if (!radv_layout_is_htile_compressed(image, layout, in_render_loop,
1533 radv_image_queue_family_mask(image,
1534 cmd_buffer->queue_family_index,
1535 cmd_buffer->queue_family_index))) {
1536 db_z_info &= C_028040_TILE_SURFACE_ENABLE;
1537 db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
1538 }
1539
1540 radeon_set_context_reg(cmd_buffer->cs, R_028008_DB_DEPTH_VIEW, ds->db_depth_view);
1541 radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface);
1542
1543 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
1544 radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base);
1545 radeon_set_context_reg(cmd_buffer->cs, R_02801C_DB_DEPTH_SIZE_XY, ds->db_depth_size);
1546
1547 radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 7);
1548 radeon_emit(cmd_buffer->cs, S_02803C_RESOURCE_LEVEL(1));
1549 radeon_emit(cmd_buffer->cs, db_z_info);
1550 radeon_emit(cmd_buffer->cs, db_stencil_info);
1551 radeon_emit(cmd_buffer->cs, ds->db_z_read_base);
1552 radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base);
1553 radeon_emit(cmd_buffer->cs, ds->db_z_read_base);
1554 radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base);
1555
1556 radeon_set_context_reg_seq(cmd_buffer->cs, R_028068_DB_Z_READ_BASE_HI, 5);
1557 radeon_emit(cmd_buffer->cs, ds->db_z_read_base >> 32);
1558 radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base >> 32);
1559 radeon_emit(cmd_buffer->cs, ds->db_z_read_base >> 32);
1560 radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base >> 32);
1561 radeon_emit(cmd_buffer->cs, ds->db_htile_data_base >> 32);
1562 } else if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
1563 radeon_set_context_reg_seq(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, 3);
1564 radeon_emit(cmd_buffer->cs, ds->db_htile_data_base);
1565 radeon_emit(cmd_buffer->cs, S_028018_BASE_HI(ds->db_htile_data_base >> 32));
1566 radeon_emit(cmd_buffer->cs, ds->db_depth_size);
1567
1568 radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 10);
1569 radeon_emit(cmd_buffer->cs, db_z_info); /* DB_Z_INFO */
1570 radeon_emit(cmd_buffer->cs, db_stencil_info); /* DB_STENCIL_INFO */
1571 radeon_emit(cmd_buffer->cs, ds->db_z_read_base); /* DB_Z_READ_BASE */
1572 radeon_emit(cmd_buffer->cs, S_028044_BASE_HI(ds->db_z_read_base >> 32)); /* DB_Z_READ_BASE_HI */
1573 radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base); /* DB_STENCIL_READ_BASE */
1574 radeon_emit(cmd_buffer->cs, S_02804C_BASE_HI(ds->db_stencil_read_base >> 32)); /* DB_STENCIL_READ_BASE_HI */
1575 radeon_emit(cmd_buffer->cs, ds->db_z_write_base); /* DB_Z_WRITE_BASE */
1576 radeon_emit(cmd_buffer->cs, S_028054_BASE_HI(ds->db_z_write_base >> 32)); /* DB_Z_WRITE_BASE_HI */
1577 radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base); /* DB_STENCIL_WRITE_BASE */
1578 radeon_emit(cmd_buffer->cs, S_02805C_BASE_HI(ds->db_stencil_write_base >> 32)); /* DB_STENCIL_WRITE_BASE_HI */
1579
1580 radeon_set_context_reg_seq(cmd_buffer->cs, R_028068_DB_Z_INFO2, 2);
1581 radeon_emit(cmd_buffer->cs, ds->db_z_info2);
1582 radeon_emit(cmd_buffer->cs, ds->db_stencil_info2);
1583 } else {
1584 radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base);
1585
1586 radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 9);
1587 radeon_emit(cmd_buffer->cs, ds->db_depth_info); /* R_02803C_DB_DEPTH_INFO */
1588 radeon_emit(cmd_buffer->cs, db_z_info); /* R_028040_DB_Z_INFO */
1589 radeon_emit(cmd_buffer->cs, db_stencil_info); /* R_028044_DB_STENCIL_INFO */
1590 radeon_emit(cmd_buffer->cs, ds->db_z_read_base); /* R_028048_DB_Z_READ_BASE */
1591 radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base); /* R_02804C_DB_STENCIL_READ_BASE */
1592 radeon_emit(cmd_buffer->cs, ds->db_z_write_base); /* R_028050_DB_Z_WRITE_BASE */
1593 radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base); /* R_028054_DB_STENCIL_WRITE_BASE */
1594 radeon_emit(cmd_buffer->cs, ds->db_depth_size); /* R_028058_DB_DEPTH_SIZE */
1595 radeon_emit(cmd_buffer->cs, ds->db_depth_slice); /* R_02805C_DB_DEPTH_SLICE */
1596
1597 }
1598
1599 /* Update the ZRANGE_PRECISION value for the TC-compat bug. */
1600 radv_update_zrange_precision(cmd_buffer, ds, iview, layout,
1601 in_render_loop, true);
1602
1603 radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
1604 ds->pa_su_poly_offset_db_fmt_cntl);
1605 }
1606
1607 /**
1608 * Update the fast clear depth/stencil values if the image is bound as a
1609 * depth/stencil buffer.
1610 */
1611 static void
1612 radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer,
1613 const struct radv_image_view *iview,
1614 VkClearDepthStencilValue ds_clear_value,
1615 VkImageAspectFlags aspects)
1616 {
1617 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
1618 const struct radv_image *image = iview->image;
1619 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1620 uint32_t att_idx;
1621
1622 if (!cmd_buffer->state.attachments || !subpass)
1623 return;
1624
1625 if (!subpass->depth_stencil_attachment)
1626 return;
1627
1628 att_idx = subpass->depth_stencil_attachment->attachment;
1629 if (cmd_buffer->state.attachments[att_idx].iview->image != image)
1630 return;
1631
1632 if (aspects == (VK_IMAGE_ASPECT_DEPTH_BIT |
1633 VK_IMAGE_ASPECT_STENCIL_BIT)) {
1634 radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 2);
1635 radeon_emit(cs, ds_clear_value.stencil);
1636 radeon_emit(cs, fui(ds_clear_value.depth));
1637 } else if (aspects == VK_IMAGE_ASPECT_DEPTH_BIT) {
1638 radeon_set_context_reg_seq(cs, R_02802C_DB_DEPTH_CLEAR, 1);
1639 radeon_emit(cs, fui(ds_clear_value.depth));
1640 } else {
1641 assert(aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
1642 radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 1);
1643 radeon_emit(cs, ds_clear_value.stencil);
1644 }
1645
1646 /* Update the ZRANGE_PRECISION value for the TC-compat bug. This is
1647 * only needed when clearing Z to 0.0.
1648 */
1649 if ((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
1650 ds_clear_value.depth == 0.0) {
1651 VkImageLayout layout = subpass->depth_stencil_attachment->layout;
1652 bool in_render_loop = subpass->depth_stencil_attachment->in_render_loop;
1653
1654 radv_update_zrange_precision(cmd_buffer, &cmd_buffer->state.attachments[att_idx].ds,
1655 iview, layout, in_render_loop, false);
1656 }
1657
1658 cmd_buffer->state.context_roll_without_scissor_emitted = true;
1659 }
1660
1661 /**
1662 * Set the clear depth/stencil values to the image's metadata.
1663 */
1664 static void
1665 radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1666 struct radv_image *image,
1667 const VkImageSubresourceRange *range,
1668 VkClearDepthStencilValue ds_clear_value,
1669 VkImageAspectFlags aspects)
1670 {
1671 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1672 uint64_t va = radv_get_ds_clear_value_va(image, range->baseMipLevel);
1673 uint32_t level_count = radv_get_levelCount(image, range);
1674
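/* The clear value metadata holds one 8-byte slot per mip level: the
 * stencil clear value in the first dword and the depth clear value
 * (as float bits) in the second, which is why depth-only writes below
 * start at va + 4.
 */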
1675 if (aspects == (VK_IMAGE_ASPECT_DEPTH_BIT |
1676 VK_IMAGE_ASPECT_STENCIL_BIT)) {
1677 /* Use the fastest way when both aspects are used. */
1678 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + 2 * level_count, cmd_buffer->state.predicating));
1679 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
1680 S_370_WR_CONFIRM(1) |
1681 S_370_ENGINE_SEL(V_370_PFP));
1682 radeon_emit(cs, va);
1683 radeon_emit(cs, va >> 32);
1684
1685 for (uint32_t l = 0; l < level_count; l++) {
1686 radeon_emit(cs, ds_clear_value.stencil);
1687 radeon_emit(cs, fui(ds_clear_value.depth));
1688 }
1689 } else {
1690 /* Otherwise we need one WRITE_DATA packet per level. */
1691 for (uint32_t l = 0; l < level_count; l++) {
1692 uint64_t va = radv_get_ds_clear_value_va(image, range->baseMipLevel + l);
1693 unsigned value;
1694
1695 if (aspects == VK_IMAGE_ASPECT_DEPTH_BIT) {
1696 value = fui(ds_clear_value.depth);
1697 va += 4;
1698 } else {
1699 assert(aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
1700 value = ds_clear_value.stencil;
1701 }
1702
1703 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, cmd_buffer->state.predicating));
1704 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
1705 S_370_WR_CONFIRM(1) |
1706 S_370_ENGINE_SEL(V_370_PFP));
1707 radeon_emit(cs, va);
1708 radeon_emit(cs, va >> 32);
1709 radeon_emit(cs, value);
1710 }
1711 }
1712 }
1713
1714 /**
1715 * Update the TC-compat metadata value for this image.
1716 */
1717 static void
1718 radv_set_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer,
1719 struct radv_image *image,
1720 const VkImageSubresourceRange *range,
1721 uint32_t value)
1722 {
1723 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1724
1725 if (!cmd_buffer->device->physical_device->rad_info.has_tc_compat_zrange_bug)
1726 return;
1727
1728 uint64_t va = radv_get_tc_compat_zrange_va(image, range->baseMipLevel);
1729 uint32_t level_count = radv_get_levelCount(image, range);
1730
1731 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + level_count, cmd_buffer->state.predicating));
1732 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
1733 S_370_WR_CONFIRM(1) |
1734 S_370_ENGINE_SEL(V_370_PFP));
1735 radeon_emit(cs, va);
1736 radeon_emit(cs, va >> 32);
1737
1738 for (uint32_t l = 0; l < level_count; l++)
1739 radeon_emit(cs, value);
1740 }
1741
1742 static void
1743 radv_update_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer,
1744 const struct radv_image_view *iview,
1745 VkClearDepthStencilValue ds_clear_value)
1746 {
1747 VkImageSubresourceRange range = {
1748 .aspectMask = iview->aspect_mask,
1749 .baseMipLevel = iview->base_mip,
1750 .levelCount = iview->level_count,
1751 .baseArrayLayer = iview->base_layer,
1752 .layerCount = iview->layer_count,
1753 };
1754 uint32_t cond_val;
1755
1756 /* Conditionally set DB_Z_INFO.ZRANGE_PRECISION to 0 when the last
1757 * depth clear value is 0.0f.
1758 */
1759 cond_val = ds_clear_value.depth == 0.0f ? UINT_MAX : 0;
1760
1761 radv_set_tc_compat_zrange_metadata(cmd_buffer, iview->image, &range,
1762 cond_val);
1763 }
1764
1765 /**
1766 * Update the clear depth/stencil values for this image.
1767 */
1768 void
1769 radv_update_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1770 const struct radv_image_view *iview,
1771 VkClearDepthStencilValue ds_clear_value,
1772 VkImageAspectFlags aspects)
1773 {
1774 VkImageSubresourceRange range = {
1775 .aspectMask = iview->aspect_mask,
1776 .baseMipLevel = iview->base_mip,
1777 .levelCount = iview->level_count,
1778 .baseArrayLayer = iview->base_layer,
1779 .layerCount = iview->layer_count,
1780 };
1781 struct radv_image *image = iview->image;
1782
1783 assert(radv_image_has_htile(image));
1784
1785 radv_set_ds_clear_metadata(cmd_buffer, iview->image, &range,
1786 ds_clear_value, aspects);
1787
1788 if (radv_image_is_tc_compat_htile(image) &&
1789 (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
1790 radv_update_tc_compat_zrange_metadata(cmd_buffer, iview,
1791 ds_clear_value);
1792 }
1793
1794 radv_update_bound_fast_clear_ds(cmd_buffer, iview, ds_clear_value,
1795 aspects);
1796 }
1797
1798 /**
1799 * Load the clear depth/stencil values from the image's metadata.
1800 */
1801 static void
1802 radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1803 const struct radv_image_view *iview)
1804 {
1805 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1806 const struct radv_image *image = iview->image;
1807 VkImageAspectFlags aspects = vk_format_aspects(image->vk_format);
1808 uint64_t va = radv_get_ds_clear_value_va(image, iview->base_mip);
1809 unsigned reg_offset = 0, reg_count = 0;
1810
1811 if (!radv_image_has_htile(image))
1812 return;
1813
1814 if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
1815 ++reg_count;
1816 } else {
1817 ++reg_offset;
1818 va += 4;
1819 }
1820 if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
1821 ++reg_count;
1822
1823 uint32_t reg = R_028028_DB_STENCIL_CLEAR + 4 * reg_offset;
1824
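/* Prefer the LOAD_CONTEXT_REG_INDEX packet when the CP supports it;
 * otherwise fall back to COPY_DATA into the registers followed by
 * PFP_SYNC_ME so the prefetch parser doesn't run ahead of the ME's
 * register write.
 */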
1825 if (cmd_buffer->device->physical_device->rad_info.has_load_ctx_reg_pkt) {
1826 radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG_INDEX, 3, 0));
1827 radeon_emit(cs, va);
1828 radeon_emit(cs, va >> 32);
1829 radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
1830 radeon_emit(cs, reg_count);
1831 } else {
1832 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1833 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
1834 COPY_DATA_DST_SEL(COPY_DATA_REG) |
1835 (reg_count == 2 ? COPY_DATA_COUNT_SEL : 0));
1836 radeon_emit(cs, va);
1837 radeon_emit(cs, va >> 32);
1838 radeon_emit(cs, reg >> 2);
1839 radeon_emit(cs, 0);
1840
1841 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
1842 radeon_emit(cs, 0);
1843 }
1844 }
1845
1846 /*
1847 * With DCC, some color surfaces don't require CMASK elimination before
1848 * being used as a texture. This sets a predicate value that determines
1849 * whether the CMASK eliminate is required.
1850 */
1851 void
1852 radv_update_fce_metadata(struct radv_cmd_buffer *cmd_buffer,
1853 struct radv_image *image,
1854 const VkImageSubresourceRange *range, bool value)
1855 {
1856 uint64_t pred_val = value;
1857 uint64_t va = radv_image_get_fce_pred_va(image, range->baseMipLevel);
1858 uint32_t level_count = radv_get_levelCount(image, range);
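/* The predicate is a 64-bit value per mip level, so two dwords are
 * written for each level.
 */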
1859 uint32_t count = 2 * level_count;
1860
1861 assert(radv_dcc_enabled(image, range->baseMipLevel));
1862
1863 radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
1864 radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM) |
1865 S_370_WR_CONFIRM(1) |
1866 S_370_ENGINE_SEL(V_370_PFP));
1867 radeon_emit(cmd_buffer->cs, va);
1868 radeon_emit(cmd_buffer->cs, va >> 32);
1869
1870 for (uint32_t l = 0; l < level_count; l++) {
1871 radeon_emit(cmd_buffer->cs, pred_val);
1872 radeon_emit(cmd_buffer->cs, pred_val >> 32);
1873 }
1874 }
1875
1876 /**
1877 * Update the DCC predicate to reflect the compression state.
1878 */
1879 void
1880 radv_update_dcc_metadata(struct radv_cmd_buffer *cmd_buffer,
1881 struct radv_image *image,
1882 const VkImageSubresourceRange *range, bool value)
1883 {
1884 uint64_t pred_val = value;
1885 uint64_t va = radv_image_get_dcc_pred_va(image, range->baseMipLevel);
1886 uint32_t level_count = radv_get_levelCount(image, range);
1887 uint32_t count = 2 * level_count;
1888
1889 assert(radv_dcc_enabled(image, range->baseMipLevel));
1890
1891 radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
1892 radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM) |
1893 S_370_WR_CONFIRM(1) |
1894 S_370_ENGINE_SEL(V_370_PFP));
1895 radeon_emit(cmd_buffer->cs, va);
1896 radeon_emit(cmd_buffer->cs, va >> 32);
1897
1898 for (uint32_t l = 0; l < level_count; l++) {
1899 radeon_emit(cmd_buffer->cs, pred_val);
1900 radeon_emit(cmd_buffer->cs, pred_val >> 32);
1901 }
1902 }
1903
1904 /**
1905 * Update the fast clear color values if the image is bound as a color buffer.
1906 */
1907 static void
1908 radv_update_bound_fast_clear_color(struct radv_cmd_buffer *cmd_buffer,
1909 struct radv_image *image,
1910 int cb_idx,
1911 uint32_t color_values[2])
1912 {
1913 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
1914 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1915 uint32_t att_idx;
1916
1917 if (!cmd_buffer->state.attachments || !subpass)
1918 return;
1919
1920 att_idx = subpass->color_attachments[cb_idx].attachment;
1921 if (att_idx == VK_ATTACHMENT_UNUSED)
1922 return;
1923
1924 if (cmd_buffer->state.attachments[att_idx].iview->image != image)
1925 return;
1926
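/* The CB_COLORn register blocks are 0x3c bytes apart, so this addresses
 * the two clear-word registers of color buffer cb_idx.
 */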
1927 radeon_set_context_reg_seq(cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c, 2);
1928 radeon_emit(cs, color_values[0]);
1929 radeon_emit(cs, color_values[1]);
1930
1931 cmd_buffer->state.context_roll_without_scissor_emitted = true;
1932 }
1933
1934 /**
1935 * Set the clear color values to the image's metadata.
1936 */
1937 static void
1938 radv_set_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1939 struct radv_image *image,
1940 const VkImageSubresourceRange *range,
1941 uint32_t color_values[2])
1942 {
1943 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1944 uint64_t va = radv_image_get_fast_clear_va(image, range->baseMipLevel);
1945 uint32_t level_count = radv_get_levelCount(image, range);
1946 uint32_t count = 2 * level_count;
1947
1948 assert(radv_image_has_cmask(image) ||
1949 radv_dcc_enabled(image, range->baseMipLevel));
1950
1951 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, cmd_buffer->state.predicating));
1952 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
1953 S_370_WR_CONFIRM(1) |
1954 S_370_ENGINE_SEL(V_370_PFP));
1955 radeon_emit(cs, va);
1956 radeon_emit(cs, va >> 32);
1957
1958 for (uint32_t l = 0; l < level_count; l++) {
1959 radeon_emit(cs, color_values[0]);
1960 radeon_emit(cs, color_values[1]);
1961 }
1962 }
1963
1964 /**
1965 * Update the clear color values for this image.
1966 */
1967 void
1968 radv_update_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1969 const struct radv_image_view *iview,
1970 int cb_idx,
1971 uint32_t color_values[2])
1972 {
1973 struct radv_image *image = iview->image;
1974 VkImageSubresourceRange range = {
1975 .aspectMask = iview->aspect_mask,
1976 .baseMipLevel = iview->base_mip,
1977 .levelCount = iview->level_count,
1978 .baseArrayLayer = iview->base_layer,
1979 .layerCount = iview->layer_count,
1980 };
1981
1982 assert(radv_image_has_cmask(image) ||
1983 radv_dcc_enabled(image, iview->base_mip));
1984
1985 radv_set_color_clear_metadata(cmd_buffer, image, &range, color_values);
1986
1987 radv_update_bound_fast_clear_color(cmd_buffer, image, cb_idx,
1988 color_values);
1989 }
1990
1991 /**
1992 * Load the clear color values from the image's metadata.
1993 */
1994 static void
1995 radv_load_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1996 struct radv_image_view *iview,
1997 int cb_idx)
1998 {
1999 struct radeon_cmdbuf *cs = cmd_buffer->cs;
2000 struct radv_image *image = iview->image;
2001 uint64_t va = radv_image_get_fast_clear_va(image, iview->base_mip);
2002
2003 if (!radv_image_has_cmask(image) &&
2004 !radv_dcc_enabled(image, iview->base_mip))
2005 return;
2006
2007 uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c;
2008
2009 if (cmd_buffer->device->physical_device->rad_info.has_load_ctx_reg_pkt) {
2010 radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG_INDEX, 3, cmd_buffer->state.predicating));
2011 radeon_emit(cs, va);
2012 radeon_emit(cs, va >> 32);
2013 radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
2014 radeon_emit(cs, 2);
2015 } else {
2016 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
2017 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
2018 COPY_DATA_DST_SEL(COPY_DATA_REG) |
2019 COPY_DATA_COUNT_SEL);
2020 radeon_emit(cs, va);
2021 radeon_emit(cs, va >> 32);
2022 radeon_emit(cs, reg >> 2);
2023 radeon_emit(cs, 0);
2024
2025 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
2026 radeon_emit(cs, 0);
2027 }
2028 }
2029
2030 static void
2031 radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
2032 {
2033 int i;
2034 struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
2035 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
2036
2037 /* This may happen when recording an inherited secondary command buffer. */
2038 if (!framebuffer)
2039 return;
2040
2041 for (i = 0; i < 8; ++i) {
2042 if (i >= subpass->color_count || subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
2043 radeon_set_context_reg(cmd_buffer->cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
2044 S_028C70_FORMAT(V_028C70_COLOR_INVALID));
2045 continue;
2046 }
2047
2048 int idx = subpass->color_attachments[i].attachment;
2049 struct radv_image_view *iview = cmd_buffer->state.attachments[idx].iview;
2050 VkImageLayout layout = subpass->color_attachments[i].layout;
2051 bool in_render_loop = subpass->color_attachments[i].in_render_loop;
2052
2053 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, iview->bo);
2054
2055 assert(iview->aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT |
2056 VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT));
2057 radv_emit_fb_color_state(cmd_buffer, i, &cmd_buffer->state.attachments[idx].cb, iview, layout, in_render_loop);
2058
2059 radv_load_color_clear_metadata(cmd_buffer, iview, i);
2060 }
2061
2062 if (subpass->depth_stencil_attachment) {
2063 int idx = subpass->depth_stencil_attachment->attachment;
2064 VkImageLayout layout = subpass->depth_stencil_attachment->layout;
2065 bool in_render_loop = subpass->depth_stencil_attachment->in_render_loop;
2066 struct radv_image_view *iview = cmd_buffer->state.attachments[idx].iview;
2067 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, cmd_buffer->state.attachments[idx].iview->bo);
2068
2069 radv_emit_fb_ds_state(cmd_buffer, &cmd_buffer->state.attachments[idx].ds, iview, layout, in_render_loop);
2070
2071 if (cmd_buffer->state.attachments[idx].ds.offset_scale != cmd_buffer->state.offset_scale) {
2072 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
2073 cmd_buffer->state.offset_scale = cmd_buffer->state.attachments[idx].ds.offset_scale;
2074 }
2075 radv_load_ds_clear_metadata(cmd_buffer, iview);
2076 } else {
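/* No depth/stencil attachment: mark DB_Z_INFO and DB_STENCIL_INFO as
 * invalid so the DB is effectively disabled.
 */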
2077 if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9)
2078 radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 2);
2079 else
2080 radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2);
2081
2082 radeon_emit(cmd_buffer->cs, S_028040_FORMAT(V_028040_Z_INVALID)); /* DB_Z_INFO */
2083 radeon_emit(cmd_buffer->cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* DB_STENCIL_INFO */
2084 }
2085 radeon_set_context_reg(cmd_buffer->cs, R_028208_PA_SC_WINDOW_SCISSOR_BR,
2086 S_028208_BR_X(framebuffer->width) |
2087 S_028208_BR_Y(framebuffer->height));
2088
2089 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8) {
2090 bool disable_constant_encode =
2091 cmd_buffer->device->physical_device->rad_info.has_dcc_constant_encode;
2092 enum chip_class chip_class =
2093 cmd_buffer->device->physical_device->rad_info.chip_class;
2094 uint8_t watermark = chip_class >= GFX10 ? 6 : 4;
2095
2096 radeon_set_context_reg(cmd_buffer->cs, R_028424_CB_DCC_CONTROL,
2097 S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(chip_class <= GFX9) |
2098 S_028424_OVERWRITE_COMBINER_WATERMARK(watermark) |
2099 S_028424_DISABLE_CONSTANT_ENCODE_REG(disable_constant_encode));
2100 }
2101
2102 if (cmd_buffer->device->dfsm_allowed) {
2103 radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
2104 radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
2105 }
2106
2107 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_FRAMEBUFFER;
2108 }
2109
2110 static void
2111 radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer, bool indirect)
2112 {
2113 struct radeon_cmdbuf *cs = cmd_buffer->cs;
2114 struct radv_cmd_state *state = &cmd_buffer->state;
2115
2116 if (state->index_type != state->last_index_type) {
2117 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
2118 radeon_set_uconfig_reg_idx(cmd_buffer->device->physical_device,
2119 cs, R_03090C_VGT_INDEX_TYPE,
2120 2, state->index_type);
2121 } else {
2122 radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
2123 radeon_emit(cs, state->index_type);
2124 }
2125
2126 state->last_index_type = state->index_type;
2127 }
2128
2129 /* For direct indexed draws we use DRAW_INDEX_2, which already includes
2130 * the index_va and max_index_count. */
2131 if (!indirect)
2132 return;
2133
2134 radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
2135 radeon_emit(cs, state->index_va);
2136 radeon_emit(cs, state->index_va >> 32);
2137
2138 radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
2139 radeon_emit(cs, state->max_index_count);
2140
2141 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_INDEX_BUFFER;
2142 }
2143
2144 void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
2145 {
2146 bool has_perfect_queries = cmd_buffer->state.perfect_occlusion_queries_enabled;
2147 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
2148 uint32_t pa_sc_mode_cntl_1 =
2149 pipeline ? pipeline->graphics.ms.pa_sc_mode_cntl_1 : 0;
2150 uint32_t db_count_control;
2151
2152 if(!cmd_buffer->state.active_occlusion_queries) {
2153 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
2154 if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
2155 pipeline->graphics.disable_out_of_order_rast_for_occlusion &&
2156 has_perfect_queries) {
2157 /* Re-enable out-of-order rasterization if the
2158 * bound pipeline supports it and if it has
2159 * been disabled before starting any perfect
2160 * occlusion queries.
2161 */
2162 radeon_set_context_reg(cmd_buffer->cs,
2163 R_028A4C_PA_SC_MODE_CNTL_1,
2164 pa_sc_mode_cntl_1);
2165 }
2166 }
2167 db_count_control = S_028004_ZPASS_INCREMENT_DISABLE(1);
2168 } else {
2169 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
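/* DB_COUNT_CONTROL.SAMPLE_RATE expects the log2 of the sample count. */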
2170 uint32_t sample_rate = subpass ? util_logbase2(subpass->max_sample_count) : 0;
2171 bool gfx10_perfect = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10 && has_perfect_queries;
2172
2173 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
2174 /* Always enable PERFECT_ZPASS_COUNTS due to issues with partially
2175 * covered tiles, discards, and early depth testing. For more details,
2176 * see https://gitlab.freedesktop.org/mesa/mesa/-/issues/3218 */
2177 db_count_control =
2178 S_028004_PERFECT_ZPASS_COUNTS(1) |
2179 S_028004_DISABLE_CONSERVATIVE_ZPASS_COUNTS(gfx10_perfect) |
2180 S_028004_SAMPLE_RATE(sample_rate) |
2181 S_028004_ZPASS_ENABLE(1) |
2182 S_028004_SLICE_EVEN_ENABLE(1) |
2183 S_028004_SLICE_ODD_ENABLE(1);
2184
2185 if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
2186 pipeline->graphics.disable_out_of_order_rast_for_occlusion &&
2187 has_perfect_queries) {
2188 /* If the bound pipeline has enabled
2189 * out-of-order rasterization, we should
2190 * disable it before starting any perfect
2191 * occlusion queries.
2192 */
2193 pa_sc_mode_cntl_1 &= C_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE;
2194
2195 radeon_set_context_reg(cmd_buffer->cs,
2196 R_028A4C_PA_SC_MODE_CNTL_1,
2197 pa_sc_mode_cntl_1);
2198 }
2199 } else {
2200 db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
2201 S_028004_SAMPLE_RATE(sample_rate);
2202 }
2203 }
2204
2205 radeon_set_context_reg(cmd_buffer->cs, R_028004_DB_COUNT_CONTROL, db_count_control);
2206
2207 cmd_buffer->state.context_roll_without_scissor_emitted = true;
2208 }
2209
2210 static void
2211 radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer)
2212 {
2213 uint32_t states = cmd_buffer->state.dirty & cmd_buffer->state.emitted_pipeline->graphics.needed_dynamic_state;
2214
2215 if (states & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
2216 radv_emit_viewport(cmd_buffer);
2217
2218 if (states & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR | RADV_CMD_DIRTY_DYNAMIC_VIEWPORT) &&
2219 !cmd_buffer->device->physical_device->rad_info.has_gfx9_scissor_bug)
2220 radv_emit_scissor(cmd_buffer);
2221
2222 if (states & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)
2223 radv_emit_line_width(cmd_buffer);
2224
2225 if (states & RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS)
2226 radv_emit_blend_constants(cmd_buffer);
2227
2228 if (states & (RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE |
2229 RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
2230 RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK))
2231 radv_emit_stencil(cmd_buffer);
2232
2233 if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS)
2234 radv_emit_depth_bounds(cmd_buffer);
2235
2236 if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)
2237 radv_emit_depth_bias(cmd_buffer);
2238
2239 if (states & RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE)
2240 radv_emit_discard_rectangle(cmd_buffer);
2241
2242 if (states & RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS)
2243 radv_emit_sample_locations(cmd_buffer);
2244
2245 if (states & RADV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE)
2246 radv_emit_line_stipple(cmd_buffer);
2247
2248 cmd_buffer->state.dirty &= ~states;
2249 }
2250
2251 static void
2252 radv_flush_push_descriptors(struct radv_cmd_buffer *cmd_buffer,
2253 VkPipelineBindPoint bind_point)
2254 {
2255 struct radv_descriptor_state *descriptors_state =
2256 radv_get_descriptors_state(cmd_buffer, bind_point);
2257 struct radv_descriptor_set *set = &descriptors_state->push_set.set;
2258 unsigned bo_offset;
2259
2260 if (!radv_cmd_buffer_upload_data(cmd_buffer, set->size, 32,
2261 set->mapped_ptr,
2262 &bo_offset))
2263 return;
2264
2265 set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2266 set->va += bo_offset;
2267 }
2268
2269 static void
2270 radv_flush_indirect_descriptor_sets(struct radv_cmd_buffer *cmd_buffer,
2271 VkPipelineBindPoint bind_point)
2272 {
2273 struct radv_descriptor_state *descriptors_state =
2274 radv_get_descriptors_state(cmd_buffer, bind_point);
2275 uint32_t size = MAX_SETS * 4;
2276 uint32_t offset;
2277 void *ptr;
2278
2279 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size,
2280 256, &offset, &ptr))
2281 return;
2282
2283 for (unsigned i = 0; i < MAX_SETS; i++) {
2284 uint32_t *uptr = ((uint32_t *)ptr) + i;
2285 uint64_t set_va = 0;
2286 struct radv_descriptor_set *set = descriptors_state->sets[i];
2287 if (descriptors_state->valid & (1u << i))
2288 set_va = set->va;
2289 uptr[0] = set_va & 0xffffffff;
2290 }
2291
2292 uint64_t va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2293 va += offset;
2294
2295 if (cmd_buffer->state.pipeline) {
2296 if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX])
2297 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
2298 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
2299
2300 if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_FRAGMENT])
2301 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_FRAGMENT,
2302 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
2303
2304 if (radv_pipeline_has_gs(cmd_buffer->state.pipeline))
2305 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
2306 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
2307
2308 if (radv_pipeline_has_tess(cmd_buffer->state.pipeline))
2309 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_TESS_CTRL,
2310 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
2311
2312 if (radv_pipeline_has_tess(cmd_buffer->state.pipeline))
2313 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_TESS_EVAL,
2314 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
2315 }
2316
2317 if (cmd_buffer->state.compute_pipeline)
2318 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.compute_pipeline, MESA_SHADER_COMPUTE,
2319 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
2320 }
2321
2322 static void
2323 radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer,
2324 VkShaderStageFlags stages)
2325 {
2326 VkPipelineBindPoint bind_point = stages & VK_SHADER_STAGE_COMPUTE_BIT ?
2327 VK_PIPELINE_BIND_POINT_COMPUTE :
2328 VK_PIPELINE_BIND_POINT_GRAPHICS;
2329 struct radv_descriptor_state *descriptors_state =
2330 radv_get_descriptors_state(cmd_buffer, bind_point);
2331 struct radv_cmd_state *state = &cmd_buffer->state;
2332 bool flush_indirect_descriptors;
2333
2334 if (!descriptors_state->dirty)
2335 return;
2336
2337 if (descriptors_state->push_dirty)
2338 radv_flush_push_descriptors(cmd_buffer, bind_point);
2339
2340 flush_indirect_descriptors =
2341 (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS &&
2342 state->pipeline && state->pipeline->need_indirect_descriptor_sets) ||
2343 (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE &&
2344 state->compute_pipeline && state->compute_pipeline->need_indirect_descriptor_sets);
2345
2346 if (flush_indirect_descriptors)
2347 radv_flush_indirect_descriptor_sets(cmd_buffer, bind_point);
2348
2349 ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
2350 cmd_buffer->cs,
2351 MAX_SETS * MESA_SHADER_STAGES * 4);
2352
2353 if (cmd_buffer->state.pipeline) {
2354 radv_foreach_stage(stage, stages) {
2355 if (!cmd_buffer->state.pipeline->shaders[stage])
2356 continue;
2357
2358 radv_emit_descriptor_pointers(cmd_buffer,
2359 cmd_buffer->state.pipeline,
2360 descriptors_state, stage);
2361 }
2362 }
2363
2364 if (cmd_buffer->state.compute_pipeline &&
2365 (stages & VK_SHADER_STAGE_COMPUTE_BIT)) {
2366 radv_emit_descriptor_pointers(cmd_buffer,
2367 cmd_buffer->state.compute_pipeline,
2368 descriptors_state,
2369 MESA_SHADER_COMPUTE);
2370 }
2371
2372 descriptors_state->dirty = 0;
2373 descriptors_state->push_dirty = false;
2374
2375 assert(cmd_buffer->cs->cdw <= cdw_max);
2376
2377 if (unlikely(cmd_buffer->device->trace_bo))
2378 radv_save_descriptors(cmd_buffer, bind_point);
2379 }
2380
2381 static void
2382 radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
2383 VkShaderStageFlags stages)
2384 {
2385 struct radv_pipeline *pipeline = stages & VK_SHADER_STAGE_COMPUTE_BIT
2386 ? cmd_buffer->state.compute_pipeline
2387 : cmd_buffer->state.pipeline;
2388 VkPipelineBindPoint bind_point = stages & VK_SHADER_STAGE_COMPUTE_BIT ?
2389 VK_PIPELINE_BIND_POINT_COMPUTE :
2390 VK_PIPELINE_BIND_POINT_GRAPHICS;
2391 struct radv_descriptor_state *descriptors_state =
2392 radv_get_descriptors_state(cmd_buffer, bind_point);
2393 struct radv_pipeline_layout *layout = pipeline->layout;
2394 struct radv_shader_variant *shader, *prev_shader;
2395 bool need_push_constants = false;
2396 unsigned offset;
2397 void *ptr;
2398 uint64_t va;
2399
2400 stages &= cmd_buffer->push_constant_stages;
2401 if (!stages ||
2402 (!layout->push_constant_size && !layout->dynamic_offset_count))
2403 return;
2404
2405 radv_foreach_stage(stage, stages) {
2406 shader = radv_get_shader(pipeline, stage);
2407 if (!shader)
2408 continue;
2409
2410 need_push_constants |= shader->info.loads_push_constants;
2411 need_push_constants |= shader->info.loads_dynamic_offsets;
2412
2413 uint8_t base = shader->info.base_inline_push_consts;
2414 uint8_t count = shader->info.num_inline_push_consts;
2415
2416 radv_emit_inline_push_consts(cmd_buffer, pipeline, stage,
2417 AC_UD_INLINE_PUSH_CONSTANTS,
2418 count,
2419 (uint32_t *)&cmd_buffer->push_constants[base * 4]);
2420 }
2421
2422 if (need_push_constants) {
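/* Upload the push constants followed by one 16-byte (four dword)
 * descriptor per dynamic buffer offset.
 */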
2423 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
2424 16 * layout->dynamic_offset_count,
2425 256, &offset, &ptr))
2426 return;
2427
2428 memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
2429 memcpy((char*)ptr + layout->push_constant_size,
2430 descriptors_state->dynamic_buffers,
2431 16 * layout->dynamic_offset_count);
2432
2433 va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2434 va += offset;
2435
2436 ASSERTED unsigned cdw_max =
2437 radeon_check_space(cmd_buffer->device->ws,
2438 cmd_buffer->cs, MESA_SHADER_STAGES * 4);
2439
2440 prev_shader = NULL;
2441 radv_foreach_stage(stage, stages) {
2442 shader = radv_get_shader(pipeline, stage);
2443
2444 /* Avoid redundantly emitting the address for merged stages. */
2445 if (shader && shader != prev_shader) {
2446 radv_emit_userdata_address(cmd_buffer, pipeline, stage,
2447 AC_UD_PUSH_CONSTANTS, va);
2448
2449 prev_shader = shader;
2450 }
2451 }
2452 assert(cmd_buffer->cs->cdw <= cdw_max);
2453 }
2454
2455 cmd_buffer->push_constant_stages &= ~stages;
2456 }
2457
2458 static void
2459 radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer,
2460 bool pipeline_is_dirty)
2461 {
2462 if ((pipeline_is_dirty ||
2463 (cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) &&
2464 cmd_buffer->state.pipeline->num_vertex_bindings &&
2465 radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.vs.has_vertex_buffers) {
2466 unsigned vb_offset;
2467 void *vb_ptr;
2468 uint32_t i = 0;
2469 uint32_t count = cmd_buffer->state.pipeline->num_vertex_bindings;
2470 uint64_t va;
2471
2472 /* Allocate some descriptor state for vertex buffers. */
2473 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, count * 16, 256,
2474 &vb_offset, &vb_ptr))
2475 return;
2476
2477 for (i = 0; i < count; i++) {
2478 uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
2479 uint32_t offset;
2480 struct radv_buffer *buffer = cmd_buffer->vertex_bindings[i].buffer;
2481 uint32_t stride = cmd_buffer->state.pipeline->binding_stride[i];
2482 unsigned num_records;
2483
2484 if (!buffer)
2485 continue;
2486
2487 va = radv_buffer_get_va(buffer->bo);
2488
2489 offset = cmd_buffer->vertex_bindings[i].offset;
2490 va += offset + buffer->offset;
2491
2492 num_records = buffer->size - offset;
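/* On most chips NUM_RECORDS is expressed in units of stride; GFX8
 * appears to expect the size in bytes here, hence the chip check below
 * (hardware detail inferred from this code, not the register spec).
 */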
2493 if (cmd_buffer->device->physical_device->rad_info.chip_class != GFX8 && stride)
2494 num_records /= stride;
2495
2496 desc[0] = va;
2497 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
2498 desc[2] = num_records;
2499 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
2500 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
2501 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
2502 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
2503
2504 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
2505 /* OOB_SELECT chooses the out-of-bounds check:
2506 * - 1: index >= NUM_RECORDS (Structured)
2507 * - 3: offset >= NUM_RECORDS (Raw)
2508 */
2509 int oob_select = stride ? V_008F0C_OOB_SELECT_STRUCTURED : V_008F0C_OOB_SELECT_RAW;
2510
2511 desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_UINT) |
2512 S_008F0C_OOB_SELECT(oob_select) |
2513 S_008F0C_RESOURCE_LEVEL(1);
2514 } else {
2515 desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) |
2516 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
2517 }
2518 }
2519
2520 va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2521 va += vb_offset;
2522
2523 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
2524 AC_UD_VS_VERTEX_BUFFERS, va);
2525
2526 cmd_buffer->state.vb_va = va;
2527 cmd_buffer->state.vb_size = count * 16;
2528 cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_VBO_DESCRIPTORS;
2529 }
2530 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_VERTEX_BUFFER;
2531 }
2532
2533 static void
2534 radv_emit_streamout_buffers(struct radv_cmd_buffer *cmd_buffer, uint64_t va)
2535 {
2536 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
2537 struct radv_userdata_info *loc;
2538 uint32_t base_reg;
2539
2540 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
2541 if (!radv_get_shader(pipeline, stage))
2542 continue;
2543
2544 loc = radv_lookup_user_sgpr(pipeline, stage,
2545 AC_UD_STREAMOUT_BUFFERS);
2546 if (loc->sgpr_idx == -1)
2547 continue;
2548
2549 base_reg = pipeline->user_data_0[stage];
2550
2551 radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
2552 base_reg + loc->sgpr_idx * 4, va, false);
2553 }
2554
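/* The GS copy shader runs on the hardware VS stage, so its streamout
 * buffers pointer lives in the VS user-data registers.
 */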
2555 if (radv_pipeline_has_gs_copy_shader(pipeline)) {
2556 loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_STREAMOUT_BUFFERS];
2557 if (loc->sgpr_idx != -1) {
2558 base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
2559
2560 radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
2561 base_reg + loc->sgpr_idx * 4, va, false);
2562 }
2563 }
2564 }
2565
2566 static void
2567 radv_flush_streamout_descriptors(struct radv_cmd_buffer *cmd_buffer)
2568 {
2569 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_STREAMOUT_BUFFER) {
2570 struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
2571 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
2572 unsigned so_offset;
2573 void *so_ptr;
2574 uint64_t va;
2575
2576 /* Allocate some descriptor state for streamout buffers. */
2577 if (!radv_cmd_buffer_upload_alloc(cmd_buffer,
2578 MAX_SO_BUFFERS * 16, 256,
2579 &so_offset, &so_ptr))
2580 return;
2581
2582 for (uint32_t i = 0; i < MAX_SO_BUFFERS; i++) {
2583 struct radv_buffer *buffer = sb[i].buffer;
2584 uint32_t *desc = &((uint32_t *)so_ptr)[i * 4];
2585
2586 if (!(so->enabled_mask & (1 << i)))
2587 continue;
2588
2589 va = radv_buffer_get_va(buffer->bo) + buffer->offset;
2590
2591 va += sb[i].offset;
2592
2593 /* Set the descriptor.
2594 *
2595 * On GFX8, the format must be non-INVALID, otherwise
2596 * the buffer will be considered not bound and store
2597 * instructions will be no-ops.
2598 */
2599 uint32_t size = 0xffffffff;
2600
2601 /* Compute the correct buffer size for NGG streamout
2602 * because it's used to determine the max emit per
2603 * buffer.
2604 */
2605 if (cmd_buffer->device->physical_device->use_ngg_streamout)
2606 size = buffer->size - sb[i].offset;
2607
2608 desc[0] = va;
2609 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
2610 desc[2] = size;
2611 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
2612 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
2613 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
2614 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
2615
2616 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
2617 desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
2618 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
2619 S_008F0C_RESOURCE_LEVEL(1);
2620 } else {
2621 desc[3] |= S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
2622 }
2623 }
2624
2625 va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2626 va += so_offset;
2627
2628 radv_emit_streamout_buffers(cmd_buffer, va);
2629 }
2630
2631 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_STREAMOUT_BUFFER;
2632 }
2633
2634 static void
2635 radv_flush_ngg_gs_state(struct radv_cmd_buffer *cmd_buffer)
2636 {
2637 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
2638 struct radv_userdata_info *loc;
2639 uint32_t ngg_gs_state = 0;
2640 uint32_t base_reg;
2641
2642 if (!radv_pipeline_has_gs(pipeline) ||
2643 !radv_pipeline_has_ngg(pipeline))
2644 return;
2645
2646 /* By default, NGG GS queries are disabled, but they are enabled if the
2647 * command buffer has active GDS queries or if it's a secondary command
2648 * buffer that inherits the number of generated primitives.
2649 */
2650 if (cmd_buffer->state.active_pipeline_gds_queries ||
2651 (cmd_buffer->state.inherited_pipeline_statistics & VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT))
2652 ngg_gs_state = 1;
2653
2654 loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_GEOMETRY,
2655 AC_UD_NGG_GS_STATE);
2656 base_reg = pipeline->user_data_0[MESA_SHADER_GEOMETRY];
2657 assert(loc->sgpr_idx != -1);
2658
2659 radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4,
2660 ngg_gs_state);
2661 }
2662
2663 static void
2664 radv_upload_graphics_shader_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty)
2665 {
2666 radv_flush_vertex_descriptors(cmd_buffer, pipeline_is_dirty);
2667 radv_flush_streamout_descriptors(cmd_buffer);
2668 radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
2669 radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
2670 radv_flush_ngg_gs_state(cmd_buffer);
2671 }
2672
2673 struct radv_draw_info {
2674 /**
2675 * Number of vertices.
2676 */
2677 uint32_t count;
2678
2679 /**
2680 * Index of the first vertex.
2681 */
2682 int32_t vertex_offset;
2683
2684 /**
2685 * First instance id.
2686 */
2687 uint32_t first_instance;
2688
2689 /**
2690 * Number of instances.
2691 */
2692 uint32_t instance_count;
2693
2694 /**
2695 * First index (indexed draws only).
2696 */
2697 uint32_t first_index;
2698
2699 /**
2700 * Whether it's an indexed draw.
2701 */
2702 bool indexed;
2703
2704 /**
2705 * Indirect draw parameters resource.
2706 */
2707 struct radv_buffer *indirect;
2708 uint64_t indirect_offset;
2709 uint32_t stride;
2710
2711 /**
2712 * Draw count parameters resource.
2713 */
2714 struct radv_buffer *count_buffer;
2715 uint64_t count_buffer_offset;
2716
2717 /**
2718 * Stream output parameters resource.
2719 */
2720 struct radv_buffer *strmout_buffer;
2721 uint64_t strmout_buffer_offset;
2722 };
2723
2724 static uint32_t
2725 radv_get_primitive_reset_index(struct radv_cmd_buffer *cmd_buffer)
2726 {
2727 switch (cmd_buffer->state.index_type) {
2728 case V_028A7C_VGT_INDEX_8:
2729 return 0xffu;
2730 case V_028A7C_VGT_INDEX_16:
2731 return 0xffffu;
2732 case V_028A7C_VGT_INDEX_32:
2733 return 0xffffffffu;
2734 default:
2735 unreachable("invalid index type");
2736 }
2737 }
2738
2739 static void
2740 si_emit_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer,
2741 bool instanced_draw, bool indirect_draw,
2742 bool count_from_stream_output,
2743 uint32_t draw_vertex_count)
2744 {
2745 struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
2746 struct radv_cmd_state *state = &cmd_buffer->state;
2747 struct radeon_cmdbuf *cs = cmd_buffer->cs;
2748 unsigned ia_multi_vgt_param;
2749
2750 ia_multi_vgt_param =
2751 si_get_ia_multi_vgt_param(cmd_buffer, instanced_draw,
2752 indirect_draw,
2753 count_from_stream_output,
2754 draw_vertex_count);
2755
2756 if (state->last_ia_multi_vgt_param != ia_multi_vgt_param) {
2757 if (info->chip_class == GFX9) {
2758 radeon_set_uconfig_reg_idx(cmd_buffer->device->physical_device,
2759 cs,
2760 R_030960_IA_MULTI_VGT_PARAM,
2761 4, ia_multi_vgt_param);
2762 } else if (info->chip_class >= GFX7) {
2763 radeon_set_context_reg_idx(cs,
2764 R_028AA8_IA_MULTI_VGT_PARAM,
2765 1, ia_multi_vgt_param);
2766 } else {
2767 radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM,
2768 ia_multi_vgt_param);
2769 }
2770 state->last_ia_multi_vgt_param = ia_multi_vgt_param;
2771 }
2772 }
2773
2774 static void
2775 radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer,
2776 const struct radv_draw_info *draw_info)
2777 {
2778 struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
2779 struct radv_cmd_state *state = &cmd_buffer->state;
2780 struct radeon_cmdbuf *cs = cmd_buffer->cs;
2781 int32_t primitive_reset_en;
2782
2783 /* Draw state. */
2784 if (info->chip_class < GFX10) {
2785 si_emit_ia_multi_vgt_param(cmd_buffer, draw_info->instance_count > 1,
2786 draw_info->indirect,
2787 !!draw_info->strmout_buffer,
2788 draw_info->indirect ? 0 : draw_info->count);
2789 }
2790
2791 /* Primitive restart. */
2792 primitive_reset_en =
2793 draw_info->indexed && state->pipeline->graphics.prim_restart_enable;
2794
2795 if (primitive_reset_en != state->last_primitive_reset_en) {
2796 state->last_primitive_reset_en = primitive_reset_en;
2797 if (info->chip_class >= GFX9) {
2798 radeon_set_uconfig_reg(cs,
2799 R_03092C_VGT_MULTI_PRIM_IB_RESET_EN,
2800 primitive_reset_en);
2801 } else {
2802 radeon_set_context_reg(cs,
2803 R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
2804 primitive_reset_en);
2805 }
2806 }
2807
2808 if (primitive_reset_en) {
2809 uint32_t primitive_reset_index =
2810 radv_get_primitive_reset_index(cmd_buffer);
2811
2812 if (primitive_reset_index != state->last_primitive_reset_index) {
2813 radeon_set_context_reg(cs,
2814 R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
2815 primitive_reset_index);
2816 state->last_primitive_reset_index = primitive_reset_index;
2817 }
2818 }
2819
2820 if (draw_info->strmout_buffer) {
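/* Draws whose vertex count comes from a transform feedback counter
 * (vkCmdDrawIndirectByteCountEXT): program the vertex stride and copy
 * the buffer-filled size into VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE.
 */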
2821 uint64_t va = radv_buffer_get_va(draw_info->strmout_buffer->bo);
2822
2823 va += draw_info->strmout_buffer->offset +
2824 draw_info->strmout_buffer_offset;
2825
2826 radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
2827 draw_info->stride);
2828
2829 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
2830 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
2831 COPY_DATA_DST_SEL(COPY_DATA_REG) |
2832 COPY_DATA_WR_CONFIRM);
2833 radeon_emit(cs, va);
2834 radeon_emit(cs, va >> 32);
2835 radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
2836 radeon_emit(cs, 0); /* unused */
2837
2838 radv_cs_add_buffer(cmd_buffer->device->ws, cs, draw_info->strmout_buffer->bo);
2839 }
2840 }
2841
2842 static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer,
2843 VkPipelineStageFlags src_stage_mask)
2844 {
2845 if (src_stage_mask & (VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
2846 VK_PIPELINE_STAGE_TRANSFER_BIT |
2847 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
2848 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
2849 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
2850 }
2851
2852 if (src_stage_mask & (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
2853 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
2854 VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
2855 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
2856 VK_PIPELINE_STAGE_TRANSFER_BIT |
2857 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
2858 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT |
2859 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
2860 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
2861 } else if (src_stage_mask & (VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
2862 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
2863 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
2864 VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
2865 VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
2866 VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
2867 VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT)) {
2868 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
2869 }
2870 }
2871
2872 static enum radv_cmd_flush_bits
2873 radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer,
2874 VkAccessFlags src_flags,
2875 struct radv_image *image)
2876 {
2877 bool flush_CB_meta = true, flush_DB_meta = true;
2878 enum radv_cmd_flush_bits flush_bits = 0;
2879 uint32_t b;
2880
2881 if (image) {
2882 if (!radv_image_has_CB_metadata(image))
2883 flush_CB_meta = false;
2884 if (!radv_image_has_htile(image))
2885 flush_DB_meta = false;
2886 }
2887
2888 for_each_bit(b, src_flags) {
2889 switch ((VkAccessFlagBits)(1 << b)) {
2890 case VK_ACCESS_SHADER_WRITE_BIT:
2891 case VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT:
2892 case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT:
2893 flush_bits |= RADV_CMD_FLAG_WB_L2;
2894 break;
2895 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
2896 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
2897 if (flush_CB_meta)
2898 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
2899 break;
2900 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
2901 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
2902 if (flush_DB_meta)
2903 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
2904 break;
2905 case VK_ACCESS_TRANSFER_WRITE_BIT:
2906 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
2907 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
2908 RADV_CMD_FLAG_INV_L2;
2909
2910 if (flush_CB_meta)
2911 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
2912 if (flush_DB_meta)
2913 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
2914 break;
2915 default:
2916 break;
2917 }
2918 }
2919 return flush_bits;
2920 }
2921
2922 static enum radv_cmd_flush_bits
2923 radv_dst_access_flush(struct radv_cmd_buffer *cmd_buffer,
2924 VkAccessFlags dst_flags,
2925 struct radv_image *image)
2926 {
2927 bool flush_CB_meta = true, flush_DB_meta = true;
2928 enum radv_cmd_flush_bits flush_bits = 0;
2929 bool flush_CB = true, flush_DB = true;
2930 bool image_is_coherent = false;
2931 uint32_t b;
2932
2933 if (image) {
2934 if (!(image->usage & VK_IMAGE_USAGE_STORAGE_BIT)) {
2935 flush_CB = false;
2936 flush_DB = false;
2937 }
2938
2939 if (!radv_image_has_CB_metadata(image))
2940 flush_CB_meta = false;
2941 if (!radv_image_has_htile(image))
2942 flush_DB_meta = false;
2943
2944 /* TODO: implement shader coherency for GFX10 */
2945
2946 if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
2947 if (image->info.samples == 1 &&
2948 (image->usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
2949 VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) &&
2950 !vk_format_is_stencil(image->vk_format)) {
2951 /* Single-sample color and single-sample depth
2952 * (not stencil) are coherent with shaders on
2953 * GFX9.
2954 */
2955 image_is_coherent = true;
2956 }
2957 }
2958 }
2959
2960 for_each_bit(b, dst_flags) {
2961 switch ((VkAccessFlagBits)(1 << b)) {
2962 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
2963 case VK_ACCESS_INDEX_READ_BIT:
2964 case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT:
2965 break;
2966 case VK_ACCESS_UNIFORM_READ_BIT:
2967 flush_bits |= RADV_CMD_FLAG_INV_VCACHE | RADV_CMD_FLAG_INV_SCACHE;
2968 break;
2969 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
2970 case VK_ACCESS_TRANSFER_READ_BIT:
2971 case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
2972 flush_bits |= RADV_CMD_FLAG_INV_VCACHE |
2973 RADV_CMD_FLAG_INV_L2;
2974 break;
2975 case VK_ACCESS_SHADER_READ_BIT:
2976 flush_bits |= RADV_CMD_FLAG_INV_VCACHE;
2977 /* Unlike LLVM, ACO uses SMEM for SSBOs, so we have to
2978 * invalidate the scalar cache. */
2979 if (!cmd_buffer->device->physical_device->use_llvm)
2980 flush_bits |= RADV_CMD_FLAG_INV_SCACHE;
2981
2982 if (!image_is_coherent)
2983 flush_bits |= RADV_CMD_FLAG_INV_L2;
2984 break;
2985 case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
2986 if (flush_CB)
2987 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
2988 if (flush_CB_meta)
2989 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
2990 break;
2991 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT:
2992 if (flush_DB)
2993 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
2994 if (flush_DB_meta)
2995 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
2996 break;
2997 default:
2998 break;
2999 }
3000 }
3001 return flush_bits;
3002 }
3003
3004 void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer,
3005 const struct radv_subpass_barrier *barrier)
3006 {
3007 cmd_buffer->state.flush_bits |= radv_src_access_flush(cmd_buffer, barrier->src_access_mask,
3008 NULL);
3009 radv_stage_flush(cmd_buffer, barrier->src_stage_mask);
3010 cmd_buffer->state.flush_bits |= radv_dst_access_flush(cmd_buffer, barrier->dst_access_mask,
3011 NULL);
3012 }
3013
3014 uint32_t
3015 radv_get_subpass_id(struct radv_cmd_buffer *cmd_buffer)
3016 {
3017 struct radv_cmd_state *state = &cmd_buffer->state;
3018 uint32_t subpass_id = state->subpass - state->pass->subpasses;
3019
3020 /* The id of this subpass shouldn't exceed the number of subpasses in
3021 * this render pass minus 1.
3022 */
3023 assert(subpass_id < state->pass->subpass_count);
3024 return subpass_id;
3025 }
3026
3027 static struct radv_sample_locations_state *
3028 radv_get_attachment_sample_locations(struct radv_cmd_buffer *cmd_buffer,
3029 uint32_t att_idx,
3030 bool begin_subpass)
3031 {
3032 struct radv_cmd_state *state = &cmd_buffer->state;
3033 uint32_t subpass_id = radv_get_subpass_id(cmd_buffer);
3034 struct radv_image_view *view = state->attachments[att_idx].iview;
3035
3036 if (view->image->info.samples == 1)
3037 return NULL;
3038
3039 if (state->pass->attachments[att_idx].first_subpass_idx == subpass_id) {
3040 /* Return the initial sample locations if this is the initial
3041 * layout transition of the given subpass attachment.
3042 */
3043 if (state->attachments[att_idx].sample_location.count > 0)
3044 return &state->attachments[att_idx].sample_location;
3045 } else {
3046 /* Otherwise return the subpass sample locations if defined. */
3047 if (state->subpass_sample_locs) {
3048 /* Because the driver sets the current subpass before
3049 * initial layout transitions, we should use the sample
3050 * locations from the previous subpass to avoid an
3051 * off-by-one problem. Otherwise, use the sample
3052 * locations for the current subpass for final layout
3053 * transitions.
3054 */
3055 if (begin_subpass)
3056 subpass_id--;
3057
3058 for (uint32_t i = 0; i < state->num_subpass_sample_locs; i++) {
3059 if (state->subpass_sample_locs[i].subpass_idx == subpass_id)
3060 return &state->subpass_sample_locs[i].sample_location;
3061 }
3062 }
3063 }
3064
3065 return NULL;
3066 }
3067
3068 static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer,
3069 struct radv_subpass_attachment att,
3070 bool begin_subpass)
3071 {
3072 unsigned idx = att.attachment;
3073 struct radv_image_view *view = cmd_buffer->state.attachments[idx].iview;
3074 struct radv_sample_locations_state *sample_locs;
3075 VkImageSubresourceRange range;
3076 range.aspectMask = view->aspect_mask;
3077 range.baseMipLevel = view->base_mip;
3078 range.levelCount = 1;
3079 range.baseArrayLayer = view->base_layer;
3080 range.layerCount = cmd_buffer->state.framebuffer->layers;
3081
3082 if (cmd_buffer->state.subpass->view_mask) {
3083 /* If the current subpass uses multiview, the driver might have
3084 * performed a fast color/depth clear to the whole image
3085 * (including all layers). To make sure the driver will
3086 * decompress the image correctly (if needed), we have to
3087 * account for the "real" number of layers. If the view mask is
3088 * sparse, this will decompress more layers than needed.
3089 */
3090 range.layerCount = util_last_bit(cmd_buffer->state.subpass->view_mask);
3091 }
3092
3093 /* Get the subpass sample locations for the given attachment; if NULL
3094 * is returned, the driver will use the default HW locations.
3095 */
3096 sample_locs = radv_get_attachment_sample_locations(cmd_buffer, idx,
3097 begin_subpass);
3098
3099 /* Determine if the subpass uses separate depth/stencil layouts. */
3100 bool uses_separate_depth_stencil_layouts = false;
3101 if ((cmd_buffer->state.attachments[idx].current_layout !=
3102 cmd_buffer->state.attachments[idx].current_stencil_layout) ||
3103 (att.layout != att.stencil_layout)) {
3104 uses_separate_depth_stencil_layouts = true;
3105 }
3106
3107 /* For separate layouts, perform depth and stencil transitions
3108 * separately.
3109 */
3110 if (uses_separate_depth_stencil_layouts &&
3111 (range.aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT |
3112 VK_IMAGE_ASPECT_STENCIL_BIT))) {
3113 /* Depth-only transitions. */
3114 range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
3115 radv_handle_image_transition(cmd_buffer,
3116 view->image,
3117 cmd_buffer->state.attachments[idx].current_layout,
3118 cmd_buffer->state.attachments[idx].current_in_render_loop,
3119 att.layout, att.in_render_loop,
3120 0, 0, &range, sample_locs);
3121
3122 /* Stencil-only transitions. */
3123 range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
3124 radv_handle_image_transition(cmd_buffer,
3125 view->image,
3126 cmd_buffer->state.attachments[idx].current_stencil_layout,
3127 cmd_buffer->state.attachments[idx].current_in_render_loop,
3128 att.stencil_layout, att.in_render_loop,
3129 0, 0, &range, sample_locs);
3130 } else {
3131 radv_handle_image_transition(cmd_buffer,
3132 view->image,
3133 cmd_buffer->state.attachments[idx].current_layout,
3134 cmd_buffer->state.attachments[idx].current_in_render_loop,
3135 att.layout, att.in_render_loop,
3136 0, 0, &range, sample_locs);
3137 }
3138
3139 cmd_buffer->state.attachments[idx].current_layout = att.layout;
3140 cmd_buffer->state.attachments[idx].current_stencil_layout = att.stencil_layout;
3141 cmd_buffer->state.attachments[idx].current_in_render_loop = att.in_render_loop;
3142
3143
3144 }
3145
3146 void
3147 radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer,
3148 const struct radv_subpass *subpass)
3149 {
3150 cmd_buffer->state.subpass = subpass;
3151
3152 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER;
3153 }
3154
3155 static VkResult
3156 radv_cmd_state_setup_sample_locations(struct radv_cmd_buffer *cmd_buffer,
3157 struct radv_render_pass *pass,
3158 const VkRenderPassBeginInfo *info)
3159 {
3160 const struct VkRenderPassSampleLocationsBeginInfoEXT *sample_locs =
3161 vk_find_struct_const(info->pNext,
3162 RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT);
3163 struct radv_cmd_state *state = &cmd_buffer->state;
3164
3165 if (!sample_locs) {
3166 state->subpass_sample_locs = NULL;
3167 return VK_SUCCESS;
3168 }
3169
3170 for (uint32_t i = 0; i < sample_locs->attachmentInitialSampleLocationsCount; i++) {
3171 const VkAttachmentSampleLocationsEXT *att_sample_locs =
3172 &sample_locs->pAttachmentInitialSampleLocations[i];
3173 uint32_t att_idx = att_sample_locs->attachmentIndex;
3174 struct radv_image *image = cmd_buffer->state.attachments[att_idx].iview->image;
3175
3176 assert(vk_format_is_depth_or_stencil(image->vk_format));
3177
3178 /* From the Vulkan spec 1.1.108:
3179 *
3180 * "If the image referenced by the framebuffer attachment at
3181 * index attachmentIndex was not created with
3182 * VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT
3183 * then the values specified in sampleLocationsInfo are
3184 * ignored."
3185 */
3186 if (!(image->flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT))
3187 continue;
3188
3189 const VkSampleLocationsInfoEXT *sample_locs_info =
3190 &att_sample_locs->sampleLocationsInfo;
3191
3192 state->attachments[att_idx].sample_location.per_pixel =
3193 sample_locs_info->sampleLocationsPerPixel;
3194 state->attachments[att_idx].sample_location.grid_size =
3195 sample_locs_info->sampleLocationGridSize;
3196 state->attachments[att_idx].sample_location.count =
3197 sample_locs_info->sampleLocationsCount;
3198 typed_memcpy(&state->attachments[att_idx].sample_location.locations[0],
3199 sample_locs_info->pSampleLocations,
3200 sample_locs_info->sampleLocationsCount);
3201 }
3202
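/* Stash the per-subpass sample locations so they can be looked up during the
 * attachment layout transitions performed for each subpass. */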
3203 state->subpass_sample_locs = vk_alloc(&cmd_buffer->pool->alloc,
3204 sample_locs->postSubpassSampleLocationsCount *
3205 sizeof(state->subpass_sample_locs[0]),
3206 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3207 if (state->subpass_sample_locs == NULL) {
3208 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3209 return cmd_buffer->record_result;
3210 }
3211
3212 state->num_subpass_sample_locs = sample_locs->postSubpassSampleLocationsCount;
3213
3214 for (uint32_t i = 0; i < sample_locs->postSubpassSampleLocationsCount; i++) {
3215 const VkSubpassSampleLocationsEXT *subpass_sample_locs_info =
3216 &sample_locs->pPostSubpassSampleLocations[i];
3217 const VkSampleLocationsInfoEXT *sample_locs_info =
3218 &subpass_sample_locs_info->sampleLocationsInfo;
3219
3220 state->subpass_sample_locs[i].subpass_idx =
3221 subpass_sample_locs_info->subpassIndex;
3222 state->subpass_sample_locs[i].sample_location.per_pixel =
3223 sample_locs_info->sampleLocationsPerPixel;
3224 state->subpass_sample_locs[i].sample_location.grid_size =
3225 sample_locs_info->sampleLocationGridSize;
3226 state->subpass_sample_locs[i].sample_location.count =
3227 sample_locs_info->sampleLocationsCount;
3228 typed_memcpy(&state->subpass_sample_locs[i].sample_location.locations[0],
3229 sample_locs_info->pSampleLocations,
3230 sample_locs_info->sampleLocationsCount);
3231 }
3232
3233 return VK_SUCCESS;
3234 }
3235
3236 static VkResult
3237 radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer,
3238 struct radv_render_pass *pass,
3239 const VkRenderPassBeginInfo *info)
3240 {
3241 struct radv_cmd_state *state = &cmd_buffer->state;
3242 const struct VkRenderPassAttachmentBeginInfo *attachment_info = NULL;
3243
3244 if (info) {
3245 attachment_info = vk_find_struct_const(info->pNext,
3246 RENDER_PASS_ATTACHMENT_BEGIN_INFO);
3247 }
3248
3249
3250 if (pass->attachment_count == 0) {
3251 state->attachments = NULL;
3252 return VK_SUCCESS;
3253 }
3254
3255 state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
3256 pass->attachment_count *
3257 sizeof(state->attachments[0]),
3258 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3259 if (state->attachments == NULL) {
3260 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3261 return cmd_buffer->record_result;
3262 }
3263
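/* For each attachment, derive which aspects need to be cleared on load from
 * the load ops declared in the render pass, and record its initial layouts. */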
3264 for (uint32_t i = 0; i < pass->attachment_count; ++i) {
3265 struct radv_render_pass_attachment *att = &pass->attachments[i];
3266 VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
3267 VkImageAspectFlags clear_aspects = 0;
3268
3269 if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
3270 /* color attachment */
3271 if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
3272 clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
3273 }
3274 } else {
3275 /* depthstencil attachment */
3276 if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
3277 att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
3278 clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
3279 if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
3280 att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE)
3281 clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
3282 }
3283 if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
3284 att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
3285 clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
3286 }
3287 }
3288
3289 state->attachments[i].pending_clear_aspects = clear_aspects;
3290 state->attachments[i].cleared_views = 0;
3291 if (clear_aspects && info) {
3292 assert(info->clearValueCount > i);
3293 state->attachments[i].clear_value = info->pClearValues[i];
3294 }
3295
3296 state->attachments[i].current_layout = att->initial_layout;
3297 state->attachments[i].current_stencil_layout = att->stencil_initial_layout;
3298 state->attachments[i].sample_location.count = 0;
3299
3300 struct radv_image_view *iview;
3301 if (attachment_info && attachment_info->attachmentCount > i) {
3302 iview = radv_image_view_from_handle(attachment_info->pAttachments[i]);
3303 } else {
3304 iview = state->framebuffer->attachments[i];
3305 }
3306
3307 state->attachments[i].iview = iview;
3308 if (iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
3309 radv_initialise_ds_surface(cmd_buffer->device, &state->attachments[i].ds, iview);
3310 } else {
3311 radv_initialise_color_surface(cmd_buffer->device, &state->attachments[i].cb, iview);
3312 }
3313 }
3314
3315 return VK_SUCCESS;
3316 }
3317
3318 VkResult radv_AllocateCommandBuffers(
3319 VkDevice _device,
3320 const VkCommandBufferAllocateInfo *pAllocateInfo,
3321 VkCommandBuffer *pCommandBuffers)
3322 {
3323 RADV_FROM_HANDLE(radv_device, device, _device);
3324 RADV_FROM_HANDLE(radv_cmd_pool, pool, pAllocateInfo->commandPool);
3325
3326 VkResult result = VK_SUCCESS;
3327 uint32_t i;
3328
3329 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
3330
3331 if (!list_is_empty(&pool->free_cmd_buffers)) {
3332 struct radv_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct radv_cmd_buffer, pool_link);
3333
3334 list_del(&cmd_buffer->pool_link);
3335 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
3336
3337 result = radv_reset_cmd_buffer(cmd_buffer);
3338 cmd_buffer->level = pAllocateInfo->level;
3339
3340 pCommandBuffers[i] = radv_cmd_buffer_to_handle(cmd_buffer);
3341 } else {
3342 result = radv_create_cmd_buffer(device, pool, pAllocateInfo->level,
3343 &pCommandBuffers[i]);
3344 }
3345 if (result != VK_SUCCESS)
3346 break;
3347 }
3348
3349 if (result != VK_SUCCESS) {
3350 radv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
3351 i, pCommandBuffers);
3352
3353 /* From the Vulkan 1.0.66 spec:
3354 *
3355 * "vkAllocateCommandBuffers can be used to create multiple
3356 * command buffers. If the creation of any of those command
3357 * buffers fails, the implementation must destroy all
3358 * successfully created command buffer objects from this
3359 * command, set all entries of the pCommandBuffers array to
3360 * NULL and return the error."
3361 */
3362 memset(pCommandBuffers, 0,
3363 sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
3364 }
3365
3366 return result;
3367 }
3368
3369 void radv_FreeCommandBuffers(
3370 VkDevice device,
3371 VkCommandPool commandPool,
3372 uint32_t commandBufferCount,
3373 const VkCommandBuffer *pCommandBuffers)
3374 {
3375 for (uint32_t i = 0; i < commandBufferCount; i++) {
3376 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
3377
3378 if (cmd_buffer) {
3379 if (cmd_buffer->pool) {
3380 list_del(&cmd_buffer->pool_link);
3381 list_addtail(&cmd_buffer->pool_link, &cmd_buffer->pool->free_cmd_buffers);
3382 } else
3383 radv_cmd_buffer_destroy(cmd_buffer);
3384
3385 }
3386 }
3387 }
3388
3389 VkResult radv_ResetCommandBuffer(
3390 VkCommandBuffer commandBuffer,
3391 VkCommandBufferResetFlags flags)
3392 {
3393 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3394 return radv_reset_cmd_buffer(cmd_buffer);
3395 }
3396
3397 VkResult radv_BeginCommandBuffer(
3398 VkCommandBuffer commandBuffer,
3399 const VkCommandBufferBeginInfo *pBeginInfo)
3400 {
3401 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3402 VkResult result = VK_SUCCESS;
3403
3404 if (cmd_buffer->status != RADV_CMD_BUFFER_STATUS_INITIAL) {
3405 	/* If the command buffer has already been reset with
3406 	 * vkResetCommandBuffer, there is no need to do it again.
3407 */
3408 result = radv_reset_cmd_buffer(cmd_buffer);
3409 if (result != VK_SUCCESS)
3410 return result;
3411 }
3412
3413 memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
3414 cmd_buffer->state.last_primitive_reset_en = -1;
3415 cmd_buffer->state.last_index_type = -1;
3416 cmd_buffer->state.last_num_instances = -1;
3417 cmd_buffer->state.last_vertex_offset = -1;
3418 cmd_buffer->state.last_first_instance = -1;
3419 cmd_buffer->state.predication_type = -1;
3420 cmd_buffer->state.last_sx_ps_downconvert = -1;
3421 cmd_buffer->state.last_sx_blend_opt_epsilon = -1;
3422 cmd_buffer->state.last_sx_blend_opt_control = -1;
3423 cmd_buffer->usage_flags = pBeginInfo->flags;
3424
3425 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
3426 (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
3427 assert(pBeginInfo->pInheritanceInfo);
3428 cmd_buffer->state.framebuffer = radv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
3429 cmd_buffer->state.pass = radv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
3430
3431 struct radv_subpass *subpass =
3432 &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
3433
3434 if (cmd_buffer->state.framebuffer) {
3435 result = radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
3436 if (result != VK_SUCCESS)
3437 return result;
3438 }
3439
3440 cmd_buffer->state.inherited_pipeline_statistics =
3441 pBeginInfo->pInheritanceInfo->pipelineStatistics;
3442
3443 radv_cmd_buffer_set_subpass(cmd_buffer, subpass);
3444 }
3445
3446 if (unlikely(cmd_buffer->device->trace_bo))
3447 radv_cmd_buffer_trace_emit(cmd_buffer);
3448
3449 radv_describe_begin_cmd_buffer(cmd_buffer);
3450
3451 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_RECORDING;
3452
3453 return result;
3454 }
3455
3456 void radv_CmdBindVertexBuffers(
3457 VkCommandBuffer commandBuffer,
3458 uint32_t firstBinding,
3459 uint32_t bindingCount,
3460 const VkBuffer* pBuffers,
3461 const VkDeviceSize* pOffsets)
3462 {
3463 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3464 struct radv_vertex_binding *vb = cmd_buffer->vertex_bindings;
3465 bool changed = false;
3466
3467 	/* We have to defer setting up vertex buffers since we need the buffer
3468 * stride from the pipeline. */
3469
3470 assert(firstBinding + bindingCount <= MAX_VBS);
3471 for (uint32_t i = 0; i < bindingCount; i++) {
3472 RADV_FROM_HANDLE(radv_buffer, buffer, pBuffers[i]);
3473 uint32_t idx = firstBinding + i;
3474
3475 if (!changed &&
3476 (vb[idx].buffer != buffer ||
3477 vb[idx].offset != pOffsets[i])) {
3478 changed = true;
3479 }
3480
3481 vb[idx].buffer = buffer;
3482 vb[idx].offset = pOffsets[i];
3483
3484 if (buffer) {
3485 radv_cs_add_buffer(cmd_buffer->device->ws,
3486 cmd_buffer->cs, vb[idx].buffer->bo);
3487 }
3488 }
3489
3490 if (!changed) {
3491 /* No state changes. */
3492 return;
3493 }
3494
3495 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_VERTEX_BUFFER;
3496 }
3497
3498 static uint32_t
3499 vk_to_index_type(VkIndexType type)
3500 {
3501 switch (type) {
3502 case VK_INDEX_TYPE_UINT8_EXT:
3503 return V_028A7C_VGT_INDEX_8;
3504 case VK_INDEX_TYPE_UINT16:
3505 return V_028A7C_VGT_INDEX_16;
3506 case VK_INDEX_TYPE_UINT32:
3507 return V_028A7C_VGT_INDEX_32;
3508 default:
3509 unreachable("invalid index type");
3510 }
3511 }
3512
3513 static uint32_t
3514 radv_get_vgt_index_size(uint32_t type)
3515 {
3516 switch (type) {
3517 case V_028A7C_VGT_INDEX_8:
3518 return 1;
3519 case V_028A7C_VGT_INDEX_16:
3520 return 2;
3521 case V_028A7C_VGT_INDEX_32:
3522 return 4;
3523 default:
3524 unreachable("invalid index type");
3525 }
3526 }
3527
3528 void radv_CmdBindIndexBuffer(
3529 VkCommandBuffer commandBuffer,
3530 VkBuffer buffer,
3531 VkDeviceSize offset,
3532 VkIndexType indexType)
3533 {
3534 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3535 RADV_FROM_HANDLE(radv_buffer, index_buffer, buffer);
3536
3537 if (cmd_buffer->state.index_buffer == index_buffer &&
3538 cmd_buffer->state.index_offset == offset &&
3539 cmd_buffer->state.index_type == indexType) {
3540 /* No state changes. */
3541 return;
3542 }
3543
3544 cmd_buffer->state.index_buffer = index_buffer;
3545 cmd_buffer->state.index_offset = offset;
3546 cmd_buffer->state.index_type = vk_to_index_type(indexType);
3547 cmd_buffer->state.index_va = radv_buffer_get_va(index_buffer->bo);
3548 cmd_buffer->state.index_va += index_buffer->offset + offset;
3549
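/* Remember how many indices fit in the bound range; indexed draws use this as
 * the maximum index count and are skipped entirely when it is zero. */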
3550 int index_size = radv_get_vgt_index_size(vk_to_index_type(indexType));
3551 cmd_buffer->state.max_index_count = (index_buffer->size - offset) / index_size;
3552 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
3553 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo);
3554 }
3555
3556
3557 static void
3558 radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
3559 VkPipelineBindPoint bind_point,
3560 struct radv_descriptor_set *set, unsigned idx)
3561 {
3562 struct radeon_winsys *ws = cmd_buffer->device->ws;
3563
3564 radv_set_descriptor_set(cmd_buffer, bind_point, set, idx);
3565
3566 assert(set);
3567 assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
3568
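/* Without the global BO list, every buffer referenced by the set must be
 * added to this command stream's buffer list individually. */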
3569 if (!cmd_buffer->device->use_global_bo_list) {
3570 for (unsigned j = 0; j < set->buffer_count; ++j)
3571 if (set->descriptors[j])
3572 radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j]);
3573 }
3574
3575 if(set->bo)
3576 radv_cs_add_buffer(ws, cmd_buffer->cs, set->bo);
3577 }
3578
3579 void radv_CmdBindDescriptorSets(
3580 VkCommandBuffer commandBuffer,
3581 VkPipelineBindPoint pipelineBindPoint,
3582 VkPipelineLayout _layout,
3583 uint32_t firstSet,
3584 uint32_t descriptorSetCount,
3585 const VkDescriptorSet* pDescriptorSets,
3586 uint32_t dynamicOffsetCount,
3587 const uint32_t* pDynamicOffsets)
3588 {
3589 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3590 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
3591 unsigned dyn_idx = 0;
3592
3593 const bool no_dynamic_bounds = cmd_buffer->device->instance->debug_flags & RADV_DEBUG_NO_DYNAMIC_BOUNDS;
3594 struct radv_descriptor_state *descriptors_state =
3595 radv_get_descriptors_state(cmd_buffer, pipelineBindPoint);
3596
3597 for (unsigned i = 0; i < descriptorSetCount; ++i) {
3598 unsigned idx = i + firstSet;
3599 RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);
3600
3601 		/* If the set is already bound, we only need to update the
3602 * (potentially changed) dynamic offsets. */
3603 if (descriptors_state->sets[idx] != set ||
3604 !(descriptors_state->valid & (1u << idx))) {
3605 radv_bind_descriptor_set(cmd_buffer, pipelineBindPoint, set, idx);
3606 }
3607
3608 for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
3609 unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
3610 uint32_t *dst = descriptors_state->dynamic_buffers + idx * 4;
3611 assert(dyn_idx < dynamicOffsetCount);
3612
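/* Build the four-dword buffer descriptor for this dynamic buffer: base
 * address (including the dynamic offset), size, and format/swizzle words. */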
3613 struct radv_descriptor_range *range = set->dynamic_descriptors + j;
3614 uint64_t va = range->va + pDynamicOffsets[dyn_idx];
3615 dst[0] = va;
3616 dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
3617 dst[2] = no_dynamic_bounds ? 0xffffffffu : range->size;
3618 dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
3619 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
3620 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
3621 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
3622
3623 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
3624 dst[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
3625 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
3626 S_008F0C_RESOURCE_LEVEL(1);
3627 } else {
3628 dst[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
3629 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
3630 }
3631
3632 cmd_buffer->push_constant_stages |=
3633 set->layout->dynamic_shader_stages;
3634 }
3635 }
3636 }
3637
3638 static bool radv_init_push_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
3639 struct radv_descriptor_set *set,
3640 struct radv_descriptor_set_layout *layout,
3641 VkPipelineBindPoint bind_point)
3642 {
3643 struct radv_descriptor_state *descriptors_state =
3644 radv_get_descriptors_state(cmd_buffer, bind_point);
3645 set->size = layout->size;
3646 set->layout = layout;
3647
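/* Grow the host-side storage backing push descriptors: at least 1 KiB, at
 * least double the previous capacity, capped at the maximum push set size. */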
3648 if (descriptors_state->push_set.capacity < set->size) {
3649 size_t new_size = MAX2(set->size, 1024);
3650 new_size = MAX2(new_size, 2 * descriptors_state->push_set.capacity);
3651 new_size = MIN2(new_size, 96 * MAX_PUSH_DESCRIPTORS);
3652
3653 free(set->mapped_ptr);
3654 set->mapped_ptr = malloc(new_size);
3655
3656 if (!set->mapped_ptr) {
3657 descriptors_state->push_set.capacity = 0;
3658 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3659 return false;
3660 }
3661
3662 descriptors_state->push_set.capacity = new_size;
3663 }
3664
3665 return true;
3666 }
3667
3668 void radv_meta_push_descriptor_set(
3669 struct radv_cmd_buffer* cmd_buffer,
3670 VkPipelineBindPoint pipelineBindPoint,
3671 VkPipelineLayout _layout,
3672 uint32_t set,
3673 uint32_t descriptorWriteCount,
3674 const VkWriteDescriptorSet* pDescriptorWrites)
3675 {
3676 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
3677 struct radv_descriptor_set *push_set = &cmd_buffer->meta_push_descriptors;
3678 unsigned bo_offset;
3679
3680 assert(set == 0);
3681 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
3682
3683 push_set->size = layout->set[set].layout->size;
3684 push_set->layout = layout->set[set].layout;
3685
3686 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, push_set->size, 32,
3687 &bo_offset,
3688 (void**) &push_set->mapped_ptr))
3689 return;
3690
3691 push_set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
3692 push_set->va += bo_offset;
3693
3694 radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
3695 radv_descriptor_set_to_handle(push_set),
3696 descriptorWriteCount, pDescriptorWrites, 0, NULL);
3697
3698 radv_set_descriptor_set(cmd_buffer, pipelineBindPoint, push_set, set);
3699 }
3700
3701 void radv_CmdPushDescriptorSetKHR(
3702 VkCommandBuffer commandBuffer,
3703 VkPipelineBindPoint pipelineBindPoint,
3704 VkPipelineLayout _layout,
3705 uint32_t set,
3706 uint32_t descriptorWriteCount,
3707 const VkWriteDescriptorSet* pDescriptorWrites)
3708 {
3709 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3710 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
3711 struct radv_descriptor_state *descriptors_state =
3712 radv_get_descriptors_state(cmd_buffer, pipelineBindPoint);
3713 struct radv_descriptor_set *push_set = &descriptors_state->push_set.set;
3714
3715 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
3716
3717 if (!radv_init_push_descriptor_set(cmd_buffer, push_set,
3718 layout->set[set].layout,
3719 pipelineBindPoint))
3720 return;
3721
3722 /* Check that there are no inline uniform block updates when calling vkCmdPushDescriptorSetKHR()
3723 	 * because that is invalid according to the Vulkan spec.
3724 */
3725 for (int i = 0; i < descriptorWriteCount; i++) {
3726 ASSERTED const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
3727 assert(writeset->descriptorType != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT);
3728 }
3729
3730 radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
3731 radv_descriptor_set_to_handle(push_set),
3732 descriptorWriteCount, pDescriptorWrites, 0, NULL);
3733
3734 radv_set_descriptor_set(cmd_buffer, pipelineBindPoint, push_set, set);
3735 descriptors_state->push_dirty = true;
3736 }
3737
3738 void radv_CmdPushDescriptorSetWithTemplateKHR(
3739 VkCommandBuffer commandBuffer,
3740 VkDescriptorUpdateTemplate descriptorUpdateTemplate,
3741 VkPipelineLayout _layout,
3742 uint32_t set,
3743 const void* pData)
3744 {
3745 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3746 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
3747 RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
3748 struct radv_descriptor_state *descriptors_state =
3749 radv_get_descriptors_state(cmd_buffer, templ->bind_point);
3750 struct radv_descriptor_set *push_set = &descriptors_state->push_set.set;
3751
3752 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
3753
3754 if (!radv_init_push_descriptor_set(cmd_buffer, push_set,
3755 layout->set[set].layout,
3756 templ->bind_point))
3757 return;
3758
3759 radv_update_descriptor_set_with_template(cmd_buffer->device, cmd_buffer, push_set,
3760 descriptorUpdateTemplate, pData);
3761
3762 radv_set_descriptor_set(cmd_buffer, templ->bind_point, push_set, set);
3763 descriptors_state->push_dirty = true;
3764 }
3765
3766 void radv_CmdPushConstants(VkCommandBuffer commandBuffer,
3767 VkPipelineLayout layout,
3768 VkShaderStageFlags stageFlags,
3769 uint32_t offset,
3770 uint32_t size,
3771 const void* pValues)
3772 {
3773 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3774 memcpy(cmd_buffer->push_constants + offset, pValues, size);
3775 cmd_buffer->push_constant_stages |= stageFlags;
3776 }
3777
3778 VkResult radv_EndCommandBuffer(
3779 VkCommandBuffer commandBuffer)
3780 {
3781 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3782
3783 if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) {
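/* On GFX6, wait for outstanding compute and pixel shader work and write back
 * L2 before the IB ends. */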
3784 if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX6)
3785 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WB_L2;
3786
3787 /* Make sure to sync all pending active queries at the end of
3788 		 * the command buffer.
3789 */
3790 cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits;
3791
3792 /* Since NGG streamout uses GDS, we need to make GDS idle when
3793 * we leave the IB, otherwise another process might overwrite
3794 * it while our shaders are busy.
3795 */
3796 if (cmd_buffer->gds_needed)
3797 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
3798
3799 si_emit_cache_flush(cmd_buffer);
3800 }
3801
3802 /* Make sure CP DMA is idle at the end of IBs because the kernel
3803 * doesn't wait for it.
3804 */
3805 si_cp_dma_wait_for_idle(cmd_buffer);
3806
3807 radv_describe_end_cmd_buffer(cmd_buffer);
3808
3809 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
3810 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs);
3811
3812 VkResult result = cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs);
3813 if (result != VK_SUCCESS)
3814 return vk_error(cmd_buffer->device->instance, result);
3815
3816 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_EXECUTABLE;
3817
3818 return cmd_buffer->record_result;
3819 }
3820
3821 static void
3822 radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer)
3823 {
3824 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
3825
3826 if (!pipeline || pipeline == cmd_buffer->state.emitted_compute_pipeline)
3827 return;
3828
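/* Compute pipelines only emit SH registers, so there must not be any context
 * register commands. */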
3829 assert(!pipeline->ctx_cs.cdw);
3830
3831 cmd_buffer->state.emitted_compute_pipeline = pipeline;
3832
3833 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, pipeline->cs.cdw);
3834 radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);
3835
3836 cmd_buffer->compute_scratch_size_per_wave_needed = MAX2(cmd_buffer->compute_scratch_size_per_wave_needed,
3837 pipeline->scratch_bytes_per_wave);
3838 cmd_buffer->compute_scratch_waves_wanted = MAX2(cmd_buffer->compute_scratch_waves_wanted,
3839 pipeline->max_waves);
3840
3841 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
3842 pipeline->shaders[MESA_SHADER_COMPUTE]->bo);
3843
3844 if (unlikely(cmd_buffer->device->trace_bo))
3845 radv_save_pipeline(cmd_buffer, pipeline, RING_COMPUTE);
3846 }
3847
3848 static void radv_mark_descriptor_sets_dirty(struct radv_cmd_buffer *cmd_buffer,
3849 VkPipelineBindPoint bind_point)
3850 {
3851 struct radv_descriptor_state *descriptors_state =
3852 radv_get_descriptors_state(cmd_buffer, bind_point);
3853
3854 descriptors_state->dirty |= descriptors_state->valid;
3855 }
3856
3857 void radv_CmdBindPipeline(
3858 VkCommandBuffer commandBuffer,
3859 VkPipelineBindPoint pipelineBindPoint,
3860 VkPipeline _pipeline)
3861 {
3862 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3863 RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
3864
3865 switch (pipelineBindPoint) {
3866 case VK_PIPELINE_BIND_POINT_COMPUTE:
3867 if (cmd_buffer->state.compute_pipeline == pipeline)
3868 return;
3869 radv_mark_descriptor_sets_dirty(cmd_buffer, pipelineBindPoint);
3870
3871 cmd_buffer->state.compute_pipeline = pipeline;
3872 cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
3873 break;
3874 case VK_PIPELINE_BIND_POINT_GRAPHICS:
3875 if (cmd_buffer->state.pipeline == pipeline)
3876 return;
3877 radv_mark_descriptor_sets_dirty(cmd_buffer, pipelineBindPoint);
3878
3879 cmd_buffer->state.pipeline = pipeline;
3880 if (!pipeline)
3881 break;
3882
3883 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
3884 cmd_buffer->push_constant_stages |= pipeline->active_stages;
3885
3886 /* the new vertex shader might not have the same user regs */
3887 cmd_buffer->state.last_first_instance = -1;
3888 cmd_buffer->state.last_vertex_offset = -1;
3889
3890 /* Prefetch all pipeline shaders at first draw time. */
3891 cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_SHADERS;
3892
3893 if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX10 &&
3894 cmd_buffer->state.emitted_pipeline &&
3895 radv_pipeline_has_ngg(cmd_buffer->state.emitted_pipeline) &&
3896 !radv_pipeline_has_ngg(cmd_buffer->state.pipeline)) {
3897 /* Transitioning from NGG to legacy GS requires
3898 * VGT_FLUSH on Navi10-14. VGT_FLUSH is also emitted
3899 * at the beginning of IBs when legacy GS ring pointers
3900 * are set.
3901 */
3902 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_FLUSH;
3903 }
3904
3905 radv_bind_dynamic_state(cmd_buffer, &pipeline->dynamic_state);
3906 radv_bind_streamout_state(cmd_buffer, pipeline);
3907
3908 if (pipeline->graphics.esgs_ring_size > cmd_buffer->esgs_ring_size_needed)
3909 cmd_buffer->esgs_ring_size_needed = pipeline->graphics.esgs_ring_size;
3910 if (pipeline->graphics.gsvs_ring_size > cmd_buffer->gsvs_ring_size_needed)
3911 cmd_buffer->gsvs_ring_size_needed = pipeline->graphics.gsvs_ring_size;
3912
3913 if (radv_pipeline_has_tess(pipeline))
3914 cmd_buffer->tess_rings_needed = true;
3915 break;
3916 default:
3917 assert(!"invalid bind point");
3918 break;
3919 }
3920 }
3921
3922 void radv_CmdSetViewport(
3923 VkCommandBuffer commandBuffer,
3924 uint32_t firstViewport,
3925 uint32_t viewportCount,
3926 const VkViewport* pViewports)
3927 {
3928 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3929 struct radv_cmd_state *state = &cmd_buffer->state;
3930 ASSERTED const uint32_t total_count = firstViewport + viewportCount;
3931
3932 assert(firstViewport < MAX_VIEWPORTS);
3933 assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);
3934
3935 if (!memcmp(state->dynamic.viewport.viewports + firstViewport,
3936 pViewports, viewportCount * sizeof(*pViewports))) {
3937 return;
3938 }
3939
3940 memcpy(state->dynamic.viewport.viewports + firstViewport, pViewports,
3941 viewportCount * sizeof(*pViewports));
3942
3943 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_VIEWPORT;
3944 }
3945
3946 void radv_CmdSetScissor(
3947 VkCommandBuffer commandBuffer,
3948 uint32_t firstScissor,
3949 uint32_t scissorCount,
3950 const VkRect2D* pScissors)
3951 {
3952 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3953 struct radv_cmd_state *state = &cmd_buffer->state;
3954 ASSERTED const uint32_t total_count = firstScissor + scissorCount;
3955
3956 assert(firstScissor < MAX_SCISSORS);
3957 assert(total_count >= 1 && total_count <= MAX_SCISSORS);
3958
3959 if (!memcmp(state->dynamic.scissor.scissors + firstScissor, pScissors,
3960 scissorCount * sizeof(*pScissors))) {
3961 return;
3962 }
3963
3964 memcpy(state->dynamic.scissor.scissors + firstScissor, pScissors,
3965 scissorCount * sizeof(*pScissors));
3966
3967 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
3968 }
3969
3970 void radv_CmdSetLineWidth(
3971 VkCommandBuffer commandBuffer,
3972 float lineWidth)
3973 {
3974 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3975
3976 if (cmd_buffer->state.dynamic.line_width == lineWidth)
3977 return;
3978
3979 cmd_buffer->state.dynamic.line_width = lineWidth;
3980 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
3981 }
3982
3983 void radv_CmdSetDepthBias(
3984 VkCommandBuffer commandBuffer,
3985 float depthBiasConstantFactor,
3986 float depthBiasClamp,
3987 float depthBiasSlopeFactor)
3988 {
3989 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3990 struct radv_cmd_state *state = &cmd_buffer->state;
3991
3992 if (state->dynamic.depth_bias.bias == depthBiasConstantFactor &&
3993 state->dynamic.depth_bias.clamp == depthBiasClamp &&
3994 state->dynamic.depth_bias.slope == depthBiasSlopeFactor) {
3995 return;
3996 }
3997
3998 state->dynamic.depth_bias.bias = depthBiasConstantFactor;
3999 state->dynamic.depth_bias.clamp = depthBiasClamp;
4000 state->dynamic.depth_bias.slope = depthBiasSlopeFactor;
4001
4002 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
4003 }
4004
4005 void radv_CmdSetBlendConstants(
4006 VkCommandBuffer commandBuffer,
4007 const float blendConstants[4])
4008 {
4009 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4010 struct radv_cmd_state *state = &cmd_buffer->state;
4011
4012 if (!memcmp(state->dynamic.blend_constants, blendConstants, sizeof(float) * 4))
4013 return;
4014
4015 memcpy(state->dynamic.blend_constants, blendConstants, sizeof(float) * 4);
4016
4017 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
4018 }
4019
4020 void radv_CmdSetDepthBounds(
4021 VkCommandBuffer commandBuffer,
4022 float minDepthBounds,
4023 float maxDepthBounds)
4024 {
4025 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4026 struct radv_cmd_state *state = &cmd_buffer->state;
4027
4028 if (state->dynamic.depth_bounds.min == minDepthBounds &&
4029 state->dynamic.depth_bounds.max == maxDepthBounds) {
4030 return;
4031 }
4032
4033 state->dynamic.depth_bounds.min = minDepthBounds;
4034 state->dynamic.depth_bounds.max = maxDepthBounds;
4035
4036 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
4037 }
4038
4039 void radv_CmdSetStencilCompareMask(
4040 VkCommandBuffer commandBuffer,
4041 VkStencilFaceFlags faceMask,
4042 uint32_t compareMask)
4043 {
4044 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4045 struct radv_cmd_state *state = &cmd_buffer->state;
4046 bool front_same = state->dynamic.stencil_compare_mask.front == compareMask;
4047 bool back_same = state->dynamic.stencil_compare_mask.back == compareMask;
4048
4049 if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) &&
4050 (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || back_same)) {
4051 return;
4052 }
4053
4054 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
4055 state->dynamic.stencil_compare_mask.front = compareMask;
4056 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
4057 state->dynamic.stencil_compare_mask.back = compareMask;
4058
4059 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
4060 }
4061
4062 void radv_CmdSetStencilWriteMask(
4063 VkCommandBuffer commandBuffer,
4064 VkStencilFaceFlags faceMask,
4065 uint32_t writeMask)
4066 {
4067 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4068 struct radv_cmd_state *state = &cmd_buffer->state;
4069 bool front_same = state->dynamic.stencil_write_mask.front == writeMask;
4070 bool back_same = state->dynamic.stencil_write_mask.back == writeMask;
4071
4072 if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) &&
4073 (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || back_same)) {
4074 return;
4075 }
4076
4077 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
4078 state->dynamic.stencil_write_mask.front = writeMask;
4079 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
4080 state->dynamic.stencil_write_mask.back = writeMask;
4081
4082 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
4083 }
4084
4085 void radv_CmdSetStencilReference(
4086 VkCommandBuffer commandBuffer,
4087 VkStencilFaceFlags faceMask,
4088 uint32_t reference)
4089 {
4090 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4091 struct radv_cmd_state *state = &cmd_buffer->state;
4092 bool front_same = state->dynamic.stencil_reference.front == reference;
4093 bool back_same = state->dynamic.stencil_reference.back == reference;
4094
4095 if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) &&
4096 (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || back_same)) {
4097 return;
4098 }
4099
4100 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
4101 cmd_buffer->state.dynamic.stencil_reference.front = reference;
4102 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
4103 cmd_buffer->state.dynamic.stencil_reference.back = reference;
4104
4105 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
4106 }
4107
4108 void radv_CmdSetDiscardRectangleEXT(
4109 VkCommandBuffer commandBuffer,
4110 uint32_t firstDiscardRectangle,
4111 uint32_t discardRectangleCount,
4112 const VkRect2D* pDiscardRectangles)
4113 {
4114 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4115 struct radv_cmd_state *state = &cmd_buffer->state;
4116 ASSERTED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount;
4117
4118 assert(firstDiscardRectangle < MAX_DISCARD_RECTANGLES);
4119 assert(total_count >= 1 && total_count <= MAX_DISCARD_RECTANGLES);
4120
4121 if (!memcmp(state->dynamic.discard_rectangle.rectangles + firstDiscardRectangle,
4122 pDiscardRectangles, discardRectangleCount * sizeof(*pDiscardRectangles))) {
4123 return;
4124 }
4125
4126 typed_memcpy(&state->dynamic.discard_rectangle.rectangles[firstDiscardRectangle],
4127 pDiscardRectangles, discardRectangleCount);
4128
4129 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE;
4130 }
4131
4132 void radv_CmdSetSampleLocationsEXT(
4133 VkCommandBuffer commandBuffer,
4134 const VkSampleLocationsInfoEXT* pSampleLocationsInfo)
4135 {
4136 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4137 struct radv_cmd_state *state = &cmd_buffer->state;
4138
4139 assert(pSampleLocationsInfo->sampleLocationsCount <= MAX_SAMPLE_LOCATIONS);
4140
4141 state->dynamic.sample_location.per_pixel = pSampleLocationsInfo->sampleLocationsPerPixel;
4142 state->dynamic.sample_location.grid_size = pSampleLocationsInfo->sampleLocationGridSize;
4143 state->dynamic.sample_location.count = pSampleLocationsInfo->sampleLocationsCount;
4144 typed_memcpy(&state->dynamic.sample_location.locations[0],
4145 pSampleLocationsInfo->pSampleLocations,
4146 pSampleLocationsInfo->sampleLocationsCount);
4147
4148 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS;
4149 }
4150
4151 void radv_CmdSetLineStippleEXT(
4152 VkCommandBuffer commandBuffer,
4153 uint32_t lineStippleFactor,
4154 uint16_t lineStipplePattern)
4155 {
4156 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4157 struct radv_cmd_state *state = &cmd_buffer->state;
4158
4159 state->dynamic.line_stipple.factor = lineStippleFactor;
4160 state->dynamic.line_stipple.pattern = lineStipplePattern;
4161
4162 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE;
4163 }
4164
4165 void radv_CmdExecuteCommands(
4166 VkCommandBuffer commandBuffer,
4167 uint32_t commandBufferCount,
4168 const VkCommandBuffer* pCmdBuffers)
4169 {
4170 RADV_FROM_HANDLE(radv_cmd_buffer, primary, commandBuffer);
4171
4172 assert(commandBufferCount > 0);
4173
4174 /* Emit pending flushes on primary prior to executing secondary */
4175 si_emit_cache_flush(primary);
4176
4177 for (uint32_t i = 0; i < commandBufferCount; i++) {
4178 RADV_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]);
4179
4180 primary->scratch_size_per_wave_needed = MAX2(primary->scratch_size_per_wave_needed,
4181 secondary->scratch_size_per_wave_needed);
4182 primary->scratch_waves_wanted = MAX2(primary->scratch_waves_wanted,
4183 secondary->scratch_waves_wanted);
4184 primary->compute_scratch_size_per_wave_needed = MAX2(primary->compute_scratch_size_per_wave_needed,
4185 secondary->compute_scratch_size_per_wave_needed);
4186 primary->compute_scratch_waves_wanted = MAX2(primary->compute_scratch_waves_wanted,
4187 secondary->compute_scratch_waves_wanted);
4188
4189 if (secondary->esgs_ring_size_needed > primary->esgs_ring_size_needed)
4190 primary->esgs_ring_size_needed = secondary->esgs_ring_size_needed;
4191 if (secondary->gsvs_ring_size_needed > primary->gsvs_ring_size_needed)
4192 primary->gsvs_ring_size_needed = secondary->gsvs_ring_size_needed;
4193 if (secondary->tess_rings_needed)
4194 primary->tess_rings_needed = true;
4195 if (secondary->sample_positions_needed)
4196 primary->sample_positions_needed = true;
4197 if (secondary->gds_needed)
4198 primary->gds_needed = true;
4199
4200 if (!secondary->state.framebuffer &&
4201 (primary->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER)) {
4202 			/* Emit the framebuffer state from the primary if the secondary
4203 			 * was recorded without a framebuffer, otherwise
4204 * fast color/depth clears can't work.
4205 */
4206 radv_emit_framebuffer_state(primary);
4207 }
4208
4209 primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs);
4210
4211
4212 		/* When the secondary command buffer is compute-only, we don't
4213 * need to re-emit the current graphics pipeline.
4214 */
4215 if (secondary->state.emitted_pipeline) {
4216 primary->state.emitted_pipeline =
4217 secondary->state.emitted_pipeline;
4218 }
4219
4220 		/* When the secondary command buffer is graphics-only, we don't
4221 * need to re-emit the current compute pipeline.
4222 */
4223 if (secondary->state.emitted_compute_pipeline) {
4224 primary->state.emitted_compute_pipeline =
4225 secondary->state.emitted_compute_pipeline;
4226 }
4227
4228 /* Only re-emit the draw packets when needed. */
4229 if (secondary->state.last_primitive_reset_en != -1) {
4230 primary->state.last_primitive_reset_en =
4231 secondary->state.last_primitive_reset_en;
4232 }
4233
4234 if (secondary->state.last_primitive_reset_index) {
4235 primary->state.last_primitive_reset_index =
4236 secondary->state.last_primitive_reset_index;
4237 }
4238
4239 if (secondary->state.last_ia_multi_vgt_param) {
4240 primary->state.last_ia_multi_vgt_param =
4241 secondary->state.last_ia_multi_vgt_param;
4242 }
4243
4244 primary->state.last_first_instance = secondary->state.last_first_instance;
4245 primary->state.last_num_instances = secondary->state.last_num_instances;
4246 primary->state.last_vertex_offset = secondary->state.last_vertex_offset;
4247 primary->state.last_sx_ps_downconvert = secondary->state.last_sx_ps_downconvert;
4248 primary->state.last_sx_blend_opt_epsilon = secondary->state.last_sx_blend_opt_epsilon;
4249 primary->state.last_sx_blend_opt_control = secondary->state.last_sx_blend_opt_control;
4250
4251 if (secondary->state.last_index_type != -1) {
4252 primary->state.last_index_type =
4253 secondary->state.last_index_type;
4254 }
4255 }
4256
4257 	/* After executing commands from secondary buffers we have to mark
4258 	 * some states as dirty.
4259 */
4260 primary->state.dirty |= RADV_CMD_DIRTY_PIPELINE |
4261 RADV_CMD_DIRTY_INDEX_BUFFER |
4262 RADV_CMD_DIRTY_DYNAMIC_ALL;
4263 radv_mark_descriptor_sets_dirty(primary, VK_PIPELINE_BIND_POINT_GRAPHICS);
4264 radv_mark_descriptor_sets_dirty(primary, VK_PIPELINE_BIND_POINT_COMPUTE);
4265 }
4266
4267 VkResult radv_CreateCommandPool(
4268 VkDevice _device,
4269 const VkCommandPoolCreateInfo* pCreateInfo,
4270 const VkAllocationCallbacks* pAllocator,
4271 VkCommandPool* pCmdPool)
4272 {
4273 RADV_FROM_HANDLE(radv_device, device, _device);
4274 struct radv_cmd_pool *pool;
4275
4276 pool = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8,
4277 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4278 if (pool == NULL)
4279 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
4280
4281 vk_object_base_init(&device->vk, &pool->base,
4282 VK_OBJECT_TYPE_COMMAND_POOL);
4283
4284 if (pAllocator)
4285 pool->alloc = *pAllocator;
4286 else
4287 pool->alloc = device->vk.alloc;
4288
4289 list_inithead(&pool->cmd_buffers);
4290 list_inithead(&pool->free_cmd_buffers);
4291
4292 pool->queue_family_index = pCreateInfo->queueFamilyIndex;
4293
4294 *pCmdPool = radv_cmd_pool_to_handle(pool);
4295
4296 return VK_SUCCESS;
4297
4298 }
4299
4300 void radv_DestroyCommandPool(
4301 VkDevice _device,
4302 VkCommandPool commandPool,
4303 const VkAllocationCallbacks* pAllocator)
4304 {
4305 RADV_FROM_HANDLE(radv_device, device, _device);
4306 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
4307
4308 if (!pool)
4309 return;
4310
4311 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
4312 &pool->cmd_buffers, pool_link) {
4313 radv_cmd_buffer_destroy(cmd_buffer);
4314 }
4315
4316 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
4317 &pool->free_cmd_buffers, pool_link) {
4318 radv_cmd_buffer_destroy(cmd_buffer);
4319 }
4320
4321 vk_object_base_finish(&pool->base);
4322 vk_free2(&device->vk.alloc, pAllocator, pool);
4323 }
4324
4325 VkResult radv_ResetCommandPool(
4326 VkDevice device,
4327 VkCommandPool commandPool,
4328 VkCommandPoolResetFlags flags)
4329 {
4330 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
4331 VkResult result;
4332
4333 list_for_each_entry(struct radv_cmd_buffer, cmd_buffer,
4334 &pool->cmd_buffers, pool_link) {
4335 result = radv_reset_cmd_buffer(cmd_buffer);
4336 if (result != VK_SUCCESS)
4337 return result;
4338 }
4339
4340 return VK_SUCCESS;
4341 }
4342
4343 void radv_TrimCommandPool(
4344 VkDevice device,
4345 VkCommandPool commandPool,
4346 VkCommandPoolTrimFlags flags)
4347 {
4348 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
4349
4350 if (!pool)
4351 return;
4352
4353 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
4354 &pool->free_cmd_buffers, pool_link) {
4355 radv_cmd_buffer_destroy(cmd_buffer);
4356 }
4357 }
4358
4359 static void
4360 radv_cmd_buffer_begin_subpass(struct radv_cmd_buffer *cmd_buffer,
4361 uint32_t subpass_id)
4362 {
4363 struct radv_cmd_state *state = &cmd_buffer->state;
4364 struct radv_subpass *subpass = &state->pass->subpasses[subpass_id];
4365
4366 ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
4367 cmd_buffer->cs, 4096);
4368
4369 radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);
4370
4371 radv_cmd_buffer_set_subpass(cmd_buffer, subpass);
4372
4373 radv_describe_barrier_start(cmd_buffer, RGP_BARRIER_EXTERNAL_RENDER_PASS_SYNC);
4374
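/* Transition each attachment used by this subpass to the layout it expects. */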
4375 for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
4376 const uint32_t a = subpass->attachments[i].attachment;
4377 if (a == VK_ATTACHMENT_UNUSED)
4378 continue;
4379
4380 radv_handle_subpass_image_transition(cmd_buffer,
4381 subpass->attachments[i],
4382 true);
4383 }
4384
4385 radv_describe_barrier_end(cmd_buffer);
4386
4387 radv_cmd_buffer_clear_subpass(cmd_buffer);
4388
4389 assert(cmd_buffer->cs->cdw <= cdw_max);
4390 }
4391
4392 static void
4393 radv_cmd_buffer_end_subpass(struct radv_cmd_buffer *cmd_buffer)
4394 {
4395 struct radv_cmd_state *state = &cmd_buffer->state;
4396 const struct radv_subpass *subpass = state->subpass;
4397 uint32_t subpass_id = radv_get_subpass_id(cmd_buffer);
4398
4399 radv_cmd_buffer_resolve_subpass(cmd_buffer);
4400
4401 radv_describe_barrier_start(cmd_buffer, RGP_BARRIER_EXTERNAL_RENDER_PASS_SYNC);
4402
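/* Transition attachments whose last use is this subpass to their final
 * layouts. */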
4403 for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
4404 const uint32_t a = subpass->attachments[i].attachment;
4405 if (a == VK_ATTACHMENT_UNUSED)
4406 continue;
4407
4408 if (state->pass->attachments[a].last_subpass_idx != subpass_id)
4409 continue;
4410
4411 VkImageLayout layout = state->pass->attachments[a].final_layout;
4412 VkImageLayout stencil_layout = state->pass->attachments[a].stencil_final_layout;
4413 struct radv_subpass_attachment att = { a, layout, stencil_layout };
4414 radv_handle_subpass_image_transition(cmd_buffer, att, false);
4415 }
4416
4417 radv_describe_barrier_end(cmd_buffer);
4418 }
4419
4420 void
4421 radv_cmd_buffer_begin_render_pass(struct radv_cmd_buffer *cmd_buffer,
4422 const VkRenderPassBeginInfo *pRenderPassBegin)
4423 {
4424 RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass);
4425 RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
4426 VkResult result;
4427
4428 cmd_buffer->state.framebuffer = framebuffer;
4429 cmd_buffer->state.pass = pass;
4430 cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
4431
4432 result = radv_cmd_state_setup_attachments(cmd_buffer, pass, pRenderPassBegin);
4433 if (result != VK_SUCCESS)
4434 return;
4435
4436 result = radv_cmd_state_setup_sample_locations(cmd_buffer, pass, pRenderPassBegin);
4437 if (result != VK_SUCCESS)
4438 return;
4439 }
4440
4441 void radv_CmdBeginRenderPass(
4442 VkCommandBuffer commandBuffer,
4443 const VkRenderPassBeginInfo* pRenderPassBegin,
4444 VkSubpassContents contents)
4445 {
4446 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4447
4448 radv_cmd_buffer_begin_render_pass(cmd_buffer, pRenderPassBegin);
4449
4450 radv_cmd_buffer_begin_subpass(cmd_buffer, 0);
4451 }
4452
4453 void radv_CmdBeginRenderPass2(
4454 VkCommandBuffer commandBuffer,
4455 const VkRenderPassBeginInfo* pRenderPassBeginInfo,
4456 const VkSubpassBeginInfo* pSubpassBeginInfo)
4457 {
4458 radv_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
4459 pSubpassBeginInfo->contents);
4460 }
4461
4462 void radv_CmdNextSubpass(
4463 VkCommandBuffer commandBuffer,
4464 VkSubpassContents contents)
4465 {
4466 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4467
4468 uint32_t prev_subpass = radv_get_subpass_id(cmd_buffer);
4469 radv_cmd_buffer_end_subpass(cmd_buffer);
4470 radv_cmd_buffer_begin_subpass(cmd_buffer, prev_subpass + 1);
4471 }
4472
4473 void radv_CmdNextSubpass2(
4474 VkCommandBuffer commandBuffer,
4475 const VkSubpassBeginInfo* pSubpassBeginInfo,
4476 const VkSubpassEndInfo* pSubpassEndInfo)
4477 {
4478 radv_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
4479 }
4480
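/* Write the given view index to the VIEW_INDEX user SGPR of every active
 * shader stage (and the GS copy shader, if any) for multiview rendering. */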
4481 static void radv_emit_view_index(struct radv_cmd_buffer *cmd_buffer, unsigned index)
4482 {
4483 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
4484 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
4485 if (!radv_get_shader(pipeline, stage))
4486 continue;
4487
4488 struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, AC_UD_VIEW_INDEX);
4489 if (loc->sgpr_idx == -1)
4490 continue;
4491 uint32_t base_reg = pipeline->user_data_0[stage];
4492 radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
4493
4494 }
4495 if (radv_pipeline_has_gs_copy_shader(pipeline)) {
4496 struct radv_userdata_info *loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_VIEW_INDEX];
4497 if (loc->sgpr_idx != -1) {
4498 uint32_t base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
4499 radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
4500 }
4501 }
4502 }
4503
4504 static void
4505 radv_cs_emit_draw_packet(struct radv_cmd_buffer *cmd_buffer,
4506 uint32_t vertex_count,
4507 bool use_opaque)
4508 {
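/* DRAW_INDEX_AUTO makes the CP generate indices 0..vertex_count-1; with
 * USE_OPAQUE set the vertex count comes from a streamout counter instead
 * (used for transform feedback draws). */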
4509 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, cmd_buffer->state.predicating));
4510 radeon_emit(cmd_buffer->cs, vertex_count);
4511 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
4512 S_0287F0_USE_OPAQUE(use_opaque));
4513 }
4514
4515 static void
4516 radv_cs_emit_draw_indexed_packet(struct radv_cmd_buffer *cmd_buffer,
4517 uint64_t index_va,
4518 uint32_t index_count)
4519 {
4520 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, cmd_buffer->state.predicating));
4521 radeon_emit(cmd_buffer->cs, cmd_buffer->state.max_index_count);
4522 radeon_emit(cmd_buffer->cs, index_va);
4523 radeon_emit(cmd_buffer->cs, index_va >> 32);
4524 radeon_emit(cmd_buffer->cs, index_count);
4525 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_DMA);
4526 }
4527
4528 static void
4529 radv_cs_emit_indirect_draw_packet(struct radv_cmd_buffer *cmd_buffer,
4530 bool indexed,
4531 uint32_t draw_count,
4532 uint64_t count_va,
4533 uint32_t stride)
4534 {
4535 struct radeon_cmdbuf *cs = cmd_buffer->cs;
4536 unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
4537 : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
4538 bool draw_id_enable = radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.vs.needs_draw_id;
4539 uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr;
4540 bool predicating = cmd_buffer->state.predicating;
4541 assert(base_reg);
4542
4543 /* just reset draw state for vertex data */
4544 cmd_buffer->state.last_first_instance = -1;
4545 cmd_buffer->state.last_num_instances = -1;
4546 cmd_buffer->state.last_vertex_offset = -1;
4547
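/* A single indirect draw without a count buffer or draw id can use the plain
 * DRAW_*_INDIRECT packet; everything else needs the MULTI variant. */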
4548 if (draw_count == 1 && !count_va && !draw_id_enable) {
4549 radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT :
4550 PKT3_DRAW_INDIRECT, 3, predicating));
4551 radeon_emit(cs, 0);
4552 radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
4553 radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
4554 radeon_emit(cs, di_src_sel);
4555 } else {
4556 radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
4557 PKT3_DRAW_INDIRECT_MULTI,
4558 8, predicating));
4559 radeon_emit(cs, 0);
4560 radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
4561 radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
4562 radeon_emit(cs, (((base_reg + 8) - SI_SH_REG_OFFSET) >> 2) |
4563 S_2C3_DRAW_INDEX_ENABLE(draw_id_enable) |
4564 S_2C3_COUNT_INDIRECT_ENABLE(!!count_va));
4565 radeon_emit(cs, draw_count); /* count */
4566 radeon_emit(cs, count_va); /* count_addr */
4567 radeon_emit(cs, count_va >> 32);
4568 radeon_emit(cs, stride); /* stride */
4569 radeon_emit(cs, di_src_sel);
4570 }
4571 }
4572
4573 static void
4574 radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer,
4575 const struct radv_draw_info *info)
4576 {
4577 struct radv_cmd_state *state = &cmd_buffer->state;
4578 struct radeon_winsys *ws = cmd_buffer->device->ws;
4579 struct radeon_cmdbuf *cs = cmd_buffer->cs;
4580
4581 if (info->indirect) {
4582 uint64_t va = radv_buffer_get_va(info->indirect->bo);
4583 uint64_t count_va = 0;
4584
4585 va += info->indirect->offset + info->indirect_offset;
4586
4587 radv_cs_add_buffer(ws, cs, info->indirect->bo);
4588
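/* Program the base address of the indirect arguments; the indirect draw
 * packets below reference offsets relative to it. */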
4589 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
4590 radeon_emit(cs, 1);
4591 radeon_emit(cs, va);
4592 radeon_emit(cs, va >> 32);
4593
4594 if (info->count_buffer) {
4595 count_va = radv_buffer_get_va(info->count_buffer->bo);
4596 count_va += info->count_buffer->offset +
4597 info->count_buffer_offset;
4598
4599 radv_cs_add_buffer(ws, cs, info->count_buffer->bo);
4600 }
4601
4602 if (!state->subpass->view_mask) {
4603 radv_cs_emit_indirect_draw_packet(cmd_buffer,
4604 info->indexed,
4605 info->count,
4606 count_va,
4607 info->stride);
4608 } else {
4609 unsigned i;
4610 for_each_bit(i, state->subpass->view_mask) {
4611 radv_emit_view_index(cmd_buffer, i);
4612
4613 radv_cs_emit_indirect_draw_packet(cmd_buffer,
4614 info->indexed,
4615 info->count,
4616 count_va,
4617 info->stride);
4618 }
4619 }
4620 } else {
4621 assert(state->pipeline->graphics.vtx_base_sgpr);
4622
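/* Re-emit the base vertex/first instance user SGPRs only when they change;
 * when vtx_emit_num is 3, a third slot (the draw id) is zeroed as well. */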
4623 if (info->vertex_offset != state->last_vertex_offset ||
4624 info->first_instance != state->last_first_instance) {
4625 radeon_set_sh_reg_seq(cs, state->pipeline->graphics.vtx_base_sgpr,
4626 state->pipeline->graphics.vtx_emit_num);
4627
4628 radeon_emit(cs, info->vertex_offset);
4629 radeon_emit(cs, info->first_instance);
4630 if (state->pipeline->graphics.vtx_emit_num == 3)
4631 radeon_emit(cs, 0);
4632 state->last_first_instance = info->first_instance;
4633 state->last_vertex_offset = info->vertex_offset;
4634 }
4635
4636 if (state->last_num_instances != info->instance_count) {
4637 radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, false));
4638 radeon_emit(cs, info->instance_count);
4639 state->last_num_instances = info->instance_count;
4640 }
4641
4642 if (info->indexed) {
4643 int index_size = radv_get_vgt_index_size(state->index_type);
4644 uint64_t index_va;
4645
4646 /* Skip draw calls with 0-sized index buffers. They
4647 * cause a hang on some chips, like Navi10-14.
4648 */
4649 if (!cmd_buffer->state.max_index_count)
4650 return;
4651
4652 index_va = state->index_va;
4653 index_va += info->first_index * index_size;
4654
4655 if (!state->subpass->view_mask) {
4656 radv_cs_emit_draw_indexed_packet(cmd_buffer,
4657 index_va,
4658 info->count);
4659 } else {
4660 unsigned i;
4661 for_each_bit(i, state->subpass->view_mask) {
4662 radv_emit_view_index(cmd_buffer, i);
4663
4664 radv_cs_emit_draw_indexed_packet(cmd_buffer,
4665 index_va,
4666 info->count);
4667 }
4668 }
4669 } else {
4670 if (!state->subpass->view_mask) {
4671 radv_cs_emit_draw_packet(cmd_buffer,
4672 info->count,
4673 !!info->strmout_buffer);
4674 } else {
4675 unsigned i;
4676 for_each_bit(i, state->subpass->view_mask) {
4677 radv_emit_view_index(cmd_buffer, i);
4678
4679 radv_cs_emit_draw_packet(cmd_buffer,
4680 info->count,
4681 !!info->strmout_buffer);
4682 }
4683 }
4684 }
4685 }
4686 }
4687
4688 /*
4689  * Vega and Raven have a bug that triggers if there are multiple context
4690 * register contexts active at the same time with different scissor values.
4691 *
4692 * There are two possible workarounds:
4693 * 1) Wait for PS_PARTIAL_FLUSH every time the scissor is changed. That way
4694 * there is only ever 1 active set of scissor values at the same time.
4695 *
4696 * 2) Whenever the hardware switches contexts we have to set the scissor
4697 * registers again even if it is a noop. That way the new context gets
4698 * the correct scissor values.
4699 *
4700 * This implements option 2. radv_need_late_scissor_emission needs to
4701 * return true on affected HW if radv_emit_all_graphics_states sets
4702 * any context registers.
4703 */
4704 static bool radv_need_late_scissor_emission(struct radv_cmd_buffer *cmd_buffer,
4705 const struct radv_draw_info *info)
4706 {
4707 struct radv_cmd_state *state = &cmd_buffer->state;
4708
4709 if (!cmd_buffer->device->physical_device->rad_info.has_gfx9_scissor_bug)
4710 return false;
4711
4712 if (cmd_buffer->state.context_roll_without_scissor_emitted || info->strmout_buffer)
4713 return true;
4714
4715 uint32_t used_states = cmd_buffer->state.pipeline->graphics.needed_dynamic_state | ~RADV_CMD_DIRTY_DYNAMIC_ALL;
4716
4717 /* Index, vertex and streamout buffers don't change context regs, and
4718 * pipeline is already handled.
4719 */
4720 used_states &= ~(RADV_CMD_DIRTY_INDEX_BUFFER |
4721 RADV_CMD_DIRTY_VERTEX_BUFFER |
4722 RADV_CMD_DIRTY_STREAMOUT_BUFFER |
4723 RADV_CMD_DIRTY_PIPELINE);
4724
4725 if (cmd_buffer->state.dirty & used_states)
4726 return true;
4727
4728 uint32_t primitive_reset_index =
4729 radv_get_primitive_reset_index(cmd_buffer);
4730
4731 if (info->indexed && state->pipeline->graphics.prim_restart_enable &&
4732 primitive_reset_index != state->last_primitive_reset_index)
4733 return true;
4734
4735 return false;
4736 }
4737
4738 static void
4739 radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer,
4740 const struct radv_draw_info *info)
4741 {
4742 bool late_scissor_emission;
4743
4744 if ((cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER) ||
4745 cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline)
4746 radv_emit_rbplus_state(cmd_buffer);
4747
4748 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE)
4749 radv_emit_graphics_pipeline(cmd_buffer);
4750
4751 	/* This should happen before cmd_buffer->state.dirty is cleared
4752 * (excluding RADV_CMD_DIRTY_PIPELINE) and after
4753 * cmd_buffer->state.context_roll_without_scissor_emitted is set. */
4754 late_scissor_emission =
4755 radv_need_late_scissor_emission(cmd_buffer, info);
4756
4757 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER)
4758 radv_emit_framebuffer_state(cmd_buffer);
4759
4760 if (info->indexed) {
4761 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_INDEX_BUFFER)
4762 radv_emit_index_buffer(cmd_buffer, info->indirect);
4763 } else {
4764 /* On GFX7 and later, non-indexed draws overwrite VGT_INDEX_TYPE,
4765 * so the state must be re-emitted before the next indexed
4766 * draw.
4767 */
4768 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
4769 cmd_buffer->state.last_index_type = -1;
4770 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
4771 }
4772 }
4773
4774 radv_cmd_buffer_flush_dynamic_state(cmd_buffer);
4775
4776 radv_emit_draw_registers(cmd_buffer, info);
4777
4778 if (late_scissor_emission)
4779 radv_emit_scissor(cmd_buffer);
4780 }
4781
4782 static void
4783 radv_draw(struct radv_cmd_buffer *cmd_buffer,
4784 const struct radv_draw_info *info)
4785 {
4786 struct radeon_info *rad_info =
4787 &cmd_buffer->device->physical_device->rad_info;
4788 bool has_prefetch =
4789 cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7;
4790 bool pipeline_is_dirty =
4791 (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
4792 cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline;
4793
4794 ASSERTED unsigned cdw_max =
4795 radeon_check_space(cmd_buffer->device->ws,
4796 cmd_buffer->cs, 4096);
4797
4798 if (likely(!info->indirect)) {
4799 /* GFX6-GFX7 treat instance_count==0 as instance_count==1. There is
4800 * no workaround for indirect draws, but we can at least skip
4801 * direct draws.
4802 */
4803 if (unlikely(!info->instance_count))
4804 return;
4805
4806 		/* Handle count == 0, unless the vertex count comes from a streamout counter buffer. */
4807 if (unlikely(!info->count && !info->strmout_buffer))
4808 return;
4809 }
4810
4811 radv_describe_draw(cmd_buffer);
4812
4813 /* Use optimal packet order based on whether we need to sync the
4814 * pipeline.
4815 */
4816 if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
4817 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
4818 RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
4819 RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) {
4820 /* If we have to wait for idle, set all states first, so that
4821 * all SET packets are processed in parallel with previous draw
4822 * calls. Then upload descriptors, set shader pointers, and
4823 * draw, and prefetch at the end. This ensures that the time
4824 * the CUs are idle is very short. (there are only SET_SH
4825 * packets between the wait and the draw)
4826 */
4827 radv_emit_all_graphics_states(cmd_buffer, info);
4828 si_emit_cache_flush(cmd_buffer);
4829 /* <-- CUs are idle here --> */
4830
4831 radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty);
4832
4833 radv_emit_draw_packets(cmd_buffer, info);
4834 /* <-- CUs are busy here --> */
4835
4836 /* Start prefetches after the draw has been started. Both will
4837 * run in parallel, but starting the draw first is more
4838 * important.
4839 */
4840 if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
4841 radv_emit_prefetch_L2(cmd_buffer,
4842 cmd_buffer->state.pipeline, false);
4843 }
4844 } else {
4845 /* If we don't wait for idle, start prefetches first, then set
4846 * states, and draw at the end.
4847 */
4848 si_emit_cache_flush(cmd_buffer);
4849
4850 if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
4851 /* Only prefetch the vertex shader and VBO descriptors
4852 * in order to start the draw as soon as possible.
4853 */
4854 radv_emit_prefetch_L2(cmd_buffer,
4855 cmd_buffer->state.pipeline, true);
4856 }
4857
4858 radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty);
4859
4860 radv_emit_all_graphics_states(cmd_buffer, info);
4861 radv_emit_draw_packets(cmd_buffer, info);
4862
4863 /* Prefetch the remaining shaders after the draw has been
4864 * started.
4865 */
4866 if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
4867 radv_emit_prefetch_L2(cmd_buffer,
4868 cmd_buffer->state.pipeline, false);
4869 }
4870 }
4871
4872 /* Workaround for a VGT hang when streamout is enabled.
4873 * It must be done after drawing.
4874 */
4875 if (cmd_buffer->state.streamout.streamout_enabled &&
4876 (rad_info->family == CHIP_HAWAII ||
4877 rad_info->family == CHIP_TONGA ||
4878 rad_info->family == CHIP_FIJI)) {
4879 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_STREAMOUT_SYNC;
4880 }
4881
4882 assert(cmd_buffer->cs->cdw <= cdw_max);
4883 radv_cmd_buffer_after_draw(cmd_buffer, RADV_CMD_FLAG_PS_PARTIAL_FLUSH);
4884 }
4885
4886 void radv_CmdDraw(
4887 VkCommandBuffer commandBuffer,
4888 uint32_t vertexCount,
4889 uint32_t instanceCount,
4890 uint32_t firstVertex,
4891 uint32_t firstInstance)
4892 {
4893 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4894 struct radv_draw_info info = {};
4895
4896 info.count = vertexCount;
4897 info.instance_count = instanceCount;
4898 info.first_instance = firstInstance;
4899 info.vertex_offset = firstVertex;
4900
4901 radv_draw(cmd_buffer, &info);
4902 }
4903
4904 void radv_CmdDrawIndexed(
4905 VkCommandBuffer commandBuffer,
4906 uint32_t indexCount,
4907 uint32_t instanceCount,
4908 uint32_t firstIndex,
4909 int32_t vertexOffset,
4910 uint32_t firstInstance)
4911 {
4912 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4913 struct radv_draw_info info = {};
4914
4915 info.indexed = true;
4916 info.count = indexCount;
4917 info.instance_count = instanceCount;
4918 info.first_index = firstIndex;
4919 info.vertex_offset = vertexOffset;
4920 info.first_instance = firstInstance;
4921
4922 radv_draw(cmd_buffer, &info);
4923 }
4924
4925 void radv_CmdDrawIndirect(
4926 VkCommandBuffer commandBuffer,
4927 VkBuffer _buffer,
4928 VkDeviceSize offset,
4929 uint32_t drawCount,
4930 uint32_t stride)
4931 {
4932 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4933 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
4934 struct radv_draw_info info = {};
4935
4936 info.count = drawCount;
4937 info.indirect = buffer;
4938 info.indirect_offset = offset;
4939 info.stride = stride;
4940
4941 radv_draw(cmd_buffer, &info);
4942 }
4943
4944 void radv_CmdDrawIndexedIndirect(
4945 VkCommandBuffer commandBuffer,
4946 VkBuffer _buffer,
4947 VkDeviceSize offset,
4948 uint32_t drawCount,
4949 uint32_t stride)
4950 {
4951 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4952 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
4953 struct radv_draw_info info = {};
4954
4955 info.indexed = true;
4956 info.count = drawCount;
4957 info.indirect = buffer;
4958 info.indirect_offset = offset;
4959 info.stride = stride;
4960
4961 radv_draw(cmd_buffer, &info);
4962 }
4963
4964 void radv_CmdDrawIndirectCount(
4965 VkCommandBuffer commandBuffer,
4966 VkBuffer _buffer,
4967 VkDeviceSize offset,
4968 VkBuffer _countBuffer,
4969 VkDeviceSize countBufferOffset,
4970 uint32_t maxDrawCount,
4971 uint32_t stride)
4972 {
4973 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4974 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
4975 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
4976 struct radv_draw_info info = {};
4977
4978 info.count = maxDrawCount;
4979 info.indirect = buffer;
4980 info.indirect_offset = offset;
4981 info.count_buffer = count_buffer;
4982 info.count_buffer_offset = countBufferOffset;
4983 info.stride = stride;
4984
4985 radv_draw(cmd_buffer, &info);
4986 }
4987
4988 void radv_CmdDrawIndexedIndirectCount(
4989 VkCommandBuffer commandBuffer,
4990 VkBuffer _buffer,
4991 VkDeviceSize offset,
4992 VkBuffer _countBuffer,
4993 VkDeviceSize countBufferOffset,
4994 uint32_t maxDrawCount,
4995 uint32_t stride)
4996 {
4997 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4998 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
4999 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
5000 struct radv_draw_info info = {};
5001
5002 info.indexed = true;
5003 info.count = maxDrawCount;
5004 info.indirect = buffer;
5005 info.indirect_offset = offset;
5006 info.count_buffer = count_buffer;
5007 info.count_buffer_offset = countBufferOffset;
5008 info.stride = stride;
5009
5010 radv_draw(cmd_buffer, &info);
5011 }
5012
5013 struct radv_dispatch_info {
5014 /**
5015 * Determine the layout of the grid (in block units) to be used.
5016 */
5017 uint32_t blocks[3];
5018
5019 /**
5020 	 * A starting offset for the grid. Even if unaligned is set, the
5021 	 * offset must still be aligned.
5022 */
5023 uint32_t offsets[3];
5024 /**
5025 * Whether it's an unaligned compute dispatch.
5026 */
5027 bool unaligned;
5028
5029 /**
5030 * Indirect compute parameters resource.
5031 */
5032 struct radv_buffer *indirect;
5033 uint64_t indirect_offset;
5034 };
5035
5036 static void
5037 radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer,
5038 const struct radv_dispatch_info *info)
5039 {
5040 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
5041 struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
5042 unsigned dispatch_initiator = cmd_buffer->device->dispatch_initiator;
5043 struct radeon_winsys *ws = cmd_buffer->device->ws;
5044 bool predicating = cmd_buffer->state.predicating;
5045 struct radeon_cmdbuf *cs = cmd_buffer->cs;
5046 struct radv_userdata_info *loc;
5047
5048 loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE,
5049 AC_UD_CS_GRID_SIZE);
5050
5051 ASSERTED unsigned cdw_max = radeon_check_space(ws, cs, 25);
5052
5053 if (compute_shader->info.wave_size == 32) {
5054 assert(cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10);
5055 dispatch_initiator |= S_00B800_CS_W32_EN(1);
5056 }
5057
5058 if (info->indirect) {
5059 uint64_t va = radv_buffer_get_va(info->indirect->bo);
5060
5061 va += info->indirect->offset + info->indirect_offset;
5062
5063 radv_cs_add_buffer(ws, cs, info->indirect->bo);
5064
5065 if (loc->sgpr_idx != -1) {
5066 for (unsigned i = 0; i < 3; ++i) {
5067 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
5068 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
5069 COPY_DATA_DST_SEL(COPY_DATA_REG));
5070 radeon_emit(cs, (va + 4 * i));
5071 radeon_emit(cs, (va + 4 * i) >> 32);
5072 radeon_emit(cs, ((R_00B900_COMPUTE_USER_DATA_0
5073 + loc->sgpr_idx * 4) >> 2) + i);
5074 radeon_emit(cs, 0);
5075 }
5076 }
5077
5078 if (radv_cmd_buffer_uses_mec(cmd_buffer)) {
5079 radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 2, predicating) |
5080 PKT3_SHADER_TYPE_S(1));
5081 radeon_emit(cs, va);
5082 radeon_emit(cs, va >> 32);
5083 radeon_emit(cs, dispatch_initiator);
5084 } else {
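			/* On the GFX ring, program the indirect base address
			 * with SET_BASE first; the zero offset given to
			 * DISPATCH_INDIRECT below is relative to that base. */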
5085 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
5086 PKT3_SHADER_TYPE_S(1));
5087 radeon_emit(cs, 1);
5088 radeon_emit(cs, va);
5089 radeon_emit(cs, va >> 32);
5090
5091 radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, predicating) |
5092 PKT3_SHADER_TYPE_S(1));
5093 radeon_emit(cs, 0);
5094 radeon_emit(cs, dispatch_initiator);
5095 }
5096 } else {
5097 unsigned blocks[3] = { info->blocks[0], info->blocks[1], info->blocks[2] };
5098 unsigned offsets[3] = { info->offsets[0], info->offsets[1], info->offsets[2] };
5099
5100 if (info->unaligned) {
5101 unsigned *cs_block_size = compute_shader->info.cs.block_size;
5102 unsigned remainder[3];
5103
5104 			/* If the grid is already aligned, these come out to an
5105 			 * entire block size rather than 0.
5106 */
5107 remainder[0] = blocks[0] + cs_block_size[0] -
5108 align_u32_npot(blocks[0], cs_block_size[0]);
5109 remainder[1] = blocks[1] + cs_block_size[1] -
5110 align_u32_npot(blocks[1], cs_block_size[1]);
5111 remainder[2] = blocks[2] + cs_block_size[2] -
5112 align_u32_npot(blocks[2], cs_block_size[2]);
5113
5114 blocks[0] = round_up_u32(blocks[0], cs_block_size[0]);
5115 blocks[1] = round_up_u32(blocks[1], cs_block_size[1]);
5116 blocks[2] = round_up_u32(blocks[2], cs_block_size[2]);
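			/* Example: 100 threads with a block size of 64 give
			 * round_up_u32(100, 64) = 2 thread groups and a
			 * remainder of 100 + 64 - 128 = 36 threads for the
			 * partial group. */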
5117
5118 for(unsigned i = 0; i < 3; ++i) {
5119 assert(offsets[i] % cs_block_size[i] == 0);
5120 offsets[i] /= cs_block_size[i];
5121 }
5122
5123 radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
5124 radeon_emit(cs,
5125 S_00B81C_NUM_THREAD_FULL(cs_block_size[0]) |
5126 S_00B81C_NUM_THREAD_PARTIAL(remainder[0]));
5127 radeon_emit(cs,
5128 S_00B81C_NUM_THREAD_FULL(cs_block_size[1]) |
5129 S_00B81C_NUM_THREAD_PARTIAL(remainder[1]));
5130 radeon_emit(cs,
5131 S_00B81C_NUM_THREAD_FULL(cs_block_size[2]) |
5132 S_00B81C_NUM_THREAD_PARTIAL(remainder[2]));
5133
5134 dispatch_initiator |= S_00B800_PARTIAL_TG_EN(1);
5135 }
5136
5137 if (loc->sgpr_idx != -1) {
5138 assert(loc->num_sgprs == 3);
5139
5140 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
5141 loc->sgpr_idx * 4, 3);
5142 radeon_emit(cs, blocks[0]);
5143 radeon_emit(cs, blocks[1]);
5144 radeon_emit(cs, blocks[2]);
5145 }
5146
5147 if (offsets[0] || offsets[1] || offsets[2]) {
5148 radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
5149 radeon_emit(cs, offsets[0]);
5150 radeon_emit(cs, offsets[1]);
5151 radeon_emit(cs, offsets[2]);
5152
5153 /* The blocks in the packet are not counts but end values. */
5154 for (unsigned i = 0; i < 3; ++i)
5155 blocks[i] += offsets[i];
5156 } else {
5157 dispatch_initiator |= S_00B800_FORCE_START_AT_000(1);
5158 }
5159
5160 radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, predicating) |
5161 PKT3_SHADER_TYPE_S(1));
5162 radeon_emit(cs, blocks[0]);
5163 radeon_emit(cs, blocks[1]);
5164 radeon_emit(cs, blocks[2]);
5165 radeon_emit(cs, dispatch_initiator);
5166 }
5167
5168 assert(cmd_buffer->cs->cdw <= cdw_max);
5169 }
5170
5171 static void
5172 radv_upload_compute_shader_descriptors(struct radv_cmd_buffer *cmd_buffer)
5173 {
5174 radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
5175 radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
5176 }
5177
5178 static void
5179 radv_dispatch(struct radv_cmd_buffer *cmd_buffer,
5180 const struct radv_dispatch_info *info)
5181 {
5182 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
5183 bool has_prefetch =
5184 cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7;
5185 bool pipeline_is_dirty = pipeline &&
5186 pipeline != cmd_buffer->state.emitted_compute_pipeline;
5187
5188 radv_describe_dispatch(cmd_buffer, 8, 8, 8);
5189
5190 if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
5191 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
5192 RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
5193 RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) {
5194 /* If we have to wait for idle, set all states first, so that
5195 * all SET packets are processed in parallel with previous draw
5196 * calls. Then upload descriptors, set shader pointers, and
5197 * dispatch, and prefetch at the end. This ensures that the
5198 * time the CUs are idle is very short. (there are only SET_SH
5199 * packets between the wait and the draw)
5200 */
5201 radv_emit_compute_pipeline(cmd_buffer);
5202 si_emit_cache_flush(cmd_buffer);
5203 /* <-- CUs are idle here --> */
5204
5205 radv_upload_compute_shader_descriptors(cmd_buffer);
5206
5207 radv_emit_dispatch_packets(cmd_buffer, info);
5208 /* <-- CUs are busy here --> */
5209
5210 /* Start prefetches after the dispatch has been started. Both
5211 * will run in parallel, but starting the dispatch first is
5212 * more important.
5213 */
5214 if (has_prefetch && pipeline_is_dirty) {
5215 radv_emit_shader_prefetch(cmd_buffer,
5216 pipeline->shaders[MESA_SHADER_COMPUTE]);
5217 }
5218 } else {
5219 /* If we don't wait for idle, start prefetches first, then set
5220 * states, and dispatch at the end.
5221 */
5222 si_emit_cache_flush(cmd_buffer);
5223
5224 if (has_prefetch && pipeline_is_dirty) {
5225 radv_emit_shader_prefetch(cmd_buffer,
5226 pipeline->shaders[MESA_SHADER_COMPUTE]);
5227 }
5228
5229 radv_upload_compute_shader_descriptors(cmd_buffer);
5230
5231 radv_emit_compute_pipeline(cmd_buffer);
5232 radv_emit_dispatch_packets(cmd_buffer, info);
5233 }
5234
5235 radv_cmd_buffer_after_draw(cmd_buffer, RADV_CMD_FLAG_CS_PARTIAL_FLUSH);
5236 }
5237
5238 void radv_CmdDispatchBase(
5239 VkCommandBuffer commandBuffer,
5240 uint32_t base_x,
5241 uint32_t base_y,
5242 uint32_t base_z,
5243 uint32_t x,
5244 uint32_t y,
5245 uint32_t z)
5246 {
5247 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5248 struct radv_dispatch_info info = {};
5249
5250 info.blocks[0] = x;
5251 info.blocks[1] = y;
5252 info.blocks[2] = z;
5253
5254 info.offsets[0] = base_x;
5255 info.offsets[1] = base_y;
5256 info.offsets[2] = base_z;
5257 radv_dispatch(cmd_buffer, &info);
5258 }
5259
5260 void radv_CmdDispatch(
5261 VkCommandBuffer commandBuffer,
5262 uint32_t x,
5263 uint32_t y,
5264 uint32_t z)
5265 {
5266 radv_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
5267 }
5268
5269 void radv_CmdDispatchIndirect(
5270 VkCommandBuffer commandBuffer,
5271 VkBuffer _buffer,
5272 VkDeviceSize offset)
5273 {
5274 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5275 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
5276 struct radv_dispatch_info info = {};
5277
5278 info.indirect = buffer;
5279 info.indirect_offset = offset;
5280
5281 radv_dispatch(cmd_buffer, &info);
5282 }
5283
5284 void radv_unaligned_dispatch(
5285 struct radv_cmd_buffer *cmd_buffer,
5286 uint32_t x,
5287 uint32_t y,
5288 uint32_t z)
5289 {
5290 struct radv_dispatch_info info = {};
5291
5292 info.blocks[0] = x;
5293 info.blocks[1] = y;
5294 info.blocks[2] = z;
5295 info.unaligned = 1;
5296
5297 radv_dispatch(cmd_buffer, &info);
5298 }
5299
5300 void
5301 radv_cmd_buffer_end_render_pass(struct radv_cmd_buffer *cmd_buffer)
5302 {
5303 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
5304 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs);
5305
5306 cmd_buffer->state.pass = NULL;
5307 cmd_buffer->state.subpass = NULL;
5308 cmd_buffer->state.attachments = NULL;
5309 cmd_buffer->state.framebuffer = NULL;
5310 cmd_buffer->state.subpass_sample_locs = NULL;
5311 }
5312
5313 void radv_CmdEndRenderPass(
5314 VkCommandBuffer commandBuffer)
5315 {
5316 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5317
5318 radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier);
5319
5320 radv_cmd_buffer_end_subpass(cmd_buffer);
5321
5322 radv_cmd_buffer_end_render_pass(cmd_buffer);
5323 }
5324
5325 void radv_CmdEndRenderPass2(
5326 VkCommandBuffer commandBuffer,
5327 const VkSubpassEndInfo* pSubpassEndInfo)
5328 {
5329 radv_CmdEndRenderPass(commandBuffer);
5330 }
5331
5332 /*
5333 * For HTILE we have the following interesting clear words:
5334 * 0xfffff30f: Uncompressed, full depth range, for depth+stencil HTILE.
5335 * 0xfffc000f: Uncompressed, full depth range, for depth-only HTILE.
5336 * 0xfffffff0: Clear depth to 1.0
5337 * 0x00000000: Clear depth to 0.0
5338 */
5339 static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
5340 struct radv_image *image,
5341 const VkImageSubresourceRange *range)
5342 {
5343 assert(range->baseMipLevel == 0);
5344 	assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_MIP_LEVELS);
5345 VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT;
5346 struct radv_cmd_state *state = &cmd_buffer->state;
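	/* Pick the depth+stencil or depth-only uncompressed clear word
	 * (see the table above) depending on whether the format has a
	 * stencil aspect. */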
5347 uint32_t htile_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
5348 VkClearDepthStencilValue value = {};
5349 struct radv_barrier_data barrier = {};
5350
5351 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
5352 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
5353
5354 barrier.layout_transitions.init_mask_ram = 1;
5355 radv_describe_layout_transition(cmd_buffer, &barrier);
5356
5357 state->flush_bits |= radv_clear_htile(cmd_buffer, image, range, htile_value);
5358
5359 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
5360
5361 if (vk_format_is_stencil(image->vk_format))
5362 aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
5363
5364 radv_set_ds_clear_metadata(cmd_buffer, image, range, value, aspects);
5365
5366 if (radv_image_is_tc_compat_htile(image)) {
5367 		/* Initialize the TC-compat metadata value to 0 because by
5368 		 * default DB_Z_INFO.RANGE_PRECISION is set to 1, and we only
5369 		 * have to conditionally update its value when performing
5370 		 * a fast depth clear.
5371 */
5372 radv_set_tc_compat_zrange_metadata(cmd_buffer, image, range, 0);
5373 }
5374 }
5375
5376 static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
5377 struct radv_image *image,
5378 VkImageLayout src_layout,
5379 bool src_render_loop,
5380 VkImageLayout dst_layout,
5381 bool dst_render_loop,
5382 unsigned src_queue_mask,
5383 unsigned dst_queue_mask,
5384 const VkImageSubresourceRange *range,
5385 struct radv_sample_locations_state *sample_locs)
5386 {
5387 if (!radv_image_has_htile(image))
5388 return;
5389
5390 if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
5391 radv_initialize_htile(cmd_buffer, image, range);
5392 } else if (!radv_layout_is_htile_compressed(image, src_layout, src_render_loop, src_queue_mask) &&
5393 radv_layout_is_htile_compressed(image, dst_layout, dst_render_loop, dst_queue_mask)) {
5394 radv_initialize_htile(cmd_buffer, image, range);
5395 } else if (radv_layout_is_htile_compressed(image, src_layout, src_render_loop, src_queue_mask) &&
5396 !radv_layout_is_htile_compressed(image, dst_layout, dst_render_loop, dst_queue_mask)) {
5397 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
5398 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
5399
5400 radv_decompress_depth_stencil(cmd_buffer, image, range,
5401 sample_locs);
5402
5403 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
5404 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
5405 }
5406 }
5407
5408 static void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
5409 struct radv_image *image,
5410 const VkImageSubresourceRange *range,
5411 uint32_t value)
5412 {
5413 struct radv_cmd_state *state = &cmd_buffer->state;
5414 struct radv_barrier_data barrier = {};
5415
5416 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
5417 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
5418
5419 barrier.layout_transitions.init_mask_ram = 1;
5420 radv_describe_layout_transition(cmd_buffer, &barrier);
5421
5422 state->flush_bits |= radv_clear_cmask(cmd_buffer, image, range, value);
5423
5424 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
5425 }
5426
5427 void radv_initialize_fmask(struct radv_cmd_buffer *cmd_buffer,
5428 struct radv_image *image,
5429 const VkImageSubresourceRange *range)
5430 {
5431 struct radv_cmd_state *state = &cmd_buffer->state;
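	/* One clear value per log2(sample count): 1x, 2x, 4x and 8x MSAA. */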
5432 static const uint32_t fmask_clear_values[4] = {
5433 0x00000000,
5434 0x02020202,
5435 0xE4E4E4E4,
5436 0x76543210
5437 };
5438 uint32_t log2_samples = util_logbase2(image->info.samples);
5439 uint32_t value = fmask_clear_values[log2_samples];
5440 struct radv_barrier_data barrier = {};
5441
5442 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
5443 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
5444
5445 barrier.layout_transitions.init_mask_ram = 1;
5446 radv_describe_layout_transition(cmd_buffer, &barrier);
5447
5448 state->flush_bits |= radv_clear_fmask(cmd_buffer, image, range, value);
5449
5450 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
5451 }
5452
5453 void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
5454 struct radv_image *image,
5455 const VkImageSubresourceRange *range, uint32_t value)
5456 {
5457 struct radv_cmd_state *state = &cmd_buffer->state;
5458 struct radv_barrier_data barrier = {};
5459 unsigned size = 0;
5460
5461 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
5462 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
5463
5464 barrier.layout_transitions.init_mask_ram = 1;
5465 radv_describe_layout_transition(cmd_buffer, &barrier);
5466
5467 state->flush_bits |= radv_clear_dcc(cmd_buffer, image, range, value);
5468
5469 if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX8) {
5470 /* When DCC is enabled with mipmaps, some levels might not
5471 * support fast clears and we have to initialize them as "fully
5472 * expanded".
5473 */
5474 /* Compute the size of all fast clearable DCC levels. */
5475 for (unsigned i = 0; i < image->planes[0].surface.num_dcc_levels; i++) {
5476 struct legacy_surf_level *surf_level =
5477 &image->planes[0].surface.u.legacy.level[i];
5478 unsigned dcc_fast_clear_size =
5479 surf_level->dcc_slice_fast_clear_size * image->info.array_size;
5480
5481 if (!dcc_fast_clear_size)
5482 break;
5483
5484 size = surf_level->dcc_offset + dcc_fast_clear_size;
5485 }
5486
5487 		/* Initialize DCC for the mipmap levels that can't be fast cleared. */
5488 if (size != image->planes[0].surface.dcc_size) {
5489 state->flush_bits |=
5490 radv_fill_buffer(cmd_buffer, image->bo,
5491 image->offset + image->planes[0].surface.dcc_offset + size,
5492 image->planes[0].surface.dcc_size - size,
5493 0xffffffff);
5494 }
5495 }
5496
5497 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
5498 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
5499 }
5500
5501 /**
5502 * Initialize DCC/FMASK/CMASK metadata for a color image.
5503 */
5504 static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer,
5505 struct radv_image *image,
5506 VkImageLayout src_layout,
5507 bool src_render_loop,
5508 VkImageLayout dst_layout,
5509 bool dst_render_loop,
5510 unsigned src_queue_mask,
5511 unsigned dst_queue_mask,
5512 const VkImageSubresourceRange *range)
5513 {
5514 if (radv_image_has_cmask(image)) {
5515 uint32_t value = 0xffffffffu; /* Fully expanded mode. */
5516
5517 /* TODO: clarify this. */
5518 if (radv_image_has_fmask(image)) {
5519 value = 0xccccccccu;
5520 }
5521
5522 radv_initialise_cmask(cmd_buffer, image, range, value);
5523 }
5524
5525 if (radv_image_has_fmask(image)) {
5526 radv_initialize_fmask(cmd_buffer, image, range);
5527 }
5528
5529 if (radv_dcc_enabled(image, range->baseMipLevel)) {
5530 uint32_t value = 0xffffffffu; /* Fully expanded mode. */
5531 bool need_decompress_pass = false;
5532
5533 if (radv_layout_dcc_compressed(cmd_buffer->device, image, dst_layout,
5534 dst_render_loop,
5535 dst_queue_mask)) {
5536 value = 0x20202020u;
5537 need_decompress_pass = true;
5538 }
5539
5540 radv_initialize_dcc(cmd_buffer, image, range, value);
5541
5542 radv_update_fce_metadata(cmd_buffer, image, range,
5543 need_decompress_pass);
5544 }
5545
5546 if (radv_image_has_cmask(image) ||
5547 radv_dcc_enabled(image, range->baseMipLevel)) {
5548 uint32_t color_values[2] = {};
5549 radv_set_color_clear_metadata(cmd_buffer, image, range,
5550 color_values);
5551 }
5552 }
5553
5554 /**
5555 * Handle color image transitions for DCC/FMASK/CMASK.
5556 */
5557 static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffer,
5558 struct radv_image *image,
5559 VkImageLayout src_layout,
5560 bool src_render_loop,
5561 VkImageLayout dst_layout,
5562 bool dst_render_loop,
5563 unsigned src_queue_mask,
5564 unsigned dst_queue_mask,
5565 const VkImageSubresourceRange *range)
5566 {
5567 if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
5568 radv_init_color_image_metadata(cmd_buffer, image,
5569 src_layout, src_render_loop,
5570 dst_layout, dst_render_loop,
5571 src_queue_mask, dst_queue_mask,
5572 range);
5573 return;
5574 }
5575
5576 if (radv_dcc_enabled(image, range->baseMipLevel)) {
5577 if (src_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
5578 radv_initialize_dcc(cmd_buffer, image, range, 0xffffffffu);
5579 } else if (radv_layout_dcc_compressed(cmd_buffer->device, image, src_layout, src_render_loop, src_queue_mask) &&
5580 !radv_layout_dcc_compressed(cmd_buffer->device, image, dst_layout, dst_render_loop, dst_queue_mask)) {
5581 radv_decompress_dcc(cmd_buffer, image, range);
5582 } else if (radv_layout_can_fast_clear(image, src_layout, src_render_loop, src_queue_mask) &&
5583 !radv_layout_can_fast_clear(image, dst_layout, dst_render_loop, dst_queue_mask)) {
5584 radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
5585 }
5586 } else if (radv_image_has_cmask(image) || radv_image_has_fmask(image)) {
5587 bool fce_eliminate = false, fmask_expand = false;
5588
5589 if (radv_layout_can_fast_clear(image, src_layout, src_render_loop, src_queue_mask) &&
5590 !radv_layout_can_fast_clear(image, dst_layout, dst_render_loop, dst_queue_mask)) {
5591 fce_eliminate = true;
5592 }
5593
5594 if (radv_image_has_fmask(image)) {
5595 if (src_layout != VK_IMAGE_LAYOUT_GENERAL &&
5596 dst_layout == VK_IMAGE_LAYOUT_GENERAL) {
5597 				/* An FMASK decompress is required before doing
5598 				 * an MSAA decompress using FMASK.
5599 */
5600 fmask_expand = true;
5601 }
5602 }
5603
5604 if (fce_eliminate || fmask_expand)
5605 radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
5606
5607 if (fmask_expand) {
5608 struct radv_barrier_data barrier = {};
5609 barrier.layout_transitions.fmask_color_expand = 1;
5610 radv_describe_layout_transition(cmd_buffer, &barrier);
5611
5612 radv_expand_fmask_image_inplace(cmd_buffer, image, range);
5613 }
5614 }
5615 }
5616
5617 static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
5618 struct radv_image *image,
5619 VkImageLayout src_layout,
5620 bool src_render_loop,
5621 VkImageLayout dst_layout,
5622 bool dst_render_loop,
5623 uint32_t src_family,
5624 uint32_t dst_family,
5625 const VkImageSubresourceRange *range,
5626 struct radv_sample_locations_state *sample_locs)
5627 {
5628 if (image->exclusive && src_family != dst_family) {
5629 /* This is an acquire or a release operation and there will be
5630 * a corresponding release/acquire. Do the transition in the
5631 * most flexible queue. */
5632
5633 assert(src_family == cmd_buffer->queue_family_index ||
5634 dst_family == cmd_buffer->queue_family_index);
5635
5636 if (src_family == VK_QUEUE_FAMILY_EXTERNAL ||
5637 src_family == VK_QUEUE_FAMILY_FOREIGN_EXT)
5638 return;
5639
5640 if (cmd_buffer->queue_family_index == RADV_QUEUE_TRANSFER)
5641 return;
5642
5643 if (cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
5644 (src_family == RADV_QUEUE_GENERAL ||
5645 dst_family == RADV_QUEUE_GENERAL))
5646 return;
5647 }
5648
5649 if (src_layout == dst_layout)
5650 return;
5651
5652 unsigned src_queue_mask =
5653 radv_image_queue_family_mask(image, src_family,
5654 cmd_buffer->queue_family_index);
5655 unsigned dst_queue_mask =
5656 radv_image_queue_family_mask(image, dst_family,
5657 cmd_buffer->queue_family_index);
5658
5659 if (vk_format_is_depth(image->vk_format)) {
5660 radv_handle_depth_image_transition(cmd_buffer, image,
5661 src_layout, src_render_loop,
5662 dst_layout, dst_render_loop,
5663 src_queue_mask, dst_queue_mask,
5664 range, sample_locs);
5665 } else {
5666 radv_handle_color_image_transition(cmd_buffer, image,
5667 src_layout, src_render_loop,
5668 dst_layout, dst_render_loop,
5669 src_queue_mask, dst_queue_mask,
5670 range);
5671 }
5672 }
5673
5674 struct radv_barrier_info {
5675 enum rgp_barrier_reason reason;
5676 uint32_t eventCount;
5677 const VkEvent *pEvents;
5678 VkPipelineStageFlags srcStageMask;
5679 VkPipelineStageFlags dstStageMask;
5680 };
5681
5682 static void
5683 radv_barrier(struct radv_cmd_buffer *cmd_buffer,
5684 uint32_t memoryBarrierCount,
5685 const VkMemoryBarrier *pMemoryBarriers,
5686 uint32_t bufferMemoryBarrierCount,
5687 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
5688 uint32_t imageMemoryBarrierCount,
5689 const VkImageMemoryBarrier *pImageMemoryBarriers,
5690 const struct radv_barrier_info *info)
5691 {
5692 struct radeon_cmdbuf *cs = cmd_buffer->cs;
5693 enum radv_cmd_flush_bits src_flush_bits = 0;
5694 enum radv_cmd_flush_bits dst_flush_bits = 0;
5695
5696 radv_describe_barrier_start(cmd_buffer, info->reason);
5697
5698 for (unsigned i = 0; i < info->eventCount; ++i) {
5699 RADV_FROM_HANDLE(radv_event, event, info->pEvents[i]);
5700 uint64_t va = radv_buffer_get_va(event->bo);
5701
5702 radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
5703
5704 ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
5705
5706 radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, va, 1, 0xffffffff);
5707 assert(cmd_buffer->cs->cdw <= cdw_max);
5708 }
5709
5710 for (uint32_t i = 0; i < memoryBarrierCount; i++) {
5711 src_flush_bits |= radv_src_access_flush(cmd_buffer, pMemoryBarriers[i].srcAccessMask,
5712 NULL);
5713 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pMemoryBarriers[i].dstAccessMask,
5714 NULL);
5715 }
5716
5717 for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
5718 src_flush_bits |= radv_src_access_flush(cmd_buffer, pBufferMemoryBarriers[i].srcAccessMask,
5719 NULL);
5720 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pBufferMemoryBarriers[i].dstAccessMask,
5721 NULL);
5722 }
5723
5724 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
5725 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
5726
5727 src_flush_bits |= radv_src_access_flush(cmd_buffer, pImageMemoryBarriers[i].srcAccessMask,
5728 image);
5729 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pImageMemoryBarriers[i].dstAccessMask,
5730 image);
5731 }
5732
5733 /* The Vulkan spec 1.1.98 says:
5734 *
5735 * "An execution dependency with only
5736 * VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT in the destination stage mask
5737 * will only prevent that stage from executing in subsequently
5738 * submitted commands. As this stage does not perform any actual
5739 * execution, this is not observable - in effect, it does not delay
5740 * processing of subsequent commands. Similarly an execution dependency
5741 * with only VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT in the source stage mask
5742 * will effectively not wait for any prior commands to complete."
5743 */
5744 if (info->dstStageMask != VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT)
5745 radv_stage_flush(cmd_buffer, info->srcStageMask);
5746 cmd_buffer->state.flush_bits |= src_flush_bits;
5747
5748 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
5749 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
5750
5751 const struct VkSampleLocationsInfoEXT *sample_locs_info =
5752 vk_find_struct_const(pImageMemoryBarriers[i].pNext,
5753 SAMPLE_LOCATIONS_INFO_EXT);
5754 struct radv_sample_locations_state sample_locations = {};
5755
5756 if (sample_locs_info) {
5757 assert(image->flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT);
5758 sample_locations.per_pixel = sample_locs_info->sampleLocationsPerPixel;
5759 sample_locations.grid_size = sample_locs_info->sampleLocationGridSize;
5760 sample_locations.count = sample_locs_info->sampleLocationsCount;
5761 typed_memcpy(&sample_locations.locations[0],
5762 sample_locs_info->pSampleLocations,
5763 sample_locs_info->sampleLocationsCount);
5764 }
5765
5766 radv_handle_image_transition(cmd_buffer, image,
5767 pImageMemoryBarriers[i].oldLayout,
5768 false, /* Outside of a renderpass we are never in a renderloop */
5769 pImageMemoryBarriers[i].newLayout,
5770 false, /* Outside of a renderpass we are never in a renderloop */
5771 pImageMemoryBarriers[i].srcQueueFamilyIndex,
5772 pImageMemoryBarriers[i].dstQueueFamilyIndex,
5773 &pImageMemoryBarriers[i].subresourceRange,
5774 sample_locs_info ? &sample_locations : NULL);
5775 }
5776
5777 /* Make sure CP DMA is idle because the driver might have performed a
5778 * DMA operation for copying or filling buffers/images.
5779 */
5780 if (info->srcStageMask & (VK_PIPELINE_STAGE_TRANSFER_BIT |
5781 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT))
5782 si_cp_dma_wait_for_idle(cmd_buffer);
5783
5784 cmd_buffer->state.flush_bits |= dst_flush_bits;
5785
5786 radv_describe_barrier_end(cmd_buffer);
5787 }
5788
5789 void radv_CmdPipelineBarrier(
5790 VkCommandBuffer commandBuffer,
5791 VkPipelineStageFlags srcStageMask,
5792 VkPipelineStageFlags destStageMask,
5793 VkBool32 byRegion,
5794 uint32_t memoryBarrierCount,
5795 const VkMemoryBarrier* pMemoryBarriers,
5796 uint32_t bufferMemoryBarrierCount,
5797 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
5798 uint32_t imageMemoryBarrierCount,
5799 const VkImageMemoryBarrier* pImageMemoryBarriers)
5800 {
5801 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5802 struct radv_barrier_info info;
5803
5804 info.reason = RGP_BARRIER_EXTERNAL_CMD_PIPELINE_BARRIER;
5805 info.eventCount = 0;
5806 info.pEvents = NULL;
5807 info.srcStageMask = srcStageMask;
5808 info.dstStageMask = destStageMask;
5809
5810 radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
5811 bufferMemoryBarrierCount, pBufferMemoryBarriers,
5812 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
5813 }
5814
5815
5816 static void write_event(struct radv_cmd_buffer *cmd_buffer,
5817 struct radv_event *event,
5818 VkPipelineStageFlags stageMask,
5819 unsigned value)
5820 {
5821 struct radeon_cmdbuf *cs = cmd_buffer->cs;
5822 uint64_t va = radv_buffer_get_va(event->bo);
5823
5824 si_emit_cache_flush(cmd_buffer);
5825
5826 radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
5827
5828 ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 21);
5829
5830 /* Flags that only require a top-of-pipe event. */
5831 VkPipelineStageFlags top_of_pipe_flags =
5832 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
5833
5834 /* Flags that only require a post-index-fetch event. */
5835 VkPipelineStageFlags post_index_fetch_flags =
5836 top_of_pipe_flags |
5837 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
5838 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
5839
5840 /* Make sure CP DMA is idle because the driver might have performed a
5841 * DMA operation for copying or filling buffers/images.
5842 */
5843 if (stageMask & (VK_PIPELINE_STAGE_TRANSFER_BIT |
5844 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT))
5845 si_cp_dma_wait_for_idle(cmd_buffer);
5846
5847 /* TODO: Emit EOS events for syncing PS/CS stages. */
5848
5849 if (!(stageMask & ~top_of_pipe_flags)) {
5850 /* Just need to sync the PFP engine. */
5851 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
5852 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
5853 S_370_WR_CONFIRM(1) |
5854 S_370_ENGINE_SEL(V_370_PFP));
5855 radeon_emit(cs, va);
5856 radeon_emit(cs, va >> 32);
5857 radeon_emit(cs, value);
5858 } else if (!(stageMask & ~post_index_fetch_flags)) {
5859 /* Sync ME because PFP reads index and indirect buffers. */
5860 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
5861 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
5862 S_370_WR_CONFIRM(1) |
5863 S_370_ENGINE_SEL(V_370_ME));
5864 radeon_emit(cs, va);
5865 radeon_emit(cs, va >> 32);
5866 radeon_emit(cs, value);
5867 } else {
5868 /* Otherwise, sync all prior GPU work using an EOP event. */
5869 si_cs_emit_write_event_eop(cs,
5870 cmd_buffer->device->physical_device->rad_info.chip_class,
5871 radv_cmd_buffer_uses_mec(cmd_buffer),
5872 V_028A90_BOTTOM_OF_PIPE_TS, 0,
5873 EOP_DST_SEL_MEM,
5874 EOP_DATA_SEL_VALUE_32BIT, va, value,
5875 cmd_buffer->gfx9_eop_bug_va);
5876 }
5877
5878 assert(cmd_buffer->cs->cdw <= cdw_max);
5879 }
5880
5881 void radv_CmdSetEvent(VkCommandBuffer commandBuffer,
5882 VkEvent _event,
5883 VkPipelineStageFlags stageMask)
5884 {
5885 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5886 RADV_FROM_HANDLE(radv_event, event, _event);
5887
5888 write_event(cmd_buffer, event, stageMask, 1);
5889 }
5890
5891 void radv_CmdResetEvent(VkCommandBuffer commandBuffer,
5892 VkEvent _event,
5893 VkPipelineStageFlags stageMask)
5894 {
5895 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5896 RADV_FROM_HANDLE(radv_event, event, _event);
5897
5898 write_event(cmd_buffer, event, stageMask, 0);
5899 }
5900
5901 void radv_CmdWaitEvents(VkCommandBuffer commandBuffer,
5902 uint32_t eventCount,
5903 const VkEvent* pEvents,
5904 VkPipelineStageFlags srcStageMask,
5905 VkPipelineStageFlags dstStageMask,
5906 uint32_t memoryBarrierCount,
5907 const VkMemoryBarrier* pMemoryBarriers,
5908 uint32_t bufferMemoryBarrierCount,
5909 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
5910 uint32_t imageMemoryBarrierCount,
5911 const VkImageMemoryBarrier* pImageMemoryBarriers)
5912 {
5913 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5914 struct radv_barrier_info info;
5915
5916 info.reason = RGP_BARRIER_EXTERNAL_CMD_WAIT_EVENTS;
5917 info.eventCount = eventCount;
5918 info.pEvents = pEvents;
5919 	info.srcStageMask = 0;
 	info.dstStageMask = dstStageMask;
5920
5921 radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
5922 bufferMemoryBarrierCount, pBufferMemoryBarriers,
5923 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
5924 }
5925
5926
5927 void radv_CmdSetDeviceMask(VkCommandBuffer commandBuffer,
5928 uint32_t deviceMask)
5929 {
5930 /* No-op */
5931 }
5932
5933 /* VK_EXT_conditional_rendering */
5934 void radv_CmdBeginConditionalRenderingEXT(
5935 VkCommandBuffer commandBuffer,
5936 const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin)
5937 {
5938 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5939 RADV_FROM_HANDLE(radv_buffer, buffer, pConditionalRenderingBegin->buffer);
5940 struct radeon_cmdbuf *cs = cmd_buffer->cs;
5941 bool draw_visible = true;
5942 uint64_t pred_value = 0;
5943 uint64_t va, new_va;
5944 unsigned pred_offset;
5945
5946 va = radv_buffer_get_va(buffer->bo) + pConditionalRenderingBegin->offset;
5947
5948 /* By default, if the 32-bit value at offset in buffer memory is zero,
5949 * then the rendering commands are discarded, otherwise they are
5950 * executed as normal. If the inverted flag is set, all commands are
5951 	 * discarded if the value is non-zero.
5952 */
5953 if (pConditionalRenderingBegin->flags &
5954 VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT) {
5955 draw_visible = false;
5956 }
5957
5958 si_emit_cache_flush(cmd_buffer);
5959
5960 /* From the Vulkan spec 1.1.107:
5961 *
5962 * "If the 32-bit value at offset in buffer memory is zero, then the
5963 * rendering commands are discarded, otherwise they are executed as
5964 * normal. If the value of the predicate in buffer memory changes while
5965 * conditional rendering is active, the rendering commands may be
5966 * discarded in an implementation-dependent way. Some implementations
5967 * may latch the value of the predicate upon beginning conditional
5968 * rendering while others may read it before every rendering command."
5969 *
5970 * But, the AMD hardware treats the predicate as a 64-bit value which
5971 	 * means we need a workaround in the driver. Luckily, it's not required
5972 	 * to support the case where the value changes while predication is active.
5973 *
5974 * The workaround is as follows:
5975 	 * 1) allocate a 64-bit value in the upload BO and initialize it to 0
5976 * 2) copy the 32-bit predicate value to the upload BO
5977 * 3) use the new allocated VA address for predication
5978 *
5979 * Based on the conditionalrender demo, it's faster to do the COPY_DATA
5980 * in ME (+ sync PFP) instead of PFP.
5981 */
5982 radv_cmd_buffer_upload_data(cmd_buffer, 8, 16, &pred_value, &pred_offset);
5983
5984 new_va = radv_buffer_get_va(cmd_buffer->upload.upload_bo) + pred_offset;
5985
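	/* Copy the 32-bit predicate into the low dword of the new 64-bit
	 * slot; the high dword stays zero from the zero-initialized upload
	 * above, which is what the 64-bit hardware predicate expects. */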
5986 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
5987 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
5988 COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) |
5989 COPY_DATA_WR_CONFIRM);
5990 radeon_emit(cs, va);
5991 radeon_emit(cs, va >> 32);
5992 radeon_emit(cs, new_va);
5993 radeon_emit(cs, new_va >> 32);
5994
5995 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
5996 radeon_emit(cs, 0);
5997
5998 /* Enable predication for this command buffer. */
5999 si_emit_set_predication_state(cmd_buffer, draw_visible, new_va);
6000 cmd_buffer->state.predicating = true;
6001
6002 /* Store conditional rendering user info. */
6003 cmd_buffer->state.predication_type = draw_visible;
6004 cmd_buffer->state.predication_va = new_va;
6005 }
6006
6007 void radv_CmdEndConditionalRenderingEXT(
6008 VkCommandBuffer commandBuffer)
6009 {
6010 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
6011
6012 /* Disable predication for this command buffer. */
6013 si_emit_set_predication_state(cmd_buffer, false, 0);
6014 cmd_buffer->state.predicating = false;
6015
6016 /* Reset conditional rendering user info. */
6017 cmd_buffer->state.predication_type = -1;
6018 cmd_buffer->state.predication_va = 0;
6019 }
6020
6021 /* VK_EXT_transform_feedback */
6022 void radv_CmdBindTransformFeedbackBuffersEXT(
6023 VkCommandBuffer commandBuffer,
6024 uint32_t firstBinding,
6025 uint32_t bindingCount,
6026 const VkBuffer* pBuffers,
6027 const VkDeviceSize* pOffsets,
6028 const VkDeviceSize* pSizes)
6029 {
6030 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
6031 struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
6032 uint8_t enabled_mask = 0;
6033
6034 assert(firstBinding + bindingCount <= MAX_SO_BUFFERS);
6035 for (uint32_t i = 0; i < bindingCount; i++) {
6036 uint32_t idx = firstBinding + i;
6037
6038 sb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
6039 sb[idx].offset = pOffsets[i];
6040
6041 if (!pSizes || pSizes[i] == VK_WHOLE_SIZE) {
6042 sb[idx].size = sb[idx].buffer->size - sb[idx].offset;
6043 } else {
6044 sb[idx].size = pSizes[i];
6045 }
6046
6047 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
6048 sb[idx].buffer->bo);
6049
6050 enabled_mask |= 1 << idx;
6051 }
6052
6053 cmd_buffer->state.streamout.enabled_mask |= enabled_mask;
6054
6055 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_STREAMOUT_BUFFER;
6056 }
6057
6058 static void
6059 radv_emit_streamout_enable(struct radv_cmd_buffer *cmd_buffer)
6060 {
6061 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
6062 struct radeon_cmdbuf *cs = cmd_buffer->cs;
6063
6064 radeon_set_context_reg_seq(cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
6065 radeon_emit(cs,
6066 S_028B94_STREAMOUT_0_EN(so->streamout_enabled) |
6067 S_028B94_RAST_STREAM(0) |
6068 S_028B94_STREAMOUT_1_EN(so->streamout_enabled) |
6069 S_028B94_STREAMOUT_2_EN(so->streamout_enabled) |
6070 S_028B94_STREAMOUT_3_EN(so->streamout_enabled));
6071 radeon_emit(cs, so->hw_enabled_mask &
6072 so->enabled_stream_buffers_mask);
6073
6074 cmd_buffer->state.context_roll_without_scissor_emitted = true;
6075 }
6076
6077 static void
6078 radv_set_streamout_enable(struct radv_cmd_buffer *cmd_buffer, bool enable)
6079 {
6080 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
6081 bool old_streamout_enabled = so->streamout_enabled;
6082 uint32_t old_hw_enabled_mask = so->hw_enabled_mask;
6083
6084 so->streamout_enabled = enable;
6085
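	/* Replicate the 4-bit buffer-enable mask once per stream, matching
	 * the per-stream layout written in radv_emit_streamout_enable(). */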
6086 so->hw_enabled_mask = so->enabled_mask |
6087 (so->enabled_mask << 4) |
6088 (so->enabled_mask << 8) |
6089 (so->enabled_mask << 12);
6090
6091 if (!cmd_buffer->device->physical_device->use_ngg_streamout &&
6092 ((old_streamout_enabled != so->streamout_enabled) ||
6093 (old_hw_enabled_mask != so->hw_enabled_mask)))
6094 radv_emit_streamout_enable(cmd_buffer);
6095
6096 if (cmd_buffer->device->physical_device->use_ngg_streamout) {
6097 cmd_buffer->gds_needed = true;
6098 cmd_buffer->gds_oa_needed = true;
6099 }
6100 }
6101
6102 static void radv_flush_vgt_streamout(struct radv_cmd_buffer *cmd_buffer)
6103 {
6104 struct radeon_cmdbuf *cs = cmd_buffer->cs;
6105 unsigned reg_strmout_cntl;
6106
6107 /* The register is at different places on different ASICs. */
6108 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
6109 reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
6110 radeon_set_uconfig_reg(cs, reg_strmout_cntl, 0);
6111 } else {
6112 reg_strmout_cntl = R_0084FC_CP_STRMOUT_CNTL;
6113 radeon_set_config_reg(cs, reg_strmout_cntl, 0);
6114 }
6115
6116 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
6117 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0));
6118
6119 radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
6120 radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
6121 radeon_emit(cs, reg_strmout_cntl >> 2); /* register */
6122 radeon_emit(cs, 0);
6123 radeon_emit(cs, S_0084FC_OFFSET_UPDATE_DONE(1)); /* reference value */
6124 radeon_emit(cs, S_0084FC_OFFSET_UPDATE_DONE(1)); /* mask */
6125 radeon_emit(cs, 4); /* poll interval */
6126 }
6127
6128 static void
6129 radv_emit_streamout_begin(struct radv_cmd_buffer *cmd_buffer,
6130 uint32_t firstCounterBuffer,
6131 uint32_t counterBufferCount,
6132 const VkBuffer *pCounterBuffers,
6133 const VkDeviceSize *pCounterBufferOffsets)
6134
6135 {
6136 struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
6137 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
6138 struct radeon_cmdbuf *cs = cmd_buffer->cs;
6139 uint32_t i;
6140
6141 radv_flush_vgt_streamout(cmd_buffer);
6142
6143 assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
6144 for_each_bit(i, so->enabled_mask) {
6145 int32_t counter_buffer_idx = i - firstCounterBuffer;
6146 if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
6147 counter_buffer_idx = -1;
6148
6149 /* AMD GCN binds streamout buffers as shader resources.
6150 * VGT only counts primitives and tells the shader through
6151 * SGPRs what to do.
6152 */
6153 radeon_set_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 2);
6154 radeon_emit(cs, sb[i].size >> 2); /* BUFFER_SIZE (in DW) */
6155 radeon_emit(cs, so->stride_in_dw[i]); /* VTX_STRIDE (in DW) */
6156
6157 cmd_buffer->state.context_roll_without_scissor_emitted = true;
6158
6159 if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) {
6160 /* The array of counter buffers is optional. */
6161 RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
6162 uint64_t va = radv_buffer_get_va(buffer->bo);
6163
6164 va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx];
6165
6166 /* Append */
6167 radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
6168 radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
6169 STRMOUT_DATA_TYPE(1) | /* offset in bytes */
6170 STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM)); /* control */
6171 radeon_emit(cs, 0); /* unused */
6172 radeon_emit(cs, 0); /* unused */
6173 radeon_emit(cs, va); /* src address lo */
6174 radeon_emit(cs, va >> 32); /* src address hi */
6175
6176 radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
6177 } else {
6178 /* Start from the beginning. */
6179 radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
6180 radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
6181 STRMOUT_DATA_TYPE(1) | /* offset in bytes */
6182 STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET)); /* control */
6183 radeon_emit(cs, 0); /* unused */
6184 radeon_emit(cs, 0); /* unused */
6185 radeon_emit(cs, 0); /* unused */
6186 radeon_emit(cs, 0); /* unused */
6187 }
6188 }
6189
6190 radv_set_streamout_enable(cmd_buffer, true);
6191 }
6192
6193 static void
6194 gfx10_emit_streamout_begin(struct radv_cmd_buffer *cmd_buffer,
6195 uint32_t firstCounterBuffer,
6196 uint32_t counterBufferCount,
6197 const VkBuffer *pCounterBuffers,
6198 const VkDeviceSize *pCounterBufferOffsets)
6199 {
6200 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
6201 unsigned last_target = util_last_bit(so->enabled_mask) - 1;
6202 struct radeon_cmdbuf *cs = cmd_buffer->cs;
6203 uint32_t i;
6204
6205 assert(cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10);
6206 assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
6207
6208 /* Sync because the next streamout operation will overwrite GDS and we
6209 * have to make sure it's idle.
6210 * TODO: Improve by tracking if there is a streamout operation in
6211 * flight.
6212 */
6213 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
6214 si_emit_cache_flush(cmd_buffer);
6215
6216 for_each_bit(i, so->enabled_mask) {
6217 int32_t counter_buffer_idx = i - firstCounterBuffer;
6218 if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
6219 counter_buffer_idx = -1;
6220
6221 bool append = counter_buffer_idx >= 0 &&
6222 pCounterBuffers && pCounterBuffers[counter_buffer_idx];
6223 uint64_t va = 0;
6224
6225 if (append) {
6226 RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
6227
6228 va += radv_buffer_get_va(buffer->bo);
6229 va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx];
6230
6231 radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
6232 }
6233
6234 radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
6235 radeon_emit(cs, S_411_SRC_SEL(append ? V_411_SRC_ADDR_TC_L2 : V_411_DATA) |
6236 S_411_DST_SEL(V_411_GDS) |
6237 S_411_CP_SYNC(i == last_target));
6238 radeon_emit(cs, va);
6239 radeon_emit(cs, va >> 32);
6240 radeon_emit(cs, 4 * i); /* destination in GDS */
6241 radeon_emit(cs, 0);
6242 radeon_emit(cs, S_414_BYTE_COUNT_GFX9(4) |
6243 S_414_DISABLE_WR_CONFIRM_GFX9(i != last_target));
6244 }
6245
6246 radv_set_streamout_enable(cmd_buffer, true);
6247 }
6248
6249 void radv_CmdBeginTransformFeedbackEXT(
6250 VkCommandBuffer commandBuffer,
6251 uint32_t firstCounterBuffer,
6252 uint32_t counterBufferCount,
6253 const VkBuffer* pCounterBuffers,
6254 const VkDeviceSize* pCounterBufferOffsets)
6255 {
6256 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
6257
6258 if (cmd_buffer->device->physical_device->use_ngg_streamout) {
6259 gfx10_emit_streamout_begin(cmd_buffer,
6260 firstCounterBuffer, counterBufferCount,
6261 pCounterBuffers, pCounterBufferOffsets);
6262 } else {
6263 radv_emit_streamout_begin(cmd_buffer,
6264 firstCounterBuffer, counterBufferCount,
6265 pCounterBuffers, pCounterBufferOffsets);
6266 }
6267 }
6268
6269 static void
6270 radv_emit_streamout_end(struct radv_cmd_buffer *cmd_buffer,
6271 uint32_t firstCounterBuffer,
6272 uint32_t counterBufferCount,
6273 const VkBuffer *pCounterBuffers,
6274 const VkDeviceSize *pCounterBufferOffsets)
6275 {
6276 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
6277 struct radeon_cmdbuf *cs = cmd_buffer->cs;
6278 uint32_t i;
6279
6280 radv_flush_vgt_streamout(cmd_buffer);
6281
6282 assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
6283 for_each_bit(i, so->enabled_mask) {
6284 int32_t counter_buffer_idx = i - firstCounterBuffer;
6285 if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
6286 counter_buffer_idx = -1;
6287
6288 if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) {
6289 			/* The array of counter buffers is optional. */
6290 RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
6291 uint64_t va = radv_buffer_get_va(buffer->bo);
6292
6293 va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx];
6294
6295 radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
6296 radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
6297 STRMOUT_DATA_TYPE(1) | /* offset in bytes */
6298 STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
6299 STRMOUT_STORE_BUFFER_FILLED_SIZE); /* control */
6300 radeon_emit(cs, va); /* dst address lo */
6301 radeon_emit(cs, va >> 32); /* dst address hi */
6302 radeon_emit(cs, 0); /* unused */
6303 radeon_emit(cs, 0); /* unused */
6304
6305 radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
6306 }
6307
6308 /* Deactivate transform feedback by zeroing the buffer size.
6309 * The counters (primitives generated, primitives emitted) may
6310 		 * be enabled even if there is no buffer bound. This ensures
6311 * that the primitives-emitted query won't increment.
6312 */
6313 radeon_set_context_reg(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 0);
6314
6315 cmd_buffer->state.context_roll_without_scissor_emitted = true;
6316 }
6317
6318 radv_set_streamout_enable(cmd_buffer, false);
6319 }
6320
6321 static void
6322 gfx10_emit_streamout_end(struct radv_cmd_buffer *cmd_buffer,
6323 uint32_t firstCounterBuffer,
6324 uint32_t counterBufferCount,
6325 const VkBuffer *pCounterBuffers,
6326 const VkDeviceSize *pCounterBufferOffsets)
6327 {
6328 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
6329 struct radeon_cmdbuf *cs = cmd_buffer->cs;
6330 uint32_t i;
6331
6332 assert(cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10);
6333 assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
6334
6335 for_each_bit(i, so->enabled_mask) {
6336 int32_t counter_buffer_idx = i - firstCounterBuffer;
6337 if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
6338 counter_buffer_idx = -1;
6339
6340 if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) {
6341 			/* The array of counter buffers is optional. */
6342 RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
6343 uint64_t va = radv_buffer_get_va(buffer->bo);
6344
6345 va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx];
6346
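			/* Flush the streamout offset for buffer i from GDS
			 * back into the counter buffer with a PS_DONE
			 * end-of-pipe event. */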
6347 si_cs_emit_write_event_eop(cs,
6348 cmd_buffer->device->physical_device->rad_info.chip_class,
6349 radv_cmd_buffer_uses_mec(cmd_buffer),
6350 V_028A90_PS_DONE, 0,
6351 EOP_DST_SEL_TC_L2,
6352 EOP_DATA_SEL_GDS,
6353 va, EOP_DATA_GDS(i, 1), 0);
6354
6355 radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
6356 }
6357 }
6358
6359 radv_set_streamout_enable(cmd_buffer, false);
6360 }
6361
6362 void radv_CmdEndTransformFeedbackEXT(
6363 VkCommandBuffer commandBuffer,
6364 uint32_t firstCounterBuffer,
6365 uint32_t counterBufferCount,
6366 const VkBuffer* pCounterBuffers,
6367 const VkDeviceSize* pCounterBufferOffsets)
6368 {
6369 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
6370
6371 if (cmd_buffer->device->physical_device->use_ngg_streamout) {
6372 gfx10_emit_streamout_end(cmd_buffer,
6373 firstCounterBuffer, counterBufferCount,
6374 pCounterBuffers, pCounterBufferOffsets);
6375 } else {
6376 radv_emit_streamout_end(cmd_buffer,
6377 firstCounterBuffer, counterBufferCount,
6378 pCounterBuffers, pCounterBufferOffsets);
6379 }
6380 }
6381
6382 void radv_CmdDrawIndirectByteCountEXT(
6383 VkCommandBuffer commandBuffer,
6384 uint32_t instanceCount,
6385 uint32_t firstInstance,
6386 VkBuffer _counterBuffer,
6387 VkDeviceSize counterBufferOffset,
6388 uint32_t counterOffset,
6389 uint32_t vertexStride)
6390 {
6391 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
6392 RADV_FROM_HANDLE(radv_buffer, counterBuffer, _counterBuffer);
6393 struct radv_draw_info info = {};
6394
6395 info.instance_count = instanceCount;
6396 info.first_instance = firstInstance;
6397 info.strmout_buffer = counterBuffer;
6398 info.strmout_buffer_offset = counterBufferOffset;
6399 info.stride = vertexStride;
6400
6401 radv_draw(cmd_buffer, &info);
6402 }
6403
6404 /* VK_AMD_buffer_marker */
6405 void radv_CmdWriteBufferMarkerAMD(
6406 VkCommandBuffer commandBuffer,
6407 VkPipelineStageFlagBits pipelineStage,
6408 VkBuffer dstBuffer,
6409 VkDeviceSize dstOffset,
6410 uint32_t marker)
6411 {
6412 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
6413 RADV_FROM_HANDLE(radv_buffer, buffer, dstBuffer);
6414 struct radeon_cmdbuf *cs = cmd_buffer->cs;
6415 uint64_t va = radv_buffer_get_va(buffer->bo) + dstOffset;
6416
6417 si_emit_cache_flush(cmd_buffer);
6418
6419 ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 12);
6420
6421 if (!(pipelineStage & ~VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT)) {
6422 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
6423 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) |
6424 COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) |
6425 COPY_DATA_WR_CONFIRM);
6426 radeon_emit(cs, marker);
6427 radeon_emit(cs, 0);
6428 radeon_emit(cs, va);
6429 radeon_emit(cs, va >> 32);
6430 } else {
6431 si_cs_emit_write_event_eop(cs,
6432 cmd_buffer->device->physical_device->rad_info.chip_class,
6433 radv_cmd_buffer_uses_mec(cmd_buffer),
6434 V_028A90_BOTTOM_OF_PIPE_TS, 0,
6435 EOP_DST_SEL_MEM,
6436 EOP_DATA_SEL_VALUE_32BIT,
6437 va, marker,
6438 cmd_buffer->gfx9_eop_bug_va);
6439 }
6440
6441 assert(cmd_buffer->cs->cdw <= cdw_max);
6442 }