mesa.git: src/amd/vulkan/radv_cmd_buffer.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 #include "radv_private.h"
29 #include "radv_radeon_winsys.h"
30 #include "radv_shader.h"
31 #include "radv_cs.h"
32 #include "sid.h"
33 #include "vk_format.h"
34 #include "vk_util.h"
35 #include "radv_debug.h"
36 #include "radv_meta.h"
37
38 #include "ac_debug.h"
39
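/* Flags for cmd_buffer->state.prefetch_L2_mask. They track which shader
 * binaries and vertex buffer descriptors still have to be prefetched into L2
 * with CP DMA (see radv_emit_prefetch_L2()).
 */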
40 enum {
41 RADV_PREFETCH_VBO_DESCRIPTORS = (1 << 0),
42 RADV_PREFETCH_VS = (1 << 1),
43 RADV_PREFETCH_TCS = (1 << 2),
44 RADV_PREFETCH_TES = (1 << 3),
45 RADV_PREFETCH_GS = (1 << 4),
46 RADV_PREFETCH_PS = (1 << 5),
47 RADV_PREFETCH_SHADERS = (RADV_PREFETCH_VS |
48 RADV_PREFETCH_TCS |
49 RADV_PREFETCH_TES |
50 RADV_PREFETCH_GS |
51 RADV_PREFETCH_PS)
52 };
53
54 static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
55 struct radv_image *image,
56 VkImageLayout src_layout,
57 bool src_render_loop,
58 VkImageLayout dst_layout,
59 bool dst_render_loop,
60 uint32_t src_family,
61 uint32_t dst_family,
62 const VkImageSubresourceRange *range,
63 struct radv_sample_locations_state *sample_locs);
64
65 const struct radv_dynamic_state default_dynamic_state = {
66 .viewport = {
67 .count = 0,
68 },
69 .scissor = {
70 .count = 0,
71 },
72 .line_width = 1.0f,
73 .depth_bias = {
74 .bias = 0.0f,
75 .clamp = 0.0f,
76 .slope = 0.0f,
77 },
78 .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
79 .depth_bounds = {
80 .min = 0.0f,
81 .max = 1.0f,
82 },
83 .stencil_compare_mask = {
84 .front = ~0u,
85 .back = ~0u,
86 },
87 .stencil_write_mask = {
88 .front = ~0u,
89 .back = ~0u,
90 },
91 .stencil_reference = {
92 .front = 0u,
93 .back = 0u,
94 },
95 };
96
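/* Copy the dynamic state of the bound pipeline into the command buffer and
 * mark only the state groups that actually changed as dirty, so unchanged
 * registers are not re-emitted.
 */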
97 static void
98 radv_bind_dynamic_state(struct radv_cmd_buffer *cmd_buffer,
99 const struct radv_dynamic_state *src)
100 {
101 struct radv_dynamic_state *dest = &cmd_buffer->state.dynamic;
102 uint32_t copy_mask = src->mask;
103 uint32_t dest_mask = 0;
104
105 /* Make sure to copy the number of viewports/scissors because they can
106 * only be specified at pipeline creation time.
107 */
108 dest->viewport.count = src->viewport.count;
109 dest->scissor.count = src->scissor.count;
110 dest->discard_rectangle.count = src->discard_rectangle.count;
111 dest->sample_location.count = src->sample_location.count;
112
113 if (copy_mask & RADV_DYNAMIC_VIEWPORT) {
114 if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
115 src->viewport.count * sizeof(VkViewport))) {
116 typed_memcpy(dest->viewport.viewports,
117 src->viewport.viewports,
118 src->viewport.count);
119 dest_mask |= RADV_DYNAMIC_VIEWPORT;
120 }
121 }
122
123 if (copy_mask & RADV_DYNAMIC_SCISSOR) {
124 if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
125 src->scissor.count * sizeof(VkRect2D))) {
126 typed_memcpy(dest->scissor.scissors,
127 src->scissor.scissors, src->scissor.count);
128 dest_mask |= RADV_DYNAMIC_SCISSOR;
129 }
130 }
131
132 if (copy_mask & RADV_DYNAMIC_LINE_WIDTH) {
133 if (dest->line_width != src->line_width) {
134 dest->line_width = src->line_width;
135 dest_mask |= RADV_DYNAMIC_LINE_WIDTH;
136 }
137 }
138
139 if (copy_mask & RADV_DYNAMIC_DEPTH_BIAS) {
140 if (memcmp(&dest->depth_bias, &src->depth_bias,
141 sizeof(src->depth_bias))) {
142 dest->depth_bias = src->depth_bias;
143 dest_mask |= RADV_DYNAMIC_DEPTH_BIAS;
144 }
145 }
146
147 if (copy_mask & RADV_DYNAMIC_BLEND_CONSTANTS) {
148 if (memcmp(&dest->blend_constants, &src->blend_constants,
149 sizeof(src->blend_constants))) {
150 typed_memcpy(dest->blend_constants,
151 src->blend_constants, 4);
152 dest_mask |= RADV_DYNAMIC_BLEND_CONSTANTS;
153 }
154 }
155
156 if (copy_mask & RADV_DYNAMIC_DEPTH_BOUNDS) {
157 if (memcmp(&dest->depth_bounds, &src->depth_bounds,
158 sizeof(src->depth_bounds))) {
159 dest->depth_bounds = src->depth_bounds;
160 dest_mask |= RADV_DYNAMIC_DEPTH_BOUNDS;
161 }
162 }
163
164 if (copy_mask & RADV_DYNAMIC_STENCIL_COMPARE_MASK) {
165 if (memcmp(&dest->stencil_compare_mask,
166 &src->stencil_compare_mask,
167 sizeof(src->stencil_compare_mask))) {
168 dest->stencil_compare_mask = src->stencil_compare_mask;
169 dest_mask |= RADV_DYNAMIC_STENCIL_COMPARE_MASK;
170 }
171 }
172
173 if (copy_mask & RADV_DYNAMIC_STENCIL_WRITE_MASK) {
174 if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
175 sizeof(src->stencil_write_mask))) {
176 dest->stencil_write_mask = src->stencil_write_mask;
177 dest_mask |= RADV_DYNAMIC_STENCIL_WRITE_MASK;
178 }
179 }
180
181 if (copy_mask & RADV_DYNAMIC_STENCIL_REFERENCE) {
182 if (memcmp(&dest->stencil_reference, &src->stencil_reference,
183 sizeof(src->stencil_reference))) {
184 dest->stencil_reference = src->stencil_reference;
185 dest_mask |= RADV_DYNAMIC_STENCIL_REFERENCE;
186 }
187 }
188
189 if (copy_mask & RADV_DYNAMIC_DISCARD_RECTANGLE) {
190 if (memcmp(&dest->discard_rectangle.rectangles, &src->discard_rectangle.rectangles,
191 src->discard_rectangle.count * sizeof(VkRect2D))) {
192 typed_memcpy(dest->discard_rectangle.rectangles,
193 src->discard_rectangle.rectangles,
194 src->discard_rectangle.count);
195 dest_mask |= RADV_DYNAMIC_DISCARD_RECTANGLE;
196 }
197 }
198
199 if (copy_mask & RADV_DYNAMIC_SAMPLE_LOCATIONS) {
200 if (dest->sample_location.per_pixel != src->sample_location.per_pixel ||
201 dest->sample_location.grid_size.width != src->sample_location.grid_size.width ||
202 dest->sample_location.grid_size.height != src->sample_location.grid_size.height ||
203 memcmp(&dest->sample_location.locations,
204 &src->sample_location.locations,
205 src->sample_location.count * sizeof(VkSampleLocationEXT))) {
206 dest->sample_location.per_pixel = src->sample_location.per_pixel;
207 dest->sample_location.grid_size = src->sample_location.grid_size;
208 typed_memcpy(dest->sample_location.locations,
209 src->sample_location.locations,
210 src->sample_location.count);
211 dest_mask |= RADV_DYNAMIC_SAMPLE_LOCATIONS;
212 }
213 }
214
215 cmd_buffer->state.dirty |= dest_mask;
216 }
217
218 static void
219 radv_bind_streamout_state(struct radv_cmd_buffer *cmd_buffer,
220 struct radv_pipeline *pipeline)
221 {
222 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
223 struct radv_shader_info *info;
224
225 if (!pipeline->streamout_shader ||
226 cmd_buffer->device->physical_device->use_ngg_streamout)
227 return;
228
229 info = &pipeline->streamout_shader->info;
230 for (int i = 0; i < MAX_SO_BUFFERS; i++)
231 so->stride_in_dw[i] = info->so.strides[i];
232
233 so->enabled_stream_buffers_mask = info->so.enabled_stream_buffers_mask;
234 }
235
236 bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer)
237 {
238 return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
239 cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7;
240 }
241
242 enum ring_type radv_queue_family_to_ring(int f) {
243 switch (f) {
244 case RADV_QUEUE_GENERAL:
245 return RING_GFX;
246 case RADV_QUEUE_COMPUTE:
247 return RING_COMPUTE;
248 case RADV_QUEUE_TRANSFER:
249 return RING_DMA;
250 default:
251 unreachable("Unknown queue family");
252 }
253 }
254
255 static VkResult radv_create_cmd_buffer(
256 struct radv_device * device,
257 struct radv_cmd_pool * pool,
258 VkCommandBufferLevel level,
259 VkCommandBuffer* pCommandBuffer)
260 {
261 struct radv_cmd_buffer *cmd_buffer;
262 unsigned ring;
263 cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
264 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
265 if (cmd_buffer == NULL)
266 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
267
268 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
269 cmd_buffer->device = device;
270 cmd_buffer->pool = pool;
271 cmd_buffer->level = level;
272
273 if (pool) {
274 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
275 cmd_buffer->queue_family_index = pool->queue_family_index;
276
277 } else {
278 /* Init the pool_link so we can safely call list_del when we destroy
279 * the command buffer
280 */
281 list_inithead(&cmd_buffer->pool_link);
282 cmd_buffer->queue_family_index = RADV_QUEUE_GENERAL;
283 }
284
285 ring = radv_queue_family_to_ring(cmd_buffer->queue_family_index);
286
287 cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
288 if (!cmd_buffer->cs) {
289 vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
290 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
291 }
292
293 *pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);
294
295 list_inithead(&cmd_buffer->upload.list);
296
297 return VK_SUCCESS;
298 }
299
300 static void
301 radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer)
302 {
303 list_del(&cmd_buffer->pool_link);
304
305 list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
306 &cmd_buffer->upload.list, list) {
307 cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
308 list_del(&up->list);
309 free(up);
310 }
311
312 if (cmd_buffer->upload.upload_bo)
313 cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
314 cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
315
316 for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
317 free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
318
319 vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
320 }
321
322 static VkResult
323 radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
324 {
325 cmd_buffer->device->ws->cs_reset(cmd_buffer->cs);
326
327 list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
328 &cmd_buffer->upload.list, list) {
329 cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
330 list_del(&up->list);
331 free(up);
332 }
333
334 cmd_buffer->push_constant_stages = 0;
335 cmd_buffer->scratch_size_needed = 0;
336 cmd_buffer->compute_scratch_size_needed = 0;
337 cmd_buffer->esgs_ring_size_needed = 0;
338 cmd_buffer->gsvs_ring_size_needed = 0;
339 cmd_buffer->tess_rings_needed = false;
340 cmd_buffer->gds_needed = false;
341 cmd_buffer->sample_positions_needed = false;
342
343 if (cmd_buffer->upload.upload_bo)
344 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
345 cmd_buffer->upload.upload_bo);
346 cmd_buffer->upload.offset = 0;
347
348 cmd_buffer->record_result = VK_SUCCESS;
349
350 memset(cmd_buffer->vertex_bindings, 0, sizeof(cmd_buffer->vertex_bindings));
351
352 for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
353 cmd_buffer->descriptors[i].dirty = 0;
354 cmd_buffer->descriptors[i].valid = 0;
355 cmd_buffer->descriptors[i].push_dirty = false;
356 }
357
358 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 &&
359 cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) {
360 unsigned num_db = cmd_buffer->device->physical_device->rad_info.num_render_backends;
361 unsigned fence_offset, eop_bug_offset;
362 void *fence_ptr;
363
364 radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 8, &fence_offset,
365 &fence_ptr);
366
367 cmd_buffer->gfx9_fence_va =
368 radv_buffer_get_va(cmd_buffer->upload.upload_bo);
369 cmd_buffer->gfx9_fence_va += fence_offset;
370
371 if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
372 /* Allocate a buffer for the EOP bug on GFX9. */
373 radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 8,
374 &eop_bug_offset, &fence_ptr);
375 cmd_buffer->gfx9_eop_bug_va =
376 radv_buffer_get_va(cmd_buffer->upload.upload_bo);
377 cmd_buffer->gfx9_eop_bug_va += eop_bug_offset;
378 }
379 }
380
381 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_INITIAL;
382
383 return cmd_buffer->record_result;
384 }
385
386 static bool
387 radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer,
388 uint64_t min_needed)
389 {
390 uint64_t new_size;
391 struct radeon_winsys_bo *bo;
392 struct radv_cmd_buffer_upload *upload;
393 struct radv_device *device = cmd_buffer->device;
394
395 new_size = MAX2(min_needed, 16 * 1024);
396 new_size = MAX2(new_size, 2 * cmd_buffer->upload.size);
397
398 bo = device->ws->buffer_create(device->ws,
399 new_size, 4096,
400 RADEON_DOMAIN_GTT,
401 RADEON_FLAG_CPU_ACCESS|
402 RADEON_FLAG_NO_INTERPROCESS_SHARING |
403 RADEON_FLAG_32BIT,
404 RADV_BO_PRIORITY_UPLOAD_BUFFER);
405
406 if (!bo) {
407 cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
408 return false;
409 }
410
411 radv_cs_add_buffer(device->ws, cmd_buffer->cs, bo);
412 if (cmd_buffer->upload.upload_bo) {
413 upload = malloc(sizeof(*upload));
414
415 if (!upload) {
416 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
417 device->ws->buffer_destroy(bo);
418 return false;
419 }
420
421 memcpy(upload, &cmd_buffer->upload, sizeof(*upload));
422 list_add(&upload->list, &cmd_buffer->upload.list);
423 }
424
425 cmd_buffer->upload.upload_bo = bo;
426 cmd_buffer->upload.size = new_size;
427 cmd_buffer->upload.offset = 0;
428 cmd_buffer->upload.map = device->ws->buffer_map(cmd_buffer->upload.upload_bo);
429
430 if (!cmd_buffer->upload.map) {
431 cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
432 return false;
433 }
434
435 return true;
436 }
437
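/* Suballocate "size" bytes from the command buffer's upload BO. If the
 * current BO is too small, a new GTT, CPU-visible BO is allocated (at least
 * 16 KiB and at least twice the previous size) and the old BO is kept on
 * upload.list, so data already written to it stays valid until the command
 * buffer is reset or destroyed.
 */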
438 bool
439 radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer,
440 unsigned size,
441 unsigned alignment,
442 unsigned *out_offset,
443 void **ptr)
444 {
445 assert(util_is_power_of_two_nonzero(alignment));
446
447 uint64_t offset = align(cmd_buffer->upload.offset, alignment);
448 if (offset + size > cmd_buffer->upload.size) {
449 if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size))
450 return false;
451 offset = 0;
452 }
453
454 *out_offset = offset;
455 *ptr = cmd_buffer->upload.map + offset;
456
457 cmd_buffer->upload.offset = offset + size;
458 return true;
459 }
460
461 bool
462 radv_cmd_buffer_upload_data(struct radv_cmd_buffer *cmd_buffer,
463 unsigned size, unsigned alignment,
464 const void *data, unsigned *out_offset)
465 {
466 uint8_t *ptr;
467
468 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size, alignment,
469 out_offset, (void **)&ptr))
470 return false;
471
472 if (ptr)
473 memcpy(ptr, data, size);
474
475 return true;
476 }
477
478 static void
479 radv_emit_write_data_packet(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
480 unsigned count, const uint32_t *data)
481 {
482 struct radeon_cmdbuf *cs = cmd_buffer->cs;
483
484 radeon_check_space(cmd_buffer->device->ws, cs, 4 + count);
485
486 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
487 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
488 S_370_WR_CONFIRM(1) |
489 S_370_ENGINE_SEL(V_370_ME));
490 radeon_emit(cs, va);
491 radeon_emit(cs, va >> 32);
492 radeon_emit_array(cs, data, count);
493 }
494
495 void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer)
496 {
497 struct radv_device *device = cmd_buffer->device;
498 struct radeon_cmdbuf *cs = cmd_buffer->cs;
499 uint64_t va;
500
501 va = radv_buffer_get_va(device->trace_bo);
502 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
503 va += 4;
504
505 ++cmd_buffer->state.trace_id;
506 radv_emit_write_data_packet(cmd_buffer, va, 1,
507 &cmd_buffer->state.trace_id);
508
509 radeon_check_space(cmd_buffer->device->ws, cs, 2);
510
511 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
512 radeon_emit(cs, AC_ENCODE_TRACE_POINT(cmd_buffer->state.trace_id));
513 }
514
515 static void
516 radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer,
517 enum radv_cmd_flush_bits flags)
518 {
519 if (cmd_buffer->device->instance->debug_flags & RADV_DEBUG_SYNC_SHADERS) {
520 assert(flags & (RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
521 RADV_CMD_FLAG_CS_PARTIAL_FLUSH));
522
523 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 4);
524
525 /* Force wait for graphics or compute engines to be idle. */
526 si_cs_emit_cache_flush(cmd_buffer->cs,
527 cmd_buffer->device->physical_device->rad_info.chip_class,
528 &cmd_buffer->gfx9_fence_idx,
529 cmd_buffer->gfx9_fence_va,
530 radv_cmd_buffer_uses_mec(cmd_buffer),
531 flags, cmd_buffer->gfx9_eop_bug_va);
532 }
533
534 if (unlikely(cmd_buffer->device->trace_bo))
535 radv_cmd_buffer_trace_emit(cmd_buffer);
536 }
537
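/* Debug helper: when the device owns a trace BO, write the pointer of the
 * currently bound pipeline into it (GFX at offset +8, compute at +16) so
 * tools that inspect the trace buffer can identify the pipeline.
 */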
538 static void
539 radv_save_pipeline(struct radv_cmd_buffer *cmd_buffer,
540 struct radv_pipeline *pipeline, enum ring_type ring)
541 {
542 struct radv_device *device = cmd_buffer->device;
543 uint32_t data[2];
544 uint64_t va;
545
546 va = radv_buffer_get_va(device->trace_bo);
547
548 switch (ring) {
549 case RING_GFX:
550 va += 8;
551 break;
552 case RING_COMPUTE:
553 va += 16;
554 break;
555 default:
556 assert(!"invalid ring type");
557 }
558
559 uint64_t pipeline_address = (uintptr_t)pipeline;
560 data[0] = pipeline_address;
561 data[1] = pipeline_address >> 32;
562
563 radv_emit_write_data_packet(cmd_buffer, va, 2, data);
564 }
565
566 void radv_set_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
567 VkPipelineBindPoint bind_point,
568 struct radv_descriptor_set *set,
569 unsigned idx)
570 {
571 struct radv_descriptor_state *descriptors_state =
572 radv_get_descriptors_state(cmd_buffer, bind_point);
573
574 descriptors_state->sets[idx] = set;
575
576 descriptors_state->valid |= (1u << idx); /* active descriptors */
577 descriptors_state->dirty |= (1u << idx);
578 }
579
580 static void
581 radv_save_descriptors(struct radv_cmd_buffer *cmd_buffer,
582 VkPipelineBindPoint bind_point)
583 {
584 struct radv_descriptor_state *descriptors_state =
585 radv_get_descriptors_state(cmd_buffer, bind_point);
586 struct radv_device *device = cmd_buffer->device;
587 uint32_t data[MAX_SETS * 2] = {};
588 uint64_t va;
589 unsigned i;
590 va = radv_buffer_get_va(device->trace_bo) + 24;
591
592 for_each_bit(i, descriptors_state->valid) {
593 struct radv_descriptor_set *set = descriptors_state->sets[i];
594 data[i * 2] = (uint64_t)(uintptr_t)set;
595 data[i * 2 + 1] = (uint64_t)(uintptr_t)set >> 32;
596 }
597
598 radv_emit_write_data_packet(cmd_buffer, va, MAX_SETS * 2, data);
599 }
600
601 struct radv_userdata_info *
602 radv_lookup_user_sgpr(struct radv_pipeline *pipeline,
603 gl_shader_stage stage,
604 int idx)
605 {
606 struct radv_shader_variant *shader = radv_get_shader(pipeline, stage);
607 return &shader->info.user_sgprs_locs.shader_data[idx];
608 }
609
610 static void
611 radv_emit_userdata_address(struct radv_cmd_buffer *cmd_buffer,
612 struct radv_pipeline *pipeline,
613 gl_shader_stage stage,
614 int idx, uint64_t va)
615 {
616 struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
617 uint32_t base_reg = pipeline->user_data_0[stage];
618 if (loc->sgpr_idx == -1)
619 return;
620
621 assert(loc->num_sgprs == 1);
622
623 radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
624 base_reg + loc->sgpr_idx * 4, va, false);
625 }
626
627 static void
628 radv_emit_descriptor_pointers(struct radv_cmd_buffer *cmd_buffer,
629 struct radv_pipeline *pipeline,
630 struct radv_descriptor_state *descriptors_state,
631 gl_shader_stage stage)
632 {
633 struct radv_device *device = cmd_buffer->device;
634 struct radeon_cmdbuf *cs = cmd_buffer->cs;
635 uint32_t sh_base = pipeline->user_data_0[stage];
636 struct radv_userdata_locations *locs =
637 &pipeline->shaders[stage]->info.user_sgprs_locs;
638 unsigned mask = locs->descriptor_sets_enabled;
639
640 mask &= descriptors_state->dirty & descriptors_state->valid;
641
642 while (mask) {
643 int start, count;
644
645 u_bit_scan_consecutive_range(&mask, &start, &count);
646
647 struct radv_userdata_info *loc = &locs->descriptor_sets[start];
648 unsigned sh_offset = sh_base + loc->sgpr_idx * 4;
649
650 radv_emit_shader_pointer_head(cs, sh_offset, count, true);
651 for (int i = 0; i < count; i++) {
652 struct radv_descriptor_set *set =
653 descriptors_state->sets[start + i];
654
655 radv_emit_shader_pointer_body(device, cs, set->va, true);
656 }
657 }
658 }
659
660 /**
661 * Convert the user sample locations to hardware sample locations (the values
662 * that will be emitted by PA_SC_AA_SAMPLE_LOCS_PIXEL_*).
663 */
664 static void
665 radv_convert_user_sample_locs(struct radv_sample_locations_state *state,
666 uint32_t x, uint32_t y, VkOffset2D *sample_locs)
667 {
668 uint32_t x_offset = x % state->grid_size.width;
669 uint32_t y_offset = y % state->grid_size.height;
670 uint32_t num_samples = (uint32_t)state->per_pixel;
671 VkSampleLocationEXT *user_locs;
672 uint32_t pixel_offset;
673
674 pixel_offset = (x_offset + y_offset * state->grid_size.width) * num_samples;
675
676 assert(pixel_offset <= MAX_SAMPLE_LOCATIONS);
677 user_locs = &state->locations[pixel_offset];
678
679 for (uint32_t i = 0; i < num_samples; i++) {
680 float shifted_pos_x = user_locs[i].x - 0.5;
681 float shifted_pos_y = user_locs[i].y - 0.5;
682
683 int32_t scaled_pos_x = floor(shifted_pos_x * 16);
684 int32_t scaled_pos_y = floor(shifted_pos_y * 16);
685
686 sample_locs[i].x = CLAMP(scaled_pos_x, -8, 7);
687 sample_locs[i].y = CLAMP(scaled_pos_y, -8, 7);
688 }
689 }
690
691 /**
692 * Compute the PA_SC_AA_SAMPLE_LOCS_PIXEL_* mask based on hardware sample
693 * locations.
694 */
695 static void
696 radv_compute_sample_locs_pixel(uint32_t num_samples, VkOffset2D *sample_locs,
697 uint32_t *sample_locs_pixel)
698 {
699 for (uint32_t i = 0; i < num_samples; i++) {
700 uint32_t sample_reg_idx = i / 4;
701 uint32_t sample_loc_idx = i % 4;
702 int32_t pos_x = sample_locs[i].x;
703 int32_t pos_y = sample_locs[i].y;
704
705 uint32_t shift_x = 8 * sample_loc_idx;
706 uint32_t shift_y = shift_x + 4;
707
708 sample_locs_pixel[sample_reg_idx] |= (pos_x & 0xf) << shift_x;
709 sample_locs_pixel[sample_reg_idx] |= (pos_y & 0xf) << shift_y;
710 }
711 }
712
713 /**
714 * Compute the PA_SC_CENTROID_PRIORITY_* mask based on the top left hardware
715 * sample locations.
716 */
717 static uint64_t
718 radv_compute_centroid_priority(struct radv_cmd_buffer *cmd_buffer,
719 VkOffset2D *sample_locs,
720 uint32_t num_samples)
721 {
722 uint32_t centroid_priorities[num_samples];
723 uint32_t sample_mask = num_samples - 1;
724 uint32_t distances[num_samples];
725 uint64_t centroid_priority = 0;
726
727 /* Compute the distances from center for each sample. */
728 for (int i = 0; i < num_samples; i++) {
729 distances[i] = (sample_locs[i].x * sample_locs[i].x) +
730 (sample_locs[i].y * sample_locs[i].y);
731 }
732
733 /* Compute the centroid priorities by looking at the distances array. */
734 for (int i = 0; i < num_samples; i++) {
735 uint32_t min_idx = 0;
736
737 for (int j = 1; j < num_samples; j++) {
738 if (distances[j] < distances[min_idx])
739 min_idx = j;
740 }
741
742 centroid_priorities[i] = min_idx;
743 distances[min_idx] = 0xffffffff;
744 }
745
746 /* Compute the final centroid priority. */
747 for (int i = 0; i < 8; i++) {
748 centroid_priority |=
749 centroid_priorities[i & sample_mask] << (i * 4);
750 }
751
752 return centroid_priority << 32 | centroid_priority;
753 }
754
755 /**
756 * Emit the sample locations that are specified with VK_EXT_sample_locations.
757 */
758 static void
759 radv_emit_sample_locations(struct radv_cmd_buffer *cmd_buffer)
760 {
761 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
762 struct radv_multisample_state *ms = &pipeline->graphics.ms;
763 struct radv_sample_locations_state *sample_location =
764 &cmd_buffer->state.dynamic.sample_location;
765 uint32_t num_samples = (uint32_t)sample_location->per_pixel;
766 struct radeon_cmdbuf *cs = cmd_buffer->cs;
767 uint32_t sample_locs_pixel[4][2] = {};
768 VkOffset2D sample_locs[4][8]; /* 8 is the max. sample count supported */
769 uint32_t max_sample_dist = 0;
770 uint64_t centroid_priority;
771
772 if (!cmd_buffer->state.dynamic.sample_location.count)
773 return;
774
775 /* Convert the user sample locations to hardware sample locations. */
776 radv_convert_user_sample_locs(sample_location, 0, 0, sample_locs[0]);
777 radv_convert_user_sample_locs(sample_location, 1, 0, sample_locs[1]);
778 radv_convert_user_sample_locs(sample_location, 0, 1, sample_locs[2]);
779 radv_convert_user_sample_locs(sample_location, 1, 1, sample_locs[3]);
780
781 /* Compute the PA_SC_AA_SAMPLE_LOCS_PIXEL_* mask. */
782 for (uint32_t i = 0; i < 4; i++) {
783 radv_compute_sample_locs_pixel(num_samples, sample_locs[i],
784 sample_locs_pixel[i]);
785 }
786
787 /* Compute the PA_SC_CENTROID_PRIORITY_* mask. */
788 centroid_priority =
789 radv_compute_centroid_priority(cmd_buffer, sample_locs[0],
790 num_samples);
791
792 /* Compute the maximum sample distance from the specified locations. */
793 for (uint32_t i = 0; i < num_samples; i++) {
794 VkOffset2D offset = sample_locs[0][i];
795 max_sample_dist = MAX2(max_sample_dist,
796 MAX2(abs(offset.x), abs(offset.y)));
797 }
798
799 /* Emit the specified user sample locations. */
800 switch (num_samples) {
801 case 2:
802 case 4:
803 radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_pixel[0][0]);
804 radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_pixel[1][0]);
805 radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_pixel[2][0]);
806 radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_pixel[3][0]);
807 break;
808 case 8:
809 radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_pixel[0][0]);
810 radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_pixel[1][0]);
811 radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_pixel[2][0]);
812 radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_pixel[3][0]);
813 radeon_set_context_reg(cs, R_028BFC_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1, sample_locs_pixel[0][1]);
814 radeon_set_context_reg(cs, R_028C0C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1, sample_locs_pixel[1][1]);
815 radeon_set_context_reg(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1, sample_locs_pixel[2][1]);
816 radeon_set_context_reg(cs, R_028C2C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1, sample_locs_pixel[3][1]);
817 break;
818 default:
819 unreachable("invalid number of samples");
820 }
821
822 /* Emit the maximum sample distance and the centroid priority. */
823 uint32_t pa_sc_aa_config = ms->pa_sc_aa_config;
824
825 pa_sc_aa_config &= C_028BE0_MAX_SAMPLE_DIST;
826 pa_sc_aa_config |= S_028BE0_MAX_SAMPLE_DIST(max_sample_dist);
827
828 radeon_set_context_reg_seq(cs, R_028BE0_PA_SC_AA_CONFIG, 1);
829 radeon_emit(cs, pa_sc_aa_config);
830
831 radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
832 radeon_emit(cs, centroid_priority);
833 radeon_emit(cs, centroid_priority >> 32);
834
835 /* GFX9: Flush DFSM when the AA mode changes. */
836 if (cmd_buffer->device->dfsm_allowed) {
837 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
838 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
839 }
840
841 cmd_buffer->state.context_roll_without_scissor_emitted = true;
842 }
843
844 static void
845 radv_emit_inline_push_consts(struct radv_cmd_buffer *cmd_buffer,
846 struct radv_pipeline *pipeline,
847 gl_shader_stage stage,
848 int idx, int count, uint32_t *values)
849 {
850 struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
851 uint32_t base_reg = pipeline->user_data_0[stage];
852 if (loc->sgpr_idx == -1)
853 return;
854
855 assert(loc->num_sgprs == count);
856
857 radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, count);
858 radeon_emit_array(cmd_buffer->cs, values, count);
859 }
860
861 static void
862 radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer,
863 struct radv_pipeline *pipeline)
864 {
865 int num_samples = pipeline->graphics.ms.num_samples;
866 struct radv_multisample_state *ms = &pipeline->graphics.ms;
867 struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;
868
869 if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.needs_sample_positions)
870 cmd_buffer->sample_positions_needed = true;
871
872 if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples)
873 return;
874
875 radeon_set_context_reg_seq(cmd_buffer->cs, R_028BDC_PA_SC_LINE_CNTL, 2);
876 radeon_emit(cmd_buffer->cs, ms->pa_sc_line_cntl);
877 radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_config);
878
879 radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0, ms->pa_sc_mode_cntl_0);
880
881 radv_emit_default_sample_locations(cmd_buffer->cs, num_samples);
882
883 /* GFX9: Flush DFSM when the AA mode changes. */
884 if (cmd_buffer->device->dfsm_allowed) {
885 radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
886 radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
887 }
888
889 cmd_buffer->state.context_roll_without_scissor_emitted = true;
890 }
891
892 static void
893 radv_update_binning_state(struct radv_cmd_buffer *cmd_buffer,
894 struct radv_pipeline *pipeline)
895 {
896 const struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;
897
898
899 if (pipeline->device->physical_device->rad_info.chip_class < GFX9)
900 return;
901
902 if (old_pipeline &&
903 old_pipeline->graphics.binning.pa_sc_binner_cntl_0 == pipeline->graphics.binning.pa_sc_binner_cntl_0 &&
904 old_pipeline->graphics.binning.db_dfsm_control == pipeline->graphics.binning.db_dfsm_control)
905 return;
906
907 bool binning_flush = false;
908 if (cmd_buffer->device->physical_device->rad_info.family == CHIP_VEGA12 ||
909 cmd_buffer->device->physical_device->rad_info.family == CHIP_VEGA20 ||
910 cmd_buffer->device->physical_device->rad_info.family == CHIP_RAVEN2 ||
911 cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
912 binning_flush = !old_pipeline ||
913 G_028C44_BINNING_MODE(old_pipeline->graphics.binning.pa_sc_binner_cntl_0) !=
914 G_028C44_BINNING_MODE(pipeline->graphics.binning.pa_sc_binner_cntl_0);
915 }
916
917 radeon_set_context_reg(cmd_buffer->cs, R_028C44_PA_SC_BINNER_CNTL_0,
918 pipeline->graphics.binning.pa_sc_binner_cntl_0 |
919 S_028C44_FLUSH_ON_BINNING_TRANSITION(!!binning_flush));
920
921 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
922 radeon_set_context_reg(cmd_buffer->cs, R_028038_DB_DFSM_CONTROL,
923 pipeline->graphics.binning.db_dfsm_control);
924 } else {
925 radeon_set_context_reg(cmd_buffer->cs, R_028060_DB_DFSM_CONTROL,
926 pipeline->graphics.binning.db_dfsm_control);
927 }
928
929 cmd_buffer->state.context_roll_without_scissor_emitted = true;
930 }
931
932
933 static void
934 radv_emit_shader_prefetch(struct radv_cmd_buffer *cmd_buffer,
935 struct radv_shader_variant *shader)
936 {
937 uint64_t va;
938
939 if (!shader)
940 return;
941
942 va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
943
944 si_cp_dma_prefetch(cmd_buffer, va, shader->code_size);
945 }
946
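/* Prefetch the shader binaries and vertex buffer descriptors flagged in
 * prefetch_L2_mask into L2 with CP DMA, then clear the handled bits. With
 * vertex_stage_only, only the VS binary and the VBO descriptors are
 * prefetched so the first draw can start as early as possible.
 */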
947 static void
948 radv_emit_prefetch_L2(struct radv_cmd_buffer *cmd_buffer,
949 struct radv_pipeline *pipeline,
950 bool vertex_stage_only)
951 {
952 struct radv_cmd_state *state = &cmd_buffer->state;
953 uint32_t mask = state->prefetch_L2_mask;
954
955 if (vertex_stage_only) {
956 /* Fast prefetch path for starting draws as soon as possible.
957 */
958 mask = state->prefetch_L2_mask & (RADV_PREFETCH_VS |
959 RADV_PREFETCH_VBO_DESCRIPTORS);
960 }
961
962 if (mask & RADV_PREFETCH_VS)
963 radv_emit_shader_prefetch(cmd_buffer,
964 pipeline->shaders[MESA_SHADER_VERTEX]);
965
966 if (mask & RADV_PREFETCH_VBO_DESCRIPTORS)
967 si_cp_dma_prefetch(cmd_buffer, state->vb_va, state->vb_size);
968
969 if (mask & RADV_PREFETCH_TCS)
970 radv_emit_shader_prefetch(cmd_buffer,
971 pipeline->shaders[MESA_SHADER_TESS_CTRL]);
972
973 if (mask & RADV_PREFETCH_TES)
974 radv_emit_shader_prefetch(cmd_buffer,
975 pipeline->shaders[MESA_SHADER_TESS_EVAL]);
976
977 if (mask & RADV_PREFETCH_GS) {
978 radv_emit_shader_prefetch(cmd_buffer,
979 pipeline->shaders[MESA_SHADER_GEOMETRY]);
980 if (radv_pipeline_has_gs_copy_shader(pipeline))
981 radv_emit_shader_prefetch(cmd_buffer, pipeline->gs_copy_shader);
982 }
983
984 if (mask & RADV_PREFETCH_PS)
985 radv_emit_shader_prefetch(cmd_buffer,
986 pipeline->shaders[MESA_SHADER_FRAGMENT]);
987
988 state->prefetch_L2_mask &= ~mask;
989 }
990
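/* Program the RB+ registers (SX_PS_DOWNCONVERT, SX_BLEND_OPT_EPSILON and
 * SX_BLEND_OPT_CONTROL) from the bound pipeline's color export formats and
 * the current subpass attachments. Unused or masked-out channels get their
 * blend optimizations disabled, and 32bpp-or-smaller formats are
 * down-converted.
 */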
991 static void
992 radv_emit_rbplus_state(struct radv_cmd_buffer *cmd_buffer)
993 {
994 if (!cmd_buffer->device->physical_device->rad_info.rbplus_allowed)
995 return;
996
997 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
998 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
999
1000 unsigned sx_ps_downconvert = 0;
1001 unsigned sx_blend_opt_epsilon = 0;
1002 unsigned sx_blend_opt_control = 0;
1003
1004 for (unsigned i = 0; i < subpass->color_count; ++i) {
1005 if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
1006 sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
1007 sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
1008 continue;
1009 }
1010
1011 int idx = subpass->color_attachments[i].attachment;
1012 struct radv_color_buffer_info *cb = &cmd_buffer->state.attachments[idx].cb;
1013
1014 unsigned format = G_028C70_FORMAT(cb->cb_color_info);
1015 unsigned swap = G_028C70_COMP_SWAP(cb->cb_color_info);
1016 uint32_t spi_format = (pipeline->graphics.col_format >> (i * 4)) & 0xf;
1017 uint32_t colormask = (pipeline->graphics.cb_target_mask >> (i * 4)) & 0xf;
1018
1019 bool has_alpha, has_rgb;
1020
1021 /* Set if RGB and A are present. */
1022 has_alpha = !G_028C74_FORCE_DST_ALPHA_1(cb->cb_color_attrib);
1023
1024 if (format == V_028C70_COLOR_8 ||
1025 format == V_028C70_COLOR_16 ||
1026 format == V_028C70_COLOR_32)
1027 has_rgb = !has_alpha;
1028 else
1029 has_rgb = true;
1030
1031 /* Check the colormask and export format. */
1032 if (!(colormask & 0x7))
1033 has_rgb = false;
1034 if (!(colormask & 0x8))
1035 has_alpha = false;
1036
1037 if (spi_format == V_028714_SPI_SHADER_ZERO) {
1038 has_rgb = false;
1039 has_alpha = false;
1040 }
1041
1042 /* Disable value checking for disabled channels. */
1043 if (!has_rgb)
1044 sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
1045 if (!has_alpha)
1046 sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
1047
1048 /* Enable down-conversion for 32bpp and smaller formats. */
1049 switch (format) {
1050 case V_028C70_COLOR_8:
1051 case V_028C70_COLOR_8_8:
1052 case V_028C70_COLOR_8_8_8_8:
1053 /* For 1 and 2-channel formats, use the superset thereof. */
1054 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR ||
1055 spi_format == V_028714_SPI_SHADER_UINT16_ABGR ||
1056 spi_format == V_028714_SPI_SHADER_SINT16_ABGR) {
1057 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_8_8_8_8 << (i * 4);
1058 sx_blend_opt_epsilon |= V_028758_8BIT_FORMAT << (i * 4);
1059 }
1060 break;
1061
1062 case V_028C70_COLOR_5_6_5:
1063 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
1064 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_5_6_5 << (i * 4);
1065 sx_blend_opt_epsilon |= V_028758_6BIT_FORMAT << (i * 4);
1066 }
1067 break;
1068
1069 case V_028C70_COLOR_1_5_5_5:
1070 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
1071 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_1_5_5_5 << (i * 4);
1072 sx_blend_opt_epsilon |= V_028758_5BIT_FORMAT << (i * 4);
1073 }
1074 break;
1075
1076 case V_028C70_COLOR_4_4_4_4:
1077 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
1078 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_4_4_4_4 << (i * 4);
1079 sx_blend_opt_epsilon |= V_028758_4BIT_FORMAT << (i * 4);
1080 }
1081 break;
1082
1083 case V_028C70_COLOR_32:
1084 if (swap == V_028C70_SWAP_STD &&
1085 spi_format == V_028714_SPI_SHADER_32_R)
1086 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_R << (i * 4);
1087 else if (swap == V_028C70_SWAP_ALT_REV &&
1088 spi_format == V_028714_SPI_SHADER_32_AR)
1089 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_A << (i * 4);
1090 break;
1091
1092 case V_028C70_COLOR_16:
1093 case V_028C70_COLOR_16_16:
1094 /* For 1-channel formats, use the superset thereof. */
1095 if (spi_format == V_028714_SPI_SHADER_UNORM16_ABGR ||
1096 spi_format == V_028714_SPI_SHADER_SNORM16_ABGR ||
1097 spi_format == V_028714_SPI_SHADER_UINT16_ABGR ||
1098 spi_format == V_028714_SPI_SHADER_SINT16_ABGR) {
1099 if (swap == V_028C70_SWAP_STD ||
1100 swap == V_028C70_SWAP_STD_REV)
1101 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_16_16_GR << (i * 4);
1102 else
1103 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_16_16_AR << (i * 4);
1104 }
1105 break;
1106
1107 case V_028C70_COLOR_10_11_11:
1108 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
1109 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_10_11_11 << (i * 4);
1110 sx_blend_opt_epsilon |= V_028758_11BIT_FORMAT << (i * 4);
1111 }
1112 break;
1113
1114 case V_028C70_COLOR_2_10_10_10:
1115 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
1116 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_2_10_10_10 << (i * 4);
1117 sx_blend_opt_epsilon |= V_028758_10BIT_FORMAT << (i * 4);
1118 }
1119 break;
1120 }
1121 }
1122
1123 for (unsigned i = subpass->color_count; i < 8; ++i) {
1124 sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
1125 sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
1126 }
1127 /* TODO: avoid redundantly setting context registers */
1128 radeon_set_context_reg_seq(cmd_buffer->cs, R_028754_SX_PS_DOWNCONVERT, 3);
1129 radeon_emit(cmd_buffer->cs, sx_ps_downconvert);
1130 radeon_emit(cmd_buffer->cs, sx_blend_opt_epsilon);
1131 radeon_emit(cmd_buffer->cs, sx_blend_opt_control);
1132
1133 cmd_buffer->state.context_roll_without_scissor_emitted = true;
1134 }
1135
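/* Emit the register state that was precomputed at pipeline creation. The
 * context register portion (ctx_cs) is skipped when its size, hash and
 * contents match the previously emitted pipeline, to avoid unnecessary
 * context rolls.
 */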
1136 static void
1137 radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer)
1138 {
1139 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
1140
1141 if (!pipeline || cmd_buffer->state.emitted_pipeline == pipeline)
1142 return;
1143
1144 radv_update_multisample_state(cmd_buffer, pipeline);
1145 radv_update_binning_state(cmd_buffer, pipeline);
1146
1147 cmd_buffer->scratch_size_needed =
1148 MAX2(cmd_buffer->scratch_size_needed,
1149 pipeline->max_waves * pipeline->scratch_bytes_per_wave);
1150
1151 if (!cmd_buffer->state.emitted_pipeline ||
1152 cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband !=
1153 pipeline->graphics.can_use_guardband)
1154 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
1155
1156 radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);
1157
1158 if (!cmd_buffer->state.emitted_pipeline ||
1159 cmd_buffer->state.emitted_pipeline->ctx_cs.cdw != pipeline->ctx_cs.cdw ||
1160 cmd_buffer->state.emitted_pipeline->ctx_cs_hash != pipeline->ctx_cs_hash ||
1161 memcmp(cmd_buffer->state.emitted_pipeline->ctx_cs.buf,
1162 pipeline->ctx_cs.buf, pipeline->ctx_cs.cdw * 4)) {
1163 radeon_emit_array(cmd_buffer->cs, pipeline->ctx_cs.buf, pipeline->ctx_cs.cdw);
1164 cmd_buffer->state.context_roll_without_scissor_emitted = true;
1165 }
1166
1167 for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) {
1168 if (!pipeline->shaders[i])
1169 continue;
1170
1171 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
1172 pipeline->shaders[i]->bo);
1173 }
1174
1175 if (radv_pipeline_has_gs_copy_shader(pipeline))
1176 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
1177 pipeline->gs_copy_shader->bo);
1178
1179 if (unlikely(cmd_buffer->device->trace_bo))
1180 radv_save_pipeline(cmd_buffer, pipeline, RING_GFX);
1181
1182 cmd_buffer->state.emitted_pipeline = pipeline;
1183
1184 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_PIPELINE;
1185 }
1186
1187 static void
1188 radv_emit_viewport(struct radv_cmd_buffer *cmd_buffer)
1189 {
1190 si_write_viewport(cmd_buffer->cs, 0, cmd_buffer->state.dynamic.viewport.count,
1191 cmd_buffer->state.dynamic.viewport.viewports);
1192 }
1193
1194 static void
1195 radv_emit_scissor(struct radv_cmd_buffer *cmd_buffer)
1196 {
1197 uint32_t count = cmd_buffer->state.dynamic.scissor.count;
1198
1199 si_write_scissors(cmd_buffer->cs, 0, count,
1200 cmd_buffer->state.dynamic.scissor.scissors,
1201 cmd_buffer->state.dynamic.viewport.viewports,
1202 cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband);
1203
1204 cmd_buffer->state.context_roll_without_scissor_emitted = false;
1205 }
1206
1207 static void
1208 radv_emit_discard_rectangle(struct radv_cmd_buffer *cmd_buffer)
1209 {
1210 if (!cmd_buffer->state.dynamic.discard_rectangle.count)
1211 return;
1212
1213 radeon_set_context_reg_seq(cmd_buffer->cs, R_028210_PA_SC_CLIPRECT_0_TL,
1214 cmd_buffer->state.dynamic.discard_rectangle.count * 2);
1215 for (unsigned i = 0; i < cmd_buffer->state.dynamic.discard_rectangle.count; ++i) {
1216 VkRect2D rect = cmd_buffer->state.dynamic.discard_rectangle.rectangles[i];
1217 radeon_emit(cmd_buffer->cs, S_028210_TL_X(rect.offset.x) | S_028210_TL_Y(rect.offset.y));
1218 radeon_emit(cmd_buffer->cs, S_028214_BR_X(rect.offset.x + rect.extent.width) |
1219 S_028214_BR_Y(rect.offset.y + rect.extent.height));
1220 }
1221 }
1222
1223 static void
1224 radv_emit_line_width(struct radv_cmd_buffer *cmd_buffer)
1225 {
1226 unsigned width = cmd_buffer->state.dynamic.line_width * 8;
1227
1228 radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL,
1229 S_028A08_WIDTH(CLAMP(width, 0, 0xFFF)));
1230 }
1231
1232 static void
1233 radv_emit_blend_constants(struct radv_cmd_buffer *cmd_buffer)
1234 {
1235 struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
1236
1237 radeon_set_context_reg_seq(cmd_buffer->cs, R_028414_CB_BLEND_RED, 4);
1238 radeon_emit_array(cmd_buffer->cs, (uint32_t *)d->blend_constants, 4);
1239 }
1240
1241 static void
1242 radv_emit_stencil(struct radv_cmd_buffer *cmd_buffer)
1243 {
1244 struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
1245
1246 radeon_set_context_reg_seq(cmd_buffer->cs,
1247 R_028430_DB_STENCILREFMASK, 2);
1248 radeon_emit(cmd_buffer->cs,
1249 S_028430_STENCILTESTVAL(d->stencil_reference.front) |
1250 S_028430_STENCILMASK(d->stencil_compare_mask.front) |
1251 S_028430_STENCILWRITEMASK(d->stencil_write_mask.front) |
1252 S_028430_STENCILOPVAL(1));
1253 radeon_emit(cmd_buffer->cs,
1254 S_028434_STENCILTESTVAL_BF(d->stencil_reference.back) |
1255 S_028434_STENCILMASK_BF(d->stencil_compare_mask.back) |
1256 S_028434_STENCILWRITEMASK_BF(d->stencil_write_mask.back) |
1257 S_028434_STENCILOPVAL_BF(1));
1258 }
1259
1260 static void
1261 radv_emit_depth_bounds(struct radv_cmd_buffer *cmd_buffer)
1262 {
1263 struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
1264
1265 radeon_set_context_reg(cmd_buffer->cs, R_028020_DB_DEPTH_BOUNDS_MIN,
1266 fui(d->depth_bounds.min));
1267 radeon_set_context_reg(cmd_buffer->cs, R_028024_DB_DEPTH_BOUNDS_MAX,
1268 fui(d->depth_bounds.max));
1269 }
1270
1271 static void
1272 radv_emit_depth_bias(struct radv_cmd_buffer *cmd_buffer)
1273 {
1274 struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
1275 unsigned slope = fui(d->depth_bias.slope * 16.0f);
1276 unsigned bias = fui(d->depth_bias.bias * cmd_buffer->state.offset_scale);
1277
1278
1279 radeon_set_context_reg_seq(cmd_buffer->cs,
1280 R_028B7C_PA_SU_POLY_OFFSET_CLAMP, 5);
1281 radeon_emit(cmd_buffer->cs, fui(d->depth_bias.clamp)); /* CLAMP */
1282 radeon_emit(cmd_buffer->cs, slope); /* FRONT SCALE */
1283 radeon_emit(cmd_buffer->cs, bias); /* FRONT OFFSET */
1284 radeon_emit(cmd_buffer->cs, slope); /* BACK SCALE */
1285 radeon_emit(cmd_buffer->cs, bias); /* BACK OFFSET */
1286 }
1287
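/* Emit the CB_COLOR* state for one color attachment. DCC is disabled when
 * the current layout does not allow compressed access, and the register
 * layout differs per generation (GFX10, GFX9, and older chips).
 */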
1288 static void
1289 radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
1290 int index,
1291 struct radv_color_buffer_info *cb,
1292 struct radv_image_view *iview,
1293 VkImageLayout layout,
1294 bool in_render_loop)
1295 {
1296 bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8;
1297 uint32_t cb_color_info = cb->cb_color_info;
1298 struct radv_image *image = iview->image;
1299
1300 if (!radv_layout_dcc_compressed(cmd_buffer->device, image, layout, in_render_loop,
1301 radv_image_queue_family_mask(image,
1302 cmd_buffer->queue_family_index,
1303 cmd_buffer->queue_family_index))) {
1304 cb_color_info &= C_028C70_DCC_ENABLE;
1305 }
1306
1307 if (radv_image_is_tc_compat_cmask(image) &&
1308 (radv_is_fmask_decompress_pipeline(cmd_buffer) ||
1309 radv_is_dcc_decompress_pipeline(cmd_buffer))) {
1310 /* If this bit is set, the FMASK decompression operation
1311 * doesn't occur (DCC_COMPRESS also implies FMASK_DECOMPRESS).
1312 */
1313 cb_color_info &= C_028C70_FMASK_COMPRESS_1FRAG_ONLY;
1314 }
1315
1316 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
1317 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
1318 radeon_emit(cmd_buffer->cs, cb->cb_color_base);
1319 radeon_emit(cmd_buffer->cs, 0);
1320 radeon_emit(cmd_buffer->cs, 0);
1321 radeon_emit(cmd_buffer->cs, cb->cb_color_view);
1322 radeon_emit(cmd_buffer->cs, cb_color_info);
1323 radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
1324 radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
1325 radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
1326 radeon_emit(cmd_buffer->cs, 0);
1327 radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
1328 radeon_emit(cmd_buffer->cs, 0);
1329
1330 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, 1);
1331 radeon_emit(cmd_buffer->cs, cb->cb_dcc_base);
1332
1333 radeon_set_context_reg(cmd_buffer->cs, R_028E40_CB_COLOR0_BASE_EXT + index * 4,
1334 cb->cb_color_base >> 32);
1335 radeon_set_context_reg(cmd_buffer->cs, R_028E60_CB_COLOR0_CMASK_BASE_EXT + index * 4,
1336 cb->cb_color_cmask >> 32);
1337 radeon_set_context_reg(cmd_buffer->cs, R_028E80_CB_COLOR0_FMASK_BASE_EXT + index * 4,
1338 cb->cb_color_fmask >> 32);
1339 radeon_set_context_reg(cmd_buffer->cs, R_028EA0_CB_COLOR0_DCC_BASE_EXT + index * 4,
1340 cb->cb_dcc_base >> 32);
1341 radeon_set_context_reg(cmd_buffer->cs, R_028EC0_CB_COLOR0_ATTRIB2 + index * 4,
1342 cb->cb_color_attrib2);
1343 radeon_set_context_reg(cmd_buffer->cs, R_028EE0_CB_COLOR0_ATTRIB3 + index * 4,
1344 cb->cb_color_attrib3);
1345 } else if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
1346 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
1347 radeon_emit(cmd_buffer->cs, cb->cb_color_base);
1348 radeon_emit(cmd_buffer->cs, S_028C64_BASE_256B(cb->cb_color_base >> 32));
1349 radeon_emit(cmd_buffer->cs, cb->cb_color_attrib2);
1350 radeon_emit(cmd_buffer->cs, cb->cb_color_view);
1351 radeon_emit(cmd_buffer->cs, cb_color_info);
1352 radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
1353 radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
1354 radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
1355 radeon_emit(cmd_buffer->cs, S_028C80_BASE_256B(cb->cb_color_cmask >> 32));
1356 radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
1357 radeon_emit(cmd_buffer->cs, S_028C88_BASE_256B(cb->cb_color_fmask >> 32));
1358
1359 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, 2);
1360 radeon_emit(cmd_buffer->cs, cb->cb_dcc_base);
1361 radeon_emit(cmd_buffer->cs, S_028C98_BASE_256B(cb->cb_dcc_base >> 32));
1362
1363 radeon_set_context_reg(cmd_buffer->cs, R_0287A0_CB_MRT0_EPITCH + index * 4,
1364 cb->cb_mrt_epitch);
1365 } else {
1366 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
1367 radeon_emit(cmd_buffer->cs, cb->cb_color_base);
1368 radeon_emit(cmd_buffer->cs, cb->cb_color_pitch);
1369 radeon_emit(cmd_buffer->cs, cb->cb_color_slice);
1370 radeon_emit(cmd_buffer->cs, cb->cb_color_view);
1371 radeon_emit(cmd_buffer->cs, cb_color_info);
1372 radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
1373 radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
1374 radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
1375 radeon_emit(cmd_buffer->cs, cb->cb_color_cmask_slice);
1376 radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
1377 radeon_emit(cmd_buffer->cs, cb->cb_color_fmask_slice);
1378
1379 if (is_vi) { /* DCC BASE */
1380 radeon_set_context_reg(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, cb->cb_dcc_base);
1381 }
1382 }
1383
1384 if (radv_dcc_enabled(image, iview->base_mip)) {
1385 /* Drawing with DCC enabled also compresses colorbuffers. */
1386 VkImageSubresourceRange range = {
1387 .aspectMask = iview->aspect_mask,
1388 .baseMipLevel = iview->base_mip,
1389 .levelCount = iview->level_count,
1390 .baseArrayLayer = iview->base_layer,
1391 .layerCount = iview->layer_count,
1392 };
1393
1394 radv_update_dcc_metadata(cmd_buffer, image, &range, true);
1395 }
1396 }
1397
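/* Work around the TC-compat HTILE zrange bug by re-emitting DB_Z_INFO with
 * ZRANGE_PRECISION cleared. When requires_cond_exec is set, the register
 * write is guarded by a COND_EXEC packet that reads the per-mip TC-compat
 * zrange metadata, so it can be skipped when it is not needed.
 */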
1398 static void
1399 radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer,
1400 struct radv_ds_buffer_info *ds,
1401 const struct radv_image_view *iview,
1402 VkImageLayout layout,
1403 bool in_render_loop, bool requires_cond_exec)
1404 {
1405 const struct radv_image *image = iview->image;
1406 uint32_t db_z_info = ds->db_z_info;
1407 uint32_t db_z_info_reg;
1408
1409 if (!cmd_buffer->device->physical_device->rad_info.has_tc_compat_zrange_bug ||
1410 !radv_image_is_tc_compat_htile(image))
1411 return;
1412
1413 if (!radv_layout_has_htile(image, layout, in_render_loop,
1414 radv_image_queue_family_mask(image,
1415 cmd_buffer->queue_family_index,
1416 cmd_buffer->queue_family_index))) {
1417 db_z_info &= C_028040_TILE_SURFACE_ENABLE;
1418 }
1419
1420 db_z_info &= C_028040_ZRANGE_PRECISION;
1421
1422 if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
1423 db_z_info_reg = R_028038_DB_Z_INFO;
1424 } else {
1425 db_z_info_reg = R_028040_DB_Z_INFO;
1426 }
1427
1428 /* When we don't know the last fast clear value we need to emit a
1429 * conditional packet that will eventually skip the following
1430 * SET_CONTEXT_REG packet.
1431 */
1432 if (requires_cond_exec) {
1433 uint64_t va = radv_get_tc_compat_zrange_va(image, iview->base_mip);
1434
1435 radeon_emit(cmd_buffer->cs, PKT3(PKT3_COND_EXEC, 3, 0));
1436 radeon_emit(cmd_buffer->cs, va);
1437 radeon_emit(cmd_buffer->cs, va >> 32);
1438 radeon_emit(cmd_buffer->cs, 0);
1439 radeon_emit(cmd_buffer->cs, 3); /* SET_CONTEXT_REG size */
1440 }
1441
1442 radeon_set_context_reg(cmd_buffer->cs, db_z_info_reg, db_z_info);
1443 }
1444
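/* Emit the DB_* depth/stencil surface state. HTILE is only kept enabled
 * when the current layout allows it; otherwise TILE_SURFACE_ENABLE is
 * cleared and the stencil tile is disabled.
 */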
1445 static void
1446 radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer,
1447 struct radv_ds_buffer_info *ds,
1448 struct radv_image_view *iview,
1449 VkImageLayout layout,
1450 bool in_render_loop)
1451 {
1452 const struct radv_image *image = iview->image;
1453 uint32_t db_z_info = ds->db_z_info;
1454 uint32_t db_stencil_info = ds->db_stencil_info;
1455
1456 if (!radv_layout_has_htile(image, layout, in_render_loop,
1457 radv_image_queue_family_mask(image,
1458 cmd_buffer->queue_family_index,
1459 cmd_buffer->queue_family_index))) {
1460 db_z_info &= C_028040_TILE_SURFACE_ENABLE;
1461 db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
1462 }
1463
1464 radeon_set_context_reg(cmd_buffer->cs, R_028008_DB_DEPTH_VIEW, ds->db_depth_view);
1465 radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface);
1466
1467 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
1468 radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base);
1469 radeon_set_context_reg(cmd_buffer->cs, R_02801C_DB_DEPTH_SIZE_XY, ds->db_depth_size);
1470
1471 radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 7);
1472 radeon_emit(cmd_buffer->cs, S_02803C_RESOURCE_LEVEL(1));
1473 radeon_emit(cmd_buffer->cs, db_z_info);
1474 radeon_emit(cmd_buffer->cs, db_stencil_info);
1475 radeon_emit(cmd_buffer->cs, ds->db_z_read_base);
1476 radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base);
1477 radeon_emit(cmd_buffer->cs, ds->db_z_read_base);
1478 radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base);
1479
1480 radeon_set_context_reg_seq(cmd_buffer->cs, R_028068_DB_Z_READ_BASE_HI, 5);
1481 radeon_emit(cmd_buffer->cs, ds->db_z_read_base >> 32);
1482 radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base >> 32);
1483 radeon_emit(cmd_buffer->cs, ds->db_z_read_base >> 32);
1484 radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base >> 32);
1485 radeon_emit(cmd_buffer->cs, ds->db_htile_data_base >> 32);
1486 } else if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
1487 radeon_set_context_reg_seq(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, 3);
1488 radeon_emit(cmd_buffer->cs, ds->db_htile_data_base);
1489 radeon_emit(cmd_buffer->cs, S_028018_BASE_HI(ds->db_htile_data_base >> 32));
1490 radeon_emit(cmd_buffer->cs, ds->db_depth_size);
1491
1492 radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 10);
1493 radeon_emit(cmd_buffer->cs, db_z_info); /* DB_Z_INFO */
1494 radeon_emit(cmd_buffer->cs, db_stencil_info); /* DB_STENCIL_INFO */
1495 radeon_emit(cmd_buffer->cs, ds->db_z_read_base); /* DB_Z_READ_BASE */
1496 radeon_emit(cmd_buffer->cs, S_028044_BASE_HI(ds->db_z_read_base >> 32)); /* DB_Z_READ_BASE_HI */
1497 radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base); /* DB_STENCIL_READ_BASE */
1498 radeon_emit(cmd_buffer->cs, S_02804C_BASE_HI(ds->db_stencil_read_base >> 32)); /* DB_STENCIL_READ_BASE_HI */
1499 radeon_emit(cmd_buffer->cs, ds->db_z_write_base); /* DB_Z_WRITE_BASE */
1500 radeon_emit(cmd_buffer->cs, S_028054_BASE_HI(ds->db_z_write_base >> 32)); /* DB_Z_WRITE_BASE_HI */
1501 radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base); /* DB_STENCIL_WRITE_BASE */
1502 radeon_emit(cmd_buffer->cs, S_02805C_BASE_HI(ds->db_stencil_write_base >> 32)); /* DB_STENCIL_WRITE_BASE_HI */
1503
1504 radeon_set_context_reg_seq(cmd_buffer->cs, R_028068_DB_Z_INFO2, 2);
1505 radeon_emit(cmd_buffer->cs, ds->db_z_info2);
1506 radeon_emit(cmd_buffer->cs, ds->db_stencil_info2);
1507 } else {
1508 radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base);
1509
1510 radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 9);
1511 radeon_emit(cmd_buffer->cs, ds->db_depth_info); /* R_02803C_DB_DEPTH_INFO */
1512 radeon_emit(cmd_buffer->cs, db_z_info); /* R_028040_DB_Z_INFO */
1513 radeon_emit(cmd_buffer->cs, db_stencil_info); /* R_028044_DB_STENCIL_INFO */
1514 radeon_emit(cmd_buffer->cs, ds->db_z_read_base); /* R_028048_DB_Z_READ_BASE */
1515 radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base); /* R_02804C_DB_STENCIL_READ_BASE */
1516 radeon_emit(cmd_buffer->cs, ds->db_z_write_base); /* R_028050_DB_Z_WRITE_BASE */
1517 radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base); /* R_028054_DB_STENCIL_WRITE_BASE */
1518 radeon_emit(cmd_buffer->cs, ds->db_depth_size); /* R_028058_DB_DEPTH_SIZE */
1519 radeon_emit(cmd_buffer->cs, ds->db_depth_slice); /* R_02805C_DB_DEPTH_SLICE */
1520
1521 }
1522
1523 /* Update the ZRANGE_PRECISION value for the TC-compat bug. */
1524 radv_update_zrange_precision(cmd_buffer, ds, iview, layout,
1525 in_render_loop, true);
1526
1527 radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
1528 ds->pa_su_poly_offset_db_fmt_cntl);
1529 }
1530
1531 /**
1532 * Update the fast clear depth/stencil values if the image is bound as a
1533 * depth/stencil buffer.
1534 */
1535 static void
1536 radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer,
1537 const struct radv_image_view *iview,
1538 VkClearDepthStencilValue ds_clear_value,
1539 VkImageAspectFlags aspects)
1540 {
1541 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
1542 const struct radv_image *image = iview->image;
1543 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1544 uint32_t att_idx;
1545
1546 if (!cmd_buffer->state.attachments || !subpass)
1547 return;
1548
1549 if (!subpass->depth_stencil_attachment)
1550 return;
1551
1552 att_idx = subpass->depth_stencil_attachment->attachment;
1553 if (cmd_buffer->state.attachments[att_idx].iview->image != image)
1554 return;
1555
1556 radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 2);
1557 radeon_emit(cs, ds_clear_value.stencil);
1558 radeon_emit(cs, fui(ds_clear_value.depth));
1559
1560 /* Update the ZRANGE_PRECISION value for the TC-compat bug. This is
1561 * only needed when clearing Z to 0.0.
1562 */
1563 if ((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
1564 ds_clear_value.depth == 0.0) {
1565 VkImageLayout layout = subpass->depth_stencil_attachment->layout;
1566 bool in_render_loop = subpass->depth_stencil_attachment->in_render_loop;
1567
1568 radv_update_zrange_precision(cmd_buffer, &cmd_buffer->state.attachments[att_idx].ds,
1569 iview, layout, in_render_loop, false);
1570 }
1571
1572 cmd_buffer->state.context_roll_without_scissor_emitted = true;
1573 }
1574
1575 /**
1576 * Set the clear depth/stencil values to the image's metadata.
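 *
 * The metadata stores two dwords per mip level: the stencil clear value
 * followed by the depth clear value encoded as a 32-bit float.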
1577 */
1578 static void
1579 radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1580 struct radv_image *image,
1581 const VkImageSubresourceRange *range,
1582 VkClearDepthStencilValue ds_clear_value,
1583 VkImageAspectFlags aspects)
1584 {
1585 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1586 uint64_t va = radv_get_ds_clear_value_va(image, range->baseMipLevel);
1587 uint32_t level_count = radv_get_levelCount(image, range);
1588
1589 if (aspects == (VK_IMAGE_ASPECT_DEPTH_BIT |
1590 VK_IMAGE_ASPECT_STENCIL_BIT)) {
1591 /* Use the fastest way when both aspects are used. */
1592 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + 2 * level_count, cmd_buffer->state.predicating));
1593 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
1594 S_370_WR_CONFIRM(1) |
1595 S_370_ENGINE_SEL(V_370_PFP));
1596 radeon_emit(cs, va);
1597 radeon_emit(cs, va >> 32);
1598
1599 for (uint32_t l = 0; l < level_count; l++) {
1600 radeon_emit(cs, ds_clear_value.stencil);
1601 radeon_emit(cs, fui(ds_clear_value.depth));
1602 }
1603 } else {
1604 /* Otherwise we need one WRITE_DATA packet per level. */
1605 for (uint32_t l = 0; l < level_count; l++) {
1606 uint64_t va = radv_get_ds_clear_value_va(image, range->baseMipLevel + l);
1607 unsigned value;
1608
1609 if (aspects == VK_IMAGE_ASPECT_DEPTH_BIT) {
1610 value = fui(ds_clear_value.depth);
1611 va += 4;
1612 } else {
1613 assert(aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
1614 value = ds_clear_value.stencil;
1615 }
1616
1617 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, cmd_buffer->state.predicating));
1618 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
1619 S_370_WR_CONFIRM(1) |
1620 S_370_ENGINE_SEL(V_370_PFP));
1621 radeon_emit(cs, va);
1622 radeon_emit(cs, va >> 32);
1623 radeon_emit(cs, value);
1624 }
1625 }
1626 }
1627
1628 /**
1629 * Update the TC-compat metadata value for this image.
1630 */
1631 static void
1632 radv_set_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer,
1633 struct radv_image *image,
1634 const VkImageSubresourceRange *range,
1635 uint32_t value)
1636 {
1637 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1638
1639 if (!cmd_buffer->device->physical_device->rad_info.has_tc_compat_zrange_bug)
1640 return;
1641
1642 uint64_t va = radv_get_tc_compat_zrange_va(image, range->baseMipLevel);
1643 uint32_t level_count = radv_get_levelCount(image, range);
1644
1645 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + level_count, cmd_buffer->state.predicating));
1646 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
1647 S_370_WR_CONFIRM(1) |
1648 S_370_ENGINE_SEL(V_370_PFP));
1649 radeon_emit(cs, va);
1650 radeon_emit(cs, va >> 32);
1651
1652 for (uint32_t l = 0; l < level_count; l++)
1653 radeon_emit(cs, value);
1654 }
1655
1656 static void
1657 radv_update_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer,
1658 const struct radv_image_view *iview,
1659 VkClearDepthStencilValue ds_clear_value)
1660 {
1661 VkImageSubresourceRange range = {
1662 .aspectMask = iview->aspect_mask,
1663 .baseMipLevel = iview->base_mip,
1664 .levelCount = iview->level_count,
1665 .baseArrayLayer = iview->base_layer,
1666 .layerCount = iview->layer_count,
1667 };
1668 uint32_t cond_val;
1669
1670 /* Conditionally set DB_Z_INFO.ZRANGE_PRECISION to 0 when the last
1671 * depth clear value is 0.0f.
1672 */
1673 cond_val = ds_clear_value.depth == 0.0f ? UINT_MAX : 0;
1674
1675 radv_set_tc_compat_zrange_metadata(cmd_buffer, iview->image, &range,
1676 cond_val);
1677 }
1678
1679 /**
1680 * Update the clear depth/stencil values for this image.
1681 */
1682 void
1683 radv_update_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1684 const struct radv_image_view *iview,
1685 VkClearDepthStencilValue ds_clear_value,
1686 VkImageAspectFlags aspects)
1687 {
1688 VkImageSubresourceRange range = {
1689 .aspectMask = iview->aspect_mask,
1690 .baseMipLevel = iview->base_mip,
1691 .levelCount = iview->level_count,
1692 .baseArrayLayer = iview->base_layer,
1693 .layerCount = iview->layer_count,
1694 };
1695 struct radv_image *image = iview->image;
1696
1697 assert(radv_image_has_htile(image));
1698
1699 radv_set_ds_clear_metadata(cmd_buffer, iview->image, &range,
1700 ds_clear_value, aspects);
1701
1702 if (radv_image_is_tc_compat_htile(image) &&
1703 (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
1704 radv_update_tc_compat_zrange_metadata(cmd_buffer, iview,
1705 ds_clear_value);
1706 }
1707
1708 radv_update_bound_fast_clear_ds(cmd_buffer, iview, ds_clear_value,
1709 aspects);
1710 }
1711
1712 /**
1713 * Load the clear depth/stencil values from the image's metadata.
1714 */
1715 static void
1716 radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1717 const struct radv_image_view *iview)
1718 {
1719 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1720 const struct radv_image *image = iview->image;
1721 VkImageAspectFlags aspects = vk_format_aspects(image->vk_format);
1722 uint64_t va = radv_get_ds_clear_value_va(image, iview->base_mip);
1723 unsigned reg_offset = 0, reg_count = 0;
1724
1725 if (!radv_image_has_htile(image))
1726 return;
1727
1728 if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
1729 ++reg_count;
1730 } else {
1731 ++reg_offset;
1732 va += 4;
1733 }
1734 if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
1735 ++reg_count;
1736
1737 uint32_t reg = R_028028_DB_STENCIL_CLEAR + 4 * reg_offset;
1738
1739 if (cmd_buffer->device->physical_device->rad_info.has_load_ctx_reg_pkt) {
1740 radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, 0));
1741 radeon_emit(cs, va);
1742 radeon_emit(cs, va >> 32);
1743 radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
1744 radeon_emit(cs, reg_count);
1745 } else {
1746 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1747 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
1748 COPY_DATA_DST_SEL(COPY_DATA_REG) |
1749 (reg_count == 2 ? COPY_DATA_COUNT_SEL : 0));
1750 radeon_emit(cs, va);
1751 radeon_emit(cs, va >> 32);
1752 radeon_emit(cs, reg >> 2);
1753 radeon_emit(cs, 0);
1754
1755 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
1756 radeon_emit(cs, 0);
1757 }
1758 }
1759
1760 /*
1761  * With DCC, some color surfaces don't require a fast clear eliminate
1762  * (FCE) before being used as a texture. This writes a predicate value
1763  * per mip level that determines whether the FCE pass is still required.
1764 */
1765 void
1766 radv_update_fce_metadata(struct radv_cmd_buffer *cmd_buffer,
1767 struct radv_image *image,
1768 const VkImageSubresourceRange *range, bool value)
1769 {
1770 uint64_t pred_val = value;
1771 uint64_t va = radv_image_get_fce_pred_va(image, range->baseMipLevel);
1772 uint32_t level_count = radv_get_levelCount(image, range);
1773 uint32_t count = 2 * level_count;
1774
1775 assert(radv_dcc_enabled(image, range->baseMipLevel));
1776
1777 radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
1778 radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM) |
1779 S_370_WR_CONFIRM(1) |
1780 S_370_ENGINE_SEL(V_370_PFP));
1781 radeon_emit(cmd_buffer->cs, va);
1782 radeon_emit(cmd_buffer->cs, va >> 32);
1783
1784 for (uint32_t l = 0; l < level_count; l++) {
1785 radeon_emit(cmd_buffer->cs, pred_val);
1786 radeon_emit(cmd_buffer->cs, pred_val >> 32);
1787 }
1788 }
1789
1790 /**
1791 * Update the DCC predicate to reflect the compression state.
1792 */
1793 void
1794 radv_update_dcc_metadata(struct radv_cmd_buffer *cmd_buffer,
1795 struct radv_image *image,
1796 const VkImageSubresourceRange *range, bool value)
1797 {
1798 uint64_t pred_val = value;
1799 uint64_t va = radv_image_get_dcc_pred_va(image, range->baseMipLevel);
1800 uint32_t level_count = radv_get_levelCount(image, range);
1801 uint32_t count = 2 * level_count;
1802
1803 assert(radv_dcc_enabled(image, range->baseMipLevel));
1804
1805 radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
1806 radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM) |
1807 S_370_WR_CONFIRM(1) |
1808 S_370_ENGINE_SEL(V_370_PFP));
1809 radeon_emit(cmd_buffer->cs, va);
1810 radeon_emit(cmd_buffer->cs, va >> 32);
1811
1812 for (uint32_t l = 0; l < level_count; l++) {
1813 radeon_emit(cmd_buffer->cs, pred_val);
1814 radeon_emit(cmd_buffer->cs, pred_val >> 32);
1815 }
1816 }
1817
1818 /**
1819 * Update the fast clear color values if the image is bound as a color buffer.
1820 */
1821 static void
1822 radv_update_bound_fast_clear_color(struct radv_cmd_buffer *cmd_buffer,
1823 struct radv_image *image,
1824 int cb_idx,
1825 uint32_t color_values[2])
1826 {
1827 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
1828 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1829 uint32_t att_idx;
1830
1831 if (!cmd_buffer->state.attachments || !subpass)
1832 return;
1833
1834 att_idx = subpass->color_attachments[cb_idx].attachment;
1835 if (att_idx == VK_ATTACHMENT_UNUSED)
1836 return;
1837
1838 if (cmd_buffer->state.attachments[att_idx].iview->image != image)
1839 return;
1840
1841 radeon_set_context_reg_seq(cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c, 2);
1842 radeon_emit(cs, color_values[0]);
1843 radeon_emit(cs, color_values[1]);
1844
1845 cmd_buffer->state.context_roll_without_scissor_emitted = true;
1846 }
1847
1848 /**
1849 * Set the clear color values to the image's metadata.
1850 */
1851 static void
1852 radv_set_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1853 struct radv_image *image,
1854 const VkImageSubresourceRange *range,
1855 uint32_t color_values[2])
1856 {
1857 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1858 uint64_t va = radv_image_get_fast_clear_va(image, range->baseMipLevel);
1859 uint32_t level_count = radv_get_levelCount(image, range);
1860 uint32_t count = 2 * level_count;
1861
1862 assert(radv_image_has_cmask(image) ||
1863 radv_dcc_enabled(image, range->baseMipLevel));
1864
1865 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, cmd_buffer->state.predicating));
1866 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
1867 S_370_WR_CONFIRM(1) |
1868 S_370_ENGINE_SEL(V_370_PFP));
1869 radeon_emit(cs, va);
1870 radeon_emit(cs, va >> 32);
1871
1872 for (uint32_t l = 0; l < level_count; l++) {
1873 radeon_emit(cs, color_values[0]);
1874 radeon_emit(cs, color_values[1]);
1875 }
1876 }
1877
1878 /**
1879 * Update the clear color values for this image.
1880 */
1881 void
1882 radv_update_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1883 const struct radv_image_view *iview,
1884 int cb_idx,
1885 uint32_t color_values[2])
1886 {
1887 struct radv_image *image = iview->image;
1888 VkImageSubresourceRange range = {
1889 .aspectMask = iview->aspect_mask,
1890 .baseMipLevel = iview->base_mip,
1891 .levelCount = iview->level_count,
1892 .baseArrayLayer = iview->base_layer,
1893 .layerCount = iview->layer_count,
1894 };
1895
1896 assert(radv_image_has_cmask(image) ||
1897 radv_dcc_enabled(image, iview->base_mip));
1898
1899 radv_set_color_clear_metadata(cmd_buffer, image, &range, color_values);
1900
1901 radv_update_bound_fast_clear_color(cmd_buffer, image, cb_idx,
1902 color_values);
1903 }
1904
1905 /**
1906 * Load the clear color values from the image's metadata.
1907 */
1908 static void
1909 radv_load_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1910 struct radv_image_view *iview,
1911 int cb_idx)
1912 {
1913 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1914 struct radv_image *image = iview->image;
1915 uint64_t va = radv_image_get_fast_clear_va(image, iview->base_mip);
1916
1917 if (!radv_image_has_cmask(image) &&
1918 !radv_dcc_enabled(image, iview->base_mip))
1919 return;
1920
1921 uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c;
1922
1923 if (cmd_buffer->device->physical_device->rad_info.has_load_ctx_reg_pkt) {
1924 radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, cmd_buffer->state.predicating));
1925 radeon_emit(cs, va);
1926 radeon_emit(cs, va >> 32);
1927 radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
1928 radeon_emit(cs, 2);
1929 } else {
1930 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
1931 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
1932 COPY_DATA_DST_SEL(COPY_DATA_REG) |
1933 COPY_DATA_COUNT_SEL);
1934 radeon_emit(cs, va);
1935 radeon_emit(cs, va >> 32);
1936 radeon_emit(cs, reg >> 2);
1937 radeon_emit(cs, 0);
1938
1939 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
1940 radeon_emit(cs, 0);
1941 }
1942 }
1943
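/**
 * Emit the color and depth/stencil surface state for the attachments of
 * the current subpass.
 */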
1944 static void
1945 radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
1946 {
1947 int i;
1948 struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
1949 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
1950
1951 	/* This may happen when recording an inherited secondary command buffer. */
1952 if (!framebuffer)
1953 return;
1954
1955 for (i = 0; i < 8; ++i) {
1956 if (i >= subpass->color_count || subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
1957 radeon_set_context_reg(cmd_buffer->cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
1958 S_028C70_FORMAT(V_028C70_COLOR_INVALID));
1959 continue;
1960 }
1961
1962 int idx = subpass->color_attachments[i].attachment;
1963 struct radv_image_view *iview = cmd_buffer->state.attachments[idx].iview;
1964 VkImageLayout layout = subpass->color_attachments[i].layout;
1965 bool in_render_loop = subpass->color_attachments[i].in_render_loop;
1966
1967 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, iview->bo);
1968
1969 assert(iview->aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT |
1970 VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT));
1971 radv_emit_fb_color_state(cmd_buffer, i, &cmd_buffer->state.attachments[idx].cb, iview, layout, in_render_loop);
1972
1973 radv_load_color_clear_metadata(cmd_buffer, iview, i);
1974 }
1975
1976 if (subpass->depth_stencil_attachment) {
1977 int idx = subpass->depth_stencil_attachment->attachment;
1978 VkImageLayout layout = subpass->depth_stencil_attachment->layout;
1979 bool in_render_loop = subpass->depth_stencil_attachment->in_render_loop;
1980 struct radv_image_view *iview = cmd_buffer->state.attachments[idx].iview;
1981 struct radv_image *image = iview->image;
1982 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, cmd_buffer->state.attachments[idx].iview->bo);
1983 ASSERTED uint32_t queue_mask = radv_image_queue_family_mask(image,
1984 cmd_buffer->queue_family_index,
1985 cmd_buffer->queue_family_index);
1986 /* We currently don't support writing decompressed HTILE */
1987 assert(radv_layout_has_htile(image, layout, in_render_loop, queue_mask) ==
1988 radv_layout_is_htile_compressed(image, layout, in_render_loop, queue_mask));
1989
1990 radv_emit_fb_ds_state(cmd_buffer, &cmd_buffer->state.attachments[idx].ds, iview, layout, in_render_loop);
1991
1992 if (cmd_buffer->state.attachments[idx].ds.offset_scale != cmd_buffer->state.offset_scale) {
1993 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
1994 cmd_buffer->state.offset_scale = cmd_buffer->state.attachments[idx].ds.offset_scale;
1995 }
1996 radv_load_ds_clear_metadata(cmd_buffer, iview);
1997 } else {
1998 if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9)
1999 radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 2);
2000 else
2001 radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2);
2002
2003 radeon_emit(cmd_buffer->cs, S_028040_FORMAT(V_028040_Z_INVALID)); /* DB_Z_INFO */
2004 radeon_emit(cmd_buffer->cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* DB_STENCIL_INFO */
2005 }
2006 radeon_set_context_reg(cmd_buffer->cs, R_028208_PA_SC_WINDOW_SCISSOR_BR,
2007 S_028208_BR_X(framebuffer->width) |
2008 S_028208_BR_Y(framebuffer->height));
2009
2010 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8) {
2011 bool disable_constant_encode =
2012 cmd_buffer->device->physical_device->rad_info.has_dcc_constant_encode;
2013 enum chip_class chip_class =
2014 cmd_buffer->device->physical_device->rad_info.chip_class;
2015 uint8_t watermark = chip_class >= GFX10 ? 6 : 4;
2016
2017 radeon_set_context_reg(cmd_buffer->cs, R_028424_CB_DCC_CONTROL,
2018 S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(chip_class <= GFX9) |
2019 S_028424_OVERWRITE_COMBINER_WATERMARK(watermark) |
2020 S_028424_DISABLE_CONSTANT_ENCODE_REG(disable_constant_encode));
2021 }
2022
2023 if (cmd_buffer->device->dfsm_allowed) {
2024 radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
2025 radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
2026 }
2027
2028 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_FRAMEBUFFER;
2029 }
2030
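/**
 * Emit the index buffer type, base address and size.
 */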
2031 static void
2032 radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer)
2033 {
2034 struct radeon_cmdbuf *cs = cmd_buffer->cs;
2035 struct radv_cmd_state *state = &cmd_buffer->state;
2036
2037 if (state->index_type != state->last_index_type) {
2038 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
2039 radeon_set_uconfig_reg_idx(cmd_buffer->device->physical_device,
2040 cs, R_03090C_VGT_INDEX_TYPE,
2041 2, state->index_type);
2042 } else {
2043 radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
2044 radeon_emit(cs, state->index_type);
2045 }
2046
2047 state->last_index_type = state->index_type;
2048 }
2049
2050 radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
2051 radeon_emit(cs, state->index_va);
2052 radeon_emit(cs, state->index_va >> 32);
2053
2054 radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
2055 radeon_emit(cs, state->max_index_count);
2056
2057 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_INDEX_BUFFER;
2058 }
2059
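/**
 * Program DB_COUNT_CONTROL for the current occlusion query state, and
 * toggle out-of-order rasterization when it conflicts with perfect
 * occlusion queries.
 */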
2060 void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
2061 {
2062 bool has_perfect_queries = cmd_buffer->state.perfect_occlusion_queries_enabled;
2063 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
2064 uint32_t pa_sc_mode_cntl_1 =
2065 pipeline ? pipeline->graphics.ms.pa_sc_mode_cntl_1 : 0;
2066 uint32_t db_count_control;
2067
2068 	if (!cmd_buffer->state.active_occlusion_queries) {
2069 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
2070 if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
2071 pipeline->graphics.disable_out_of_order_rast_for_occlusion &&
2072 has_perfect_queries) {
2073 /* Re-enable out-of-order rasterization if the
2074 				 * bound pipeline supports it and if it has
2075 * been disabled before starting any perfect
2076 * occlusion queries.
2077 */
2078 radeon_set_context_reg(cmd_buffer->cs,
2079 R_028A4C_PA_SC_MODE_CNTL_1,
2080 pa_sc_mode_cntl_1);
2081 }
2082 }
2083 db_count_control = S_028004_ZPASS_INCREMENT_DISABLE(1);
2084 } else {
2085 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
2086 uint32_t sample_rate = subpass ? util_logbase2(subpass->max_sample_count) : 0;
2087 bool gfx10_perfect = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10 && has_perfect_queries;
2088
2089 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
2090 db_count_control =
2091 S_028004_PERFECT_ZPASS_COUNTS(has_perfect_queries) |
2092 S_028004_DISABLE_CONSERVATIVE_ZPASS_COUNTS(gfx10_perfect) |
2093 S_028004_SAMPLE_RATE(sample_rate) |
2094 S_028004_ZPASS_ENABLE(1) |
2095 S_028004_SLICE_EVEN_ENABLE(1) |
2096 S_028004_SLICE_ODD_ENABLE(1);
2097
2098 if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
2099 pipeline->graphics.disable_out_of_order_rast_for_occlusion &&
2100 has_perfect_queries) {
2101 /* If the bound pipeline has enabled
2102 * out-of-order rasterization, we should
2103 * disable it before starting any perfect
2104 * occlusion queries.
2105 */
2106 pa_sc_mode_cntl_1 &= C_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE;
2107
2108 radeon_set_context_reg(cmd_buffer->cs,
2109 R_028A4C_PA_SC_MODE_CNTL_1,
2110 pa_sc_mode_cntl_1);
2111 }
2112 } else {
2113 db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
2114 S_028004_SAMPLE_RATE(sample_rate);
2115 }
2116 }
2117
2118 radeon_set_context_reg(cmd_buffer->cs, R_028004_DB_COUNT_CONTROL, db_count_control);
2119
2120 cmd_buffer->state.context_roll_without_scissor_emitted = true;
2121 }
2122
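/**
 * Emit the dirty dynamic states that are actually used by the bound
 * pipeline.
 */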
2123 static void
2124 radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer)
2125 {
2126 uint32_t states = cmd_buffer->state.dirty & cmd_buffer->state.emitted_pipeline->graphics.needed_dynamic_state;
2127
2128 if (states & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
2129 radv_emit_viewport(cmd_buffer);
2130
2131 if (states & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR | RADV_CMD_DIRTY_DYNAMIC_VIEWPORT) &&
2132 !cmd_buffer->device->physical_device->rad_info.has_gfx9_scissor_bug)
2133 radv_emit_scissor(cmd_buffer);
2134
2135 if (states & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)
2136 radv_emit_line_width(cmd_buffer);
2137
2138 if (states & RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS)
2139 radv_emit_blend_constants(cmd_buffer);
2140
2141 if (states & (RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE |
2142 RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
2143 RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK))
2144 radv_emit_stencil(cmd_buffer);
2145
2146 if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS)
2147 radv_emit_depth_bounds(cmd_buffer);
2148
2149 if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)
2150 radv_emit_depth_bias(cmd_buffer);
2151
2152 if (states & RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE)
2153 radv_emit_discard_rectangle(cmd_buffer);
2154
2155 if (states & RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS)
2156 radv_emit_sample_locations(cmd_buffer);
2157
2158 cmd_buffer->state.dirty &= ~states;
2159 }
2160
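/**
 * Upload the contents of the push descriptor set to the upload BO and
 * record its new GPU address.
 */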
2161 static void
2162 radv_flush_push_descriptors(struct radv_cmd_buffer *cmd_buffer,
2163 VkPipelineBindPoint bind_point)
2164 {
2165 struct radv_descriptor_state *descriptors_state =
2166 radv_get_descriptors_state(cmd_buffer, bind_point);
2167 struct radv_descriptor_set *set = &descriptors_state->push_set.set;
2168 unsigned bo_offset;
2169
2170 if (!radv_cmd_buffer_upload_data(cmd_buffer, set->size, 32,
2171 set->mapped_ptr,
2172 &bo_offset))
2173 return;
2174
2175 set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2176 set->va += bo_offset;
2177 }
2178
2179 static void
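/**
 * Upload a table with the lower 32 bits of the GPU address of every
 * bound descriptor set and point the stages that need indirect
 * descriptor sets at it.
 */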
2180 radv_flush_indirect_descriptor_sets(struct radv_cmd_buffer *cmd_buffer,
2181 VkPipelineBindPoint bind_point)
2182 {
2183 struct radv_descriptor_state *descriptors_state =
2184 radv_get_descriptors_state(cmd_buffer, bind_point);
2185 uint32_t size = MAX_SETS * 4;
2186 uint32_t offset;
2187 void *ptr;
2188
2189 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size,
2190 256, &offset, &ptr))
2191 return;
2192
2193 for (unsigned i = 0; i < MAX_SETS; i++) {
2194 uint32_t *uptr = ((uint32_t *)ptr) + i;
2195 uint64_t set_va = 0;
2196 struct radv_descriptor_set *set = descriptors_state->sets[i];
2197 if (descriptors_state->valid & (1u << i))
2198 set_va = set->va;
2199 uptr[0] = set_va & 0xffffffff;
2200 }
2201
2202 uint64_t va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2203 va += offset;
2204
2205 if (cmd_buffer->state.pipeline) {
2206 if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX])
2207 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
2208 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
2209
2210 if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_FRAGMENT])
2211 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_FRAGMENT,
2212 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
2213
2214 if (radv_pipeline_has_gs(cmd_buffer->state.pipeline))
2215 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
2216 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
2217
2218 if (radv_pipeline_has_tess(cmd_buffer->state.pipeline))
2219 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_TESS_CTRL,
2220 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
2221
2222 if (radv_pipeline_has_tess(cmd_buffer->state.pipeline))
2223 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_TESS_EVAL,
2224 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
2225 }
2226
2227 if (cmd_buffer->state.compute_pipeline)
2228 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.compute_pipeline, MESA_SHADER_COMPUTE,
2229 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
2230 }
2231
2232 static void
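/**
 * Emit the user SGPR pointers for all dirty descriptor sets of the
 * given shader stages.
 */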
2233 radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer,
2234 VkShaderStageFlags stages)
2235 {
2236 VkPipelineBindPoint bind_point = stages & VK_SHADER_STAGE_COMPUTE_BIT ?
2237 VK_PIPELINE_BIND_POINT_COMPUTE :
2238 VK_PIPELINE_BIND_POINT_GRAPHICS;
2239 struct radv_descriptor_state *descriptors_state =
2240 radv_get_descriptors_state(cmd_buffer, bind_point);
2241 struct radv_cmd_state *state = &cmd_buffer->state;
2242 bool flush_indirect_descriptors;
2243
2244 if (!descriptors_state->dirty)
2245 return;
2246
2247 if (descriptors_state->push_dirty)
2248 radv_flush_push_descriptors(cmd_buffer, bind_point);
2249
2250 flush_indirect_descriptors =
2251 (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS &&
2252 state->pipeline && state->pipeline->need_indirect_descriptor_sets) ||
2253 (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE &&
2254 state->compute_pipeline && state->compute_pipeline->need_indirect_descriptor_sets);
2255
2256 if (flush_indirect_descriptors)
2257 radv_flush_indirect_descriptor_sets(cmd_buffer, bind_point);
2258
2259 ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
2260 cmd_buffer->cs,
2261 MAX_SETS * MESA_SHADER_STAGES * 4);
2262
2263 if (cmd_buffer->state.pipeline) {
2264 radv_foreach_stage(stage, stages) {
2265 if (!cmd_buffer->state.pipeline->shaders[stage])
2266 continue;
2267
2268 radv_emit_descriptor_pointers(cmd_buffer,
2269 cmd_buffer->state.pipeline,
2270 descriptors_state, stage);
2271 }
2272 }
2273
2274 if (cmd_buffer->state.compute_pipeline &&
2275 (stages & VK_SHADER_STAGE_COMPUTE_BIT)) {
2276 radv_emit_descriptor_pointers(cmd_buffer,
2277 cmd_buffer->state.compute_pipeline,
2278 descriptors_state,
2279 MESA_SHADER_COMPUTE);
2280 }
2281
2282 descriptors_state->dirty = 0;
2283 descriptors_state->push_dirty = false;
2284
2285 assert(cmd_buffer->cs->cdw <= cdw_max);
2286
2287 if (unlikely(cmd_buffer->device->trace_bo))
2288 radv_save_descriptors(cmd_buffer, bind_point);
2289 }
2290
2291 static void
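/**
 * Emit the inline push constants and, for stages that still load them
 * from memory, upload the push constants and dynamic buffer descriptors
 * and emit their address.
 */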
2292 radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
2293 VkShaderStageFlags stages)
2294 {
2295 struct radv_pipeline *pipeline = stages & VK_SHADER_STAGE_COMPUTE_BIT
2296 ? cmd_buffer->state.compute_pipeline
2297 : cmd_buffer->state.pipeline;
2298 VkPipelineBindPoint bind_point = stages & VK_SHADER_STAGE_COMPUTE_BIT ?
2299 VK_PIPELINE_BIND_POINT_COMPUTE :
2300 VK_PIPELINE_BIND_POINT_GRAPHICS;
2301 struct radv_descriptor_state *descriptors_state =
2302 radv_get_descriptors_state(cmd_buffer, bind_point);
2303 struct radv_pipeline_layout *layout = pipeline->layout;
2304 struct radv_shader_variant *shader, *prev_shader;
2305 bool need_push_constants = false;
2306 unsigned offset;
2307 void *ptr;
2308 uint64_t va;
2309
2310 stages &= cmd_buffer->push_constant_stages;
2311 if (!stages ||
2312 (!layout->push_constant_size && !layout->dynamic_offset_count))
2313 return;
2314
2315 radv_foreach_stage(stage, stages) {
2316 shader = radv_get_shader(pipeline, stage);
2317 if (!shader)
2318 continue;
2319
2320 need_push_constants |= shader->info.loads_push_constants;
2321 need_push_constants |= shader->info.loads_dynamic_offsets;
2322
2323 uint8_t base = shader->info.base_inline_push_consts;
2324 uint8_t count = shader->info.num_inline_push_consts;
2325
2326 radv_emit_inline_push_consts(cmd_buffer, pipeline, stage,
2327 AC_UD_INLINE_PUSH_CONSTANTS,
2328 count,
2329 (uint32_t *)&cmd_buffer->push_constants[base * 4]);
2330 }
2331
2332 if (need_push_constants) {
2333 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
2334 16 * layout->dynamic_offset_count,
2335 256, &offset, &ptr))
2336 return;
2337
2338 memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
2339 memcpy((char*)ptr + layout->push_constant_size,
2340 descriptors_state->dynamic_buffers,
2341 16 * layout->dynamic_offset_count);
2342
2343 va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2344 va += offset;
2345
2346 ASSERTED unsigned cdw_max =
2347 radeon_check_space(cmd_buffer->device->ws,
2348 cmd_buffer->cs, MESA_SHADER_STAGES * 4);
2349
2350 prev_shader = NULL;
2351 radv_foreach_stage(stage, stages) {
2352 shader = radv_get_shader(pipeline, stage);
2353
2354 /* Avoid redundantly emitting the address for merged stages. */
2355 if (shader && shader != prev_shader) {
2356 radv_emit_userdata_address(cmd_buffer, pipeline, stage,
2357 AC_UD_PUSH_CONSTANTS, va);
2358
2359 prev_shader = shader;
2360 }
2361 }
2362 assert(cmd_buffer->cs->cdw <= cdw_max);
2363 }
2364
2365 cmd_buffer->push_constant_stages &= ~stages;
2366 }
2367
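/**
 * Rebuild the vertex buffer descriptors in the upload BO and emit their
 * address when the pipeline or the bound vertex buffers changed.
 */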
2368 static void
2369 radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer,
2370 bool pipeline_is_dirty)
2371 {
2372 if ((pipeline_is_dirty ||
2373 (cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) &&
2374 cmd_buffer->state.pipeline->num_vertex_bindings &&
2375 radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.vs.has_vertex_buffers) {
2376 struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements;
2377 unsigned vb_offset;
2378 void *vb_ptr;
2379 uint32_t i = 0;
2380 uint32_t count = cmd_buffer->state.pipeline->num_vertex_bindings;
2381 uint64_t va;
2382
2383 /* allocate some descriptor state for vertex buffers */
2384 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, count * 16, 256,
2385 &vb_offset, &vb_ptr))
2386 return;
2387
2388 for (i = 0; i < count; i++) {
2389 uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
2390 uint32_t offset;
2391 struct radv_buffer *buffer = cmd_buffer->vertex_bindings[i].buffer;
2392 uint32_t stride = cmd_buffer->state.pipeline->binding_stride[i];
2393
2394 if (!buffer)
2395 continue;
2396
2397 va = radv_buffer_get_va(buffer->bo);
2398
2399 offset = cmd_buffer->vertex_bindings[i].offset;
2400 va += offset + buffer->offset;
2401 desc[0] = va;
2402 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
2403 if (cmd_buffer->device->physical_device->rad_info.chip_class <= GFX7 && stride)
2404 desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1;
2405 else
2406 desc[2] = buffer->size - offset;
2407 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
2408 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
2409 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
2410 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
2411
2412 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
2413 desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_UINT) |
2414 S_008F0C_OOB_SELECT(1) |
2415 S_008F0C_RESOURCE_LEVEL(1);
2416 } else {
2417 desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) |
2418 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
2419 }
2420 }
2421
2422 va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2423 va += vb_offset;
2424
2425 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
2426 AC_UD_VS_VERTEX_BUFFERS, va);
2427
2428 cmd_buffer->state.vb_va = va;
2429 cmd_buffer->state.vb_size = count * 16;
2430 cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_VBO_DESCRIPTORS;
2431 }
2432 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_VERTEX_BUFFER;
2433 }
2434
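/**
 * Emit the streamout buffer descriptor table address for every stage
 * that uses it, including the GS copy shader.
 */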
2435 static void
2436 radv_emit_streamout_buffers(struct radv_cmd_buffer *cmd_buffer, uint64_t va)
2437 {
2438 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
2439 struct radv_userdata_info *loc;
2440 uint32_t base_reg;
2441
2442 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
2443 if (!radv_get_shader(pipeline, stage))
2444 continue;
2445
2446 loc = radv_lookup_user_sgpr(pipeline, stage,
2447 AC_UD_STREAMOUT_BUFFERS);
2448 if (loc->sgpr_idx == -1)
2449 continue;
2450
2451 base_reg = pipeline->user_data_0[stage];
2452
2453 radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
2454 base_reg + loc->sgpr_idx * 4, va, false);
2455 }
2456
2457 if (radv_pipeline_has_gs_copy_shader(pipeline)) {
2458 loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_STREAMOUT_BUFFERS];
2459 if (loc->sgpr_idx != -1) {
2460 base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
2461
2462 radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
2463 base_reg + loc->sgpr_idx * 4, va, false);
2464 }
2465 }
2466 }
2467
2468 static void
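/**
 * Build the buffer descriptors for the bound transform feedback buffers
 * and emit their address.
 */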
2469 radv_flush_streamout_descriptors(struct radv_cmd_buffer *cmd_buffer)
2470 {
2471 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_STREAMOUT_BUFFER) {
2472 struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
2473 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
2474 unsigned so_offset;
2475 void *so_ptr;
2476 uint64_t va;
2477
2478 /* Allocate some descriptor state for streamout buffers. */
2479 if (!radv_cmd_buffer_upload_alloc(cmd_buffer,
2480 MAX_SO_BUFFERS * 16, 256,
2481 &so_offset, &so_ptr))
2482 return;
2483
2484 for (uint32_t i = 0; i < MAX_SO_BUFFERS; i++) {
2485 struct radv_buffer *buffer = sb[i].buffer;
2486 uint32_t *desc = &((uint32_t *)so_ptr)[i * 4];
2487
2488 if (!(so->enabled_mask & (1 << i)))
2489 continue;
2490
2491 va = radv_buffer_get_va(buffer->bo) + buffer->offset;
2492
2493 va += sb[i].offset;
2494
2495 /* Set the descriptor.
2496 *
2497 * On GFX8, the format must be non-INVALID, otherwise
2498 * the buffer will be considered not bound and store
2499 * instructions will be no-ops.
2500 */
2501 uint32_t size = 0xffffffff;
2502
2503 /* Compute the correct buffer size for NGG streamout
2504 * because it's used to determine the max emit per
2505 * buffer.
2506 */
2507 if (cmd_buffer->device->physical_device->use_ngg_streamout)
2508 size = buffer->size - sb[i].offset;
2509
2510 desc[0] = va;
2511 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
2512 desc[2] = size;
2513 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
2514 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
2515 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
2516 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
2517
2518 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
2519 desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
2520 S_008F0C_OOB_SELECT(3) |
2521 S_008F0C_RESOURCE_LEVEL(1);
2522 } else {
2523 desc[3] |= S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
2524 }
2525 }
2526
2527 va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2528 va += so_offset;
2529
2530 radv_emit_streamout_buffers(cmd_buffer, va);
2531 }
2532
2533 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_STREAMOUT_BUFFER;
2534 }
2535
2536 static void
2537 radv_upload_graphics_shader_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty)
2538 {
2539 radv_flush_vertex_descriptors(cmd_buffer, pipeline_is_dirty);
2540 radv_flush_streamout_descriptors(cmd_buffer);
2541 radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
2542 radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
2543 }
2544
2545 struct radv_draw_info {
2546 /**
2547 * Number of vertices.
2548 */
2549 uint32_t count;
2550
2551 /**
2552 * Index of the first vertex.
2553 */
2554 int32_t vertex_offset;
2555
2556 /**
2557 * First instance id.
2558 */
2559 uint32_t first_instance;
2560
2561 /**
2562 * Number of instances.
2563 */
2564 uint32_t instance_count;
2565
2566 /**
2567 * First index (indexed draws only).
2568 */
2569 uint32_t first_index;
2570
2571 /**
2572 * Whether it's an indexed draw.
2573 */
2574 bool indexed;
2575
2576 /**
2577 * Indirect draw parameters resource.
2578 */
2579 struct radv_buffer *indirect;
2580 uint64_t indirect_offset;
2581 uint32_t stride;
2582
2583 /**
2584 * Draw count parameters resource.
2585 */
2586 struct radv_buffer *count_buffer;
2587 uint64_t count_buffer_offset;
2588
2589 /**
2590 * Stream output parameters resource.
2591 */
2592 struct radv_buffer *strmout_buffer;
2593 uint64_t strmout_buffer_offset;
2594 };
2595
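/**
 * Return the primitive restart index matching the current index type.
 */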
2596 static uint32_t
2597 radv_get_primitive_reset_index(struct radv_cmd_buffer *cmd_buffer)
2598 {
2599 switch (cmd_buffer->state.index_type) {
2600 case V_028A7C_VGT_INDEX_8:
2601 return 0xffu;
2602 case V_028A7C_VGT_INDEX_16:
2603 return 0xffffu;
2604 case V_028A7C_VGT_INDEX_32:
2605 return 0xffffffffu;
2606 default:
2607 unreachable("invalid index type");
2608 }
2609 }
2610
2611 static void
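/**
 * Emit IA_MULTI_VGT_PARAM when its value changes for this draw.
 */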
2612 si_emit_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer,
2613 bool instanced_draw, bool indirect_draw,
2614 bool count_from_stream_output,
2615 uint32_t draw_vertex_count)
2616 {
2617 struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
2618 struct radv_cmd_state *state = &cmd_buffer->state;
2619 struct radeon_cmdbuf *cs = cmd_buffer->cs;
2620 unsigned ia_multi_vgt_param;
2621
2622 ia_multi_vgt_param =
2623 si_get_ia_multi_vgt_param(cmd_buffer, instanced_draw,
2624 indirect_draw,
2625 count_from_stream_output,
2626 draw_vertex_count);
2627
2628 if (state->last_ia_multi_vgt_param != ia_multi_vgt_param) {
2629 if (info->chip_class == GFX9) {
2630 radeon_set_uconfig_reg_idx(cmd_buffer->device->physical_device,
2631 cs,
2632 R_030960_IA_MULTI_VGT_PARAM,
2633 4, ia_multi_vgt_param);
2634 } else if (info->chip_class >= GFX7) {
2635 radeon_set_context_reg_idx(cs,
2636 R_028AA8_IA_MULTI_VGT_PARAM,
2637 1, ia_multi_vgt_param);
2638 } else {
2639 radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM,
2640 ia_multi_vgt_param);
2641 }
2642 state->last_ia_multi_vgt_param = ia_multi_vgt_param;
2643 }
2644 }
2645
2646 static void
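/**
 * Emit the per-draw state: IA_MULTI_VGT_PARAM, primitive restart and,
 * for draws sourced from a streamout buffer, the vertex stride and the
 * buffer filled size.
 */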
2647 radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer,
2648 const struct radv_draw_info *draw_info)
2649 {
2650 struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
2651 struct radv_cmd_state *state = &cmd_buffer->state;
2652 struct radeon_cmdbuf *cs = cmd_buffer->cs;
2653 int32_t primitive_reset_en;
2654
2655 /* Draw state. */
2656 if (info->chip_class < GFX10) {
2657 si_emit_ia_multi_vgt_param(cmd_buffer, draw_info->instance_count > 1,
2658 draw_info->indirect,
2659 !!draw_info->strmout_buffer,
2660 draw_info->indirect ? 0 : draw_info->count);
2661 }
2662
2663 /* Primitive restart. */
2664 primitive_reset_en =
2665 draw_info->indexed && state->pipeline->graphics.prim_restart_enable;
2666
2667 if (primitive_reset_en != state->last_primitive_reset_en) {
2668 state->last_primitive_reset_en = primitive_reset_en;
2669 if (info->chip_class >= GFX9) {
2670 radeon_set_uconfig_reg(cs,
2671 R_03092C_VGT_MULTI_PRIM_IB_RESET_EN,
2672 primitive_reset_en);
2673 } else {
2674 radeon_set_context_reg(cs,
2675 R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
2676 primitive_reset_en);
2677 }
2678 }
2679
2680 if (primitive_reset_en) {
2681 uint32_t primitive_reset_index =
2682 radv_get_primitive_reset_index(cmd_buffer);
2683
2684 if (primitive_reset_index != state->last_primitive_reset_index) {
2685 radeon_set_context_reg(cs,
2686 R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
2687 primitive_reset_index);
2688 state->last_primitive_reset_index = primitive_reset_index;
2689 }
2690 }
2691
2692 if (draw_info->strmout_buffer) {
2693 uint64_t va = radv_buffer_get_va(draw_info->strmout_buffer->bo);
2694
2695 va += draw_info->strmout_buffer->offset +
2696 draw_info->strmout_buffer_offset;
2697
2698 radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
2699 draw_info->stride);
2700
2701 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
2702 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
2703 COPY_DATA_DST_SEL(COPY_DATA_REG) |
2704 COPY_DATA_WR_CONFIRM);
2705 radeon_emit(cs, va);
2706 radeon_emit(cs, va >> 32);
2707 radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
2708 radeon_emit(cs, 0); /* unused */
2709
2710 radv_cs_add_buffer(cmd_buffer->device->ws, cs, draw_info->strmout_buffer->bo);
2711 }
2712 }
2713
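/**
 * Translate the source pipeline stages of a barrier into the partial
 * flushes that need to be waited on.
 */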
2714 static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer,
2715 VkPipelineStageFlags src_stage_mask)
2716 {
2717 if (src_stage_mask & (VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
2718 VK_PIPELINE_STAGE_TRANSFER_BIT |
2719 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
2720 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
2721 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
2722 }
2723
2724 if (src_stage_mask & (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
2725 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
2726 VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
2727 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
2728 VK_PIPELINE_STAGE_TRANSFER_BIT |
2729 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
2730 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT |
2731 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
2732 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
2733 } else if (src_stage_mask & (VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
2734 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
2735 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
2736 VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
2737 VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
2738 VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
2739 VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT)) {
2740 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
2741 }
2742 }
2743
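/**
 * Compute the cache flushes needed to make writes of the given source
 * access types visible.
 */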
2744 static enum radv_cmd_flush_bits
2745 radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer,
2746 VkAccessFlags src_flags,
2747 struct radv_image *image)
2748 {
2749 bool flush_CB_meta = true, flush_DB_meta = true;
2750 enum radv_cmd_flush_bits flush_bits = 0;
2751 uint32_t b;
2752
2753 if (image) {
2754 if (!radv_image_has_CB_metadata(image))
2755 flush_CB_meta = false;
2756 if (!radv_image_has_htile(image))
2757 flush_DB_meta = false;
2758 }
2759
2760 for_each_bit(b, src_flags) {
2761 switch ((VkAccessFlagBits)(1 << b)) {
2762 case VK_ACCESS_SHADER_WRITE_BIT:
2763 case VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT:
2764 case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT:
2765 flush_bits |= RADV_CMD_FLAG_WB_L2;
2766 break;
2767 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
2768 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
2769 if (flush_CB_meta)
2770 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
2771 break;
2772 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
2773 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
2774 if (flush_DB_meta)
2775 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
2776 break;
2777 case VK_ACCESS_TRANSFER_WRITE_BIT:
2778 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
2779 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
2780 RADV_CMD_FLAG_INV_L2;
2781
2782 if (flush_CB_meta)
2783 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
2784 if (flush_DB_meta)
2785 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
2786 break;
2787 default:
2788 break;
2789 }
2790 }
2791 return flush_bits;
2792 }
2793
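/**
 * Compute the cache invalidations needed before data can be read with
 * the given destination access types.
 */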
2794 static enum radv_cmd_flush_bits
2795 radv_dst_access_flush(struct radv_cmd_buffer *cmd_buffer,
2796 VkAccessFlags dst_flags,
2797 struct radv_image *image)
2798 {
2799 bool flush_CB_meta = true, flush_DB_meta = true;
2800 enum radv_cmd_flush_bits flush_bits = 0;
2801 bool flush_CB = true, flush_DB = true;
2802 bool image_is_coherent = false;
2803 uint32_t b;
2804
2805 if (image) {
2806 if (!(image->usage & VK_IMAGE_USAGE_STORAGE_BIT)) {
2807 flush_CB = false;
2808 flush_DB = false;
2809 }
2810
2811 if (!radv_image_has_CB_metadata(image))
2812 flush_CB_meta = false;
2813 if (!radv_image_has_htile(image))
2814 flush_DB_meta = false;
2815
2816 /* TODO: implement shader coherent for GFX10 */
2817
2818 if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
2819 if (image->info.samples == 1 &&
2820 (image->usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
2821 VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) &&
2822 !vk_format_is_stencil(image->vk_format)) {
2823 /* Single-sample color and single-sample depth
2824 * (not stencil) are coherent with shaders on
2825 * GFX9.
2826 */
2827 image_is_coherent = true;
2828 }
2829 }
2830 }
2831
2832 for_each_bit(b, dst_flags) {
2833 switch ((VkAccessFlagBits)(1 << b)) {
2834 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
2835 case VK_ACCESS_INDEX_READ_BIT:
2836 case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT:
2837 break;
2838 case VK_ACCESS_UNIFORM_READ_BIT:
2839 flush_bits |= RADV_CMD_FLAG_INV_VCACHE | RADV_CMD_FLAG_INV_SCACHE;
2840 break;
2841 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
2842 case VK_ACCESS_TRANSFER_READ_BIT:
2843 case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
2844 flush_bits |= RADV_CMD_FLAG_INV_VCACHE |
2845 RADV_CMD_FLAG_INV_L2;
2846 break;
2847 case VK_ACCESS_SHADER_READ_BIT:
2848 flush_bits |= RADV_CMD_FLAG_INV_VCACHE;
2849 /* Unlike LLVM, ACO uses SMEM for SSBOs and we have to
2850 * invalidate the scalar cache. */
2851 if (cmd_buffer->device->physical_device->use_aco)
2852 flush_bits |= RADV_CMD_FLAG_INV_SCACHE;
2853
2854 if (!image_is_coherent)
2855 flush_bits |= RADV_CMD_FLAG_INV_L2;
2856 break;
2857 case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
2858 if (flush_CB)
2859 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
2860 if (flush_CB_meta)
2861 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
2862 break;
2863 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT:
2864 if (flush_DB)
2865 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
2866 if (flush_DB_meta)
2867 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
2868 break;
2869 default:
2870 break;
2871 }
2872 }
2873 return flush_bits;
2874 }
2875
2876 void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer,
2877 const struct radv_subpass_barrier *barrier)
2878 {
2879 cmd_buffer->state.flush_bits |= radv_src_access_flush(cmd_buffer, barrier->src_access_mask,
2880 NULL);
2881 radv_stage_flush(cmd_buffer, barrier->src_stage_mask);
2882 cmd_buffer->state.flush_bits |= radv_dst_access_flush(cmd_buffer, barrier->dst_access_mask,
2883 NULL);
2884 }
2885
2886 uint32_t
2887 radv_get_subpass_id(struct radv_cmd_buffer *cmd_buffer)
2888 {
2889 struct radv_cmd_state *state = &cmd_buffer->state;
2890 uint32_t subpass_id = state->subpass - state->pass->subpasses;
2891
2892 /* The id of this subpass shouldn't exceed the number of subpasses in
2893 * this render pass minus 1.
2894 */
2895 assert(subpass_id < state->pass->subpass_count);
2896 return subpass_id;
2897 }
2898
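/**
 * Return the sample locations to use for the layout transition of the
 * given attachment, or NULL to use the default hardware locations.
 */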
2899 static struct radv_sample_locations_state *
2900 radv_get_attachment_sample_locations(struct radv_cmd_buffer *cmd_buffer,
2901 uint32_t att_idx,
2902 bool begin_subpass)
2903 {
2904 struct radv_cmd_state *state = &cmd_buffer->state;
2905 uint32_t subpass_id = radv_get_subpass_id(cmd_buffer);
2906 struct radv_image_view *view = state->attachments[att_idx].iview;
2907
2908 if (view->image->info.samples == 1)
2909 return NULL;
2910
2911 if (state->pass->attachments[att_idx].first_subpass_idx == subpass_id) {
2912 /* Return the initial sample locations if this is the initial
2913 * layout transition of the given subpass attachemnt.
2914 		 * layout transition of the given subpass attachment.
2915 if (state->attachments[att_idx].sample_location.count > 0)
2916 return &state->attachments[att_idx].sample_location;
2917 } else {
2918 /* Otherwise return the subpass sample locations if defined. */
2919 if (state->subpass_sample_locs) {
2920 /* Because the driver sets the current subpass before
2921 * initial layout transitions, we should use the sample
2922 * locations from the previous subpass to avoid an
2923 * off-by-one problem. Otherwise, use the sample
2924 * locations for the current subpass for final layout
2925 * transitions.
2926 */
2927 if (begin_subpass)
2928 subpass_id--;
2929
2930 for (uint32_t i = 0; i < state->num_subpass_sample_locs; i++) {
2931 if (state->subpass_sample_locs[i].subpass_idx == subpass_id)
2932 return &state->subpass_sample_locs[i].sample_location;
2933 }
2934 }
2935 }
2936
2937 return NULL;
2938 }
2939
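/**
 * Perform the layout transition required when a subpass begins or ends
 * its use of the given attachment.
 */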
2940 static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer,
2941 struct radv_subpass_attachment att,
2942 bool begin_subpass)
2943 {
2944 unsigned idx = att.attachment;
2945 struct radv_image_view *view = cmd_buffer->state.attachments[idx].iview;
2946 struct radv_sample_locations_state *sample_locs;
2947 VkImageSubresourceRange range;
2948 range.aspectMask = 0;
2949 range.baseMipLevel = view->base_mip;
2950 range.levelCount = 1;
2951 range.baseArrayLayer = view->base_layer;
2952 range.layerCount = cmd_buffer->state.framebuffer->layers;
2953
2954 if (cmd_buffer->state.subpass->view_mask) {
2955 /* If the current subpass uses multiview, the driver might have
2956 * performed a fast color/depth clear to the whole image
2957 * (including all layers). To make sure the driver will
2958 * decompress the image correctly (if needed), we have to
2959 * account for the "real" number of layers. If the view mask is
2960 * sparse, this will decompress more layers than needed.
2961 */
2962 range.layerCount = util_last_bit(cmd_buffer->state.subpass->view_mask);
2963 }
2964
2965 /* Get the subpass sample locations for the given attachment, if NULL
2966 * is returned the driver will use the default HW locations.
2967 */
2968 sample_locs = radv_get_attachment_sample_locations(cmd_buffer, idx,
2969 begin_subpass);
2970
2971 radv_handle_image_transition(cmd_buffer,
2972 view->image,
2973 cmd_buffer->state.attachments[idx].current_layout,
2974 cmd_buffer->state.attachments[idx].current_in_render_loop,
2975 att.layout, att.in_render_loop,
2976 0, 0, &range, sample_locs);
2977
2978 cmd_buffer->state.attachments[idx].current_layout = att.layout;
2979 cmd_buffer->state.attachments[idx].current_in_render_loop = att.in_render_loop;
2982 }
2983
2984 void
2985 radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer,
2986 const struct radv_subpass *subpass)
2987 {
2988 cmd_buffer->state.subpass = subpass;
2989
2990 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER;
2991 }
2992
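/**
 * Record the custom sample locations provided through
 * VkRenderPassSampleLocationsBeginInfoEXT for the attachments and
 * subpasses of this render pass instance.
 */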
2993 static VkResult
2994 radv_cmd_state_setup_sample_locations(struct radv_cmd_buffer *cmd_buffer,
2995 struct radv_render_pass *pass,
2996 const VkRenderPassBeginInfo *info)
2997 {
2998 const struct VkRenderPassSampleLocationsBeginInfoEXT *sample_locs =
2999 vk_find_struct_const(info->pNext,
3000 RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT);
3001 struct radv_cmd_state *state = &cmd_buffer->state;
3002
3003 if (!sample_locs) {
3004 state->subpass_sample_locs = NULL;
3005 return VK_SUCCESS;
3006 }
3007
3008 for (uint32_t i = 0; i < sample_locs->attachmentInitialSampleLocationsCount; i++) {
3009 const VkAttachmentSampleLocationsEXT *att_sample_locs =
3010 &sample_locs->pAttachmentInitialSampleLocations[i];
3011 uint32_t att_idx = att_sample_locs->attachmentIndex;
3012 struct radv_image *image = cmd_buffer->state.attachments[att_idx].iview->image;
3013
3014 assert(vk_format_is_depth_or_stencil(image->vk_format));
3015
3016 /* From the Vulkan spec 1.1.108:
3017 *
3018 * "If the image referenced by the framebuffer attachment at
3019 * index attachmentIndex was not created with
3020 * VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT
3021 * then the values specified in sampleLocationsInfo are
3022 * ignored."
3023 */
3024 if (!(image->flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT))
3025 continue;
3026
3027 const VkSampleLocationsInfoEXT *sample_locs_info =
3028 &att_sample_locs->sampleLocationsInfo;
3029
3030 state->attachments[att_idx].sample_location.per_pixel =
3031 sample_locs_info->sampleLocationsPerPixel;
3032 state->attachments[att_idx].sample_location.grid_size =
3033 sample_locs_info->sampleLocationGridSize;
3034 state->attachments[att_idx].sample_location.count =
3035 sample_locs_info->sampleLocationsCount;
3036 typed_memcpy(&state->attachments[att_idx].sample_location.locations[0],
3037 sample_locs_info->pSampleLocations,
3038 sample_locs_info->sampleLocationsCount);
3039 }
3040
3041 state->subpass_sample_locs = vk_alloc(&cmd_buffer->pool->alloc,
3042 sample_locs->postSubpassSampleLocationsCount *
3043 sizeof(state->subpass_sample_locs[0]),
3044 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3045 if (state->subpass_sample_locs == NULL) {
3046 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3047 return cmd_buffer->record_result;
3048 }
3049
3050 state->num_subpass_sample_locs = sample_locs->postSubpassSampleLocationsCount;
3051
3052 for (uint32_t i = 0; i < sample_locs->postSubpassSampleLocationsCount; i++) {
3053 const VkSubpassSampleLocationsEXT *subpass_sample_locs_info =
3054 &sample_locs->pPostSubpassSampleLocations[i];
3055 const VkSampleLocationsInfoEXT *sample_locs_info =
3056 &subpass_sample_locs_info->sampleLocationsInfo;
3057
3058 state->subpass_sample_locs[i].subpass_idx =
3059 subpass_sample_locs_info->subpassIndex;
3060 state->subpass_sample_locs[i].sample_location.per_pixel =
3061 sample_locs_info->sampleLocationsPerPixel;
3062 state->subpass_sample_locs[i].sample_location.grid_size =
3063 sample_locs_info->sampleLocationGridSize;
3064 state->subpass_sample_locs[i].sample_location.count =
3065 sample_locs_info->sampleLocationsCount;
3066 typed_memcpy(&state->subpass_sample_locs[i].sample_location.locations[0],
3067 sample_locs_info->pSampleLocations,
3068 sample_locs_info->sampleLocationsCount);
3069 }
3070
3071 return VK_SUCCESS;
3072 }
3073
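/**
 * Initialize the per-attachment state (pending clears, initial layouts,
 * image views and CB/DS surface registers) for a render pass instance.
 */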
3074 static VkResult
3075 radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer,
3076 struct radv_render_pass *pass,
3077 const VkRenderPassBeginInfo *info)
3078 {
3079 struct radv_cmd_state *state = &cmd_buffer->state;
3080 const struct VkRenderPassAttachmentBeginInfoKHR *attachment_info = NULL;
3081
3082 if (info) {
3083 attachment_info = vk_find_struct_const(info->pNext,
3084 RENDER_PASS_ATTACHMENT_BEGIN_INFO_KHR);
3085 }
3086
3087
3089 state->attachments = NULL;
3090 return VK_SUCCESS;
3091 }
3092
3093 state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
3094 pass->attachment_count *
3095 sizeof(state->attachments[0]),
3096 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3097 if (state->attachments == NULL) {
3098 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3099 return cmd_buffer->record_result;
3100 }
3101
3102 for (uint32_t i = 0; i < pass->attachment_count; ++i) {
3103 struct radv_render_pass_attachment *att = &pass->attachments[i];
3104 VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
3105 VkImageAspectFlags clear_aspects = 0;
3106
3107 if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
3108 /* color attachment */
3109 if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
3110 clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
3111 }
3112 } else {
3113 /* depthstencil attachment */
3114 if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
3115 att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
3116 clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
3117 if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
3118 att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE)
3119 clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
3120 }
3121 if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
3122 att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
3123 clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
3124 }
3125 }
3126
3127 state->attachments[i].pending_clear_aspects = clear_aspects;
3128 state->attachments[i].cleared_views = 0;
3129 if (clear_aspects && info) {
3130 assert(info->clearValueCount > i);
3131 state->attachments[i].clear_value = info->pClearValues[i];
3132 }
3133
3134 state->attachments[i].current_layout = att->initial_layout;
3135 state->attachments[i].sample_location.count = 0;
3136
3137 struct radv_image_view *iview;
3138 if (attachment_info && attachment_info->attachmentCount > i) {
3139 iview = radv_image_view_from_handle(attachment_info->pAttachments[i]);
3140 } else {
3141 iview = state->framebuffer->attachments[i];
3142 }
3143
3144 state->attachments[i].iview = iview;
3145 if (iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
3146 radv_initialise_ds_surface(cmd_buffer->device, &state->attachments[i].ds, iview);
3147 } else {
3148 radv_initialise_color_surface(cmd_buffer->device, &state->attachments[i].cb, iview);
3149 }
3150 }
3151
3152 return VK_SUCCESS;
3153 }
3154
3155 VkResult radv_AllocateCommandBuffers(
3156 VkDevice _device,
3157 const VkCommandBufferAllocateInfo *pAllocateInfo,
3158 VkCommandBuffer *pCommandBuffers)
3159 {
3160 RADV_FROM_HANDLE(radv_device, device, _device);
3161 RADV_FROM_HANDLE(radv_cmd_pool, pool, pAllocateInfo->commandPool);
3162
3163 VkResult result = VK_SUCCESS;
3164 uint32_t i;
3165
3166 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
3167
3168 if (!list_empty(&pool->free_cmd_buffers)) {
3169 struct radv_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct radv_cmd_buffer, pool_link);
3170
3171 list_del(&cmd_buffer->pool_link);
3172 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
3173
3174 result = radv_reset_cmd_buffer(cmd_buffer);
3175 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
3176 cmd_buffer->level = pAllocateInfo->level;
3177
3178 pCommandBuffers[i] = radv_cmd_buffer_to_handle(cmd_buffer);
3179 } else {
3180 result = radv_create_cmd_buffer(device, pool, pAllocateInfo->level,
3181 &pCommandBuffers[i]);
3182 }
3183 if (result != VK_SUCCESS)
3184 break;
3185 }
3186
3187 if (result != VK_SUCCESS) {
3188 radv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
3189 i, pCommandBuffers);
3190
3191 /* From the Vulkan 1.0.66 spec:
3192 *
3193 * "vkAllocateCommandBuffers can be used to create multiple
3194 * command buffers. If the creation of any of those command
3195 * buffers fails, the implementation must destroy all
3196 * successfully created command buffer objects from this
3197 * command, set all entries of the pCommandBuffers array to
3198 * NULL and return the error."
3199 */
3200 memset(pCommandBuffers, 0,
3201 sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
3202 }
3203
3204 return result;
3205 }
3206
3207 void radv_FreeCommandBuffers(
3208 VkDevice device,
3209 VkCommandPool commandPool,
3210 uint32_t commandBufferCount,
3211 const VkCommandBuffer *pCommandBuffers)
3212 {
3213 for (uint32_t i = 0; i < commandBufferCount; i++) {
3214 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
3215
3216 if (cmd_buffer) {
3217 if (cmd_buffer->pool) {
3218 list_del(&cmd_buffer->pool_link);
3219 list_addtail(&cmd_buffer->pool_link, &cmd_buffer->pool->free_cmd_buffers);
3220 } else
3221 radv_cmd_buffer_destroy(cmd_buffer);
3222
3223 }
3224 }
3225 }
3226
3227 VkResult radv_ResetCommandBuffer(
3228 VkCommandBuffer commandBuffer,
3229 VkCommandBufferResetFlags flags)
3230 {
3231 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3232 return radv_reset_cmd_buffer(cmd_buffer);
3233 }
3234
3235 VkResult radv_BeginCommandBuffer(
3236 VkCommandBuffer commandBuffer,
3237 const VkCommandBufferBeginInfo *pBeginInfo)
3238 {
3239 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3240 VkResult result = VK_SUCCESS;
3241
3242 if (cmd_buffer->status != RADV_CMD_BUFFER_STATUS_INITIAL) {
3243 /* If the command buffer has already been reset with
3244 * vkResetCommandBuffer, there is no need to do it again.
3245 */
3246 result = radv_reset_cmd_buffer(cmd_buffer);
3247 if (result != VK_SUCCESS)
3248 return result;
3249 }
3250
3251 memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
3252 cmd_buffer->state.last_primitive_reset_en = -1;
3253 cmd_buffer->state.last_index_type = -1;
3254 cmd_buffer->state.last_num_instances = -1;
3255 cmd_buffer->state.last_vertex_offset = -1;
3256 cmd_buffer->state.last_first_instance = -1;
3257 cmd_buffer->state.predication_type = -1;
3258 cmd_buffer->usage_flags = pBeginInfo->flags;
3259
3260 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
3261 (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
3262 assert(pBeginInfo->pInheritanceInfo);
3263 cmd_buffer->state.framebuffer = radv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
3264 cmd_buffer->state.pass = radv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
3265
3266 struct radv_subpass *subpass =
3267 &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
3268
3269 if (cmd_buffer->state.framebuffer) {
3270 result = radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
3271 if (result != VK_SUCCESS)
3272 return result;
3273 }
3274
3275 radv_cmd_buffer_set_subpass(cmd_buffer, subpass);
3276 }
3277
3278 if (unlikely(cmd_buffer->device->trace_bo)) {
3279 struct radv_device *device = cmd_buffer->device;
3280
3281 radv_cs_add_buffer(device->ws, cmd_buffer->cs,
3282 device->trace_bo);
3283
3284 radv_cmd_buffer_trace_emit(cmd_buffer);
3285 }
3286
3287 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_RECORDING;
3288
3289 return result;
3290 }
3291
3292 void radv_CmdBindVertexBuffers(
3293 VkCommandBuffer commandBuffer,
3294 uint32_t firstBinding,
3295 uint32_t bindingCount,
3296 const VkBuffer* pBuffers,
3297 const VkDeviceSize* pOffsets)
3298 {
3299 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3300 struct radv_vertex_binding *vb = cmd_buffer->vertex_bindings;
3301 bool changed = false;
3302
3303 /* We have to defer setting up the vertex buffers since we need the
3304 * buffer stride from the pipeline. */
3305
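/* Illustrative sketch of what the deferred setup does at draw time
 * (names are approximate; the real code is in the vertex descriptor
 * flush path of this file): each binding gets a 4-dword buffer
 * descriptor roughly like
 *
 *   desc[0] = va;
 *   desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
 *   desc[2] = accessible size in bytes;
 *   desc[3] = dst_sel swizzle | data/num format bits;
 *
 * where stride comes from the currently bound pipeline.
 */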
3306 assert(firstBinding + bindingCount <= MAX_VBS);
3307 for (uint32_t i = 0; i < bindingCount; i++) {
3308 uint32_t idx = firstBinding + i;
3309
3310 if (!changed &&
3311 (vb[idx].buffer != radv_buffer_from_handle(pBuffers[i]) ||
3312 vb[idx].offset != pOffsets[i])) {
3313 changed = true;
3314 }
3315
3316 vb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
3317 vb[idx].offset = pOffsets[i];
3318
3319 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
3320 vb[idx].buffer->bo);
3321 }
3322
3323 if (!changed) {
3324 /* No state changes. */
3325 return;
3326 }
3327
3328 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_VERTEX_BUFFER;
3329 }
3330
3331 static uint32_t
3332 vk_to_index_type(VkIndexType type)
3333 {
3334 switch (type) {
3335 case VK_INDEX_TYPE_UINT8_EXT:
3336 return V_028A7C_VGT_INDEX_8;
3337 case VK_INDEX_TYPE_UINT16:
3338 return V_028A7C_VGT_INDEX_16;
3339 case VK_INDEX_TYPE_UINT32:
3340 return V_028A7C_VGT_INDEX_32;
3341 default:
3342 unreachable("invalid index type");
3343 }
3344 }
3345
3346 static uint32_t
3347 radv_get_vgt_index_size(uint32_t type)
3348 {
3349 switch (type) {
3350 case V_028A7C_VGT_INDEX_8:
3351 return 1;
3352 case V_028A7C_VGT_INDEX_16:
3353 return 2;
3354 case V_028A7C_VGT_INDEX_32:
3355 return 4;
3356 default:
3357 unreachable("invalid index type");
3358 }
3359 }
3360
3361 void radv_CmdBindIndexBuffer(
3362 VkCommandBuffer commandBuffer,
3363 VkBuffer buffer,
3364 VkDeviceSize offset,
3365 VkIndexType indexType)
3366 {
3367 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3368 RADV_FROM_HANDLE(radv_buffer, index_buffer, buffer);
3369
3370 if (cmd_buffer->state.index_buffer == index_buffer &&
3371 cmd_buffer->state.index_offset == offset &&
3372 cmd_buffer->state.index_type == indexType) {
3373 /* No state changes. */
3374 return;
3375 }
3376
3377 cmd_buffer->state.index_buffer = index_buffer;
3378 cmd_buffer->state.index_offset = offset;
3379 cmd_buffer->state.index_type = vk_to_index_type(indexType);
3380 cmd_buffer->state.index_va = radv_buffer_get_va(index_buffer->bo);
3381 cmd_buffer->state.index_va += index_buffer->offset + offset;
3382
3383 int index_size = radv_get_vgt_index_size(vk_to_index_type(indexType));
3384 cmd_buffer->state.max_index_count = (index_buffer->size - offset) / index_size;
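/* Worked example: a 1000-byte index buffer bound at offset 4 with
 * VK_INDEX_TYPE_UINT16 gives index_size = 2, so
 * max_index_count = (1000 - 4) / 2 = 498 indices.
 */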
3385 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
3386 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo);
3387 }
3388
3389
3390 static void
3391 radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
3392 VkPipelineBindPoint bind_point,
3393 struct radv_descriptor_set *set, unsigned idx)
3394 {
3395 struct radeon_winsys *ws = cmd_buffer->device->ws;
3396
3397 radv_set_descriptor_set(cmd_buffer, bind_point, set, idx);
3398
3399 assert(set);
3400 assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
3401
3402 if (!cmd_buffer->device->use_global_bo_list) {
3403 for (unsigned j = 0; j < set->layout->buffer_count; ++j)
3404 if (set->descriptors[j])
3405 radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j]);
3406 }
3407
3408 if (set->bo)
3409 radv_cs_add_buffer(ws, cmd_buffer->cs, set->bo);
3410 }
3411
3412 void radv_CmdBindDescriptorSets(
3413 VkCommandBuffer commandBuffer,
3414 VkPipelineBindPoint pipelineBindPoint,
3415 VkPipelineLayout _layout,
3416 uint32_t firstSet,
3417 uint32_t descriptorSetCount,
3418 const VkDescriptorSet* pDescriptorSets,
3419 uint32_t dynamicOffsetCount,
3420 const uint32_t* pDynamicOffsets)
3421 {
3422 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3423 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
3424 unsigned dyn_idx = 0;
3425
3426 const bool no_dynamic_bounds = cmd_buffer->device->instance->debug_flags & RADV_DEBUG_NO_DYNAMIC_BOUNDS;
3427 struct radv_descriptor_state *descriptors_state =
3428 radv_get_descriptors_state(cmd_buffer, pipelineBindPoint);
3429
3430 for (unsigned i = 0; i < descriptorSetCount; ++i) {
3431 unsigned idx = i + firstSet;
3432 RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);
3433
3434 /* If the set is already bound we only need to update the
3435 * (potentially changed) dynamic offsets. */
3436 if (descriptors_state->sets[idx] != set ||
3437 !(descriptors_state->valid & (1u << idx))) {
3438 radv_bind_descriptor_set(cmd_buffer, pipelineBindPoint, set, idx);
3439 }
3440
3441 for (unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
3442 unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
3443 uint32_t *dst = descriptors_state->dynamic_buffers + idx * 4;
3444 assert(dyn_idx < dynamicOffsetCount);
3445
3446 struct radv_descriptor_range *range = set->dynamic_descriptors + j;
3447 uint64_t va = range->va + pDynamicOffsets[dyn_idx];
3448 dst[0] = va;
3449 dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
3450 dst[2] = no_dynamic_bounds ? 0xffffffffu : range->size;
3451 dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
3452 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
3453 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
3454 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
3455
3456 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
3457 dst[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
3458 S_008F0C_OOB_SELECT(3) |
3459 S_008F0C_RESOURCE_LEVEL(1);
3460 } else {
3461 dst[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
3462 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
3463 }
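/* dst[0..3] above form a 4-dword buffer resource descriptor (V#):
 * word 0 is the base address low bits, word 1 the base address high
 * bits (stride stays 0 here), word 2 the number of accessible bytes,
 * and word 3 the dst_sel swizzle plus per-generation format/OOB bits.
 * Note that the dynamic offset is folded directly into the base
 * address rather than into the range.
 */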
3464
3465 cmd_buffer->push_constant_stages |=
3466 set->layout->dynamic_shader_stages;
3467 }
3468 }
3469 }
3470
3471 static bool radv_init_push_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
3472 struct radv_descriptor_set *set,
3473 struct radv_descriptor_set_layout *layout,
3474 VkPipelineBindPoint bind_point)
3475 {
3476 struct radv_descriptor_state *descriptors_state =
3477 radv_get_descriptors_state(cmd_buffer, bind_point);
3478 set->size = layout->size;
3479 set->layout = layout;
3480
3481 if (descriptors_state->push_set.capacity < set->size) {
3482 size_t new_size = MAX2(set->size, 1024);
3483 new_size = MAX2(new_size, 2 * descriptors_state->push_set.capacity);
3484 new_size = MIN2(new_size, 96 * MAX_PUSH_DESCRIPTORS);
3485
3486 free(set->mapped_ptr);
3487 set->mapped_ptr = malloc(new_size);
3488
3489 if (!set->mapped_ptr) {
3490 descriptors_state->push_set.capacity = 0;
3491 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3492 return false;
3493 }
3494
3495 descriptors_state->push_set.capacity = new_size;
3496 }
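/* Growth policy: the push descriptor scratch area starts at a minimum
 * of 1024 bytes, at least doubles on each reallocation, and is clamped
 * to 96 bytes per descriptor times MAX_PUSH_DESCRIPTORS. For example,
 * a first request of 200 bytes allocates 1024, and a later 2100-byte
 * request reallocates to 2100 (assuming the cap is not hit).
 */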
3497
3498 return true;
3499 }
3500
3501 void radv_meta_push_descriptor_set(
3502 struct radv_cmd_buffer* cmd_buffer,
3503 VkPipelineBindPoint pipelineBindPoint,
3504 VkPipelineLayout _layout,
3505 uint32_t set,
3506 uint32_t descriptorWriteCount,
3507 const VkWriteDescriptorSet* pDescriptorWrites)
3508 {
3509 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
3510 struct radv_descriptor_set *push_set = &cmd_buffer->meta_push_descriptors;
3511 unsigned bo_offset;
3512
3513 assert(set == 0);
3514 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
3515
3516 push_set->size = layout->set[set].layout->size;
3517 push_set->layout = layout->set[set].layout;
3518
3519 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, push_set->size, 32,
3520 &bo_offset,
3521 (void**) &push_set->mapped_ptr))
3522 return;
3523
3524 push_set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
3525 push_set->va += bo_offset;
3526
3527 radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
3528 radv_descriptor_set_to_handle(push_set),
3529 descriptorWriteCount, pDescriptorWrites, 0, NULL);
3530
3531 radv_set_descriptor_set(cmd_buffer, pipelineBindPoint, push_set, set);
3532 }
3533
3534 void radv_CmdPushDescriptorSetKHR(
3535 VkCommandBuffer commandBuffer,
3536 VkPipelineBindPoint pipelineBindPoint,
3537 VkPipelineLayout _layout,
3538 uint32_t set,
3539 uint32_t descriptorWriteCount,
3540 const VkWriteDescriptorSet* pDescriptorWrites)
3541 {
3542 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3543 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
3544 struct radv_descriptor_state *descriptors_state =
3545 radv_get_descriptors_state(cmd_buffer, pipelineBindPoint);
3546 struct radv_descriptor_set *push_set = &descriptors_state->push_set.set;
3547
3548 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
3549
3550 if (!radv_init_push_descriptor_set(cmd_buffer, push_set,
3551 layout->set[set].layout,
3552 pipelineBindPoint))
3553 return;
3554
3555 /* Check that there are no inline uniform block updates when calling vkCmdPushDescriptorSetKHR()
3556 * because that is invalid according to the Vulkan spec.
3557 */
3558 for (int i = 0; i < descriptorWriteCount; i++) {
3559 ASSERTED const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
3560 assert(writeset->descriptorType != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT);
3561 }
3562
3563 radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
3564 radv_descriptor_set_to_handle(push_set),
3565 descriptorWriteCount, pDescriptorWrites, 0, NULL);
3566
3567 radv_set_descriptor_set(cmd_buffer, pipelineBindPoint, push_set, set);
3568 descriptors_state->push_dirty = true;
3569 }
3570
3571 void radv_CmdPushDescriptorSetWithTemplateKHR(
3572 VkCommandBuffer commandBuffer,
3573 VkDescriptorUpdateTemplate descriptorUpdateTemplate,
3574 VkPipelineLayout _layout,
3575 uint32_t set,
3576 const void* pData)
3577 {
3578 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3579 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
3580 RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
3581 struct radv_descriptor_state *descriptors_state =
3582 radv_get_descriptors_state(cmd_buffer, templ->bind_point);
3583 struct radv_descriptor_set *push_set = &descriptors_state->push_set.set;
3584
3585 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
3586
3587 if (!radv_init_push_descriptor_set(cmd_buffer, push_set,
3588 layout->set[set].layout,
3589 templ->bind_point))
3590 return;
3591
3592 radv_update_descriptor_set_with_template(cmd_buffer->device, cmd_buffer, push_set,
3593 descriptorUpdateTemplate, pData);
3594
3595 radv_set_descriptor_set(cmd_buffer, templ->bind_point, push_set, set);
3596 descriptors_state->push_dirty = true;
3597 }
3598
3599 void radv_CmdPushConstants(VkCommandBuffer commandBuffer,
3600 VkPipelineLayout layout,
3601 VkShaderStageFlags stageFlags,
3602 uint32_t offset,
3603 uint32_t size,
3604 const void* pValues)
3605 {
3606 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3607 memcpy(cmd_buffer->push_constants + offset, pValues, size);
3608 cmd_buffer->push_constant_stages |= stageFlags;
3609 }
3610
3611 VkResult radv_EndCommandBuffer(
3612 VkCommandBuffer commandBuffer)
3613 {
3614 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3615
3616 if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) {
3617 if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX6)
3618 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WB_L2;
3619
3620 /* Make sure to sync all pending active queries at the end of
3621 * the command buffer.
3622 */
3623 cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits;
3624
3625 /* Since NGG streamout uses GDS, we need to make GDS idle when
3626 * we leave the IB, otherwise another process might overwrite
3627 * it while our shaders are busy.
3628 */
3629 if (cmd_buffer->gds_needed)
3630 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
3631
3632 si_emit_cache_flush(cmd_buffer);
3633 }
3634
3635 /* Make sure CP DMA is idle at the end of IBs because the kernel
3636 * doesn't wait for it.
3637 */
3638 si_cp_dma_wait_for_idle(cmd_buffer);
3639
3640 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
3641 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs);
3642
3643 if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
3644 return vk_error(cmd_buffer->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
3645
3646 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_EXECUTABLE;
3647
3648 return cmd_buffer->record_result;
3649 }
3650
3651 static void
3652 radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer)
3653 {
3654 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
3655
3656 if (!pipeline || pipeline == cmd_buffer->state.emitted_compute_pipeline)
3657 return;
3658
3659 assert(!pipeline->ctx_cs.cdw);
3660
3661 cmd_buffer->state.emitted_compute_pipeline = pipeline;
3662
3663 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, pipeline->cs.cdw);
3664 radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);
3665
3666 cmd_buffer->compute_scratch_size_needed =
3667 MAX2(cmd_buffer->compute_scratch_size_needed,
3668 pipeline->max_waves * pipeline->scratch_bytes_per_wave);
3669
3670 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
3671 pipeline->shaders[MESA_SHADER_COMPUTE]->bo);
3672
3673 if (unlikely(cmd_buffer->device->trace_bo))
3674 radv_save_pipeline(cmd_buffer, pipeline, RING_COMPUTE);
3675 }
3676
3677 static void radv_mark_descriptor_sets_dirty(struct radv_cmd_buffer *cmd_buffer,
3678 VkPipelineBindPoint bind_point)
3679 {
3680 struct radv_descriptor_state *descriptors_state =
3681 radv_get_descriptors_state(cmd_buffer, bind_point);
3682
3683 descriptors_state->dirty |= descriptors_state->valid;
3684 }
3685
3686 void radv_CmdBindPipeline(
3687 VkCommandBuffer commandBuffer,
3688 VkPipelineBindPoint pipelineBindPoint,
3689 VkPipeline _pipeline)
3690 {
3691 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3692 RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
3693
3694 switch (pipelineBindPoint) {
3695 case VK_PIPELINE_BIND_POINT_COMPUTE:
3696 if (cmd_buffer->state.compute_pipeline == pipeline)
3697 return;
3698 radv_mark_descriptor_sets_dirty(cmd_buffer, pipelineBindPoint);
3699
3700 cmd_buffer->state.compute_pipeline = pipeline;
3701 cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
3702 break;
3703 case VK_PIPELINE_BIND_POINT_GRAPHICS:
3704 if (cmd_buffer->state.pipeline == pipeline)
3705 return;
3706 radv_mark_descriptor_sets_dirty(cmd_buffer, pipelineBindPoint);
3707
3708 cmd_buffer->state.pipeline = pipeline;
3709 if (!pipeline)
3710 break;
3711
3712 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
3713 cmd_buffer->push_constant_stages |= pipeline->active_stages;
3714
3715 /* the new vertex shader might not have the same user regs */
3716 cmd_buffer->state.last_first_instance = -1;
3717 cmd_buffer->state.last_vertex_offset = -1;
3718
3719 /* Prefetch all pipeline shaders at first draw time. */
3720 cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_SHADERS;
3721
3722 if ((cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI10 ||
3723 cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI12 ||
3724 cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI14) &&
3725 cmd_buffer->state.emitted_pipeline &&
3726 radv_pipeline_has_ngg(cmd_buffer->state.emitted_pipeline) &&
3727 !radv_pipeline_has_ngg(cmd_buffer->state.pipeline)) {
3728 /* Transitioning from NGG to legacy GS requires
3729 * VGT_FLUSH on Navi10-14. VGT_FLUSH is also emitted
3730 * at the beginning of IBs when legacy GS ring pointers
3731 * are set.
3732 */
3733 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_FLUSH;
3734 }
3735
3736 radv_bind_dynamic_state(cmd_buffer, &pipeline->dynamic_state);
3737 radv_bind_streamout_state(cmd_buffer, pipeline);
3738
3739 if (pipeline->graphics.esgs_ring_size > cmd_buffer->esgs_ring_size_needed)
3740 cmd_buffer->esgs_ring_size_needed = pipeline->graphics.esgs_ring_size;
3741 if (pipeline->graphics.gsvs_ring_size > cmd_buffer->gsvs_ring_size_needed)
3742 cmd_buffer->gsvs_ring_size_needed = pipeline->graphics.gsvs_ring_size;
3743
3744 if (radv_pipeline_has_tess(pipeline))
3745 cmd_buffer->tess_rings_needed = true;
3746 break;
3747 default:
3748 assert(!"invalid bind point");
3749 break;
3750 }
3751 }
3752
3753 void radv_CmdSetViewport(
3754 VkCommandBuffer commandBuffer,
3755 uint32_t firstViewport,
3756 uint32_t viewportCount,
3757 const VkViewport* pViewports)
3758 {
3759 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3760 struct radv_cmd_state *state = &cmd_buffer->state;
3761 ASSERTED const uint32_t total_count = firstViewport + viewportCount;
3762
3763 assert(firstViewport < MAX_VIEWPORTS);
3764 assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);
3765
3766 if (!memcmp(state->dynamic.viewport.viewports + firstViewport,
3767 pViewports, viewportCount * sizeof(*pViewports))) {
3768 return;
3769 }
3770
3771 memcpy(state->dynamic.viewport.viewports + firstViewport, pViewports,
3772 viewportCount * sizeof(*pViewports));
3773
3774 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_VIEWPORT;
3775 }
3776
3777 void radv_CmdSetScissor(
3778 VkCommandBuffer commandBuffer,
3779 uint32_t firstScissor,
3780 uint32_t scissorCount,
3781 const VkRect2D* pScissors)
3782 {
3783 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3784 struct radv_cmd_state *state = &cmd_buffer->state;
3785 ASSERTED const uint32_t total_count = firstScissor + scissorCount;
3786
3787 assert(firstScissor < MAX_SCISSORS);
3788 assert(total_count >= 1 && total_count <= MAX_SCISSORS);
3789
3790 if (!memcmp(state->dynamic.scissor.scissors + firstScissor, pScissors,
3791 scissorCount * sizeof(*pScissors))) {
3792 return;
3793 }
3794
3795 memcpy(state->dynamic.scissor.scissors + firstScissor, pScissors,
3796 scissorCount * sizeof(*pScissors));
3797
3798 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
3799 }
3800
3801 void radv_CmdSetLineWidth(
3802 VkCommandBuffer commandBuffer,
3803 float lineWidth)
3804 {
3805 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3806
3807 if (cmd_buffer->state.dynamic.line_width == lineWidth)
3808 return;
3809
3810 cmd_buffer->state.dynamic.line_width = lineWidth;
3811 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
3812 }
3813
3814 void radv_CmdSetDepthBias(
3815 VkCommandBuffer commandBuffer,
3816 float depthBiasConstantFactor,
3817 float depthBiasClamp,
3818 float depthBiasSlopeFactor)
3819 {
3820 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3821 struct radv_cmd_state *state = &cmd_buffer->state;
3822
3823 if (state->dynamic.depth_bias.bias == depthBiasConstantFactor &&
3824 state->dynamic.depth_bias.clamp == depthBiasClamp &&
3825 state->dynamic.depth_bias.slope == depthBiasSlopeFactor) {
3826 return;
3827 }
3828
3829 state->dynamic.depth_bias.bias = depthBiasConstantFactor;
3830 state->dynamic.depth_bias.clamp = depthBiasClamp;
3831 state->dynamic.depth_bias.slope = depthBiasSlopeFactor;
3832
3833 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
3834 }
3835
3836 void radv_CmdSetBlendConstants(
3837 VkCommandBuffer commandBuffer,
3838 const float blendConstants[4])
3839 {
3840 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3841 struct radv_cmd_state *state = &cmd_buffer->state;
3842
3843 if (!memcmp(state->dynamic.blend_constants, blendConstants, sizeof(float) * 4))
3844 return;
3845
3846 memcpy(state->dynamic.blend_constants, blendConstants, sizeof(float) * 4);
3847
3848 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
3849 }
3850
3851 void radv_CmdSetDepthBounds(
3852 VkCommandBuffer commandBuffer,
3853 float minDepthBounds,
3854 float maxDepthBounds)
3855 {
3856 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3857 struct radv_cmd_state *state = &cmd_buffer->state;
3858
3859 if (state->dynamic.depth_bounds.min == minDepthBounds &&
3860 state->dynamic.depth_bounds.max == maxDepthBounds) {
3861 return;
3862 }
3863
3864 state->dynamic.depth_bounds.min = minDepthBounds;
3865 state->dynamic.depth_bounds.max = maxDepthBounds;
3866
3867 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
3868 }
3869
3870 void radv_CmdSetStencilCompareMask(
3871 VkCommandBuffer commandBuffer,
3872 VkStencilFaceFlags faceMask,
3873 uint32_t compareMask)
3874 {
3875 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3876 struct radv_cmd_state *state = &cmd_buffer->state;
3877 bool front_same = state->dynamic.stencil_compare_mask.front == compareMask;
3878 bool back_same = state->dynamic.stencil_compare_mask.back == compareMask;
3879
3880 if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) &&
3881 (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || back_same)) {
3882 return;
3883 }
3884
3885 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
3886 state->dynamic.stencil_compare_mask.front = compareMask;
3887 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
3888 state->dynamic.stencil_compare_mask.back = compareMask;
3889
3890 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
3891 }
3892
3893 void radv_CmdSetStencilWriteMask(
3894 VkCommandBuffer commandBuffer,
3895 VkStencilFaceFlags faceMask,
3896 uint32_t writeMask)
3897 {
3898 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3899 struct radv_cmd_state *state = &cmd_buffer->state;
3900 bool front_same = state->dynamic.stencil_write_mask.front == writeMask;
3901 bool back_same = state->dynamic.stencil_write_mask.back == writeMask;
3902
3903 if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) &&
3904 (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || back_same)) {
3905 return;
3906 }
3907
3908 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
3909 state->dynamic.stencil_write_mask.front = writeMask;
3910 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
3911 state->dynamic.stencil_write_mask.back = writeMask;
3912
3913 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
3914 }
3915
3916 void radv_CmdSetStencilReference(
3917 VkCommandBuffer commandBuffer,
3918 VkStencilFaceFlags faceMask,
3919 uint32_t reference)
3920 {
3921 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3922 struct radv_cmd_state *state = &cmd_buffer->state;
3923 bool front_same = state->dynamic.stencil_reference.front == reference;
3924 bool back_same = state->dynamic.stencil_reference.back == reference;
3925
3926 if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) &&
3927 (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || back_same)) {
3928 return;
3929 }
3930
3931 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
3932 cmd_buffer->state.dynamic.stencil_reference.front = reference;
3933 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
3934 cmd_buffer->state.dynamic.stencil_reference.back = reference;
3935
3936 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
3937 }
3938
3939 void radv_CmdSetDiscardRectangleEXT(
3940 VkCommandBuffer commandBuffer,
3941 uint32_t firstDiscardRectangle,
3942 uint32_t discardRectangleCount,
3943 const VkRect2D* pDiscardRectangles)
3944 {
3945 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3946 struct radv_cmd_state *state = &cmd_buffer->state;
3947 ASSERTED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount;
3948
3949 assert(firstDiscardRectangle < MAX_DISCARD_RECTANGLES);
3950 assert(total_count >= 1 && total_count <= MAX_DISCARD_RECTANGLES);
3951
3952 if (!memcmp(state->dynamic.discard_rectangle.rectangles + firstDiscardRectangle,
3953 pDiscardRectangles, discardRectangleCount * sizeof(*pDiscardRectangles))) {
3954 return;
3955 }
3956
3957 typed_memcpy(&state->dynamic.discard_rectangle.rectangles[firstDiscardRectangle],
3958 pDiscardRectangles, discardRectangleCount);
3959
3960 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE;
3961 }
3962
3963 void radv_CmdSetSampleLocationsEXT(
3964 VkCommandBuffer commandBuffer,
3965 const VkSampleLocationsInfoEXT* pSampleLocationsInfo)
3966 {
3967 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3968 struct radv_cmd_state *state = &cmd_buffer->state;
3969
3970 assert(pSampleLocationsInfo->sampleLocationsCount <= MAX_SAMPLE_LOCATIONS);
3971
3972 state->dynamic.sample_location.per_pixel = pSampleLocationsInfo->sampleLocationsPerPixel;
3973 state->dynamic.sample_location.grid_size = pSampleLocationsInfo->sampleLocationGridSize;
3974 state->dynamic.sample_location.count = pSampleLocationsInfo->sampleLocationsCount;
3975 typed_memcpy(&state->dynamic.sample_location.locations[0],
3976 pSampleLocationsInfo->pSampleLocations,
3977 pSampleLocationsInfo->sampleLocationsCount);
3978
3979 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS;
3980 }
3981
3982 void radv_CmdExecuteCommands(
3983 VkCommandBuffer commandBuffer,
3984 uint32_t commandBufferCount,
3985 const VkCommandBuffer* pCmdBuffers)
3986 {
3987 RADV_FROM_HANDLE(radv_cmd_buffer, primary, commandBuffer);
3988
3989 assert(commandBufferCount > 0);
3990
3991 /* Emit pending flushes on primary prior to executing secondary */
3992 si_emit_cache_flush(primary);
3993
3994 for (uint32_t i = 0; i < commandBufferCount; i++) {
3995 RADV_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]);
3996
3997 primary->scratch_size_needed = MAX2(primary->scratch_size_needed,
3998 secondary->scratch_size_needed);
3999 primary->compute_scratch_size_needed = MAX2(primary->compute_scratch_size_needed,
4000 secondary->compute_scratch_size_needed);
4001
4002 if (secondary->esgs_ring_size_needed > primary->esgs_ring_size_needed)
4003 primary->esgs_ring_size_needed = secondary->esgs_ring_size_needed;
4004 if (secondary->gsvs_ring_size_needed > primary->gsvs_ring_size_needed)
4005 primary->gsvs_ring_size_needed = secondary->gsvs_ring_size_needed;
4006 if (secondary->tess_rings_needed)
4007 primary->tess_rings_needed = true;
4008 if (secondary->sample_positions_needed)
4009 primary->sample_positions_needed = true;
4010
4011 if (!secondary->state.framebuffer &&
4012 (primary->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER)) {
4013 /* Emit the framebuffer state from primary if secondary
4014 * has been recorded without a framebuffer, otherwise
4015 * fast color/depth clears can't work.
4016 */
4017 radv_emit_framebuffer_state(primary);
4018 }
4019
4020 primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs);
4021
4022
4023 /* When the secondary command buffer is compute only we don't
4024 * need to re-emit the current graphics pipeline.
4025 */
4026 if (secondary->state.emitted_pipeline) {
4027 primary->state.emitted_pipeline =
4028 secondary->state.emitted_pipeline;
4029 }
4030
4031 /* When the secondary command buffer is graphics only we don't
4032 * need to re-emit the current compute pipeline.
4033 */
4034 if (secondary->state.emitted_compute_pipeline) {
4035 primary->state.emitted_compute_pipeline =
4036 secondary->state.emitted_compute_pipeline;
4037 }
4038
4039 /* Only re-emit the draw packets when needed. */
4040 if (secondary->state.last_primitive_reset_en != -1) {
4041 primary->state.last_primitive_reset_en =
4042 secondary->state.last_primitive_reset_en;
4043 }
4044
4045 if (secondary->state.last_primitive_reset_index) {
4046 primary->state.last_primitive_reset_index =
4047 secondary->state.last_primitive_reset_index;
4048 }
4049
4050 if (secondary->state.last_ia_multi_vgt_param) {
4051 primary->state.last_ia_multi_vgt_param =
4052 secondary->state.last_ia_multi_vgt_param;
4053 }
4054
4055 primary->state.last_first_instance = secondary->state.last_first_instance;
4056 primary->state.last_num_instances = secondary->state.last_num_instances;
4057 primary->state.last_vertex_offset = secondary->state.last_vertex_offset;
4058
4059 if (secondary->state.last_index_type != -1) {
4060 primary->state.last_index_type =
4061 secondary->state.last_index_type;
4062 }
4063 }
4064
4065 /* After executing commands from secondary buffers we have to mark
4066 * some states as dirty again.
4067 */
4068 primary->state.dirty |= RADV_CMD_DIRTY_PIPELINE |
4069 RADV_CMD_DIRTY_INDEX_BUFFER |
4070 RADV_CMD_DIRTY_DYNAMIC_ALL;
4071 radv_mark_descriptor_sets_dirty(primary, VK_PIPELINE_BIND_POINT_GRAPHICS);
4072 radv_mark_descriptor_sets_dirty(primary, VK_PIPELINE_BIND_POINT_COMPUTE);
4073 }
4074
4075 VkResult radv_CreateCommandPool(
4076 VkDevice _device,
4077 const VkCommandPoolCreateInfo* pCreateInfo,
4078 const VkAllocationCallbacks* pAllocator,
4079 VkCommandPool* pCmdPool)
4080 {
4081 RADV_FROM_HANDLE(radv_device, device, _device);
4082 struct radv_cmd_pool *pool;
4083
4084 pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
4085 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4086 if (pool == NULL)
4087 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
4088
4089 if (pAllocator)
4090 pool->alloc = *pAllocator;
4091 else
4092 pool->alloc = device->alloc;
4093
4094 list_inithead(&pool->cmd_buffers);
4095 list_inithead(&pool->free_cmd_buffers);
4096
4097 pool->queue_family_index = pCreateInfo->queueFamilyIndex;
4098
4099 *pCmdPool = radv_cmd_pool_to_handle(pool);
4100
4101 return VK_SUCCESS;
4102
4103 }
4104
4105 void radv_DestroyCommandPool(
4106 VkDevice _device,
4107 VkCommandPool commandPool,
4108 const VkAllocationCallbacks* pAllocator)
4109 {
4110 RADV_FROM_HANDLE(radv_device, device, _device);
4111 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
4112
4113 if (!pool)
4114 return;
4115
4116 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
4117 &pool->cmd_buffers, pool_link) {
4118 radv_cmd_buffer_destroy(cmd_buffer);
4119 }
4120
4121 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
4122 &pool->free_cmd_buffers, pool_link) {
4123 radv_cmd_buffer_destroy(cmd_buffer);
4124 }
4125
4126 vk_free2(&device->alloc, pAllocator, pool);
4127 }
4128
4129 VkResult radv_ResetCommandPool(
4130 VkDevice device,
4131 VkCommandPool commandPool,
4132 VkCommandPoolResetFlags flags)
4133 {
4134 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
4135 VkResult result;
4136
4137 list_for_each_entry(struct radv_cmd_buffer, cmd_buffer,
4138 &pool->cmd_buffers, pool_link) {
4139 result = radv_reset_cmd_buffer(cmd_buffer);
4140 if (result != VK_SUCCESS)
4141 return result;
4142 }
4143
4144 return VK_SUCCESS;
4145 }
4146
4147 void radv_TrimCommandPool(
4148 VkDevice device,
4149 VkCommandPool commandPool,
4150 VkCommandPoolTrimFlags flags)
4151 {
4152 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
4153
4154 if (!pool)
4155 return;
4156
4157 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
4158 &pool->free_cmd_buffers, pool_link) {
4159 radv_cmd_buffer_destroy(cmd_buffer);
4160 }
4161 }
4162
4163 static void
4164 radv_cmd_buffer_begin_subpass(struct radv_cmd_buffer *cmd_buffer,
4165 uint32_t subpass_id)
4166 {
4167 struct radv_cmd_state *state = &cmd_buffer->state;
4168 struct radv_subpass *subpass = &state->pass->subpasses[subpass_id];
4169
4170 ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
4171 cmd_buffer->cs, 4096);
4172
4173 radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);
4174
4175 radv_cmd_buffer_set_subpass(cmd_buffer, subpass);
4176
4177 for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
4178 const uint32_t a = subpass->attachments[i].attachment;
4179 if (a == VK_ATTACHMENT_UNUSED)
4180 continue;
4181
4182 radv_handle_subpass_image_transition(cmd_buffer,
4183 subpass->attachments[i],
4184 true);
4185 }
4186
4187 radv_cmd_buffer_clear_subpass(cmd_buffer);
4188
4189 assert(cmd_buffer->cs->cdw <= cdw_max);
4190 }
4191
4192 static void
4193 radv_cmd_buffer_end_subpass(struct radv_cmd_buffer *cmd_buffer)
4194 {
4195 struct radv_cmd_state *state = &cmd_buffer->state;
4196 const struct radv_subpass *subpass = state->subpass;
4197 uint32_t subpass_id = radv_get_subpass_id(cmd_buffer);
4198
4199 radv_cmd_buffer_resolve_subpass(cmd_buffer);
4200
4201 for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
4202 const uint32_t a = subpass->attachments[i].attachment;
4203 if (a == VK_ATTACHMENT_UNUSED)
4204 continue;
4205
4206 if (state->pass->attachments[a].last_subpass_idx != subpass_id)
4207 continue;
4208
4209 VkImageLayout layout = state->pass->attachments[a].final_layout;
4210 struct radv_subpass_attachment att = { a, layout };
4211 radv_handle_subpass_image_transition(cmd_buffer, att, false);
4212 }
4213 }
4214
4215 void radv_CmdBeginRenderPass(
4216 VkCommandBuffer commandBuffer,
4217 const VkRenderPassBeginInfo* pRenderPassBegin,
4218 VkSubpassContents contents)
4219 {
4220 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4221 RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass);
4222 RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
4223 VkResult result;
4224
4225 cmd_buffer->state.framebuffer = framebuffer;
4226 cmd_buffer->state.pass = pass;
4227 cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
4228
4229 result = radv_cmd_state_setup_attachments(cmd_buffer, pass, pRenderPassBegin);
4230 if (result != VK_SUCCESS)
4231 return;
4232
4233 result = radv_cmd_state_setup_sample_locations(cmd_buffer, pass, pRenderPassBegin);
4234 if (result != VK_SUCCESS)
4235 return;
4236
4237 radv_cmd_buffer_begin_subpass(cmd_buffer, 0);
4238 }
4239
4240 void radv_CmdBeginRenderPass2KHR(
4241 VkCommandBuffer commandBuffer,
4242 const VkRenderPassBeginInfo* pRenderPassBeginInfo,
4243 const VkSubpassBeginInfoKHR* pSubpassBeginInfo)
4244 {
4245 radv_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
4246 pSubpassBeginInfo->contents);
4247 }
4248
4249 void radv_CmdNextSubpass(
4250 VkCommandBuffer commandBuffer,
4251 VkSubpassContents contents)
4252 {
4253 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4254
4255 uint32_t prev_subpass = radv_get_subpass_id(cmd_buffer);
4256 radv_cmd_buffer_end_subpass(cmd_buffer);
4257 radv_cmd_buffer_begin_subpass(cmd_buffer, prev_subpass + 1);
4258 }
4259
4260 void radv_CmdNextSubpass2KHR(
4261 VkCommandBuffer commandBuffer,
4262 const VkSubpassBeginInfoKHR* pSubpassBeginInfo,
4263 const VkSubpassEndInfoKHR* pSubpassEndInfo)
4264 {
4265 radv_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
4266 }
4267
4268 static void radv_emit_view_index(struct radv_cmd_buffer *cmd_buffer, unsigned index)
4269 {
4270 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
4271 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
4272 if (!radv_get_shader(pipeline, stage))
4273 continue;
4274
4275 struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, AC_UD_VIEW_INDEX);
4276 if (loc->sgpr_idx == -1)
4277 continue;
4278 uint32_t base_reg = pipeline->user_data_0[stage];
4279 radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
4280
4281 }
4282 if (radv_pipeline_has_gs_copy_shader(pipeline)) {
4283 struct radv_userdata_info *loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_VIEW_INDEX];
4284 if (loc->sgpr_idx != -1) {
4285 uint32_t base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
4286 radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
4287 }
4288 }
4289 }
4290
4291 static void
4292 radv_cs_emit_draw_packet(struct radv_cmd_buffer *cmd_buffer,
4293 uint32_t vertex_count,
4294 bool use_opaque)
4295 {
4296 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, cmd_buffer->state.predicating));
4297 radeon_emit(cmd_buffer->cs, vertex_count);
4298 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
4299 S_0287F0_USE_OPAQUE(use_opaque));
4300 }
4301
4302 static void
4303 radv_cs_emit_draw_indexed_packet(struct radv_cmd_buffer *cmd_buffer,
4304 uint64_t index_va,
4305 uint32_t index_count)
4306 {
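/* DRAW_INDEX_2 payload: maximum index count (used by the hardware to
 * bound index fetches), index buffer VA low/high, number of indices to
 * draw, and the DMA source select.
 */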
4307 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, cmd_buffer->state.predicating));
4308 radeon_emit(cmd_buffer->cs, cmd_buffer->state.max_index_count);
4309 radeon_emit(cmd_buffer->cs, index_va);
4310 radeon_emit(cmd_buffer->cs, index_va >> 32);
4311 radeon_emit(cmd_buffer->cs, index_count);
4312 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_DMA);
4313 }
4314
4315 static void
4316 radv_cs_emit_indirect_draw_packet(struct radv_cmd_buffer *cmd_buffer,
4317 bool indexed,
4318 uint32_t draw_count,
4319 uint64_t count_va,
4320 uint32_t stride)
4321 {
4322 struct radeon_cmdbuf *cs = cmd_buffer->cs;
4323 unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
4324 : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
4325 bool draw_id_enable = radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.vs.needs_draw_id;
4326 uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr;
4327 bool predicating = cmd_buffer->state.predicating;
4328 assert(base_reg);
4329
4330 /* just reset draw state for vertex data */
4331 cmd_buffer->state.last_first_instance = -1;
4332 cmd_buffer->state.last_num_instances = -1;
4333 cmd_buffer->state.last_vertex_offset = -1;
4334
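/* A single direct-count draw that does not need a draw id can use the
 * short DRAW_(INDEX_)INDIRECT packet; everything else (multi-draw, a
 * count buffer, or a vertex shader that reads gl_DrawID) needs the
 * _MULTI variant, which also carries the count buffer address and the
 * per-draw stride.
 */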
4335 if (draw_count == 1 && !count_va && !draw_id_enable) {
4336 radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT :
4337 PKT3_DRAW_INDIRECT, 3, predicating));
4338 radeon_emit(cs, 0);
4339 radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
4340 radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
4341 radeon_emit(cs, di_src_sel);
4342 } else {
4343 radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
4344 PKT3_DRAW_INDIRECT_MULTI,
4345 8, predicating));
4346 radeon_emit(cs, 0);
4347 radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
4348 radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
4349 radeon_emit(cs, (((base_reg + 8) - SI_SH_REG_OFFSET) >> 2) |
4350 S_2C3_DRAW_INDEX_ENABLE(draw_id_enable) |
4351 S_2C3_COUNT_INDIRECT_ENABLE(!!count_va));
4352 radeon_emit(cs, draw_count); /* count */
4353 radeon_emit(cs, count_va); /* count_addr */
4354 radeon_emit(cs, count_va >> 32);
4355 radeon_emit(cs, stride); /* stride */
4356 radeon_emit(cs, di_src_sel);
4357 }
4358 }
4359
4360 static void
4361 radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer,
4362 const struct radv_draw_info *info)
4363 {
4364 struct radv_cmd_state *state = &cmd_buffer->state;
4365 struct radeon_winsys *ws = cmd_buffer->device->ws;
4366 struct radeon_cmdbuf *cs = cmd_buffer->cs;
4367
4368 if (info->indirect) {
4369 uint64_t va = radv_buffer_get_va(info->indirect->bo);
4370 uint64_t count_va = 0;
4371
4372 va += info->indirect->offset + info->indirect_offset;
4373
4374 radv_cs_add_buffer(ws, cs, info->indirect->bo);
4375
4376 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
4377 radeon_emit(cs, 1);
4378 radeon_emit(cs, va);
4379 radeon_emit(cs, va >> 32);
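/* SET_BASE index 1 programs the indirect buffer base address; the
 * DRAW_*_INDIRECT packets emitted afterwards reference it with a zero
 * data offset because the draw-parameter offset was already folded
 * into va above.
 */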
4380
4381 if (info->count_buffer) {
4382 count_va = radv_buffer_get_va(info->count_buffer->bo);
4383 count_va += info->count_buffer->offset +
4384 info->count_buffer_offset;
4385
4386 radv_cs_add_buffer(ws, cs, info->count_buffer->bo);
4387 }
4388
4389 if (!state->subpass->view_mask) {
4390 radv_cs_emit_indirect_draw_packet(cmd_buffer,
4391 info->indexed,
4392 info->count,
4393 count_va,
4394 info->stride);
4395 } else {
4396 unsigned i;
4397 for_each_bit(i, state->subpass->view_mask) {
4398 radv_emit_view_index(cmd_buffer, i);
4399
4400 radv_cs_emit_indirect_draw_packet(cmd_buffer,
4401 info->indexed,
4402 info->count,
4403 count_va,
4404 info->stride);
4405 }
4406 }
4407 } else {
4408 assert(state->pipeline->graphics.vtx_base_sgpr);
4409
4410 if (info->vertex_offset != state->last_vertex_offset ||
4411 info->first_instance != state->last_first_instance) {
4412 radeon_set_sh_reg_seq(cs, state->pipeline->graphics.vtx_base_sgpr,
4413 state->pipeline->graphics.vtx_emit_num);
4414
4415 radeon_emit(cs, info->vertex_offset);
4416 radeon_emit(cs, info->first_instance);
4417 if (state->pipeline->graphics.vtx_emit_num == 3)
4418 radeon_emit(cs, 0);
4419 state->last_first_instance = info->first_instance;
4420 state->last_vertex_offset = info->vertex_offset;
4421 }
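/* These SGPRs hold (base vertex, start instance) and, when
 * vtx_emit_num == 3, a draw id which is always 0 for direct draws;
 * indirect draws overwrite it through DRAW_INDEX_ENABLE in the
 * indirect packet instead.
 */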
4422
4423 if (state->last_num_instances != info->instance_count) {
4424 radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, false));
4425 radeon_emit(cs, info->instance_count);
4426 state->last_num_instances = info->instance_count;
4427 }
4428
4429 if (info->indexed) {
4430 int index_size = radv_get_vgt_index_size(state->index_type);
4431 uint64_t index_va;
4432
4433 /* Skip draw calls with 0-sized index buffers. They
4434 * cause a hang on some chips, like Navi10-14.
4435 */
4436 if (!cmd_buffer->state.max_index_count)
4437 return;
4438
4439 index_va = state->index_va;
4440 index_va += info->first_index * index_size;
4441
4442 if (!state->subpass->view_mask) {
4443 radv_cs_emit_draw_indexed_packet(cmd_buffer,
4444 index_va,
4445 info->count);
4446 } else {
4447 unsigned i;
4448 for_each_bit(i, state->subpass->view_mask) {
4449 radv_emit_view_index(cmd_buffer, i);
4450
4451 radv_cs_emit_draw_indexed_packet(cmd_buffer,
4452 index_va,
4453 info->count);
4454 }
4455 }
4456 } else {
4457 if (!state->subpass->view_mask) {
4458 radv_cs_emit_draw_packet(cmd_buffer,
4459 info->count,
4460 !!info->strmout_buffer);
4461 } else {
4462 unsigned i;
4463 for_each_bit(i, state->subpass->view_mask) {
4464 radv_emit_view_index(cmd_buffer, i);
4465
4466 radv_cs_emit_draw_packet(cmd_buffer,
4467 info->count,
4468 !!info->strmout_buffer);
4469 }
4470 }
4471 }
4472 }
4473 }
4474
4475 /*
4476 * Vega and Raven have a bug which triggers if there are multiple hardware
4477 * register contexts active at the same time with different scissor values.
4478 *
4479 * There are two possible workarounds:
4480 * 1) Wait for PS_PARTIAL_FLUSH every time the scissor is changed. That way
4481 * there is only ever 1 active set of scissor values at the same time.
4482 *
4483 * 2) Whenever the hardware switches contexts we have to set the scissor
4484 * registers again even if it is a noop. That way the new context gets
4485 * the correct scissor values.
4486 *
4487 * This implements option 2. radv_need_late_scissor_emission needs to
4488 * return true on affected HW if radv_emit_all_graphics_states sets
4489 * any context registers.
4490 */
4491 static bool radv_need_late_scissor_emission(struct radv_cmd_buffer *cmd_buffer,
4492 const struct radv_draw_info *info)
4493 {
4494 struct radv_cmd_state *state = &cmd_buffer->state;
4495
4496 if (!cmd_buffer->device->physical_device->rad_info.has_gfx9_scissor_bug)
4497 return false;
4498
4499 if (cmd_buffer->state.context_roll_without_scissor_emitted || info->strmout_buffer)
4500 return true;
4501
4502 uint32_t used_states = cmd_buffer->state.pipeline->graphics.needed_dynamic_state | ~RADV_CMD_DIRTY_DYNAMIC_ALL;
4503
4504 /* Index, vertex and streamout buffers don't change context regs, and
4505 * pipeline is already handled.
4506 */
4507 used_states &= ~(RADV_CMD_DIRTY_INDEX_BUFFER |
4508 RADV_CMD_DIRTY_VERTEX_BUFFER |
4509 RADV_CMD_DIRTY_STREAMOUT_BUFFER |
4510 RADV_CMD_DIRTY_PIPELINE);
4511
4512 if (cmd_buffer->state.dirty & used_states)
4513 return true;
4514
4515 uint32_t primitive_reset_index =
4516 radv_get_primitive_reset_index(cmd_buffer);
4517
4518 if (info->indexed && state->pipeline->graphics.prim_restart_enable &&
4519 primitive_reset_index != state->last_primitive_reset_index)
4520 return true;
4521
4522 return false;
4523 }
4524
4525 static void
4526 radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer,
4527 const struct radv_draw_info *info)
4528 {
4529 bool late_scissor_emission;
4530
4531 if ((cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER) ||
4532 cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline)
4533 radv_emit_rbplus_state(cmd_buffer);
4534
4535 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE)
4536 radv_emit_graphics_pipeline(cmd_buffer);
4537
4538 /* This should be before the cmd_buffer->state.dirty is cleared
4539 * (excluding RADV_CMD_DIRTY_PIPELINE) and after
4540 * cmd_buffer->state.context_roll_without_scissor_emitted is set. */
4541 late_scissor_emission =
4542 radv_need_late_scissor_emission(cmd_buffer, info);
4543
4544 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER)
4545 radv_emit_framebuffer_state(cmd_buffer);
4546
4547 if (info->indexed) {
4548 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_INDEX_BUFFER)
4549 radv_emit_index_buffer(cmd_buffer);
4550 } else {
4551 /* On GFX7 and later, non-indexed draws overwrite VGT_INDEX_TYPE,
4552 * so the state must be re-emitted before the next indexed
4553 * draw.
4554 */
4555 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
4556 cmd_buffer->state.last_index_type = -1;
4557 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
4558 }
4559 }
4560
4561 radv_cmd_buffer_flush_dynamic_state(cmd_buffer);
4562
4563 radv_emit_draw_registers(cmd_buffer, info);
4564
4565 if (late_scissor_emission)
4566 radv_emit_scissor(cmd_buffer);
4567 }
4568
4569 static void
4570 radv_draw(struct radv_cmd_buffer *cmd_buffer,
4571 const struct radv_draw_info *info)
4572 {
4573 struct radeon_info *rad_info =
4574 &cmd_buffer->device->physical_device->rad_info;
4575 bool has_prefetch =
4576 cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7;
4577 bool pipeline_is_dirty =
4578 (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
4579 cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline;
4580
4581 ASSERTED unsigned cdw_max =
4582 radeon_check_space(cmd_buffer->device->ws,
4583 cmd_buffer->cs, 4096);
4584
4585 if (likely(!info->indirect)) {
4586 /* GFX6-GFX7 treat instance_count==0 as instance_count==1. There is
4587 * no workaround for indirect draws, but we can at least skip
4588 * direct draws.
4589 */
4590 if (unlikely(!info->instance_count))
4591 return;
4592
4593 /* Handle count == 0. */
4594 if (unlikely(!info->count && !info->strmout_buffer))
4595 return;
4596 }
4597
4598 /* Use optimal packet order based on whether we need to sync the
4599 * pipeline.
4600 */
4601 if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
4602 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
4603 RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
4604 RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) {
4605 /* If we have to wait for idle, set all states first, so that
4606 * all SET packets are processed in parallel with previous draw
4607 * calls. Then upload descriptors, set shader pointers, draw, and
4608 * prefetch at the end. This ensures that the time the CUs are
4609 * idle is very short (there are only SET_SH packets between the
4610 * wait and the draw).
4611 */
4612 radv_emit_all_graphics_states(cmd_buffer, info);
4613 si_emit_cache_flush(cmd_buffer);
4614 /* <-- CUs are idle here --> */
4615
4616 radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty);
4617
4618 radv_emit_draw_packets(cmd_buffer, info);
4619 /* <-- CUs are busy here --> */
4620
4621 /* Start prefetches after the draw has been started. Both will
4622 * run in parallel, but starting the draw first is more
4623 * important.
4624 */
4625 if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
4626 radv_emit_prefetch_L2(cmd_buffer,
4627 cmd_buffer->state.pipeline, false);
4628 }
4629 } else {
4630 /* If we don't wait for idle, start prefetches first, then set
4631 * states, and draw at the end.
4632 */
4633 si_emit_cache_flush(cmd_buffer);
4634
4635 if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
4636 /* Only prefetch the vertex shader and VBO descriptors
4637 * in order to start the draw as soon as possible.
4638 */
4639 radv_emit_prefetch_L2(cmd_buffer,
4640 cmd_buffer->state.pipeline, true);
4641 }
4642
4643 radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty);
4644
4645 radv_emit_all_graphics_states(cmd_buffer, info);
4646 radv_emit_draw_packets(cmd_buffer, info);
4647
4648 /* Prefetch the remaining shaders after the draw has been
4649 * started.
4650 */
4651 if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
4652 radv_emit_prefetch_L2(cmd_buffer,
4653 cmd_buffer->state.pipeline, false);
4654 }
4655 }
4656
4657 /* Workaround for a VGT hang when streamout is enabled.
4658 * It must be done after drawing.
4659 */
4660 if (cmd_buffer->state.streamout.streamout_enabled &&
4661 (rad_info->family == CHIP_HAWAII ||
4662 rad_info->family == CHIP_TONGA ||
4663 rad_info->family == CHIP_FIJI)) {
4664 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_STREAMOUT_SYNC;
4665 }
4666
4667 assert(cmd_buffer->cs->cdw <= cdw_max);
4668 radv_cmd_buffer_after_draw(cmd_buffer, RADV_CMD_FLAG_PS_PARTIAL_FLUSH);
4669 }
4670
4671 void radv_CmdDraw(
4672 VkCommandBuffer commandBuffer,
4673 uint32_t vertexCount,
4674 uint32_t instanceCount,
4675 uint32_t firstVertex,
4676 uint32_t firstInstance)
4677 {
4678 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4679 struct radv_draw_info info = {};
4680
4681 info.count = vertexCount;
4682 info.instance_count = instanceCount;
4683 info.first_instance = firstInstance;
4684 info.vertex_offset = firstVertex;
4685
4686 radv_draw(cmd_buffer, &info);
4687 }
4688
4689 void radv_CmdDrawIndexed(
4690 VkCommandBuffer commandBuffer,
4691 uint32_t indexCount,
4692 uint32_t instanceCount,
4693 uint32_t firstIndex,
4694 int32_t vertexOffset,
4695 uint32_t firstInstance)
4696 {
4697 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4698 struct radv_draw_info info = {};
4699
4700 info.indexed = true;
4701 info.count = indexCount;
4702 info.instance_count = instanceCount;
4703 info.first_index = firstIndex;
4704 info.vertex_offset = vertexOffset;
4705 info.first_instance = firstInstance;
4706
4707 radv_draw(cmd_buffer, &info);
4708 }
4709
4710 void radv_CmdDrawIndirect(
4711 VkCommandBuffer commandBuffer,
4712 VkBuffer _buffer,
4713 VkDeviceSize offset,
4714 uint32_t drawCount,
4715 uint32_t stride)
4716 {
4717 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4718 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
4719 struct radv_draw_info info = {};
4720
4721 info.count = drawCount;
4722 info.indirect = buffer;
4723 info.indirect_offset = offset;
4724 info.stride = stride;
4725
4726 radv_draw(cmd_buffer, &info);
4727 }
4728
4729 void radv_CmdDrawIndexedIndirect(
4730 VkCommandBuffer commandBuffer,
4731 VkBuffer _buffer,
4732 VkDeviceSize offset,
4733 uint32_t drawCount,
4734 uint32_t stride)
4735 {
4736 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4737 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
4738 struct radv_draw_info info = {};
4739
4740 info.indexed = true;
4741 info.count = drawCount;
4742 info.indirect = buffer;
4743 info.indirect_offset = offset;
4744 info.stride = stride;
4745
4746 radv_draw(cmd_buffer, &info);
4747 }
4748
4749 void radv_CmdDrawIndirectCountKHR(
4750 VkCommandBuffer commandBuffer,
4751 VkBuffer _buffer,
4752 VkDeviceSize offset,
4753 VkBuffer _countBuffer,
4754 VkDeviceSize countBufferOffset,
4755 uint32_t maxDrawCount,
4756 uint32_t stride)
4757 {
4758 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4759 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
4760 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
4761 struct radv_draw_info info = {};
4762
4763 info.count = maxDrawCount;
4764 info.indirect = buffer;
4765 info.indirect_offset = offset;
4766 info.count_buffer = count_buffer;
4767 info.count_buffer_offset = countBufferOffset;
4768 info.stride = stride;
4769
4770 radv_draw(cmd_buffer, &info);
4771 }
4772
4773 void radv_CmdDrawIndexedIndirectCountKHR(
4774 VkCommandBuffer commandBuffer,
4775 VkBuffer _buffer,
4776 VkDeviceSize offset,
4777 VkBuffer _countBuffer,
4778 VkDeviceSize countBufferOffset,
4779 uint32_t maxDrawCount,
4780 uint32_t stride)
4781 {
4782 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4783 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
4784 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
4785 struct radv_draw_info info = {};
4786
4787 info.indexed = true;
4788 info.count = maxDrawCount;
4789 info.indirect = buffer;
4790 info.indirect_offset = offset;
4791 info.count_buffer = count_buffer;
4792 info.count_buffer_offset = countBufferOffset;
4793 info.stride = stride;
4794
4795 radv_draw(cmd_buffer, &info);
4796 }
4797
4798 struct radv_dispatch_info {
4799 /**
4800 * Determine the layout of the grid (in block units) to be used.
4801 */
4802 uint32_t blocks[3];
4803
4804 /**
4805 * A starting offset for the grid. Even if unaligned is set, each
4806 * offset must still be a multiple of the block size.
4807 */
4808 uint32_t offsets[3];
4809 /**
4810 * Whether it's an unaligned compute dispatch.
4811 */
4812 bool unaligned;
4813
4814 /**
4815 * Indirect compute parameters resource.
4816 */
4817 struct radv_buffer *indirect;
4818 uint64_t indirect_offset;
4819 };
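/* Usage sketch (illustrative; it mirrors what the vkCmdDispatch entry
 * points further down in this file do):
 *
 *   struct radv_dispatch_info info = {};
 *   info.blocks[0] = x;  info.blocks[1] = y;  info.blocks[2] = z;
 *   radv_dispatch(cmd_buffer, &info);
 *
 * vkCmdDispatchBase additionally fills offsets[], and the indirect path
 * sets indirect/indirect_offset instead of blocks[].
 */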
4820
4821 static void
4822 radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer,
4823 const struct radv_dispatch_info *info)
4824 {
4825 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
4826 struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
4827 unsigned dispatch_initiator = cmd_buffer->device->dispatch_initiator;
4828 struct radeon_winsys *ws = cmd_buffer->device->ws;
4829 bool predicating = cmd_buffer->state.predicating;
4830 struct radeon_cmdbuf *cs = cmd_buffer->cs;
4831 struct radv_userdata_info *loc;
4832
4833 loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE,
4834 AC_UD_CS_GRID_SIZE);
4835
4836 ASSERTED unsigned cdw_max = radeon_check_space(ws, cs, 25);
4837
4838 if (info->indirect) {
4839 uint64_t va = radv_buffer_get_va(info->indirect->bo);
4840
4841 va += info->indirect->offset + info->indirect_offset;
4842
4843 radv_cs_add_buffer(ws, cs, info->indirect->bo);
4844
4845 if (loc->sgpr_idx != -1) {
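      /* Copy the x/y/z group counts from the indirect buffer into the
       * CS_GRID_SIZE user SGPRs so the shader can read the dispatch
       * dimensions.
       */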
4846 for (unsigned i = 0; i < 3; ++i) {
4847 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
4848 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
4849 COPY_DATA_DST_SEL(COPY_DATA_REG));
4850 radeon_emit(cs, (va + 4 * i));
4851 radeon_emit(cs, (va + 4 * i) >> 32);
4852 radeon_emit(cs, ((R_00B900_COMPUTE_USER_DATA_0
4853 + loc->sgpr_idx * 4) >> 2) + i);
4854 radeon_emit(cs, 0);
4855 }
4856 }
4857
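      /* On the compute (MEC) ring, DISPATCH_INDIRECT takes the 64-bit
       * VA directly; on the GFX ring the address is programmed with
       * SET_BASE and the dispatch uses a zero offset relative to it.
       */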
4858 if (radv_cmd_buffer_uses_mec(cmd_buffer)) {
4859 radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 2, predicating) |
4860 PKT3_SHADER_TYPE_S(1));
4861 radeon_emit(cs, va);
4862 radeon_emit(cs, va >> 32);
4863 radeon_emit(cs, dispatch_initiator);
4864 } else {
4865 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
4866 PKT3_SHADER_TYPE_S(1));
4867 radeon_emit(cs, 1);
4868 radeon_emit(cs, va);
4869 radeon_emit(cs, va >> 32);
4870
4871 radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, predicating) |
4872 PKT3_SHADER_TYPE_S(1));
4873 radeon_emit(cs, 0);
4874 radeon_emit(cs, dispatch_initiator);
4875 }
4876 } else {
4877 unsigned blocks[3] = { info->blocks[0], info->blocks[1], info->blocks[2] };
4878 unsigned offsets[3] = { info->offsets[0], info->offsets[1], info->offsets[2] };
4879
4880 if (info->unaligned) {
4881 unsigned *cs_block_size = compute_shader->info.cs.block_size;
4882 unsigned remainder[3];
4883
4884 /* If aligned, these should be an entire block size,
4885 * not 0.
4886 */
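      /* E.g. blocks[0] = 10 with cs_block_size[0] = 8 gives
       * remainder[0] = 2 (one partial group of 2 threads) and
       * blocks[0] rounded up to 2 thread groups below.
       */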
4887 remainder[0] = blocks[0] + cs_block_size[0] -
4888 align_u32_npot(blocks[0], cs_block_size[0]);
4889 remainder[1] = blocks[1] + cs_block_size[1] -
4890 align_u32_npot(blocks[1], cs_block_size[1]);
4891 remainder[2] = blocks[2] + cs_block_size[2] -
4892 align_u32_npot(blocks[2], cs_block_size[2]);
4893
4894 blocks[0] = round_up_u32(blocks[0], cs_block_size[0]);
4895 blocks[1] = round_up_u32(blocks[1], cs_block_size[1]);
4896 blocks[2] = round_up_u32(blocks[2], cs_block_size[2]);
4897
4898 for(unsigned i = 0; i < 3; ++i) {
4899 assert(offsets[i] % cs_block_size[i] == 0);
4900 offsets[i] /= cs_block_size[i];
4901 }
4902
4903 radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
4904 radeon_emit(cs,
4905 S_00B81C_NUM_THREAD_FULL(cs_block_size[0]) |
4906 S_00B81C_NUM_THREAD_PARTIAL(remainder[0]));
4907 radeon_emit(cs,
4908 S_00B81C_NUM_THREAD_FULL(cs_block_size[1]) |
4909 S_00B81C_NUM_THREAD_PARTIAL(remainder[1]));
4910 radeon_emit(cs,
4911 S_00B81C_NUM_THREAD_FULL(cs_block_size[2]) |
4912 S_00B81C_NUM_THREAD_PARTIAL(remainder[2]));
4913
4914 dispatch_initiator |= S_00B800_PARTIAL_TG_EN(1);
4915 }
4916
4917 if (loc->sgpr_idx != -1) {
4918 assert(loc->num_sgprs == 3);
4919
4920 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
4921 loc->sgpr_idx * 4, 3);
4922 radeon_emit(cs, blocks[0]);
4923 radeon_emit(cs, blocks[1]);
4924 radeon_emit(cs, blocks[2]);
4925 }
4926
4927 if (offsets[0] || offsets[1] || offsets[2]) {
4928 radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
4929 radeon_emit(cs, offsets[0]);
4930 radeon_emit(cs, offsets[1]);
4931 radeon_emit(cs, offsets[2]);
4932
4933 /* The blocks in the packet are not counts but end values. */
4934 for (unsigned i = 0; i < 3; ++i)
4935 blocks[i] += offsets[i];
4936 } else {
4937 dispatch_initiator |= S_00B800_FORCE_START_AT_000(1);
4938 }
4939
4940 radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, predicating) |
4941 PKT3_SHADER_TYPE_S(1));
4942 radeon_emit(cs, blocks[0]);
4943 radeon_emit(cs, blocks[1]);
4944 radeon_emit(cs, blocks[2]);
4945 radeon_emit(cs, dispatch_initiator);
4946 }
4947
4948 assert(cmd_buffer->cs->cdw <= cdw_max);
4949 }
4950
4951 static void
4952 radv_upload_compute_shader_descriptors(struct radv_cmd_buffer *cmd_buffer)
4953 {
4954 radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
4955 radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
4956 }
4957
4958 static void
4959 radv_dispatch(struct radv_cmd_buffer *cmd_buffer,
4960 const struct radv_dispatch_info *info)
4961 {
4962 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
4963 bool has_prefetch =
4964 cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7;
4965 bool pipeline_is_dirty = pipeline &&
4966 pipeline != cmd_buffer->state.emitted_compute_pipeline;
4967
4968 if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
4969 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
4970 RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
4971 RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) {
4972 /* If we have to wait for idle, set all states first, so that
4973 * all SET packets are processed in parallel with previous draw
4974 * calls. Then upload descriptors, set shader pointers,
4975 * dispatch, and prefetch at the end. This ensures that the
4976 * time the CUs are idle is very short. (there are only SET_SH
4977 * packets between the wait and the draw)
4978 */
4979 radv_emit_compute_pipeline(cmd_buffer);
4980 si_emit_cache_flush(cmd_buffer);
4981 /* <-- CUs are idle here --> */
4982
4983 radv_upload_compute_shader_descriptors(cmd_buffer);
4984
4985 radv_emit_dispatch_packets(cmd_buffer, info);
4986 /* <-- CUs are busy here --> */
4987
4988 /* Start prefetches after the dispatch has been started. Both
4989 * will run in parallel, but starting the dispatch first is
4990 * more important.
4991 */
4992 if (has_prefetch && pipeline_is_dirty) {
4993 radv_emit_shader_prefetch(cmd_buffer,
4994 pipeline->shaders[MESA_SHADER_COMPUTE]);
4995 }
4996 } else {
4997 /* If we don't wait for idle, start prefetches first, then set
4998 * states, and dispatch at the end.
4999 */
5000 si_emit_cache_flush(cmd_buffer);
5001
5002 if (has_prefetch && pipeline_is_dirty) {
5003 radv_emit_shader_prefetch(cmd_buffer,
5004 pipeline->shaders[MESA_SHADER_COMPUTE]);
5005 }
5006
5007 radv_upload_compute_shader_descriptors(cmd_buffer);
5008
5009 radv_emit_compute_pipeline(cmd_buffer);
5010 radv_emit_dispatch_packets(cmd_buffer, info);
5011 }
5012
5013 radv_cmd_buffer_after_draw(cmd_buffer, RADV_CMD_FLAG_CS_PARTIAL_FLUSH);
5014 }
5015
5016 void radv_CmdDispatchBase(
5017 VkCommandBuffer commandBuffer,
5018 uint32_t base_x,
5019 uint32_t base_y,
5020 uint32_t base_z,
5021 uint32_t x,
5022 uint32_t y,
5023 uint32_t z)
5024 {
5025 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5026 struct radv_dispatch_info info = {};
5027
5028 info.blocks[0] = x;
5029 info.blocks[1] = y;
5030 info.blocks[2] = z;
5031
5032 info.offsets[0] = base_x;
5033 info.offsets[1] = base_y;
5034 info.offsets[2] = base_z;
5035 radv_dispatch(cmd_buffer, &info);
5036 }
5037
5038 void radv_CmdDispatch(
5039 VkCommandBuffer commandBuffer,
5040 uint32_t x,
5041 uint32_t y,
5042 uint32_t z)
5043 {
5044 radv_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
5045 }
5046
5047 void radv_CmdDispatchIndirect(
5048 VkCommandBuffer commandBuffer,
5049 VkBuffer _buffer,
5050 VkDeviceSize offset)
5051 {
5052 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5053 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
5054 struct radv_dispatch_info info = {};
5055
5056 info.indirect = buffer;
5057 info.indirect_offset = offset;
5058
5059 radv_dispatch(cmd_buffer, &info);
5060 }
5061
5062 void radv_unaligned_dispatch(
5063 struct radv_cmd_buffer *cmd_buffer,
5064 uint32_t x,
5065 uint32_t y,
5066 uint32_t z)
5067 {
5068 struct radv_dispatch_info info = {};
5069
5070 info.blocks[0] = x;
5071 info.blocks[1] = y;
5072 info.blocks[2] = z;
5073 info.unaligned = 1;
5074
5075 radv_dispatch(cmd_buffer, &info);
5076 }
5077
5078 void radv_CmdEndRenderPass(
5079 VkCommandBuffer commandBuffer)
5080 {
5081 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5082
5083 radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier);
5084
5085 radv_cmd_buffer_end_subpass(cmd_buffer);
5086
5087 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
5088 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs);
5089
5090 cmd_buffer->state.pass = NULL;
5091 cmd_buffer->state.subpass = NULL;
5092 cmd_buffer->state.attachments = NULL;
5093 cmd_buffer->state.framebuffer = NULL;
5094 cmd_buffer->state.subpass_sample_locs = NULL;
5095 }
5096
5097 void radv_CmdEndRenderPass2KHR(
5098 VkCommandBuffer commandBuffer,
5099 const VkSubpassEndInfoKHR* pSubpassEndInfo)
5100 {
5101 radv_CmdEndRenderPass(commandBuffer);
5102 }
5103
5104 /*
5105 * For HTILE we have the following interesting clear words:
5106 * 0xfffff30f: Uncompressed, full depth range, for depth+stencil HTILE
5107 * 0xfffc000f: Uncompressed, full depth range, for depth only HTILE.
5108 * 0xfffffff0: Clear depth to 1.0
5109 * 0x00000000: Clear depth to 0.0
5110 */
5111 static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
5112 struct radv_image *image,
5113 const VkImageSubresourceRange *range,
5114 uint32_t clear_word)
5115 {
5116 assert(range->baseMipLevel == 0);
5117 assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_MIP_LEVELS);
5118 VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT;
5119 struct radv_cmd_state *state = &cmd_buffer->state;
5120 VkClearDepthStencilValue value = {};
5121
5122 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
5123 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
5124
5125 state->flush_bits |= radv_clear_htile(cmd_buffer, image, range, clear_word);
5126
5127 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
5128
5129 if (vk_format_is_stencil(image->vk_format))
5130 aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
5131
5132 radv_set_ds_clear_metadata(cmd_buffer, image, range, value, aspects);
5133
5134 if (radv_image_is_tc_compat_htile(image)) {
5135 /* Initialize the TC-compat metadata value to 0 because by
5136 * default DB_Z_INFO.RANGE_PRECISION is set to 1, and we only
5137 * have to conditionally update its value when performing
5138 * a fast depth clear.
5139 */
5140 radv_set_tc_compat_zrange_metadata(cmd_buffer, image, range, 0);
5141 }
5142 }
5143
5144 static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
5145 struct radv_image *image,
5146 VkImageLayout src_layout,
5147 bool src_render_loop,
5148 VkImageLayout dst_layout,
5149 bool dst_render_loop,
5150 unsigned src_queue_mask,
5151 unsigned dst_queue_mask,
5152 const VkImageSubresourceRange *range,
5153 struct radv_sample_locations_state *sample_locs)
5154 {
5155 if (!radv_image_has_htile(image))
5156 return;
5157
5158 if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
5159 uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
5160
5161 if (radv_layout_is_htile_compressed(image, dst_layout, dst_render_loop,
5162 dst_queue_mask)) {
5163 clear_value = 0;
5164 }
5165
5166 radv_initialize_htile(cmd_buffer, image, range, clear_value);
5167 } else if (!radv_layout_is_htile_compressed(image, src_layout, src_render_loop, src_queue_mask) &&
5168 radv_layout_is_htile_compressed(image, dst_layout, dst_render_loop, dst_queue_mask)) {
5169 uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
5170 radv_initialize_htile(cmd_buffer, image, range, clear_value);
5171 } else if (radv_layout_is_htile_compressed(image, src_layout, src_render_loop, src_queue_mask) &&
5172 !radv_layout_is_htile_compressed(image, dst_layout, dst_render_loop, dst_queue_mask)) {
5173 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
5174 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
5175
5176 radv_decompress_depth_image_inplace(cmd_buffer, image, range,
5177 sample_locs);
5178
5179 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
5180 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
5181 }
5182 }
5183
5184 static void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
5185 struct radv_image *image,
5186 const VkImageSubresourceRange *range,
5187 uint32_t value)
5188 {
5189 struct radv_cmd_state *state = &cmd_buffer->state;
5190
5191 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
5192 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
5193
5194 state->flush_bits |= radv_clear_cmask(cmd_buffer, image, range, value);
5195
5196 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
5197 }
5198
5199 void radv_initialize_fmask(struct radv_cmd_buffer *cmd_buffer,
5200 struct radv_image *image,
5201 const VkImageSubresourceRange *range)
5202 {
5203 struct radv_cmd_state *state = &cmd_buffer->state;
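      /* FMASK clear values, indexed by log2(sample count). */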
5204 static const uint32_t fmask_clear_values[4] = {
5205 0x00000000,
5206 0x02020202,
5207 0xE4E4E4E4,
5208 0x76543210
5209 };
5210 uint32_t log2_samples = util_logbase2(image->info.samples);
5211 uint32_t value = fmask_clear_values[log2_samples];
5212
5213 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
5214 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
5215
5216 state->flush_bits |= radv_clear_fmask(cmd_buffer, image, range, value);
5217
5218 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
5219 }
5220
5221 void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
5222 struct radv_image *image,
5223 const VkImageSubresourceRange *range, uint32_t value)
5224 {
5225 struct radv_cmd_state *state = &cmd_buffer->state;
5226 unsigned size = 0;
5227
5228 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
5229 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
5230
5231 state->flush_bits |= radv_clear_dcc(cmd_buffer, image, range, value);
5232
5233 if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX8) {
5234 /* When DCC is enabled with mipmaps, some levels might not
5235 * support fast clears and we have to initialize them as "fully
5236 * expanded".
5237 */
5238 /* Compute the size of all fast clearable DCC levels. */
5239 for (unsigned i = 0; i < image->planes[0].surface.num_dcc_levels; i++) {
5240 struct legacy_surf_level *surf_level =
5241 &image->planes[0].surface.u.legacy.level[i];
5242 unsigned dcc_fast_clear_size =
5243 surf_level->dcc_slice_fast_clear_size * image->info.array_size;
5244
5245 if (!dcc_fast_clear_size)
5246 break;
5247
5248 size = surf_level->dcc_offset + dcc_fast_clear_size;
5249 }
5250
5251 /* Initialize the mipmap levels without DCC. */
5252 if (size != image->planes[0].surface.dcc_size) {
5253 state->flush_bits |=
5254 radv_fill_buffer(cmd_buffer, image->bo,
5255 image->offset + image->dcc_offset + size,
5256 image->planes[0].surface.dcc_size - size,
5257 0xffffffff);
5258 }
5259 }
5260
5261 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
5262 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
5263 }
5264
5265 /**
5266 * Initialize DCC/FMASK/CMASK metadata for a color image.
5267 */
5268 static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer,
5269 struct radv_image *image,
5270 VkImageLayout src_layout,
5271 bool src_render_loop,
5272 VkImageLayout dst_layout,
5273 bool dst_render_loop,
5274 unsigned src_queue_mask,
5275 unsigned dst_queue_mask,
5276 const VkImageSubresourceRange *range)
5277 {
5278 if (radv_image_has_cmask(image)) {
5279 uint32_t value = 0xffffffffu; /* Fully expanded mode. */
5280
5281 /* TODO: clarify this. */
5282 if (radv_image_has_fmask(image)) {
5283 value = 0xccccccccu;
5284 }
5285
5286 radv_initialise_cmask(cmd_buffer, image, range, value);
5287 }
5288
5289 if (radv_image_has_fmask(image)) {
5290 radv_initialize_fmask(cmd_buffer, image, range);
5291 }
5292
5293 if (radv_dcc_enabled(image, range->baseMipLevel)) {
5294 uint32_t value = 0xffffffffu; /* Fully expanded mode. */
5295 bool need_decompress_pass = false;
5296
5297 if (radv_layout_dcc_compressed(cmd_buffer->device, image, dst_layout,
5298 dst_render_loop,
5299 dst_queue_mask)) {
5300 value = 0x20202020u;
5301 need_decompress_pass = true;
5302 }
5303
5304 radv_initialize_dcc(cmd_buffer, image, range, value);
5305
5306 radv_update_fce_metadata(cmd_buffer, image, range,
5307 need_decompress_pass);
5308 }
5309
5310 if (radv_image_has_cmask(image) ||
5311 radv_dcc_enabled(image, range->baseMipLevel)) {
5312 uint32_t color_values[2] = {};
5313 radv_set_color_clear_metadata(cmd_buffer, image, range,
5314 color_values);
5315 }
5316 }
5317
5318 /**
5319 * Handle color image transitions for DCC/FMASK/CMASK.
5320 */
5321 static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffer,
5322 struct radv_image *image,
5323 VkImageLayout src_layout,
5324 bool src_render_loop,
5325 VkImageLayout dst_layout,
5326 bool dst_render_loop,
5327 unsigned src_queue_mask,
5328 unsigned dst_queue_mask,
5329 const VkImageSubresourceRange *range)
5330 {
5331 if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
5332 radv_init_color_image_metadata(cmd_buffer, image,
5333 src_layout, src_render_loop,
5334 dst_layout, dst_render_loop,
5335 src_queue_mask, dst_queue_mask,
5336 range);
5337 return;
5338 }
5339
5340 if (radv_dcc_enabled(image, range->baseMipLevel)) {
5341 if (src_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
5342 radv_initialize_dcc(cmd_buffer, image, range, 0xffffffffu);
5343 } else if (radv_layout_dcc_compressed(cmd_buffer->device, image, src_layout, src_render_loop, src_queue_mask) &&
5344 !radv_layout_dcc_compressed(cmd_buffer->device, image, dst_layout, dst_render_loop, dst_queue_mask)) {
5345 radv_decompress_dcc(cmd_buffer, image, range);
5346 } else if (radv_layout_can_fast_clear(image, src_layout, src_render_loop, src_queue_mask) &&
5347 !radv_layout_can_fast_clear(image, dst_layout, dst_render_loop, dst_queue_mask)) {
5348 radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
5349 }
5350 } else if (radv_image_has_cmask(image) || radv_image_has_fmask(image)) {
5351 bool fce_eliminate = false, fmask_expand = false;
5352
5353 if (radv_layout_can_fast_clear(image, src_layout, src_render_loop, src_queue_mask) &&
5354 !radv_layout_can_fast_clear(image, dst_layout, dst_render_loop, dst_queue_mask)) {
5355 fce_eliminate = true;
5356 }
5357
5358 if (radv_image_has_fmask(image)) {
5359 if (src_layout != VK_IMAGE_LAYOUT_GENERAL &&
5360 dst_layout == VK_IMAGE_LAYOUT_GENERAL) {
5361 /* An FMASK decompress is required before doing
5362 * an MSAA decompress using FMASK.
5363 */
5364 fmask_expand = true;
5365 }
5366 }
5367
5368 if (fce_eliminate || fmask_expand)
5369 radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
5370
5371 if (fmask_expand)
5372 radv_expand_fmask_image_inplace(cmd_buffer, image, range);
5373 }
5374 }
5375
5376 static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
5377 struct radv_image *image,
5378 VkImageLayout src_layout,
5379 bool src_render_loop,
5380 VkImageLayout dst_layout,
5381 bool dst_render_loop,
5382 uint32_t src_family,
5383 uint32_t dst_family,
5384 const VkImageSubresourceRange *range,
5385 struct radv_sample_locations_state *sample_locs)
5386 {
5387 if (image->exclusive && src_family != dst_family) {
5388 /* This is an acquire or a release operation and there will be
5389 * a corresponding release/acquire. Do the transition in the
5390 * most flexible queue. */
5391
5392 assert(src_family == cmd_buffer->queue_family_index ||
5393 dst_family == cmd_buffer->queue_family_index);
5394
5395 if (src_family == VK_QUEUE_FAMILY_EXTERNAL ||
5396 src_family == VK_QUEUE_FAMILY_FOREIGN_EXT)
5397 return;
5398
5399 if (cmd_buffer->queue_family_index == RADV_QUEUE_TRANSFER)
5400 return;
5401
5402 if (cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
5403 (src_family == RADV_QUEUE_GENERAL ||
5404 dst_family == RADV_QUEUE_GENERAL))
5405 return;
5406 }
5407
5408 if (src_layout == dst_layout)
5409 return;
5410
5411 unsigned src_queue_mask =
5412 radv_image_queue_family_mask(image, src_family,
5413 cmd_buffer->queue_family_index);
5414 unsigned dst_queue_mask =
5415 radv_image_queue_family_mask(image, dst_family,
5416 cmd_buffer->queue_family_index);
5417
5418 if (vk_format_is_depth(image->vk_format)) {
5419 radv_handle_depth_image_transition(cmd_buffer, image,
5420 src_layout, src_render_loop,
5421 dst_layout, dst_render_loop,
5422 src_queue_mask, dst_queue_mask,
5423 range, sample_locs);
5424 } else {
5425 radv_handle_color_image_transition(cmd_buffer, image,
5426 src_layout, src_render_loop,
5427 dst_layout, dst_render_loop,
5428 src_queue_mask, dst_queue_mask,
5429 range);
5430 }
5431 }
5432
5433 struct radv_barrier_info {
5434 uint32_t eventCount;
5435 const VkEvent *pEvents;
5436 VkPipelineStageFlags srcStageMask;
5437 VkPipelineStageFlags dstStageMask;
5438 };
5439
5440 static void
5441 radv_barrier(struct radv_cmd_buffer *cmd_buffer,
5442 uint32_t memoryBarrierCount,
5443 const VkMemoryBarrier *pMemoryBarriers,
5444 uint32_t bufferMemoryBarrierCount,
5445 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
5446 uint32_t imageMemoryBarrierCount,
5447 const VkImageMemoryBarrier *pImageMemoryBarriers,
5448 const struct radv_barrier_info *info)
5449 {
5450 struct radeon_cmdbuf *cs = cmd_buffer->cs;
5451 enum radv_cmd_flush_bits src_flush_bits = 0;
5452 enum radv_cmd_flush_bits dst_flush_bits = 0;
5453
5454 for (unsigned i = 0; i < info->eventCount; ++i) {
5455 RADV_FROM_HANDLE(radv_event, event, info->pEvents[i]);
5456 uint64_t va = radv_buffer_get_va(event->bo);
5457
5458 radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
5459
5460 ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
5461
5462 radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, va, 1, 0xffffffff);
5463 assert(cmd_buffer->cs->cdw <= cdw_max);
5464 }
5465
5466 for (uint32_t i = 0; i < memoryBarrierCount; i++) {
5467 src_flush_bits |= radv_src_access_flush(cmd_buffer, pMemoryBarriers[i].srcAccessMask,
5468 NULL);
5469 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pMemoryBarriers[i].dstAccessMask,
5470 NULL);
5471 }
5472
5473 for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
5474 src_flush_bits |= radv_src_access_flush(cmd_buffer, pBufferMemoryBarriers[i].srcAccessMask,
5475 NULL);
5476 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pBufferMemoryBarriers[i].dstAccessMask,
5477 NULL);
5478 }
5479
5480 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
5481 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
5482
5483 src_flush_bits |= radv_src_access_flush(cmd_buffer, pImageMemoryBarriers[i].srcAccessMask,
5484 image);
5485 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pImageMemoryBarriers[i].dstAccessMask,
5486 image);
5487 }
5488
5489 /* The Vulkan spec 1.1.98 says:
5490 *
5491 * "An execution dependency with only
5492 * VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT in the destination stage mask
5493 * will only prevent that stage from executing in subsequently
5494 * submitted commands. As this stage does not perform any actual
5495 * execution, this is not observable - in effect, it does not delay
5496 * processing of subsequent commands. Similarly an execution dependency
5497 * with only VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT in the source stage mask
5498 * will effectively not wait for any prior commands to complete."
5499 */
5500 if (info->dstStageMask != VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT)
5501 radv_stage_flush(cmd_buffer, info->srcStageMask);
5502 cmd_buffer->state.flush_bits |= src_flush_bits;
5503
5504 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
5505 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
5506
5507 const struct VkSampleLocationsInfoEXT *sample_locs_info =
5508 vk_find_struct_const(pImageMemoryBarriers[i].pNext,
5509 SAMPLE_LOCATIONS_INFO_EXT);
5510 struct radv_sample_locations_state sample_locations = {};
5511
5512 if (sample_locs_info) {
5513 assert(image->flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT);
5514 sample_locations.per_pixel = sample_locs_info->sampleLocationsPerPixel;
5515 sample_locations.grid_size = sample_locs_info->sampleLocationGridSize;
5516 sample_locations.count = sample_locs_info->sampleLocationsCount;
5517 typed_memcpy(&sample_locations.locations[0],
5518 sample_locs_info->pSampleLocations,
5519 sample_locs_info->sampleLocationsCount);
5520 }
5521
5522 radv_handle_image_transition(cmd_buffer, image,
5523 pImageMemoryBarriers[i].oldLayout,
5524 false, /* Outside of a renderpass we are never in a renderloop */
5525 pImageMemoryBarriers[i].newLayout,
5526 false, /* Outside of a renderpass we are never in a renderloop */
5527 pImageMemoryBarriers[i].srcQueueFamilyIndex,
5528 pImageMemoryBarriers[i].dstQueueFamilyIndex,
5529 &pImageMemoryBarriers[i].subresourceRange,
5530 sample_locs_info ? &sample_locations : NULL);
5531 }
5532
5533 /* Make sure CP DMA is idle because the driver might have performed a
5534 * DMA operation for copying or filling buffers/images.
5535 */
5536 if (info->srcStageMask & (VK_PIPELINE_STAGE_TRANSFER_BIT |
5537 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT))
5538 si_cp_dma_wait_for_idle(cmd_buffer);
5539
5540 cmd_buffer->state.flush_bits |= dst_flush_bits;
5541 }
5542
5543 void radv_CmdPipelineBarrier(
5544 VkCommandBuffer commandBuffer,
5545 VkPipelineStageFlags srcStageMask,
5546 VkPipelineStageFlags destStageMask,
5547 VkBool32 byRegion,
5548 uint32_t memoryBarrierCount,
5549 const VkMemoryBarrier* pMemoryBarriers,
5550 uint32_t bufferMemoryBarrierCount,
5551 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
5552 uint32_t imageMemoryBarrierCount,
5553 const VkImageMemoryBarrier* pImageMemoryBarriers)
5554 {
5555 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5556 struct radv_barrier_info info;
5557
5558 info.eventCount = 0;
5559 info.pEvents = NULL;
5560 info.srcStageMask = srcStageMask;
5561 info.dstStageMask = destStageMask;
5562
5563 radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
5564 bufferMemoryBarrierCount, pBufferMemoryBarriers,
5565 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
5566 }
5567
5568
5569 static void write_event(struct radv_cmd_buffer *cmd_buffer,
5570 struct radv_event *event,
5571 VkPipelineStageFlags stageMask,
5572 unsigned value)
5573 {
5574 struct radeon_cmdbuf *cs = cmd_buffer->cs;
5575 uint64_t va = radv_buffer_get_va(event->bo);
5576
5577 si_emit_cache_flush(cmd_buffer);
5578
5579 radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
5580
5581 ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 21);
5582
5583 /* Flags that only require a top-of-pipe event. */
5584 VkPipelineStageFlags top_of_pipe_flags =
5585 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
5586
5587 /* Flags that only require a post-index-fetch event. */
5588 VkPipelineStageFlags post_index_fetch_flags =
5589 top_of_pipe_flags |
5590 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
5591 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
5592
5593 /* Make sure CP DMA is idle because the driver might have performed a
5594 * DMA operation for copying or filling buffers/images.
5595 */
5596 if (stageMask & (VK_PIPELINE_STAGE_TRANSFER_BIT |
5597 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT))
5598 si_cp_dma_wait_for_idle(cmd_buffer);
5599
5600 /* TODO: Emit EOS events for syncing PS/CS stages. */
5601
5602 if (!(stageMask & ~top_of_pipe_flags)) {
5603 /* Just need to sync the PFP engine. */
5604 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
5605 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
5606 S_370_WR_CONFIRM(1) |
5607 S_370_ENGINE_SEL(V_370_PFP));
5608 radeon_emit(cs, va);
5609 radeon_emit(cs, va >> 32);
5610 radeon_emit(cs, value);
5611 } else if (!(stageMask & ~post_index_fetch_flags)) {
5612 /* Sync ME because PFP reads index and indirect buffers. */
5613 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
5614 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
5615 S_370_WR_CONFIRM(1) |
5616 S_370_ENGINE_SEL(V_370_ME));
5617 radeon_emit(cs, va);
5618 radeon_emit(cs, va >> 32);
5619 radeon_emit(cs, value);
5620 } else {
5621 /* Otherwise, sync all prior GPU work using an EOP event. */
5622 si_cs_emit_write_event_eop(cs,
5623 cmd_buffer->device->physical_device->rad_info.chip_class,
5624 radv_cmd_buffer_uses_mec(cmd_buffer),
5625 V_028A90_BOTTOM_OF_PIPE_TS, 0,
5626 EOP_DST_SEL_MEM,
5627 EOP_DATA_SEL_VALUE_32BIT, va, value,
5628 cmd_buffer->gfx9_eop_bug_va);
5629 }
5630
5631 assert(cmd_buffer->cs->cdw <= cdw_max);
5632 }
5633
5634 void radv_CmdSetEvent(VkCommandBuffer commandBuffer,
5635 VkEvent _event,
5636 VkPipelineStageFlags stageMask)
5637 {
5638 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5639 RADV_FROM_HANDLE(radv_event, event, _event);
5640
5641 write_event(cmd_buffer, event, stageMask, 1);
5642 }
5643
5644 void radv_CmdResetEvent(VkCommandBuffer commandBuffer,
5645 VkEvent _event,
5646 VkPipelineStageFlags stageMask)
5647 {
5648 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5649 RADV_FROM_HANDLE(radv_event, event, _event);
5650
5651 write_event(cmd_buffer, event, stageMask, 0);
5652 }
5653
5654 void radv_CmdWaitEvents(VkCommandBuffer commandBuffer,
5655 uint32_t eventCount,
5656 const VkEvent* pEvents,
5657 VkPipelineStageFlags srcStageMask,
5658 VkPipelineStageFlags dstStageMask,
5659 uint32_t memoryBarrierCount,
5660 const VkMemoryBarrier* pMemoryBarriers,
5661 uint32_t bufferMemoryBarrierCount,
5662 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
5663 uint32_t imageMemoryBarrierCount,
5664 const VkImageMemoryBarrier* pImageMemoryBarriers)
5665 {
5666 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5667 struct radv_barrier_info info;
5668
5669 info.eventCount = eventCount;
5670 info.pEvents = pEvents;
5671 info.srcStageMask = 0;
     info.dstStageMask = dstStageMask;
5672 
5673 radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
5674 bufferMemoryBarrierCount, pBufferMemoryBarriers,
5675 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
5676 }
5677
5678
5679 void radv_CmdSetDeviceMask(VkCommandBuffer commandBuffer,
5680 uint32_t deviceMask)
5681 {
5682 /* No-op */
5683 }
5684
5685 /* VK_EXT_conditional_rendering */
5686 void radv_CmdBeginConditionalRenderingEXT(
5687 VkCommandBuffer commandBuffer,
5688 const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin)
5689 {
5690 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5691 RADV_FROM_HANDLE(radv_buffer, buffer, pConditionalRenderingBegin->buffer);
5692 struct radeon_cmdbuf *cs = cmd_buffer->cs;
5693 bool draw_visible = true;
5694 uint64_t pred_value = 0;
5695 uint64_t va, new_va;
5696 unsigned pred_offset;
5697
5698 va = radv_buffer_get_va(buffer->bo) + pConditionalRenderingBegin->offset;
5699
5700 /* By default, if the 32-bit value at offset in buffer memory is zero,
5701 * then the rendering commands are discarded, otherwise they are
5702 * executed as normal. If the inverted flag is set, all commands are
5703 * discarded if the value is non-zero.
5704 */
5705 if (pConditionalRenderingBegin->flags &
5706 VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT) {
5707 draw_visible = false;
5708 }
5709
5710 si_emit_cache_flush(cmd_buffer);
5711
5712 /* From the Vulkan spec 1.1.107:
5713 *
5714 * "If the 32-bit value at offset in buffer memory is zero, then the
5715 * rendering commands are discarded, otherwise they are executed as
5716 * normal. If the value of the predicate in buffer memory changes while
5717 * conditional rendering is active, the rendering commands may be
5718 * discarded in an implementation-dependent way. Some implementations
5719 * may latch the value of the predicate upon beginning conditional
5720 * rendering while others may read it before every rendering command."
5721 *
5722 * But the AMD hardware treats the predicate as a 64-bit value, which
5723 * means we need a workaround in the driver. Luckily, it's not required
5724 * to support the case where the value changes while predication is active.
5725 *
5726 * The workaround is as follows:
5727 * 1) allocate a 64-bit value in the upload BO and initialize it to 0
5728 * 2) copy the 32-bit predicate value to the upload BO
5729 * 3) use the newly allocated VA for predication
5730 *
5731 * Based on the conditionalrender demo, it's faster to do the COPY_DATA
5732 * in ME (+ sync PFP) instead of PFP.
5733 */
5734 radv_cmd_buffer_upload_data(cmd_buffer, 8, 16, &pred_value, &pred_offset);
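      /* The 8 bytes uploaded above are the zero-initialized 64-bit slot
       * (step 1); the COPY_DATA below only fills the low 32 bits, so the
       * high dword stays 0 as the hardware expects.
       */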
5735
5736 new_va = radv_buffer_get_va(cmd_buffer->upload.upload_bo) + pred_offset;
5737
5738 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
5739 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
5740 COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) |
5741 COPY_DATA_WR_CONFIRM);
5742 radeon_emit(cs, va);
5743 radeon_emit(cs, va >> 32);
5744 radeon_emit(cs, new_va);
5745 radeon_emit(cs, new_va >> 32);
5746
5747 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
5748 radeon_emit(cs, 0);
5749
5750 /* Enable predication for this command buffer. */
5751 si_emit_set_predication_state(cmd_buffer, draw_visible, new_va);
5752 cmd_buffer->state.predicating = true;
5753
5754 /* Store conditional rendering user info. */
5755 cmd_buffer->state.predication_type = draw_visible;
5756 cmd_buffer->state.predication_va = new_va;
5757 }
5758
5759 void radv_CmdEndConditionalRenderingEXT(
5760 VkCommandBuffer commandBuffer)
5761 {
5762 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5763
5764 /* Disable predication for this command buffer. */
5765 si_emit_set_predication_state(cmd_buffer, false, 0);
5766 cmd_buffer->state.predicating = false;
5767
5768 /* Reset conditional rendering user info. */
5769 cmd_buffer->state.predication_type = -1;
5770 cmd_buffer->state.predication_va = 0;
5771 }
5772
5773 /* VK_EXT_transform_feedback */
5774 void radv_CmdBindTransformFeedbackBuffersEXT(
5775 VkCommandBuffer commandBuffer,
5776 uint32_t firstBinding,
5777 uint32_t bindingCount,
5778 const VkBuffer* pBuffers,
5779 const VkDeviceSize* pOffsets,
5780 const VkDeviceSize* pSizes)
5781 {
5782 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5783 struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
5784 uint8_t enabled_mask = 0;
5785
5786 assert(firstBinding + bindingCount <= MAX_SO_BUFFERS);
5787 for (uint32_t i = 0; i < bindingCount; i++) {
5788 uint32_t idx = firstBinding + i;
5789
5790 sb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
5791 sb[idx].offset = pOffsets[i];
5792 sb[idx].size = pSizes[i];
5793
5794 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
5795 sb[idx].buffer->bo);
5796
5797 enabled_mask |= 1 << idx;
5798 }
5799
5800 cmd_buffer->state.streamout.enabled_mask |= enabled_mask;
5801
5802 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_STREAMOUT_BUFFER;
5803 }
5804
5805 static void
5806 radv_emit_streamout_enable(struct radv_cmd_buffer *cmd_buffer)
5807 {
5808 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
5809 struct radeon_cmdbuf *cs = cmd_buffer->cs;
5810
5811 radeon_set_context_reg_seq(cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
5812 radeon_emit(cs,
5813 S_028B94_STREAMOUT_0_EN(so->streamout_enabled) |
5814 S_028B94_RAST_STREAM(0) |
5815 S_028B94_STREAMOUT_1_EN(so->streamout_enabled) |
5816 S_028B94_STREAMOUT_2_EN(so->streamout_enabled) |
5817 S_028B94_STREAMOUT_3_EN(so->streamout_enabled));
5818 radeon_emit(cs, so->hw_enabled_mask &
5819 so->enabled_stream_buffers_mask);
5820
5821 cmd_buffer->state.context_roll_without_scissor_emitted = true;
5822 }
5823
5824 static void
5825 radv_set_streamout_enable(struct radv_cmd_buffer *cmd_buffer, bool enable)
5826 {
5827 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
5828 bool old_streamout_enabled = so->streamout_enabled;
5829 uint32_t old_hw_enabled_mask = so->hw_enabled_mask;
5830
5831 so->streamout_enabled = enable;
5832
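      /* Each of the four streams has four buffer-enable bits in the
       * streamout config, so replicate the per-buffer enable mask for
       * every stream.
       */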
5833 so->hw_enabled_mask = so->enabled_mask |
5834 (so->enabled_mask << 4) |
5835 (so->enabled_mask << 8) |
5836 (so->enabled_mask << 12);
5837
5838 if (!cmd_buffer->device->physical_device->use_ngg_streamout &&
5839 ((old_streamout_enabled != so->streamout_enabled) ||
5840 (old_hw_enabled_mask != so->hw_enabled_mask)))
5841 radv_emit_streamout_enable(cmd_buffer);
5842
5843 if (cmd_buffer->device->physical_device->use_ngg_streamout)
5844 cmd_buffer->gds_needed = true;
5845 }
5846
5847 static void radv_flush_vgt_streamout(struct radv_cmd_buffer *cmd_buffer)
5848 {
5849 struct radeon_cmdbuf *cs = cmd_buffer->cs;
5850 unsigned reg_strmout_cntl;
5851
5852 /* The register is at different places on different ASICs. */
5853 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
5854 reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
5855 radeon_set_uconfig_reg(cs, reg_strmout_cntl, 0);
5856 } else {
5857 reg_strmout_cntl = R_0084FC_CP_STRMOUT_CNTL;
5858 radeon_set_config_reg(cs, reg_strmout_cntl, 0);
5859 }
5860
5861 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
5862 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0));
5863
5864 radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
5865 radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
5866 radeon_emit(cs, reg_strmout_cntl >> 2); /* register */
5867 radeon_emit(cs, 0);
5868 radeon_emit(cs, S_0084FC_OFFSET_UPDATE_DONE(1)); /* reference value */
5869 radeon_emit(cs, S_0084FC_OFFSET_UPDATE_DONE(1)); /* mask */
5870 radeon_emit(cs, 4); /* poll interval */
5871 }
5872
5873 static void
5874 radv_emit_streamout_begin(struct radv_cmd_buffer *cmd_buffer,
5875 uint32_t firstCounterBuffer,
5876 uint32_t counterBufferCount,
5877 const VkBuffer *pCounterBuffers,
5878 const VkDeviceSize *pCounterBufferOffsets)
5879
5880 {
5881 struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
5882 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
5883 struct radeon_cmdbuf *cs = cmd_buffer->cs;
5884 uint32_t i;
5885
5886 radv_flush_vgt_streamout(cmd_buffer);
5887
5888 assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
5889 for_each_bit(i, so->enabled_mask) {
5890 int32_t counter_buffer_idx = i - firstCounterBuffer;
5891 if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
5892 counter_buffer_idx = -1;
5893
5894 /* AMD GCN binds streamout buffers as shader resources.
5895 * VGT only counts primitives and tells the shader through
5896 * SGPRs what to do.
5897 */
5898 radeon_set_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 2);
5899 radeon_emit(cs, sb[i].size >> 2); /* BUFFER_SIZE (in DW) */
5900 radeon_emit(cs, so->stride_in_dw[i]); /* VTX_STRIDE (in DW) */
5901
5902 cmd_buffer->state.context_roll_without_scissor_emitted = true;
5903
5904 if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) {
5905 /* The array of counter buffers is optional. */
5906 RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
5907 uint64_t va = radv_buffer_get_va(buffer->bo);
5908
5909 va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx];
5910
5911 /* Append */
5912 radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
5913 radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
5914 STRMOUT_DATA_TYPE(1) | /* offset in bytes */
5915 STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM)); /* control */
5916 radeon_emit(cs, 0); /* unused */
5917 radeon_emit(cs, 0); /* unused */
5918 radeon_emit(cs, va); /* src address lo */
5919 radeon_emit(cs, va >> 32); /* src address hi */
5920
5921 radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
5922 } else {
5923 /* Start from the beginning. */
5924 radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
5925 radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
5926 STRMOUT_DATA_TYPE(1) | /* offset in bytes */
5927 STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET)); /* control */
5928 radeon_emit(cs, 0); /* unused */
5929 radeon_emit(cs, 0); /* unused */
5930 radeon_emit(cs, 0); /* unused */
5931 radeon_emit(cs, 0); /* unused */
5932 }
5933 }
5934
5935 radv_set_streamout_enable(cmd_buffer, true);
5936 }
5937
5938 static void
5939 gfx10_emit_streamout_begin(struct radv_cmd_buffer *cmd_buffer,
5940 uint32_t firstCounterBuffer,
5941 uint32_t counterBufferCount,
5942 const VkBuffer *pCounterBuffers,
5943 const VkDeviceSize *pCounterBufferOffsets)
5944 {
5945 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
5946 unsigned last_target = util_last_bit(so->enabled_mask) - 1;
5947 struct radeon_cmdbuf *cs = cmd_buffer->cs;
5948 uint32_t i;
5949
5950 assert(cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10);
5951 assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
5952
5953 /* Sync because the next streamout operation will overwrite GDS and we
5954 * have to make sure it's idle.
5955 * TODO: Improve by tracking if there is a streamout operation in
5956 * flight.
5957 */
5958 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
5959 si_emit_cache_flush(cmd_buffer);
5960
5961 for_each_bit(i, so->enabled_mask) {
5962 int32_t counter_buffer_idx = i - firstCounterBuffer;
5963 if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
5964 counter_buffer_idx = -1;
5965
5966 bool append = counter_buffer_idx >= 0 &&
5967 pCounterBuffers && pCounterBuffers[counter_buffer_idx];
5968 uint64_t va = 0;
5969
5970 if (append) {
5971 RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
5972
5973 va += radv_buffer_get_va(buffer->bo);
5974 va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx];
5975
5976 radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
5977 }
5978
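      /* Seed GDS dword i with the buffer's saved counter value when
       * appending, or with 0 when starting from the beginning; the NGG
       * streamout path presumably reads this as the running amount
       * written to buffer i.
       */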
5979 radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
5980 radeon_emit(cs, S_411_SRC_SEL(append ? V_411_SRC_ADDR_TC_L2 : V_411_DATA) |
5981 S_411_DST_SEL(V_411_GDS) |
5982 S_411_CP_SYNC(i == last_target));
5983 radeon_emit(cs, va);
5984 radeon_emit(cs, va >> 32);
5985 radeon_emit(cs, 4 * i); /* destination in GDS */
5986 radeon_emit(cs, 0);
5987 radeon_emit(cs, S_414_BYTE_COUNT_GFX9(4) |
5988 S_414_DISABLE_WR_CONFIRM_GFX9(i != last_target));
5989 }
5990
5991 radv_set_streamout_enable(cmd_buffer, true);
5992 }
5993
5994 void radv_CmdBeginTransformFeedbackEXT(
5995 VkCommandBuffer commandBuffer,
5996 uint32_t firstCounterBuffer,
5997 uint32_t counterBufferCount,
5998 const VkBuffer* pCounterBuffers,
5999 const VkDeviceSize* pCounterBufferOffsets)
6000 {
6001 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
6002
6003 if (cmd_buffer->device->physical_device->use_ngg_streamout) {
6004 gfx10_emit_streamout_begin(cmd_buffer,
6005 firstCounterBuffer, counterBufferCount,
6006 pCounterBuffers, pCounterBufferOffsets);
6007 } else {
6008 radv_emit_streamout_begin(cmd_buffer,
6009 firstCounterBuffer, counterBufferCount,
6010 pCounterBuffers, pCounterBufferOffsets);
6011 }
6012 }
6013
6014 static void
6015 radv_emit_streamout_end(struct radv_cmd_buffer *cmd_buffer,
6016 uint32_t firstCounterBuffer,
6017 uint32_t counterBufferCount,
6018 const VkBuffer *pCounterBuffers,
6019 const VkDeviceSize *pCounterBufferOffsets)
6020 {
6021 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
6022 struct radeon_cmdbuf *cs = cmd_buffer->cs;
6023 uint32_t i;
6024
6025 radv_flush_vgt_streamout(cmd_buffer);
6026
6027 assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
6028 for_each_bit(i, so->enabled_mask) {
6029 int32_t counter_buffer_idx = i - firstCounterBuffer;
6030 if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
6031 counter_buffer_idx = -1;
6032
6033 if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) {
6034 /* The array of counter buffers is optional. */
6035 RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
6036 uint64_t va = radv_buffer_get_va(buffer->bo);
6037
6038 va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx];
6039
6040 radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
6041 radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
6042 STRMOUT_DATA_TYPE(1) | /* offset in bytes */
6043 STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
6044 STRMOUT_STORE_BUFFER_FILLED_SIZE); /* control */
6045 radeon_emit(cs, va); /* dst address lo */
6046 radeon_emit(cs, va >> 32); /* dst address hi */
6047 radeon_emit(cs, 0); /* unused */
6048 radeon_emit(cs, 0); /* unused */
6049
6050 radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
6051 }
6052
6053 /* Deactivate transform feedback by zeroing the buffer size.
6054 * The counters (primitives generated, primitives emitted) may
6055 * be enabled even if there is no buffer bound. This ensures
6056 * that the primitives-emitted query won't increment.
6057 */
6058 radeon_set_context_reg(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 0);
6059
6060 cmd_buffer->state.context_roll_without_scissor_emitted = true;
6061 }
6062
6063 radv_set_streamout_enable(cmd_buffer, false);
6064 }
6065
6066 static void
6067 gfx10_emit_streamout_end(struct radv_cmd_buffer *cmd_buffer,
6068 uint32_t firstCounterBuffer,
6069 uint32_t counterBufferCount,
6070 const VkBuffer *pCounterBuffers,
6071 const VkDeviceSize *pCounterBufferOffsets)
6072 {
6073 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
6074 struct radeon_cmdbuf *cs = cmd_buffer->cs;
6075 uint32_t i;
6076
6077 assert(cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10);
6078 assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
6079
6080 for_each_bit(i, so->enabled_mask) {
6081 int32_t counter_buffer_idx = i - firstCounterBuffer;
6082 if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
6083 counter_buffer_idx = -1;
6084
6085 if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) {
6086 /* The array of counter buffers is optional. */
6087 RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
6088 uint64_t va = radv_buffer_get_va(buffer->bo);
6089
6090 va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx];
6091
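      /* Write the final GDS counter for buffer i back into the counter
       * buffer with an EOP event, so a later
       * vkCmdBeginTransformFeedbackEXT can resume appending from it.
       */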
6092 si_cs_emit_write_event_eop(cs,
6093 cmd_buffer->device->physical_device->rad_info.chip_class,
6094 radv_cmd_buffer_uses_mec(cmd_buffer),
6095 V_028A90_PS_DONE, 0,
6096 EOP_DST_SEL_TC_L2,
6097 EOP_DATA_SEL_GDS,
6098 va, EOP_DATA_GDS(i, 1), 0);
6099
6100 radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
6101 }
6102 }
6103
6104 radv_set_streamout_enable(cmd_buffer, false);
6105 }
6106
6107 void radv_CmdEndTransformFeedbackEXT(
6108 VkCommandBuffer commandBuffer,
6109 uint32_t firstCounterBuffer,
6110 uint32_t counterBufferCount,
6111 const VkBuffer* pCounterBuffers,
6112 const VkDeviceSize* pCounterBufferOffsets)
6113 {
6114 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
6115
6116 if (cmd_buffer->device->physical_device->use_ngg_streamout) {
6117 gfx10_emit_streamout_end(cmd_buffer,
6118 firstCounterBuffer, counterBufferCount,
6119 pCounterBuffers, pCounterBufferOffsets);
6120 } else {
6121 radv_emit_streamout_end(cmd_buffer,
6122 firstCounterBuffer, counterBufferCount,
6123 pCounterBuffers, pCounterBufferOffsets);
6124 }
6125 }
6126
6127 void radv_CmdDrawIndirectByteCountEXT(
6128 VkCommandBuffer commandBuffer,
6129 uint32_t instanceCount,
6130 uint32_t firstInstance,
6131 VkBuffer _counterBuffer,
6132 VkDeviceSize counterBufferOffset,
6133 uint32_t counterOffset,
6134 uint32_t vertexStride)
6135 {
6136 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
6137 RADV_FROM_HANDLE(radv_buffer, counterBuffer, _counterBuffer);
6138 struct radv_draw_info info = {};
6139
6140 info.instance_count = instanceCount;
6141 info.first_instance = firstInstance;
6142 info.strmout_buffer = counterBuffer;
6143 info.strmout_buffer_offset = counterBufferOffset;
6144 info.stride = vertexStride;
6145
6146 radv_draw(cmd_buffer, &info);
6147 }
6148
6149 /* VK_AMD_buffer_marker */
6150 void radv_CmdWriteBufferMarkerAMD(
6151 VkCommandBuffer commandBuffer,
6152 VkPipelineStageFlagBits pipelineStage,
6153 VkBuffer dstBuffer,
6154 VkDeviceSize dstOffset,
6155 uint32_t marker)
6156 {
6157 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
6158 RADV_FROM_HANDLE(radv_buffer, buffer, dstBuffer);
6159 struct radeon_cmdbuf *cs = cmd_buffer->cs;
6160 uint64_t va = radv_buffer_get_va(buffer->bo) + dstOffset;
6161
6162 si_emit_cache_flush(cmd_buffer);
6163
6164 if (!(pipelineStage & ~VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT)) {
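      /* Only top-of-pipe synchronization is requested, so the CP can
       * write the marker immediately with COPY_DATA; otherwise an EOP
       * event below waits for bottom-of-pipe.
       */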
6165 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
6166 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) |
6167 COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) |
6168 COPY_DATA_WR_CONFIRM);
6169 radeon_emit(cs, marker);
6170 radeon_emit(cs, 0);
6171 radeon_emit(cs, va);
6172 radeon_emit(cs, va >> 32);
6173 } else {
6174 si_cs_emit_write_event_eop(cs,
6175 cmd_buffer->device->physical_device->rad_info.chip_class,
6176 radv_cmd_buffer_uses_mec(cmd_buffer),
6177 V_028A90_BOTTOM_OF_PIPE_TS, 0,
6178 EOP_DST_SEL_MEM,
6179 EOP_DATA_SEL_VALUE_32BIT,
6180 va, marker,
6181 cmd_buffer->gfx9_eop_bug_va);
6182 }
6183 }