mesa.git: src/amd/vulkan/radv_cmd_buffer.c @ 1a7d59bc0539e89d57d4d98bf33c3a1cd93cafdb
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 #include "radv_private.h"
29 #include "radv_radeon_winsys.h"
30 #include "radv_shader.h"
31 #include "radv_cs.h"
32 #include "sid.h"
33 #include "vk_format.h"
34 #include "vk_util.h"
35 #include "radv_debug.h"
36 #include "radv_meta.h"
37
38 #include "ac_debug.h"
39
40 enum {
41 RADV_PREFETCH_VBO_DESCRIPTORS = (1 << 0),
42 RADV_PREFETCH_VS = (1 << 1),
43 RADV_PREFETCH_TCS = (1 << 2),
44 RADV_PREFETCH_TES = (1 << 3),
45 RADV_PREFETCH_GS = (1 << 4),
46 RADV_PREFETCH_PS = (1 << 5),
47 RADV_PREFETCH_SHADERS = (RADV_PREFETCH_VS |
48 RADV_PREFETCH_TCS |
49 RADV_PREFETCH_TES |
50 RADV_PREFETCH_GS |
51 RADV_PREFETCH_PS)
52 };
53
54 static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
55 struct radv_image *image,
56 VkImageLayout src_layout,
57 bool src_render_loop,
58 VkImageLayout dst_layout,
59 bool dst_render_loop,
60 uint32_t src_family,
61 uint32_t dst_family,
62 const VkImageSubresourceRange *range,
63 struct radv_sample_locations_state *sample_locs);
64
65 const struct radv_dynamic_state default_dynamic_state = {
66 .viewport = {
67 .count = 0,
68 },
69 .scissor = {
70 .count = 0,
71 },
72 .line_width = 1.0f,
73 .depth_bias = {
74 .bias = 0.0f,
75 .clamp = 0.0f,
76 .slope = 0.0f,
77 },
78 .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
79 .depth_bounds = {
80 .min = 0.0f,
81 .max = 1.0f,
82 },
83 .stencil_compare_mask = {
84 .front = ~0u,
85 .back = ~0u,
86 },
87 .stencil_write_mask = {
88 .front = ~0u,
89 .back = ~0u,
90 },
91 .stencil_reference = {
92 .front = 0u,
93 .back = 0u,
94 },
95 };
96
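/**
 * Copy the dynamic state from the pipeline into the command buffer. Only
 * states whose values actually differ from the current ones are copied and
 * flagged in cmd_buffer->state.dirty, so unchanged state is not re-emitted.
 */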
97 static void
98 radv_bind_dynamic_state(struct radv_cmd_buffer *cmd_buffer,
99 const struct radv_dynamic_state *src)
100 {
101 struct radv_dynamic_state *dest = &cmd_buffer->state.dynamic;
102 uint32_t copy_mask = src->mask;
103 uint32_t dest_mask = 0;
104
105 /* Make sure to copy the number of viewports/scissors because they can
106 * only be specified at pipeline creation time.
107 */
108 dest->viewport.count = src->viewport.count;
109 dest->scissor.count = src->scissor.count;
110 dest->discard_rectangle.count = src->discard_rectangle.count;
111 dest->sample_location.count = src->sample_location.count;
112
113 if (copy_mask & RADV_DYNAMIC_VIEWPORT) {
114 if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
115 src->viewport.count * sizeof(VkViewport))) {
116 typed_memcpy(dest->viewport.viewports,
117 src->viewport.viewports,
118 src->viewport.count);
119 dest_mask |= RADV_DYNAMIC_VIEWPORT;
120 }
121 }
122
123 if (copy_mask & RADV_DYNAMIC_SCISSOR) {
124 if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
125 src->scissor.count * sizeof(VkRect2D))) {
126 typed_memcpy(dest->scissor.scissors,
127 src->scissor.scissors, src->scissor.count);
128 dest_mask |= RADV_DYNAMIC_SCISSOR;
129 }
130 }
131
132 if (copy_mask & RADV_DYNAMIC_LINE_WIDTH) {
133 if (dest->line_width != src->line_width) {
134 dest->line_width = src->line_width;
135 dest_mask |= RADV_DYNAMIC_LINE_WIDTH;
136 }
137 }
138
139 if (copy_mask & RADV_DYNAMIC_DEPTH_BIAS) {
140 if (memcmp(&dest->depth_bias, &src->depth_bias,
141 sizeof(src->depth_bias))) {
142 dest->depth_bias = src->depth_bias;
143 dest_mask |= RADV_DYNAMIC_DEPTH_BIAS;
144 }
145 }
146
147 if (copy_mask & RADV_DYNAMIC_BLEND_CONSTANTS) {
148 if (memcmp(&dest->blend_constants, &src->blend_constants,
149 sizeof(src->blend_constants))) {
150 typed_memcpy(dest->blend_constants,
151 src->blend_constants, 4);
152 dest_mask |= RADV_DYNAMIC_BLEND_CONSTANTS;
153 }
154 }
155
156 if (copy_mask & RADV_DYNAMIC_DEPTH_BOUNDS) {
157 if (memcmp(&dest->depth_bounds, &src->depth_bounds,
158 sizeof(src->depth_bounds))) {
159 dest->depth_bounds = src->depth_bounds;
160 dest_mask |= RADV_DYNAMIC_DEPTH_BOUNDS;
161 }
162 }
163
164 if (copy_mask & RADV_DYNAMIC_STENCIL_COMPARE_MASK) {
165 if (memcmp(&dest->stencil_compare_mask,
166 &src->stencil_compare_mask,
167 sizeof(src->stencil_compare_mask))) {
168 dest->stencil_compare_mask = src->stencil_compare_mask;
169 dest_mask |= RADV_DYNAMIC_STENCIL_COMPARE_MASK;
170 }
171 }
172
173 if (copy_mask & RADV_DYNAMIC_STENCIL_WRITE_MASK) {
174 if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
175 sizeof(src->stencil_write_mask))) {
176 dest->stencil_write_mask = src->stencil_write_mask;
177 dest_mask |= RADV_DYNAMIC_STENCIL_WRITE_MASK;
178 }
179 }
180
181 if (copy_mask & RADV_DYNAMIC_STENCIL_REFERENCE) {
182 if (memcmp(&dest->stencil_reference, &src->stencil_reference,
183 sizeof(src->stencil_reference))) {
184 dest->stencil_reference = src->stencil_reference;
185 dest_mask |= RADV_DYNAMIC_STENCIL_REFERENCE;
186 }
187 }
188
189 if (copy_mask & RADV_DYNAMIC_DISCARD_RECTANGLE) {
190 if (memcmp(&dest->discard_rectangle.rectangles, &src->discard_rectangle.rectangles,
191 src->discard_rectangle.count * sizeof(VkRect2D))) {
192 typed_memcpy(dest->discard_rectangle.rectangles,
193 src->discard_rectangle.rectangles,
194 src->discard_rectangle.count);
195 dest_mask |= RADV_DYNAMIC_DISCARD_RECTANGLE;
196 }
197 }
198
199 if (copy_mask & RADV_DYNAMIC_SAMPLE_LOCATIONS) {
200 if (dest->sample_location.per_pixel != src->sample_location.per_pixel ||
201 dest->sample_location.grid_size.width != src->sample_location.grid_size.width ||
202 dest->sample_location.grid_size.height != src->sample_location.grid_size.height ||
203 memcmp(&dest->sample_location.locations,
204 &src->sample_location.locations,
205 src->sample_location.count * sizeof(VkSampleLocationEXT))) {
206 dest->sample_location.per_pixel = src->sample_location.per_pixel;
207 dest->sample_location.grid_size = src->sample_location.grid_size;
208 typed_memcpy(dest->sample_location.locations,
209 src->sample_location.locations,
210 src->sample_location.count);
211 dest_mask |= RADV_DYNAMIC_SAMPLE_LOCATIONS;
212 }
213 }
214
215 cmd_buffer->state.dirty |= dest_mask;
216 }
217
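/**
 * Cache the streamout strides and the enabled-buffer mask from the pipeline's
 * streamout shader (the last vertex-processing stage that writes streamout).
 * Skipped when there is no streamout shader or when NGG streamout is used.
 */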
218 static void
219 radv_bind_streamout_state(struct radv_cmd_buffer *cmd_buffer,
220 struct radv_pipeline *pipeline)
221 {
222 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
223 struct radv_shader_info *info;
224
225 if (!pipeline->streamout_shader ||
226 cmd_buffer->device->physical_device->use_ngg_streamout)
227 return;
228
229 info = &pipeline->streamout_shader->info;
230 for (int i = 0; i < MAX_SO_BUFFERS; i++)
231 so->stride_in_dw[i] = info->so.strides[i];
232
233 so->enabled_stream_buffers_mask = info->so.enabled_stream_buffers_mask;
234 }
235
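/* Compute queues on GFX7+ are executed by the MEC (micro-engine compute),
 * which accepts a slightly different packet set than the graphics ME.
 */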
236 bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer)
237 {
238 return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
239 cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7;
240 }
241
242 enum ring_type radv_queue_family_to_ring(int f) {
243 switch (f) {
244 case RADV_QUEUE_GENERAL:
245 return RING_GFX;
246 case RADV_QUEUE_COMPUTE:
247 return RING_COMPUTE;
248 case RADV_QUEUE_TRANSFER:
249 return RING_DMA;
250 default:
251 unreachable("Unknown queue family");
252 }
253 }
254
255 static VkResult radv_create_cmd_buffer(
256 struct radv_device * device,
257 struct radv_cmd_pool * pool,
258 VkCommandBufferLevel level,
259 VkCommandBuffer* pCommandBuffer)
260 {
261 struct radv_cmd_buffer *cmd_buffer;
262 unsigned ring;
263 cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
264 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
265 if (cmd_buffer == NULL)
266 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
267
268 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
269 cmd_buffer->device = device;
270 cmd_buffer->pool = pool;
271 cmd_buffer->level = level;
272
273 if (pool) {
274 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
275 cmd_buffer->queue_family_index = pool->queue_family_index;
276
277 } else {
278 /* Init the pool_link so we can safely call list_del when we destroy
279 * the command buffer
280 */
281 list_inithead(&cmd_buffer->pool_link);
282 cmd_buffer->queue_family_index = RADV_QUEUE_GENERAL;
283 }
284
285 ring = radv_queue_family_to_ring(cmd_buffer->queue_family_index);
286
287 cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
288 if (!cmd_buffer->cs) {
289 vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
290 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
291 }
292
293 *pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);
294
295 list_inithead(&cmd_buffer->upload.list);
296
297 return VK_SUCCESS;
298 }
299
300 static void
301 radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer)
302 {
303 list_del(&cmd_buffer->pool_link);
304
305 list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
306 &cmd_buffer->upload.list, list) {
307 cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
308 list_del(&up->list);
309 free(up);
310 }
311
312 if (cmd_buffer->upload.upload_bo)
313 cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
314 cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
315
316 for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
317 free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
318
319 vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
320 }
321
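/**
 * Return a command buffer to its initial state: reset the CS, release the
 * retired upload BOs, clear the cached dynamic/descriptor state and, on
 * GFX9+ graphics queues, re-allocate the upload-buffer space used for the
 * flush fence (and, on GFX9, for the EOP bug workaround).
 */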
322 static VkResult
323 radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
324 {
325 cmd_buffer->device->ws->cs_reset(cmd_buffer->cs);
326
327 list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
328 &cmd_buffer->upload.list, list) {
329 cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
330 list_del(&up->list);
331 free(up);
332 }
333
334 cmd_buffer->push_constant_stages = 0;
335 cmd_buffer->scratch_size_per_wave_needed = 0;
336 cmd_buffer->scratch_waves_wanted = 0;
337 cmd_buffer->compute_scratch_size_per_wave_needed = 0;
338 cmd_buffer->compute_scratch_waves_wanted = 0;
339 cmd_buffer->esgs_ring_size_needed = 0;
340 cmd_buffer->gsvs_ring_size_needed = 0;
341 cmd_buffer->tess_rings_needed = false;
342 cmd_buffer->gds_needed = false;
343 cmd_buffer->sample_positions_needed = false;
344
345 if (cmd_buffer->upload.upload_bo)
346 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
347 cmd_buffer->upload.upload_bo);
348 cmd_buffer->upload.offset = 0;
349
350 cmd_buffer->record_result = VK_SUCCESS;
351
352 memset(cmd_buffer->vertex_bindings, 0, sizeof(cmd_buffer->vertex_bindings));
353
354 for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
355 cmd_buffer->descriptors[i].dirty = 0;
356 cmd_buffer->descriptors[i].valid = 0;
357 cmd_buffer->descriptors[i].push_dirty = false;
358 }
359
360 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 &&
361 cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) {
362 unsigned num_db = cmd_buffer->device->physical_device->rad_info.num_render_backends;
363 unsigned fence_offset, eop_bug_offset;
364 void *fence_ptr;
365
366 radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 8, &fence_offset,
367 &fence_ptr);
368
369 cmd_buffer->gfx9_fence_va =
370 radv_buffer_get_va(cmd_buffer->upload.upload_bo);
371 cmd_buffer->gfx9_fence_va += fence_offset;
372
373 if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
374 /* Allocate a buffer for the EOP bug on GFX9. */
375 radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 8,
376 &eop_bug_offset, &fence_ptr);
377 cmd_buffer->gfx9_eop_bug_va =
378 radv_buffer_get_va(cmd_buffer->upload.upload_bo);
379 cmd_buffer->gfx9_eop_bug_va += eop_bug_offset;
380 }
381 }
382
383 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_INITIAL;
384
385 return cmd_buffer->record_result;
386 }
387
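/**
 * Grow the upload buffer to hold at least min_needed bytes (never less than
 * 16 KiB or twice the previous size). The old BO is moved onto the upload
 * list so it stays alive until the command buffer is reset or destroyed.
 */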
388 static bool
389 radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer,
390 uint64_t min_needed)
391 {
392 uint64_t new_size;
393 struct radeon_winsys_bo *bo;
394 struct radv_cmd_buffer_upload *upload;
395 struct radv_device *device = cmd_buffer->device;
396
397 new_size = MAX2(min_needed, 16 * 1024);
398 new_size = MAX2(new_size, 2 * cmd_buffer->upload.size);
399
400 bo = device->ws->buffer_create(device->ws,
401 new_size, 4096,
402 RADEON_DOMAIN_GTT,
403 RADEON_FLAG_CPU_ACCESS|
404 RADEON_FLAG_NO_INTERPROCESS_SHARING |
405 RADEON_FLAG_32BIT,
406 RADV_BO_PRIORITY_UPLOAD_BUFFER);
407
408 if (!bo) {
409 cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
410 return false;
411 }
412
413 radv_cs_add_buffer(device->ws, cmd_buffer->cs, bo);
414 if (cmd_buffer->upload.upload_bo) {
415 upload = malloc(sizeof(*upload));
416
417 if (!upload) {
418 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
419 device->ws->buffer_destroy(bo);
420 return false;
421 }
422
423 memcpy(upload, &cmd_buffer->upload, sizeof(*upload));
424 list_add(&upload->list, &cmd_buffer->upload.list);
425 }
426
427 cmd_buffer->upload.upload_bo = bo;
428 cmd_buffer->upload.size = new_size;
429 cmd_buffer->upload.offset = 0;
430 cmd_buffer->upload.map = device->ws->buffer_map(cmd_buffer->upload.upload_bo);
431
432 if (!cmd_buffer->upload.map) {
433 cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
434 return false;
435 }
436
437 return true;
438 }
439
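/**
 * Sub-allocate "size" bytes with the requested alignment from the upload
 * buffer, growing it if needed. Returns the offset and a CPU pointer to the
 * allocation through out_offset/ptr.
 */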
440 bool
441 radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer,
442 unsigned size,
443 unsigned alignment,
444 unsigned *out_offset,
445 void **ptr)
446 {
447 assert(util_is_power_of_two_nonzero(alignment));
448
449 uint64_t offset = align(cmd_buffer->upload.offset, alignment);
450 if (offset + size > cmd_buffer->upload.size) {
451 if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size))
452 return false;
453 offset = 0;
454 }
455
456 *out_offset = offset;
457 *ptr = cmd_buffer->upload.map + offset;
458
459 cmd_buffer->upload.offset = offset + size;
460 return true;
461 }
462
463 bool
464 radv_cmd_buffer_upload_data(struct radv_cmd_buffer *cmd_buffer,
465 unsigned size, unsigned alignment,
466 const void *data, unsigned *out_offset)
467 {
468 uint8_t *ptr;
469
470 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size, alignment,
471 out_offset, (void **)&ptr))
472 return false;
473
474 if (ptr)
475 memcpy(ptr, data, size);
476
477 return true;
478 }
479
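/* Emit a WRITE_DATA packet that stores "count" dwords at GPU address "va"
 * through the ME, with write confirmation.
 */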
480 static void
481 radv_emit_write_data_packet(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
482 unsigned count, const uint32_t *data)
483 {
484 struct radeon_cmdbuf *cs = cmd_buffer->cs;
485
486 radeon_check_space(cmd_buffer->device->ws, cs, 4 + count);
487
488 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
489 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
490 S_370_WR_CONFIRM(1) |
491 S_370_ENGINE_SEL(V_370_ME));
492 radeon_emit(cs, va);
493 radeon_emit(cs, va >> 32);
494 radeon_emit_array(cs, data, count);
495 }
496
497 void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer)
498 {
499 struct radv_device *device = cmd_buffer->device;
500 struct radeon_cmdbuf *cs = cmd_buffer->cs;
501 uint64_t va;
502
503 va = radv_buffer_get_va(device->trace_bo);
504 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
505 va += 4;
506
507 ++cmd_buffer->state.trace_id;
508 radv_emit_write_data_packet(cmd_buffer, va, 1,
509 &cmd_buffer->state.trace_id);
510
511 radeon_check_space(cmd_buffer->device->ws, cs, 2);
512
513 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
514 radeon_emit(cs, AC_ENCODE_TRACE_POINT(cmd_buffer->state.trace_id));
515 }
516
517 static void
518 radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer,
519 enum radv_cmd_flush_bits flags)
520 {
521 if (cmd_buffer->device->instance->debug_flags & RADV_DEBUG_SYNC_SHADERS) {
522 assert(flags & (RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
523 RADV_CMD_FLAG_CS_PARTIAL_FLUSH));
524
525 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 4);
526
527 /* Force wait for graphics or compute engines to be idle. */
528 si_cs_emit_cache_flush(cmd_buffer->cs,
529 cmd_buffer->device->physical_device->rad_info.chip_class,
530 &cmd_buffer->gfx9_fence_idx,
531 cmd_buffer->gfx9_fence_va,
532 radv_cmd_buffer_uses_mec(cmd_buffer),
533 flags, cmd_buffer->gfx9_eop_bug_va);
534 }
535
536 if (unlikely(cmd_buffer->device->trace_bo))
537 radv_cmd_buffer_trace_emit(cmd_buffer);
538 }
539
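/* Record the bound pipeline pointer in the device trace buffer; only called
 * when tracing is enabled (device->trace_bo is set) so debugging tools can
 * identify the pipeline that was active.
 */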
540 static void
541 radv_save_pipeline(struct radv_cmd_buffer *cmd_buffer,
542 struct radv_pipeline *pipeline, enum ring_type ring)
543 {
544 struct radv_device *device = cmd_buffer->device;
545 uint32_t data[2];
546 uint64_t va;
547
548 va = radv_buffer_get_va(device->trace_bo);
549
550 switch (ring) {
551 case RING_GFX:
552 va += 8;
553 break;
554 case RING_COMPUTE:
555 va += 16;
556 break;
557 default:
558 assert(!"invalid ring type");
559 }
560
561 uint64_t pipeline_address = (uintptr_t)pipeline;
562 data[0] = pipeline_address;
563 data[1] = pipeline_address >> 32;
564
565 radv_emit_write_data_packet(cmd_buffer, va, 2, data);
566 }
567
568 void radv_set_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
569 VkPipelineBindPoint bind_point,
570 struct radv_descriptor_set *set,
571 unsigned idx)
572 {
573 struct radv_descriptor_state *descriptors_state =
574 radv_get_descriptors_state(cmd_buffer, bind_point);
575
576 descriptors_state->sets[idx] = set;
577
578 descriptors_state->valid |= (1u << idx); /* active descriptors */
579 descriptors_state->dirty |= (1u << idx);
580 }
581
582 static void
583 radv_save_descriptors(struct radv_cmd_buffer *cmd_buffer,
584 VkPipelineBindPoint bind_point)
585 {
586 struct radv_descriptor_state *descriptors_state =
587 radv_get_descriptors_state(cmd_buffer, bind_point);
588 struct radv_device *device = cmd_buffer->device;
589 uint32_t data[MAX_SETS * 2] = {};
590 uint64_t va;
591 unsigned i;
592 va = radv_buffer_get_va(device->trace_bo) + 24;
593
594 for_each_bit(i, descriptors_state->valid) {
595 struct radv_descriptor_set *set = descriptors_state->sets[i];
596 data[i * 2] = (uint64_t)(uintptr_t)set;
597 data[i * 2 + 1] = (uint64_t)(uintptr_t)set >> 32;
598 }
599
600 radv_emit_write_data_packet(cmd_buffer, va, MAX_SETS * 2, data);
601 }
602
603 struct radv_userdata_info *
604 radv_lookup_user_sgpr(struct radv_pipeline *pipeline,
605 gl_shader_stage stage,
606 int idx)
607 {
608 struct radv_shader_variant *shader = radv_get_shader(pipeline, stage);
609 return &shader->info.user_sgprs_locs.shader_data[idx];
610 }
611
612 static void
613 radv_emit_userdata_address(struct radv_cmd_buffer *cmd_buffer,
614 struct radv_pipeline *pipeline,
615 gl_shader_stage stage,
616 int idx, uint64_t va)
617 {
618 struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
619 uint32_t base_reg = pipeline->user_data_0[stage];
620 if (loc->sgpr_idx == -1)
621 return;
622
623 assert(loc->num_sgprs == 1);
624
625 radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
626 base_reg + loc->sgpr_idx * 4, va, false);
627 }
628
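/* Emit the user SGPRs that hold the descriptor set addresses for one shader
 * stage. Consecutive dirty sets are written with a single shader-pointer
 * packet.
 */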
629 static void
630 radv_emit_descriptor_pointers(struct radv_cmd_buffer *cmd_buffer,
631 struct radv_pipeline *pipeline,
632 struct radv_descriptor_state *descriptors_state,
633 gl_shader_stage stage)
634 {
635 struct radv_device *device = cmd_buffer->device;
636 struct radeon_cmdbuf *cs = cmd_buffer->cs;
637 uint32_t sh_base = pipeline->user_data_0[stage];
638 struct radv_userdata_locations *locs =
639 &pipeline->shaders[stage]->info.user_sgprs_locs;
640 unsigned mask = locs->descriptor_sets_enabled;
641
642 mask &= descriptors_state->dirty & descriptors_state->valid;
643
644 while (mask) {
645 int start, count;
646
647 u_bit_scan_consecutive_range(&mask, &start, &count);
648
649 struct radv_userdata_info *loc = &locs->descriptor_sets[start];
650 unsigned sh_offset = sh_base + loc->sgpr_idx * 4;
651
652 radv_emit_shader_pointer_head(cs, sh_offset, count, true);
653 for (int i = 0; i < count; i++) {
654 struct radv_descriptor_set *set =
655 descriptors_state->sets[start + i];
656
657 radv_emit_shader_pointer_body(device, cs, set->va, true);
658 }
659 }
660 }
661
662 /**
663 * Convert the user sample locations to hardware sample locations (the values
664 * that will be emitted by PA_SC_AA_SAMPLE_LOCS_PIXEL_*).
665 */
666 static void
667 radv_convert_user_sample_locs(struct radv_sample_locations_state *state,
668 uint32_t x, uint32_t y, VkOffset2D *sample_locs)
669 {
670 uint32_t x_offset = x % state->grid_size.width;
671 uint32_t y_offset = y % state->grid_size.height;
672 uint32_t num_samples = (uint32_t)state->per_pixel;
673 VkSampleLocationEXT *user_locs;
674 uint32_t pixel_offset;
675
676 pixel_offset = (x_offset + y_offset * state->grid_size.width) * num_samples;
677
678 assert(pixel_offset <= MAX_SAMPLE_LOCATIONS);
679 user_locs = &state->locations[pixel_offset];
680
681 for (uint32_t i = 0; i < num_samples; i++) {
682 float shifted_pos_x = user_locs[i].x - 0.5;
683 float shifted_pos_y = user_locs[i].y - 0.5;
684
685 int32_t scaled_pos_x = floor(shifted_pos_x * 16);
686 int32_t scaled_pos_y = floor(shifted_pos_y * 16);
687
688 sample_locs[i].x = CLAMP(scaled_pos_x, -8, 7);
689 sample_locs[i].y = CLAMP(scaled_pos_y, -8, 7);
690 }
691 }
692
693 /**
694 * Compute the PA_SC_AA_SAMPLE_LOCS_PIXEL_* mask based on hardware sample
695 * locations.
696 */
697 static void
698 radv_compute_sample_locs_pixel(uint32_t num_samples, VkOffset2D *sample_locs,
699 uint32_t *sample_locs_pixel)
700 {
701 for (uint32_t i = 0; i < num_samples; i++) {
702 uint32_t sample_reg_idx = i / 4;
703 uint32_t sample_loc_idx = i % 4;
704 int32_t pos_x = sample_locs[i].x;
705 int32_t pos_y = sample_locs[i].y;
706
707 uint32_t shift_x = 8 * sample_loc_idx;
708 uint32_t shift_y = shift_x + 4;
709
710 sample_locs_pixel[sample_reg_idx] |= (pos_x & 0xf) << shift_x;
711 sample_locs_pixel[sample_reg_idx] |= (pos_y & 0xf) << shift_y;
712 }
713 }
714
715 /**
716 * Compute the PA_SC_CENTROID_PRIORITY_* mask based on the top left hardware
717 * sample locations.
718 */
719 static uint64_t
720 radv_compute_centroid_priority(struct radv_cmd_buffer *cmd_buffer,
721 VkOffset2D *sample_locs,
722 uint32_t num_samples)
723 {
724 uint32_t centroid_priorities[num_samples];
725 uint32_t sample_mask = num_samples - 1;
726 uint32_t distances[num_samples];
727 uint64_t centroid_priority = 0;
728
729 /* Compute the distances from center for each sample. */
730 for (int i = 0; i < num_samples; i++) {
731 distances[i] = (sample_locs[i].x * sample_locs[i].x) +
732 (sample_locs[i].y * sample_locs[i].y);
733 }
734
735 /* Compute the centroid priorities by looking at the distances array. */
736 for (int i = 0; i < num_samples; i++) {
737 uint32_t min_idx = 0;
738
739 for (int j = 1; j < num_samples; j++) {
740 if (distances[j] < distances[min_idx])
741 min_idx = j;
742 }
743
744 centroid_priorities[i] = min_idx;
745 distances[min_idx] = 0xffffffff;
746 }
747
748 /* Compute the final centroid priority. */
749 for (int i = 0; i < 8; i++) {
750 centroid_priority |=
751 centroid_priorities[i & sample_mask] << (i * 4);
752 }
753
754 return centroid_priority << 32 | centroid_priority;
755 }
756
757 /**
758 * Emit the sample locations that are specified with VK_EXT_sample_locations.
759 */
760 static void
761 radv_emit_sample_locations(struct radv_cmd_buffer *cmd_buffer)
762 {
763 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
764 struct radv_multisample_state *ms = &pipeline->graphics.ms;
765 struct radv_sample_locations_state *sample_location =
766 &cmd_buffer->state.dynamic.sample_location;
767 uint32_t num_samples = (uint32_t)sample_location->per_pixel;
768 struct radeon_cmdbuf *cs = cmd_buffer->cs;
769 uint32_t sample_locs_pixel[4][2] = {};
770 VkOffset2D sample_locs[4][8]; /* 8 is the max. sample count supported */
771 uint32_t max_sample_dist = 0;
772 uint64_t centroid_priority;
773
774 if (!cmd_buffer->state.dynamic.sample_location.count)
775 return;
776
777 /* Convert the user sample locations to hardware sample locations. */
778 radv_convert_user_sample_locs(sample_location, 0, 0, sample_locs[0]);
779 radv_convert_user_sample_locs(sample_location, 1, 0, sample_locs[1]);
780 radv_convert_user_sample_locs(sample_location, 0, 1, sample_locs[2]);
781 radv_convert_user_sample_locs(sample_location, 1, 1, sample_locs[3]);
782
783 /* Compute the PA_SC_AA_SAMPLE_LOCS_PIXEL_* mask. */
784 for (uint32_t i = 0; i < 4; i++) {
785 radv_compute_sample_locs_pixel(num_samples, sample_locs[i],
786 sample_locs_pixel[i]);
787 }
788
789 /* Compute the PA_SC_CENTROID_PRIORITY_* mask. */
790 centroid_priority =
791 radv_compute_centroid_priority(cmd_buffer, sample_locs[0],
792 num_samples);
793
794 /* Compute the maximum sample distance from the specified locations. */
795 for (uint32_t i = 0; i < num_samples; i++) {
796 VkOffset2D offset = sample_locs[0][i];
797 max_sample_dist = MAX2(max_sample_dist,
798 MAX2(abs(offset.x), abs(offset.y)));
799 }
800
801 /* Emit the specified user sample locations. */
802 switch (num_samples) {
803 case 2:
804 case 4:
805 radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_pixel[0][0]);
806 radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_pixel[1][0]);
807 radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_pixel[2][0]);
808 radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_pixel[3][0]);
809 break;
810 case 8:
811 radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_pixel[0][0]);
812 radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_pixel[1][0]);
813 radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_pixel[2][0]);
814 radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_pixel[3][0]);
815 radeon_set_context_reg(cs, R_028BFC_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1, sample_locs_pixel[0][1]);
816 radeon_set_context_reg(cs, R_028C0C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1, sample_locs_pixel[1][1]);
817 radeon_set_context_reg(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1, sample_locs_pixel[2][1]);
818 radeon_set_context_reg(cs, R_028C2C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1, sample_locs_pixel[3][1]);
819 break;
820 default:
821 unreachable("invalid number of samples");
822 }
823
824 /* Emit the maximum sample distance and the centroid priority. */
825 uint32_t pa_sc_aa_config = ms->pa_sc_aa_config;
826
827 pa_sc_aa_config &= C_028BE0_MAX_SAMPLE_DIST;
828 pa_sc_aa_config |= S_028BE0_MAX_SAMPLE_DIST(max_sample_dist);
829
830 radeon_set_context_reg_seq(cs, R_028BE0_PA_SC_AA_CONFIG, 1);
831 radeon_emit(cs, pa_sc_aa_config);
832
833 radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
834 radeon_emit(cs, centroid_priority);
835 radeon_emit(cs, centroid_priority >> 32);
836
837 /* GFX9: Flush DFSM when the AA mode changes. */
838 if (cmd_buffer->device->dfsm_allowed) {
839 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
840 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
841 }
842
843 cmd_buffer->state.context_roll_without_scissor_emitted = true;
844 }
845
846 static void
847 radv_emit_inline_push_consts(struct radv_cmd_buffer *cmd_buffer,
848 struct radv_pipeline *pipeline,
849 gl_shader_stage stage,
850 int idx, int count, uint32_t *values)
851 {
852 struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
853 uint32_t base_reg = pipeline->user_data_0[stage];
854 if (loc->sgpr_idx == -1)
855 return;
856
857 assert(loc->num_sgprs == count);
858
859 radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, count);
860 radeon_emit_array(cmd_buffer->cs, values, count);
861 }
862
863 static void
864 radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer,
865 struct radv_pipeline *pipeline)
866 {
867 int num_samples = pipeline->graphics.ms.num_samples;
868 struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;
869
870 if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.needs_sample_positions)
871 cmd_buffer->sample_positions_needed = true;
872
873 if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples)
874 return;
875
876 radv_emit_default_sample_locations(cmd_buffer->cs, num_samples);
877
878 cmd_buffer->state.context_roll_without_scissor_emitted = true;
879 }
880
881 static void
882 radv_update_binning_state(struct radv_cmd_buffer *cmd_buffer,
883 struct radv_pipeline *pipeline)
884 {
885 const struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;
886
887
888 if (pipeline->device->physical_device->rad_info.chip_class < GFX9)
889 return;
890
891 if (old_pipeline &&
892 old_pipeline->graphics.binning.pa_sc_binner_cntl_0 == pipeline->graphics.binning.pa_sc_binner_cntl_0 &&
893 old_pipeline->graphics.binning.db_dfsm_control == pipeline->graphics.binning.db_dfsm_control)
894 return;
895
896 bool binning_flush = false;
897 if (cmd_buffer->device->physical_device->rad_info.family == CHIP_VEGA12 ||
898 cmd_buffer->device->physical_device->rad_info.family == CHIP_VEGA20 ||
899 cmd_buffer->device->physical_device->rad_info.family == CHIP_RAVEN2 ||
900 cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
901 binning_flush = !old_pipeline ||
902 G_028C44_BINNING_MODE(old_pipeline->graphics.binning.pa_sc_binner_cntl_0) !=
903 G_028C44_BINNING_MODE(pipeline->graphics.binning.pa_sc_binner_cntl_0);
904 }
905
906 radeon_set_context_reg(cmd_buffer->cs, R_028C44_PA_SC_BINNER_CNTL_0,
907 pipeline->graphics.binning.pa_sc_binner_cntl_0 |
908 S_028C44_FLUSH_ON_BINNING_TRANSITION(!!binning_flush));
909
910 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
911 radeon_set_context_reg(cmd_buffer->cs, R_028038_DB_DFSM_CONTROL,
912 pipeline->graphics.binning.db_dfsm_control);
913 } else {
914 radeon_set_context_reg(cmd_buffer->cs, R_028060_DB_DFSM_CONTROL,
915 pipeline->graphics.binning.db_dfsm_control);
916 }
917
918 cmd_buffer->state.context_roll_without_scissor_emitted = true;
919 }
920
921
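/* Prefetch a shader binary into L2 using CP DMA so its first waves are less
 * likely to stall on instruction fetches.
 */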
922 static void
923 radv_emit_shader_prefetch(struct radv_cmd_buffer *cmd_buffer,
924 struct radv_shader_variant *shader)
925 {
926 uint64_t va;
927
928 if (!shader)
929 return;
930
931 va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
932
933 si_cp_dma_prefetch(cmd_buffer, va, shader->code_size);
934 }
935
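/* Prefetch shader binaries and the vertex buffer descriptors into L2. When
 * vertex_stage_only is set, only the VS binary and the VBO descriptors are
 * prefetched so the upcoming draw can start as soon as possible.
 */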
936 static void
937 radv_emit_prefetch_L2(struct radv_cmd_buffer *cmd_buffer,
938 struct radv_pipeline *pipeline,
939 bool vertex_stage_only)
940 {
941 struct radv_cmd_state *state = &cmd_buffer->state;
942 uint32_t mask = state->prefetch_L2_mask;
943
944 if (vertex_stage_only) {
945 /* Fast prefetch path for starting draws as soon as possible.
946 */
947 mask = state->prefetch_L2_mask & (RADV_PREFETCH_VS |
948 RADV_PREFETCH_VBO_DESCRIPTORS);
949 }
950
951 if (mask & RADV_PREFETCH_VS)
952 radv_emit_shader_prefetch(cmd_buffer,
953 pipeline->shaders[MESA_SHADER_VERTEX]);
954
955 if (mask & RADV_PREFETCH_VBO_DESCRIPTORS)
956 si_cp_dma_prefetch(cmd_buffer, state->vb_va, state->vb_size);
957
958 if (mask & RADV_PREFETCH_TCS)
959 radv_emit_shader_prefetch(cmd_buffer,
960 pipeline->shaders[MESA_SHADER_TESS_CTRL]);
961
962 if (mask & RADV_PREFETCH_TES)
963 radv_emit_shader_prefetch(cmd_buffer,
964 pipeline->shaders[MESA_SHADER_TESS_EVAL]);
965
966 if (mask & RADV_PREFETCH_GS) {
967 radv_emit_shader_prefetch(cmd_buffer,
968 pipeline->shaders[MESA_SHADER_GEOMETRY]);
969 if (radv_pipeline_has_gs_copy_shader(pipeline))
970 radv_emit_shader_prefetch(cmd_buffer, pipeline->gs_copy_shader);
971 }
972
973 if (mask & RADV_PREFETCH_PS)
974 radv_emit_shader_prefetch(cmd_buffer,
975 pipeline->shaders[MESA_SHADER_FRAGMENT]);
976
977 state->prefetch_L2_mask &= ~mask;
978 }
979
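/* Program the RB+ registers (SX_PS_DOWNCONVERT, SX_BLEND_OPT_EPSILON and
 * SX_BLEND_OPT_CONTROL) from the bound color attachments, their export
 * formats and the color write masks.
 */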
980 static void
981 radv_emit_rbplus_state(struct radv_cmd_buffer *cmd_buffer)
982 {
983 if (!cmd_buffer->device->physical_device->rad_info.rbplus_allowed)
984 return;
985
986 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
987 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
988
989 unsigned sx_ps_downconvert = 0;
990 unsigned sx_blend_opt_epsilon = 0;
991 unsigned sx_blend_opt_control = 0;
992
993 if (!cmd_buffer->state.attachments || !subpass)
994 return;
995
996 for (unsigned i = 0; i < subpass->color_count; ++i) {
997 if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
998 sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
999 sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
1000 continue;
1001 }
1002
1003 int idx = subpass->color_attachments[i].attachment;
1004 struct radv_color_buffer_info *cb = &cmd_buffer->state.attachments[idx].cb;
1005
1006 unsigned format = G_028C70_FORMAT(cb->cb_color_info);
1007 unsigned swap = G_028C70_COMP_SWAP(cb->cb_color_info);
1008 uint32_t spi_format = (pipeline->graphics.col_format >> (i * 4)) & 0xf;
1009 uint32_t colormask = (pipeline->graphics.cb_target_mask >> (i * 4)) & 0xf;
1010
1011 bool has_alpha, has_rgb;
1012
1013 /* Set if RGB and A are present. */
1014 has_alpha = !G_028C74_FORCE_DST_ALPHA_1(cb->cb_color_attrib);
1015
1016 if (format == V_028C70_COLOR_8 ||
1017 format == V_028C70_COLOR_16 ||
1018 format == V_028C70_COLOR_32)
1019 has_rgb = !has_alpha;
1020 else
1021 has_rgb = true;
1022
1023 /* Check the colormask and export format. */
1024 if (!(colormask & 0x7))
1025 has_rgb = false;
1026 if (!(colormask & 0x8))
1027 has_alpha = false;
1028
1029 if (spi_format == V_028714_SPI_SHADER_ZERO) {
1030 has_rgb = false;
1031 has_alpha = false;
1032 }
1033
1034 /* Disable value checking for disabled channels. */
1035 if (!has_rgb)
1036 sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
1037 if (!has_alpha)
1038 sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
1039
1040 /* Enable down-conversion for 32bpp and smaller formats. */
1041 switch (format) {
1042 case V_028C70_COLOR_8:
1043 case V_028C70_COLOR_8_8:
1044 case V_028C70_COLOR_8_8_8_8:
1045 /* For 1 and 2-channel formats, use the superset thereof. */
1046 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR ||
1047 spi_format == V_028714_SPI_SHADER_UINT16_ABGR ||
1048 spi_format == V_028714_SPI_SHADER_SINT16_ABGR) {
1049 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_8_8_8_8 << (i * 4);
1050 sx_blend_opt_epsilon |= V_028758_8BIT_FORMAT << (i * 4);
1051 }
1052 break;
1053
1054 case V_028C70_COLOR_5_6_5:
1055 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
1056 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_5_6_5 << (i * 4);
1057 sx_blend_opt_epsilon |= V_028758_6BIT_FORMAT << (i * 4);
1058 }
1059 break;
1060
1061 case V_028C70_COLOR_1_5_5_5:
1062 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
1063 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_1_5_5_5 << (i * 4);
1064 sx_blend_opt_epsilon |= V_028758_5BIT_FORMAT << (i * 4);
1065 }
1066 break;
1067
1068 case V_028C70_COLOR_4_4_4_4:
1069 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
1070 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_4_4_4_4 << (i * 4);
1071 sx_blend_opt_epsilon |= V_028758_4BIT_FORMAT << (i * 4);
1072 }
1073 break;
1074
1075 case V_028C70_COLOR_32:
1076 if (swap == V_028C70_SWAP_STD &&
1077 spi_format == V_028714_SPI_SHADER_32_R)
1078 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_R << (i * 4);
1079 else if (swap == V_028C70_SWAP_ALT_REV &&
1080 spi_format == V_028714_SPI_SHADER_32_AR)
1081 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_A << (i * 4);
1082 break;
1083
1084 case V_028C70_COLOR_16:
1085 case V_028C70_COLOR_16_16:
1086 /* For 1-channel formats, use the superset thereof. */
1087 if (spi_format == V_028714_SPI_SHADER_UNORM16_ABGR ||
1088 spi_format == V_028714_SPI_SHADER_SNORM16_ABGR ||
1089 spi_format == V_028714_SPI_SHADER_UINT16_ABGR ||
1090 spi_format == V_028714_SPI_SHADER_SINT16_ABGR) {
1091 if (swap == V_028C70_SWAP_STD ||
1092 swap == V_028C70_SWAP_STD_REV)
1093 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_16_16_GR << (i * 4);
1094 else
1095 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_16_16_AR << (i * 4);
1096 }
1097 break;
1098
1099 case V_028C70_COLOR_10_11_11:
1100 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
1101 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_10_11_11 << (i * 4);
1102 sx_blend_opt_epsilon |= V_028758_11BIT_FORMAT << (i * 4);
1103 }
1104 break;
1105
1106 case V_028C70_COLOR_2_10_10_10:
1107 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
1108 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_2_10_10_10 << (i * 4);
1109 sx_blend_opt_epsilon |= V_028758_10BIT_FORMAT << (i * 4);
1110 }
1111 break;
1112 }
1113 }
1114
1115 for (unsigned i = subpass->color_count; i < 8; ++i) {
1116 sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
1117 sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
1118 }
1119 /* TODO: avoid redundantly setting context registers */
1120 radeon_set_context_reg_seq(cmd_buffer->cs, R_028754_SX_PS_DOWNCONVERT, 3);
1121 radeon_emit(cmd_buffer->cs, sx_ps_downconvert);
1122 radeon_emit(cmd_buffer->cs, sx_blend_opt_epsilon);
1123 radeon_emit(cmd_buffer->cs, sx_blend_opt_control);
1124
1125 cmd_buffer->state.context_roll_without_scissor_emitted = true;
1126 }
1127
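/* Emit the pre-built register state of the bound graphics pipeline. The
 * context-register portion is only re-emitted when it differs from the
 * previously emitted pipeline.
 */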
1128 static void
1129 radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer)
1130 {
1131 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
1132
1133 if (!pipeline || cmd_buffer->state.emitted_pipeline == pipeline)
1134 return;
1135
1136 radv_update_multisample_state(cmd_buffer, pipeline);
1137 radv_update_binning_state(cmd_buffer, pipeline);
1138
1139 cmd_buffer->scratch_size_per_wave_needed = MAX2(cmd_buffer->scratch_size_per_wave_needed,
1140 pipeline->scratch_bytes_per_wave);
1141 cmd_buffer->scratch_waves_wanted = MAX2(cmd_buffer->scratch_waves_wanted,
1142 pipeline->max_waves);
1143
1144 if (!cmd_buffer->state.emitted_pipeline ||
1145 cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband !=
1146 pipeline->graphics.can_use_guardband)
1147 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
1148
1149 radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);
1150
1151 if (!cmd_buffer->state.emitted_pipeline ||
1152 cmd_buffer->state.emitted_pipeline->ctx_cs.cdw != pipeline->ctx_cs.cdw ||
1153 cmd_buffer->state.emitted_pipeline->ctx_cs_hash != pipeline->ctx_cs_hash ||
1154 memcmp(cmd_buffer->state.emitted_pipeline->ctx_cs.buf,
1155 pipeline->ctx_cs.buf, pipeline->ctx_cs.cdw * 4)) {
1156 radeon_emit_array(cmd_buffer->cs, pipeline->ctx_cs.buf, pipeline->ctx_cs.cdw);
1157 cmd_buffer->state.context_roll_without_scissor_emitted = true;
1158 }
1159
1160 for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) {
1161 if (!pipeline->shaders[i])
1162 continue;
1163
1164 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
1165 pipeline->shaders[i]->bo);
1166 }
1167
1168 if (radv_pipeline_has_gs_copy_shader(pipeline))
1169 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
1170 pipeline->gs_copy_shader->bo);
1171
1172 if (unlikely(cmd_buffer->device->trace_bo))
1173 radv_save_pipeline(cmd_buffer, pipeline, RING_GFX);
1174
1175 cmd_buffer->state.emitted_pipeline = pipeline;
1176
1177 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_PIPELINE;
1178 }
1179
1180 static void
1181 radv_emit_viewport(struct radv_cmd_buffer *cmd_buffer)
1182 {
1183 si_write_viewport(cmd_buffer->cs, 0, cmd_buffer->state.dynamic.viewport.count,
1184 cmd_buffer->state.dynamic.viewport.viewports);
1185 }
1186
1187 static void
1188 radv_emit_scissor(struct radv_cmd_buffer *cmd_buffer)
1189 {
1190 uint32_t count = cmd_buffer->state.dynamic.scissor.count;
1191
1192 si_write_scissors(cmd_buffer->cs, 0, count,
1193 cmd_buffer->state.dynamic.scissor.scissors,
1194 cmd_buffer->state.dynamic.viewport.viewports,
1195 cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband);
1196
1197 cmd_buffer->state.context_roll_without_scissor_emitted = false;
1198 }
1199
1200 static void
1201 radv_emit_discard_rectangle(struct radv_cmd_buffer *cmd_buffer)
1202 {
1203 if (!cmd_buffer->state.dynamic.discard_rectangle.count)
1204 return;
1205
1206 radeon_set_context_reg_seq(cmd_buffer->cs, R_028210_PA_SC_CLIPRECT_0_TL,
1207 cmd_buffer->state.dynamic.discard_rectangle.count * 2);
1208 for (unsigned i = 0; i < cmd_buffer->state.dynamic.discard_rectangle.count; ++i) {
1209 VkRect2D rect = cmd_buffer->state.dynamic.discard_rectangle.rectangles[i];
1210 radeon_emit(cmd_buffer->cs, S_028210_TL_X(rect.offset.x) | S_028210_TL_Y(rect.offset.y));
1211 radeon_emit(cmd_buffer->cs, S_028214_BR_X(rect.offset.x + rect.extent.width) |
1212 S_028214_BR_Y(rect.offset.y + rect.extent.height));
1213 }
1214 }
1215
1216 static void
1217 radv_emit_line_width(struct radv_cmd_buffer *cmd_buffer)
1218 {
1219 unsigned width = cmd_buffer->state.dynamic.line_width * 8;
1220
1221 radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL,
1222 S_028A08_WIDTH(CLAMP(width, 0, 0xFFF)));
1223 }
1224
1225 static void
1226 radv_emit_blend_constants(struct radv_cmd_buffer *cmd_buffer)
1227 {
1228 struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
1229
1230 radeon_set_context_reg_seq(cmd_buffer->cs, R_028414_CB_BLEND_RED, 4);
1231 radeon_emit_array(cmd_buffer->cs, (uint32_t *)d->blend_constants, 4);
1232 }
1233
1234 static void
1235 radv_emit_stencil(struct radv_cmd_buffer *cmd_buffer)
1236 {
1237 struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
1238
1239 radeon_set_context_reg_seq(cmd_buffer->cs,
1240 R_028430_DB_STENCILREFMASK, 2);
1241 radeon_emit(cmd_buffer->cs,
1242 S_028430_STENCILTESTVAL(d->stencil_reference.front) |
1243 S_028430_STENCILMASK(d->stencil_compare_mask.front) |
1244 S_028430_STENCILWRITEMASK(d->stencil_write_mask.front) |
1245 S_028430_STENCILOPVAL(1));
1246 radeon_emit(cmd_buffer->cs,
1247 S_028434_STENCILTESTVAL_BF(d->stencil_reference.back) |
1248 S_028434_STENCILMASK_BF(d->stencil_compare_mask.back) |
1249 S_028434_STENCILWRITEMASK_BF(d->stencil_write_mask.back) |
1250 S_028434_STENCILOPVAL_BF(1));
1251 }
1252
1253 static void
1254 radv_emit_depth_bounds(struct radv_cmd_buffer *cmd_buffer)
1255 {
1256 struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
1257
1258 radeon_set_context_reg(cmd_buffer->cs, R_028020_DB_DEPTH_BOUNDS_MIN,
1259 fui(d->depth_bounds.min));
1260 radeon_set_context_reg(cmd_buffer->cs, R_028024_DB_DEPTH_BOUNDS_MAX,
1261 fui(d->depth_bounds.max));
1262 }
1263
1264 static void
1265 radv_emit_depth_bias(struct radv_cmd_buffer *cmd_buffer)
1266 {
1267 struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
1268 unsigned slope = fui(d->depth_bias.slope * 16.0f);
1269 unsigned bias = fui(d->depth_bias.bias * cmd_buffer->state.offset_scale);
1270
1271
1272 radeon_set_context_reg_seq(cmd_buffer->cs,
1273 R_028B7C_PA_SU_POLY_OFFSET_CLAMP, 5);
1274 radeon_emit(cmd_buffer->cs, fui(d->depth_bias.clamp)); /* CLAMP */
1275 radeon_emit(cmd_buffer->cs, slope); /* FRONT SCALE */
1276 radeon_emit(cmd_buffer->cs, bias); /* FRONT OFFSET */
1277 radeon_emit(cmd_buffer->cs, slope); /* BACK SCALE */
1278 radeon_emit(cmd_buffer->cs, bias); /* BACK OFFSET */
1279 }
1280
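/* Emit the color buffer registers for one framebuffer attachment. DCC and
 * FMASK-compression bits are masked off when the current layout or pipeline
 * does not allow them, and the register layout differs per GPU generation.
 */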
1281 static void
1282 radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
1283 int index,
1284 struct radv_color_buffer_info *cb,
1285 struct radv_image_view *iview,
1286 VkImageLayout layout,
1287 bool in_render_loop)
1288 {
1289 bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8;
1290 uint32_t cb_color_info = cb->cb_color_info;
1291 struct radv_image *image = iview->image;
1292
1293 if (!radv_layout_dcc_compressed(cmd_buffer->device, image, layout, in_render_loop,
1294 radv_image_queue_family_mask(image,
1295 cmd_buffer->queue_family_index,
1296 cmd_buffer->queue_family_index))) {
1297 cb_color_info &= C_028C70_DCC_ENABLE;
1298 }
1299
1300 if (radv_image_is_tc_compat_cmask(image) &&
1301 (radv_is_fmask_decompress_pipeline(cmd_buffer) ||
1302 radv_is_dcc_decompress_pipeline(cmd_buffer))) {
1303 /* If this bit is set, the FMASK decompression operation
1304 * doesn't occur (DCC_COMPRESS also implies FMASK_DECOMPRESS).
1305 */
1306 cb_color_info &= C_028C70_FMASK_COMPRESS_1FRAG_ONLY;
1307 }
1308
1309 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
1310 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
1311 radeon_emit(cmd_buffer->cs, cb->cb_color_base);
1312 radeon_emit(cmd_buffer->cs, 0);
1313 radeon_emit(cmd_buffer->cs, 0);
1314 radeon_emit(cmd_buffer->cs, cb->cb_color_view);
1315 radeon_emit(cmd_buffer->cs, cb_color_info);
1316 radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
1317 radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
1318 radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
1319 radeon_emit(cmd_buffer->cs, 0);
1320 radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
1321 radeon_emit(cmd_buffer->cs, 0);
1322
1323 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, 1);
1324 radeon_emit(cmd_buffer->cs, cb->cb_dcc_base);
1325
1326 radeon_set_context_reg(cmd_buffer->cs, R_028E40_CB_COLOR0_BASE_EXT + index * 4,
1327 cb->cb_color_base >> 32);
1328 radeon_set_context_reg(cmd_buffer->cs, R_028E60_CB_COLOR0_CMASK_BASE_EXT + index * 4,
1329 cb->cb_color_cmask >> 32);
1330 radeon_set_context_reg(cmd_buffer->cs, R_028E80_CB_COLOR0_FMASK_BASE_EXT + index * 4,
1331 cb->cb_color_fmask >> 32);
1332 radeon_set_context_reg(cmd_buffer->cs, R_028EA0_CB_COLOR0_DCC_BASE_EXT + index * 4,
1333 cb->cb_dcc_base >> 32);
1334 radeon_set_context_reg(cmd_buffer->cs, R_028EC0_CB_COLOR0_ATTRIB2 + index * 4,
1335 cb->cb_color_attrib2);
1336 radeon_set_context_reg(cmd_buffer->cs, R_028EE0_CB_COLOR0_ATTRIB3 + index * 4,
1337 cb->cb_color_attrib3);
1338 } else if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
1339 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
1340 radeon_emit(cmd_buffer->cs, cb->cb_color_base);
1341 radeon_emit(cmd_buffer->cs, S_028C64_BASE_256B(cb->cb_color_base >> 32));
1342 radeon_emit(cmd_buffer->cs, cb->cb_color_attrib2);
1343 radeon_emit(cmd_buffer->cs, cb->cb_color_view);
1344 radeon_emit(cmd_buffer->cs, cb_color_info);
1345 radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
1346 radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
1347 radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
1348 radeon_emit(cmd_buffer->cs, S_028C80_BASE_256B(cb->cb_color_cmask >> 32));
1349 radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
1350 radeon_emit(cmd_buffer->cs, S_028C88_BASE_256B(cb->cb_color_fmask >> 32));
1351
1352 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, 2);
1353 radeon_emit(cmd_buffer->cs, cb->cb_dcc_base);
1354 radeon_emit(cmd_buffer->cs, S_028C98_BASE_256B(cb->cb_dcc_base >> 32));
1355
1356 radeon_set_context_reg(cmd_buffer->cs, R_0287A0_CB_MRT0_EPITCH + index * 4,
1357 cb->cb_mrt_epitch);
1358 } else {
1359 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
1360 radeon_emit(cmd_buffer->cs, cb->cb_color_base);
1361 radeon_emit(cmd_buffer->cs, cb->cb_color_pitch);
1362 radeon_emit(cmd_buffer->cs, cb->cb_color_slice);
1363 radeon_emit(cmd_buffer->cs, cb->cb_color_view);
1364 radeon_emit(cmd_buffer->cs, cb_color_info);
1365 radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
1366 radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
1367 radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
1368 radeon_emit(cmd_buffer->cs, cb->cb_color_cmask_slice);
1369 radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
1370 radeon_emit(cmd_buffer->cs, cb->cb_color_fmask_slice);
1371
1372 if (is_vi) { /* DCC BASE */
1373 radeon_set_context_reg(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, cb->cb_dcc_base);
1374 }
1375 }
1376
1377 if (radv_dcc_enabled(image, iview->base_mip)) {
1378 /* Drawing with DCC enabled also compresses colorbuffers. */
1379 VkImageSubresourceRange range = {
1380 .aspectMask = iview->aspect_mask,
1381 .baseMipLevel = iview->base_mip,
1382 .levelCount = iview->level_count,
1383 .baseArrayLayer = iview->base_layer,
1384 .layerCount = iview->layer_count,
1385 };
1386
1387 radv_update_dcc_metadata(cmd_buffer, image, &range, true);
1388 }
1389 }
1390
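/* Work around the TC-compatible HTILE ZRANGE_PRECISION bug by re-emitting
 * DB_Z_INFO with ZRANGE_PRECISION cleared. When the last fast-clear value is
 * unknown at record time, the write is guarded by a COND_EXEC packet that
 * tests the per-image metadata.
 */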
1391 static void
1392 radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer,
1393 struct radv_ds_buffer_info *ds,
1394 const struct radv_image_view *iview,
1395 VkImageLayout layout,
1396 bool in_render_loop, bool requires_cond_exec)
1397 {
1398 const struct radv_image *image = iview->image;
1399 uint32_t db_z_info = ds->db_z_info;
1400 uint32_t db_z_info_reg;
1401
1402 if (!cmd_buffer->device->physical_device->rad_info.has_tc_compat_zrange_bug ||
1403 !radv_image_is_tc_compat_htile(image))
1404 return;
1405
1406 if (!radv_layout_has_htile(image, layout, in_render_loop,
1407 radv_image_queue_family_mask(image,
1408 cmd_buffer->queue_family_index,
1409 cmd_buffer->queue_family_index))) {
1410 db_z_info &= C_028040_TILE_SURFACE_ENABLE;
1411 }
1412
1413 db_z_info &= C_028040_ZRANGE_PRECISION;
1414
1415 if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
1416 db_z_info_reg = R_028038_DB_Z_INFO;
1417 } else {
1418 db_z_info_reg = R_028040_DB_Z_INFO;
1419 }
1420
1421 /* When we don't know the last fast clear value we need to emit a
1422 * conditional packet that will eventually skip the following
1423 * SET_CONTEXT_REG packet.
1424 */
1425 if (requires_cond_exec) {
1426 uint64_t va = radv_get_tc_compat_zrange_va(image, iview->base_mip);
1427
1428 radeon_emit(cmd_buffer->cs, PKT3(PKT3_COND_EXEC, 3, 0));
1429 radeon_emit(cmd_buffer->cs, va);
1430 radeon_emit(cmd_buffer->cs, va >> 32);
1431 radeon_emit(cmd_buffer->cs, 0);
1432 radeon_emit(cmd_buffer->cs, 3); /* SET_CONTEXT_REG size */
1433 }
1434
1435 radeon_set_context_reg(cmd_buffer->cs, db_z_info_reg, db_z_info);
1436 }
1437
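/* Emit the depth/stencil surface registers for the bound attachment. HTILE
 * access is disabled when the current layout cannot keep the surface
 * HTILE-compressed.
 */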
1438 static void
1439 radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer,
1440 struct radv_ds_buffer_info *ds,
1441 struct radv_image_view *iview,
1442 VkImageLayout layout,
1443 bool in_render_loop)
1444 {
1445 const struct radv_image *image = iview->image;
1446 uint32_t db_z_info = ds->db_z_info;
1447 uint32_t db_stencil_info = ds->db_stencil_info;
1448
1449 if (!radv_layout_has_htile(image, layout, in_render_loop,
1450 radv_image_queue_family_mask(image,
1451 cmd_buffer->queue_family_index,
1452 cmd_buffer->queue_family_index))) {
1453 db_z_info &= C_028040_TILE_SURFACE_ENABLE;
1454 db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
1455 }
1456
1457 radeon_set_context_reg(cmd_buffer->cs, R_028008_DB_DEPTH_VIEW, ds->db_depth_view);
1458 radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface);
1459
1460 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
1461 radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base);
1462 radeon_set_context_reg(cmd_buffer->cs, R_02801C_DB_DEPTH_SIZE_XY, ds->db_depth_size);
1463
1464 radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 7);
1465 radeon_emit(cmd_buffer->cs, S_02803C_RESOURCE_LEVEL(1));
1466 radeon_emit(cmd_buffer->cs, db_z_info);
1467 radeon_emit(cmd_buffer->cs, db_stencil_info);
1468 radeon_emit(cmd_buffer->cs, ds->db_z_read_base);
1469 radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base);
1470 radeon_emit(cmd_buffer->cs, ds->db_z_read_base);
1471 radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base);
1472
1473 radeon_set_context_reg_seq(cmd_buffer->cs, R_028068_DB_Z_READ_BASE_HI, 5);
1474 radeon_emit(cmd_buffer->cs, ds->db_z_read_base >> 32);
1475 radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base >> 32);
1476 radeon_emit(cmd_buffer->cs, ds->db_z_read_base >> 32);
1477 radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base >> 32);
1478 radeon_emit(cmd_buffer->cs, ds->db_htile_data_base >> 32);
1479 } else if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
1480 radeon_set_context_reg_seq(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, 3);
1481 radeon_emit(cmd_buffer->cs, ds->db_htile_data_base);
1482 radeon_emit(cmd_buffer->cs, S_028018_BASE_HI(ds->db_htile_data_base >> 32));
1483 radeon_emit(cmd_buffer->cs, ds->db_depth_size);
1484
1485 radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 10);
1486 radeon_emit(cmd_buffer->cs, db_z_info); /* DB_Z_INFO */
1487 radeon_emit(cmd_buffer->cs, db_stencil_info); /* DB_STENCIL_INFO */
1488 radeon_emit(cmd_buffer->cs, ds->db_z_read_base); /* DB_Z_READ_BASE */
1489 radeon_emit(cmd_buffer->cs, S_028044_BASE_HI(ds->db_z_read_base >> 32)); /* DB_Z_READ_BASE_HI */
1490 radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base); /* DB_STENCIL_READ_BASE */
1491 radeon_emit(cmd_buffer->cs, S_02804C_BASE_HI(ds->db_stencil_read_base >> 32)); /* DB_STENCIL_READ_BASE_HI */
1492 radeon_emit(cmd_buffer->cs, ds->db_z_write_base); /* DB_Z_WRITE_BASE */
1493 radeon_emit(cmd_buffer->cs, S_028054_BASE_HI(ds->db_z_write_base >> 32)); /* DB_Z_WRITE_BASE_HI */
1494 radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base); /* DB_STENCIL_WRITE_BASE */
1495 radeon_emit(cmd_buffer->cs, S_02805C_BASE_HI(ds->db_stencil_write_base >> 32)); /* DB_STENCIL_WRITE_BASE_HI */
1496
1497 radeon_set_context_reg_seq(cmd_buffer->cs, R_028068_DB_Z_INFO2, 2);
1498 radeon_emit(cmd_buffer->cs, ds->db_z_info2);
1499 radeon_emit(cmd_buffer->cs, ds->db_stencil_info2);
1500 } else {
1501 radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base);
1502
1503 radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 9);
1504 radeon_emit(cmd_buffer->cs, ds->db_depth_info); /* R_02803C_DB_DEPTH_INFO */
1505 radeon_emit(cmd_buffer->cs, db_z_info); /* R_028040_DB_Z_INFO */
1506 radeon_emit(cmd_buffer->cs, db_stencil_info); /* R_028044_DB_STENCIL_INFO */
1507 radeon_emit(cmd_buffer->cs, ds->db_z_read_base); /* R_028048_DB_Z_READ_BASE */
1508 radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base); /* R_02804C_DB_STENCIL_READ_BASE */
1509 radeon_emit(cmd_buffer->cs, ds->db_z_write_base); /* R_028050_DB_Z_WRITE_BASE */
1510 radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base); /* R_028054_DB_STENCIL_WRITE_BASE */
1511 radeon_emit(cmd_buffer->cs, ds->db_depth_size); /* R_028058_DB_DEPTH_SIZE */
1512 radeon_emit(cmd_buffer->cs, ds->db_depth_slice); /* R_02805C_DB_DEPTH_SLICE */
1513
1514 }
1515
1516 /* Update the ZRANGE_PRECISION value for the TC-compat bug. */
1517 radv_update_zrange_precision(cmd_buffer, ds, iview, layout,
1518 in_render_loop, true);
1519
1520 radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
1521 ds->pa_su_poly_offset_db_fmt_cntl);
1522 }
1523
1524 /**
1525 * Update the fast clear depth/stencil values if the image is bound as a
1526 * depth/stencil buffer.
1527 */
1528 static void
1529 radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer,
1530 const struct radv_image_view *iview,
1531 VkClearDepthStencilValue ds_clear_value,
1532 VkImageAspectFlags aspects)
1533 {
1534 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
1535 const struct radv_image *image = iview->image;
1536 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1537 uint32_t att_idx;
1538
1539 if (!cmd_buffer->state.attachments || !subpass)
1540 return;
1541
1542 if (!subpass->depth_stencil_attachment)
1543 return;
1544
1545 att_idx = subpass->depth_stencil_attachment->attachment;
1546 if (cmd_buffer->state.attachments[att_idx].iview->image != image)
1547 return;
1548
1549 if (aspects == (VK_IMAGE_ASPECT_DEPTH_BIT |
1550 VK_IMAGE_ASPECT_STENCIL_BIT)) {
1551 radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 2);
1552 radeon_emit(cs, ds_clear_value.stencil);
1553 radeon_emit(cs, fui(ds_clear_value.depth));
1554 } else if (aspects == VK_IMAGE_ASPECT_DEPTH_BIT) {
1555 radeon_set_context_reg_seq(cs, R_02802C_DB_DEPTH_CLEAR, 1);
1556 radeon_emit(cs, fui(ds_clear_value.depth));
1557 } else {
1558 assert(aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
1559 radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 1);
1560 radeon_emit(cs, ds_clear_value.stencil);
1561 }
1562
1563 /* Update the ZRANGE_PRECISION value for the TC-compat bug. This is
1564 * only needed when clearing Z to 0.0.
1565 */
1566 if ((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
1567 ds_clear_value.depth == 0.0) {
1568 VkImageLayout layout = subpass->depth_stencil_attachment->layout;
1569 bool in_render_loop = subpass->depth_stencil_attachment->in_render_loop;
1570
1571 radv_update_zrange_precision(cmd_buffer, &cmd_buffer->state.attachments[att_idx].ds,
1572 iview, layout, in_render_loop, false);
1573 }
1574
1575 cmd_buffer->state.context_roll_without_scissor_emitted = true;
1576 }
1577
1578 /**
1579 * Set the clear depth/stencil values to the image's metadata.
1580 */
1581 static void
1582 radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1583 struct radv_image *image,
1584 const VkImageSubresourceRange *range,
1585 VkClearDepthStencilValue ds_clear_value,
1586 VkImageAspectFlags aspects)
1587 {
1588 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1589 uint64_t va = radv_get_ds_clear_value_va(image, range->baseMipLevel);
1590 uint32_t level_count = radv_get_levelCount(image, range);
1591
1592 if (aspects == (VK_IMAGE_ASPECT_DEPTH_BIT |
1593 VK_IMAGE_ASPECT_STENCIL_BIT)) {
1594 /* Use the fastest way when both aspects are used. */
1595 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + 2 * level_count, cmd_buffer->state.predicating));
1596 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
1597 S_370_WR_CONFIRM(1) |
1598 S_370_ENGINE_SEL(V_370_PFP));
1599 radeon_emit(cs, va);
1600 radeon_emit(cs, va >> 32);
1601
1602 for (uint32_t l = 0; l < level_count; l++) {
1603 radeon_emit(cs, ds_clear_value.stencil);
1604 radeon_emit(cs, fui(ds_clear_value.depth));
1605 }
1606 } else {
1607 /* Otherwise we need one WRITE_DATA packet per level. */
1608 for (uint32_t l = 0; l < level_count; l++) {
1609 uint64_t va = radv_get_ds_clear_value_va(image, range->baseMipLevel + l);
1610 unsigned value;
1611
1612 if (aspects == VK_IMAGE_ASPECT_DEPTH_BIT) {
1613 value = fui(ds_clear_value.depth);
1614 va += 4;
1615 } else {
1616 assert(aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
1617 value = ds_clear_value.stencil;
1618 }
1619
1620 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, cmd_buffer->state.predicating));
1621 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
1622 S_370_WR_CONFIRM(1) |
1623 S_370_ENGINE_SEL(V_370_PFP));
1624 radeon_emit(cs, va);
1625 radeon_emit(cs, va >> 32);
1626 radeon_emit(cs, value);
1627 }
1628 }
1629 }
1630
1631 /**
1632 * Update the TC-compat metadata value for this image.
1633 */
1634 static void
1635 radv_set_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer,
1636 struct radv_image *image,
1637 const VkImageSubresourceRange *range,
1638 uint32_t value)
1639 {
1640 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1641
1642 if (!cmd_buffer->device->physical_device->rad_info.has_tc_compat_zrange_bug)
1643 return;
1644
1645 uint64_t va = radv_get_tc_compat_zrange_va(image, range->baseMipLevel);
1646 uint32_t level_count = radv_get_levelCount(image, range);
1647
1648 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + level_count, cmd_buffer->state.predicating));
1649 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
1650 S_370_WR_CONFIRM(1) |
1651 S_370_ENGINE_SEL(V_370_PFP));
1652 radeon_emit(cs, va);
1653 radeon_emit(cs, va >> 32);
1654
1655 for (uint32_t l = 0; l < level_count; l++)
1656 radeon_emit(cs, value);
1657 }
1658
1659 static void
1660 radv_update_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer,
1661 const struct radv_image_view *iview,
1662 VkClearDepthStencilValue ds_clear_value)
1663 {
1664 VkImageSubresourceRange range = {
1665 .aspectMask = iview->aspect_mask,
1666 .baseMipLevel = iview->base_mip,
1667 .levelCount = iview->level_count,
1668 .baseArrayLayer = iview->base_layer,
1669 .layerCount = iview->layer_count,
1670 };
1671 uint32_t cond_val;
1672
1673 /* Conditionally set DB_Z_INFO.ZRANGE_PRECISION to 0 when the last
1674 * depth clear value is 0.0f.
1675 */
1676 cond_val = ds_clear_value.depth == 0.0f ? UINT_MAX : 0;
1677
1678 radv_set_tc_compat_zrange_metadata(cmd_buffer, iview->image, &range,
1679 cond_val);
1680 }
1681
1682 /**
1683 * Update the clear depth/stencil values for this image.
1684 */
1685 void
1686 radv_update_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1687 const struct radv_image_view *iview,
1688 VkClearDepthStencilValue ds_clear_value,
1689 VkImageAspectFlags aspects)
1690 {
1691 VkImageSubresourceRange range = {
1692 .aspectMask = iview->aspect_mask,
1693 .baseMipLevel = iview->base_mip,
1694 .levelCount = iview->level_count,
1695 .baseArrayLayer = iview->base_layer,
1696 .layerCount = iview->layer_count,
1697 };
1698 struct radv_image *image = iview->image;
1699
1700 assert(radv_image_has_htile(image));
1701
1702 radv_set_ds_clear_metadata(cmd_buffer, iview->image, &range,
1703 ds_clear_value, aspects);
1704
1705 if (radv_image_is_tc_compat_htile(image) &&
1706 (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
1707 radv_update_tc_compat_zrange_metadata(cmd_buffer, iview,
1708 ds_clear_value);
1709 }
1710
1711 radv_update_bound_fast_clear_ds(cmd_buffer, iview, ds_clear_value,
1712 aspects);
1713 }
1714
1715 /**
1716 * Load the clear depth/stencil values from the image's metadata.
1717 */
1718 static void
1719 radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1720 const struct radv_image_view *iview)
1721 {
1722 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1723 const struct radv_image *image = iview->image;
1724 VkImageAspectFlags aspects = vk_format_aspects(image->vk_format);
1725 uint64_t va = radv_get_ds_clear_value_va(image, iview->base_mip);
1726 unsigned reg_offset = 0, reg_count = 0;
1727
1728 if (!radv_image_has_htile(image))
1729 return;
1730
1731 if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
1732 ++reg_count;
1733 } else {
1734 ++reg_offset;
1735 va += 4;
1736 }
1737 if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
1738 ++reg_count;
1739
1740 uint32_t reg = R_028028_DB_STENCIL_CLEAR + 4 * reg_offset;
1741
1742 if (cmd_buffer->device->physical_device->rad_info.has_load_ctx_reg_pkt) {
1743 radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, 0));
1744 radeon_emit(cs, va);
1745 radeon_emit(cs, va >> 32);
1746 radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
1747 radeon_emit(cs, reg_count);
1748 } else {
1749 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1750 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
1751 COPY_DATA_DST_SEL(COPY_DATA_REG) |
1752 (reg_count == 2 ? COPY_DATA_COUNT_SEL : 0));
1753 radeon_emit(cs, va);
1754 radeon_emit(cs, va >> 32);
1755 radeon_emit(cs, reg >> 2);
1756 radeon_emit(cs, 0);
1757
1758 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
1759 radeon_emit(cs, 0);
1760 }
1761 }
1762
1763 /*
1764 	 * With DCC, some color surfaces don't require CMASK elimination before
1765 	 * being used as a texture. This sets a predicate value that determines
1766 	 * whether the CMASK eliminate pass is required.
1767 */
1768 void
1769 radv_update_fce_metadata(struct radv_cmd_buffer *cmd_buffer,
1770 struct radv_image *image,
1771 const VkImageSubresourceRange *range, bool value)
1772 {
1773 uint64_t pred_val = value;
1774 uint64_t va = radv_image_get_fce_pred_va(image, range->baseMipLevel);
1775 uint32_t level_count = radv_get_levelCount(image, range);
1776 uint32_t count = 2 * level_count;
1777
1778 assert(radv_dcc_enabled(image, range->baseMipLevel));
1779
1780 radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
1781 radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM) |
1782 S_370_WR_CONFIRM(1) |
1783 S_370_ENGINE_SEL(V_370_PFP));
1784 radeon_emit(cmd_buffer->cs, va);
1785 radeon_emit(cmd_buffer->cs, va >> 32);
1786
1787 for (uint32_t l = 0; l < level_count; l++) {
1788 radeon_emit(cmd_buffer->cs, pred_val);
1789 radeon_emit(cmd_buffer->cs, pred_val >> 32);
1790 }
1791 }
1792
1793 /**
1794 * Update the DCC predicate to reflect the compression state.
1795 */
1796 void
1797 radv_update_dcc_metadata(struct radv_cmd_buffer *cmd_buffer,
1798 struct radv_image *image,
1799 const VkImageSubresourceRange *range, bool value)
1800 {
1801 uint64_t pred_val = value;
1802 uint64_t va = radv_image_get_dcc_pred_va(image, range->baseMipLevel);
1803 uint32_t level_count = radv_get_levelCount(image, range);
1804 uint32_t count = 2 * level_count;
1805
1806 assert(radv_dcc_enabled(image, range->baseMipLevel));
1807
1808 radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
1809 radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM) |
1810 S_370_WR_CONFIRM(1) |
1811 S_370_ENGINE_SEL(V_370_PFP));
1812 radeon_emit(cmd_buffer->cs, va);
1813 radeon_emit(cmd_buffer->cs, va >> 32);
1814
1815 for (uint32_t l = 0; l < level_count; l++) {
1816 radeon_emit(cmd_buffer->cs, pred_val);
1817 radeon_emit(cmd_buffer->cs, pred_val >> 32);
1818 }
1819 }
1820
1821 /**
1822 * Update the fast clear color values if the image is bound as a color buffer.
1823 */
1824 static void
1825 radv_update_bound_fast_clear_color(struct radv_cmd_buffer *cmd_buffer,
1826 struct radv_image *image,
1827 int cb_idx,
1828 uint32_t color_values[2])
1829 {
1830 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
1831 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1832 uint32_t att_idx;
1833
1834 if (!cmd_buffer->state.attachments || !subpass)
1835 return;
1836
1837 att_idx = subpass->color_attachments[cb_idx].attachment;
1838 if (att_idx == VK_ATTACHMENT_UNUSED)
1839 return;
1840
1841 if (cmd_buffer->state.attachments[att_idx].iview->image != image)
1842 return;
1843
1844 radeon_set_context_reg_seq(cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c, 2);
1845 radeon_emit(cs, color_values[0]);
1846 radeon_emit(cs, color_values[1]);
1847
1848 cmd_buffer->state.context_roll_without_scissor_emitted = true;
1849 }
1850
1851 /**
1852 * Set the clear color values to the image's metadata.
1853 */
1854 static void
1855 radv_set_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1856 struct radv_image *image,
1857 const VkImageSubresourceRange *range,
1858 uint32_t color_values[2])
1859 {
1860 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1861 uint64_t va = radv_image_get_fast_clear_va(image, range->baseMipLevel);
1862 uint32_t level_count = radv_get_levelCount(image, range);
1863 uint32_t count = 2 * level_count;
1864
1865 assert(radv_image_has_cmask(image) ||
1866 radv_dcc_enabled(image, range->baseMipLevel));
1867
1868 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, cmd_buffer->state.predicating));
1869 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
1870 S_370_WR_CONFIRM(1) |
1871 S_370_ENGINE_SEL(V_370_PFP));
1872 radeon_emit(cs, va);
1873 radeon_emit(cs, va >> 32);
1874
1875 for (uint32_t l = 0; l < level_count; l++) {
1876 radeon_emit(cs, color_values[0]);
1877 radeon_emit(cs, color_values[1]);
1878 }
1879 }
1880
1881 /**
1882 * Update the clear color values for this image.
1883 */
1884 void
1885 radv_update_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1886 const struct radv_image_view *iview,
1887 int cb_idx,
1888 uint32_t color_values[2])
1889 {
1890 struct radv_image *image = iview->image;
1891 VkImageSubresourceRange range = {
1892 .aspectMask = iview->aspect_mask,
1893 .baseMipLevel = iview->base_mip,
1894 .levelCount = iview->level_count,
1895 .baseArrayLayer = iview->base_layer,
1896 .layerCount = iview->layer_count,
1897 };
1898
1899 assert(radv_image_has_cmask(image) ||
1900 radv_dcc_enabled(image, iview->base_mip));
1901
1902 radv_set_color_clear_metadata(cmd_buffer, image, &range, color_values);
1903
1904 radv_update_bound_fast_clear_color(cmd_buffer, image, cb_idx,
1905 color_values);
1906 }
1907
1908 /**
1909 * Load the clear color values from the image's metadata.
1910 */
1911 static void
1912 radv_load_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1913 struct radv_image_view *iview,
1914 int cb_idx)
1915 {
1916 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1917 struct radv_image *image = iview->image;
1918 uint64_t va = radv_image_get_fast_clear_va(image, iview->base_mip);
1919
1920 if (!radv_image_has_cmask(image) &&
1921 !radv_dcc_enabled(image, iview->base_mip))
1922 return;
1923
1924 uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c;
1925
1926 if (cmd_buffer->device->physical_device->rad_info.has_load_ctx_reg_pkt) {
1927 radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, cmd_buffer->state.predicating));
1928 radeon_emit(cs, va);
1929 radeon_emit(cs, va >> 32);
1930 radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
1931 radeon_emit(cs, 2);
1932 } else {
1933 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
1934 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
1935 COPY_DATA_DST_SEL(COPY_DATA_REG) |
1936 COPY_DATA_COUNT_SEL);
1937 radeon_emit(cs, va);
1938 radeon_emit(cs, va >> 32);
1939 radeon_emit(cs, reg >> 2);
1940 radeon_emit(cs, 0);
1941
1942 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
1943 radeon_emit(cs, 0);
1944 }
1945 }
1946
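/**
 * Emit the framebuffer state for the current subpass: per-attachment color
 * and depth/stencil registers, fast clear metadata loads, the window
 * scissor, and (on GFX8+) the DCC control register. Clears
 * RADV_CMD_DIRTY_FRAMEBUFFER when done.
 */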
1947 static void
1948 radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
1949 {
1950 int i;
1951 struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
1952 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
1953
1954 	/* This may happen when recording an inherited secondary command buffer. */
1955 if (!framebuffer)
1956 return;
1957
1958 for (i = 0; i < 8; ++i) {
1959 if (i >= subpass->color_count || subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
1960 radeon_set_context_reg(cmd_buffer->cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
1961 S_028C70_FORMAT(V_028C70_COLOR_INVALID));
1962 continue;
1963 }
1964
1965 int idx = subpass->color_attachments[i].attachment;
1966 struct radv_image_view *iview = cmd_buffer->state.attachments[idx].iview;
1967 VkImageLayout layout = subpass->color_attachments[i].layout;
1968 bool in_render_loop = subpass->color_attachments[i].in_render_loop;
1969
1970 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, iview->bo);
1971
1972 assert(iview->aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT |
1973 VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT));
1974 radv_emit_fb_color_state(cmd_buffer, i, &cmd_buffer->state.attachments[idx].cb, iview, layout, in_render_loop);
1975
1976 radv_load_color_clear_metadata(cmd_buffer, iview, i);
1977 }
1978
1979 if (subpass->depth_stencil_attachment) {
1980 int idx = subpass->depth_stencil_attachment->attachment;
1981 VkImageLayout layout = subpass->depth_stencil_attachment->layout;
1982 bool in_render_loop = subpass->depth_stencil_attachment->in_render_loop;
1983 struct radv_image_view *iview = cmd_buffer->state.attachments[idx].iview;
1984 struct radv_image *image = iview->image;
1985 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, cmd_buffer->state.attachments[idx].iview->bo);
1986 ASSERTED uint32_t queue_mask = radv_image_queue_family_mask(image,
1987 cmd_buffer->queue_family_index,
1988 cmd_buffer->queue_family_index);
1989 /* We currently don't support writing decompressed HTILE */
1990 assert(radv_layout_has_htile(image, layout, in_render_loop, queue_mask) ==
1991 radv_layout_is_htile_compressed(image, layout, in_render_loop, queue_mask));
1992
1993 radv_emit_fb_ds_state(cmd_buffer, &cmd_buffer->state.attachments[idx].ds, iview, layout, in_render_loop);
1994
1995 if (cmd_buffer->state.attachments[idx].ds.offset_scale != cmd_buffer->state.offset_scale) {
1996 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
1997 cmd_buffer->state.offset_scale = cmd_buffer->state.attachments[idx].ds.offset_scale;
1998 }
1999 radv_load_ds_clear_metadata(cmd_buffer, iview);
2000 } else {
2001 if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9)
2002 radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 2);
2003 else
2004 radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2);
2005
2006 radeon_emit(cmd_buffer->cs, S_028040_FORMAT(V_028040_Z_INVALID)); /* DB_Z_INFO */
2007 radeon_emit(cmd_buffer->cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* DB_STENCIL_INFO */
2008 }
2009 radeon_set_context_reg(cmd_buffer->cs, R_028208_PA_SC_WINDOW_SCISSOR_BR,
2010 S_028208_BR_X(framebuffer->width) |
2011 S_028208_BR_Y(framebuffer->height));
2012
2013 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8) {
2014 bool disable_constant_encode =
2015 cmd_buffer->device->physical_device->rad_info.has_dcc_constant_encode;
2016 enum chip_class chip_class =
2017 cmd_buffer->device->physical_device->rad_info.chip_class;
2018 uint8_t watermark = chip_class >= GFX10 ? 6 : 4;
2019
2020 radeon_set_context_reg(cmd_buffer->cs, R_028424_CB_DCC_CONTROL,
2021 S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(chip_class <= GFX9) |
2022 S_028424_OVERWRITE_COMBINER_WATERMARK(watermark) |
2023 S_028424_DISABLE_CONSTANT_ENCODE_REG(disable_constant_encode));
2024 }
2025
2026 if (cmd_buffer->device->dfsm_allowed) {
2027 radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
2028 radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
2029 }
2030
2031 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_FRAMEBUFFER;
2032 }
2033
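/**
 * Emit the index type (only when it changed since the last draw) and the
 * index buffer base address and size, then clear
 * RADV_CMD_DIRTY_INDEX_BUFFER.
 */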
2034 static void
2035 radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer)
2036 {
2037 struct radeon_cmdbuf *cs = cmd_buffer->cs;
2038 struct radv_cmd_state *state = &cmd_buffer->state;
2039
2040 if (state->index_type != state->last_index_type) {
2041 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
2042 radeon_set_uconfig_reg_idx(cmd_buffer->device->physical_device,
2043 cs, R_03090C_VGT_INDEX_TYPE,
2044 2, state->index_type);
2045 } else {
2046 radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
2047 radeon_emit(cs, state->index_type);
2048 }
2049
2050 state->last_index_type = state->index_type;
2051 }
2052
2053 radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
2054 radeon_emit(cs, state->index_va);
2055 radeon_emit(cs, state->index_va >> 32);
2056
2057 radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
2058 radeon_emit(cs, state->max_index_count);
2059
2060 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_INDEX_BUFFER;
2061 }
2062
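/**
 * Program DB_COUNT_CONTROL for the current occlusion query state. On GFX7+,
 * perfect occlusion queries also require out-of-order rasterization to be
 * disabled while they are active and re-enabled afterwards.
 */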
2063 void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
2064 {
2065 bool has_perfect_queries = cmd_buffer->state.perfect_occlusion_queries_enabled;
2066 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
2067 uint32_t pa_sc_mode_cntl_1 =
2068 pipeline ? pipeline->graphics.ms.pa_sc_mode_cntl_1 : 0;
2069 uint32_t db_count_control;
2070
2071 	if (!cmd_buffer->state.active_occlusion_queries) {
2072 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
2073 if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
2074 pipeline->graphics.disable_out_of_order_rast_for_occlusion &&
2075 has_perfect_queries) {
2076 /* Re-enable out-of-order rasterization if the
2077 			 * bound pipeline supports it and if it has
2078 * been disabled before starting any perfect
2079 * occlusion queries.
2080 */
2081 radeon_set_context_reg(cmd_buffer->cs,
2082 R_028A4C_PA_SC_MODE_CNTL_1,
2083 pa_sc_mode_cntl_1);
2084 }
2085 }
2086 db_count_control = S_028004_ZPASS_INCREMENT_DISABLE(1);
2087 } else {
2088 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
2089 uint32_t sample_rate = subpass ? util_logbase2(subpass->max_sample_count) : 0;
2090 bool gfx10_perfect = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10 && has_perfect_queries;
2091
2092 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
2093 db_count_control =
2094 S_028004_PERFECT_ZPASS_COUNTS(has_perfect_queries) |
2095 S_028004_DISABLE_CONSERVATIVE_ZPASS_COUNTS(gfx10_perfect) |
2096 S_028004_SAMPLE_RATE(sample_rate) |
2097 S_028004_ZPASS_ENABLE(1) |
2098 S_028004_SLICE_EVEN_ENABLE(1) |
2099 S_028004_SLICE_ODD_ENABLE(1);
2100
2101 if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
2102 pipeline->graphics.disable_out_of_order_rast_for_occlusion &&
2103 has_perfect_queries) {
2104 /* If the bound pipeline has enabled
2105 * out-of-order rasterization, we should
2106 * disable it before starting any perfect
2107 * occlusion queries.
2108 */
2109 pa_sc_mode_cntl_1 &= C_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE;
2110
2111 radeon_set_context_reg(cmd_buffer->cs,
2112 R_028A4C_PA_SC_MODE_CNTL_1,
2113 pa_sc_mode_cntl_1);
2114 }
2115 } else {
2116 db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
2117 S_028004_SAMPLE_RATE(sample_rate);
2118 }
2119 }
2120
2121 radeon_set_context_reg(cmd_buffer->cs, R_028004_DB_COUNT_CONTROL, db_count_control);
2122
2123 cmd_buffer->state.context_roll_without_scissor_emitted = true;
2124 }
2125
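/**
 * Emit all dirty dynamic states that the currently bound pipeline actually
 * uses, then clear the corresponding dirty bits.
 */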
2126 static void
2127 radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer)
2128 {
2129 uint32_t states = cmd_buffer->state.dirty & cmd_buffer->state.emitted_pipeline->graphics.needed_dynamic_state;
2130
2131 if (states & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
2132 radv_emit_viewport(cmd_buffer);
2133
2134 if (states & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR | RADV_CMD_DIRTY_DYNAMIC_VIEWPORT) &&
2135 !cmd_buffer->device->physical_device->rad_info.has_gfx9_scissor_bug)
2136 radv_emit_scissor(cmd_buffer);
2137
2138 if (states & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)
2139 radv_emit_line_width(cmd_buffer);
2140
2141 if (states & RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS)
2142 radv_emit_blend_constants(cmd_buffer);
2143
2144 if (states & (RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE |
2145 RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
2146 RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK))
2147 radv_emit_stencil(cmd_buffer);
2148
2149 if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS)
2150 radv_emit_depth_bounds(cmd_buffer);
2151
2152 if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)
2153 radv_emit_depth_bias(cmd_buffer);
2154
2155 if (states & RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE)
2156 radv_emit_discard_rectangle(cmd_buffer);
2157
2158 if (states & RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS)
2159 radv_emit_sample_locations(cmd_buffer);
2160
2161 cmd_buffer->state.dirty &= ~states;
2162 }
2163
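/**
 * Upload the push descriptor set contents to the upload BO and record the
 * resulting GPU VA in the set.
 */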
2164 static void
2165 radv_flush_push_descriptors(struct radv_cmd_buffer *cmd_buffer,
2166 VkPipelineBindPoint bind_point)
2167 {
2168 struct radv_descriptor_state *descriptors_state =
2169 radv_get_descriptors_state(cmd_buffer, bind_point);
2170 struct radv_descriptor_set *set = &descriptors_state->push_set.set;
2171 unsigned bo_offset;
2172
2173 if (!radv_cmd_buffer_upload_data(cmd_buffer, set->size, 32,
2174 set->mapped_ptr,
2175 &bo_offset))
2176 return;
2177
2178 set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2179 set->va += bo_offset;
2180 }
2181
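/**
 * Build a table with the (low 32 bits of the) addresses of all bound
 * descriptor sets in the upload BO and point every active shader stage at
 * it through the AC_UD_INDIRECT_DESCRIPTOR_SETS user SGPR.
 */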
2182 static void
2183 radv_flush_indirect_descriptor_sets(struct radv_cmd_buffer *cmd_buffer,
2184 VkPipelineBindPoint bind_point)
2185 {
2186 struct radv_descriptor_state *descriptors_state =
2187 radv_get_descriptors_state(cmd_buffer, bind_point);
2188 uint32_t size = MAX_SETS * 4;
2189 uint32_t offset;
2190 void *ptr;
2191
2192 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size,
2193 256, &offset, &ptr))
2194 return;
2195
2196 for (unsigned i = 0; i < MAX_SETS; i++) {
2197 uint32_t *uptr = ((uint32_t *)ptr) + i;
2198 uint64_t set_va = 0;
2199 struct radv_descriptor_set *set = descriptors_state->sets[i];
2200 if (descriptors_state->valid & (1u << i))
2201 set_va = set->va;
2202 uptr[0] = set_va & 0xffffffff;
2203 }
2204
2205 uint64_t va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2206 va += offset;
2207
2208 if (cmd_buffer->state.pipeline) {
2209 if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX])
2210 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
2211 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
2212
2213 if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_FRAGMENT])
2214 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_FRAGMENT,
2215 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
2216
2217 if (radv_pipeline_has_gs(cmd_buffer->state.pipeline))
2218 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
2219 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
2220
2221 if (radv_pipeline_has_tess(cmd_buffer->state.pipeline))
2222 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_TESS_CTRL,
2223 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
2224
2225 if (radv_pipeline_has_tess(cmd_buffer->state.pipeline))
2226 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_TESS_EVAL,
2227 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
2228 }
2229
2230 if (cmd_buffer->state.compute_pipeline)
2231 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.compute_pipeline, MESA_SHADER_COMPUTE,
2232 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
2233 }
2234
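/**
 * Flush dirty descriptors for the bind point implied by the given stages:
 * upload push descriptors, rebuild the indirect descriptor table if the
 * pipeline needs it, and emit the per-stage descriptor pointers.
 */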
2235 static void
2236 radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer,
2237 VkShaderStageFlags stages)
2238 {
2239 VkPipelineBindPoint bind_point = stages & VK_SHADER_STAGE_COMPUTE_BIT ?
2240 VK_PIPELINE_BIND_POINT_COMPUTE :
2241 VK_PIPELINE_BIND_POINT_GRAPHICS;
2242 struct radv_descriptor_state *descriptors_state =
2243 radv_get_descriptors_state(cmd_buffer, bind_point);
2244 struct radv_cmd_state *state = &cmd_buffer->state;
2245 bool flush_indirect_descriptors;
2246
2247 if (!descriptors_state->dirty)
2248 return;
2249
2250 if (descriptors_state->push_dirty)
2251 radv_flush_push_descriptors(cmd_buffer, bind_point);
2252
2253 flush_indirect_descriptors =
2254 (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS &&
2255 state->pipeline && state->pipeline->need_indirect_descriptor_sets) ||
2256 (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE &&
2257 state->compute_pipeline && state->compute_pipeline->need_indirect_descriptor_sets);
2258
2259 if (flush_indirect_descriptors)
2260 radv_flush_indirect_descriptor_sets(cmd_buffer, bind_point);
2261
2262 ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
2263 cmd_buffer->cs,
2264 MAX_SETS * MESA_SHADER_STAGES * 4);
2265
2266 if (cmd_buffer->state.pipeline) {
2267 radv_foreach_stage(stage, stages) {
2268 if (!cmd_buffer->state.pipeline->shaders[stage])
2269 continue;
2270
2271 radv_emit_descriptor_pointers(cmd_buffer,
2272 cmd_buffer->state.pipeline,
2273 descriptors_state, stage);
2274 }
2275 }
2276
2277 if (cmd_buffer->state.compute_pipeline &&
2278 (stages & VK_SHADER_STAGE_COMPUTE_BIT)) {
2279 radv_emit_descriptor_pointers(cmd_buffer,
2280 cmd_buffer->state.compute_pipeline,
2281 descriptors_state,
2282 MESA_SHADER_COMPUTE);
2283 }
2284
2285 descriptors_state->dirty = 0;
2286 descriptors_state->push_dirty = false;
2287
2288 assert(cmd_buffer->cs->cdw <= cdw_max);
2289
2290 if (unlikely(cmd_buffer->device->trace_bo))
2291 radv_save_descriptors(cmd_buffer, bind_point);
2292 }
2293
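/**
 * Flush push constants for the given stages: emit the inline push constants
 * and, if any stage loads push constants or dynamic offsets from memory,
 * upload them (together with the dynamic buffer descriptors) and emit the
 * push constant address for each stage.
 */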
2294 static void
2295 radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
2296 VkShaderStageFlags stages)
2297 {
2298 struct radv_pipeline *pipeline = stages & VK_SHADER_STAGE_COMPUTE_BIT
2299 ? cmd_buffer->state.compute_pipeline
2300 : cmd_buffer->state.pipeline;
2301 VkPipelineBindPoint bind_point = stages & VK_SHADER_STAGE_COMPUTE_BIT ?
2302 VK_PIPELINE_BIND_POINT_COMPUTE :
2303 VK_PIPELINE_BIND_POINT_GRAPHICS;
2304 struct radv_descriptor_state *descriptors_state =
2305 radv_get_descriptors_state(cmd_buffer, bind_point);
2306 struct radv_pipeline_layout *layout = pipeline->layout;
2307 struct radv_shader_variant *shader, *prev_shader;
2308 bool need_push_constants = false;
2309 unsigned offset;
2310 void *ptr;
2311 uint64_t va;
2312
2313 stages &= cmd_buffer->push_constant_stages;
2314 if (!stages ||
2315 (!layout->push_constant_size && !layout->dynamic_offset_count))
2316 return;
2317
2318 radv_foreach_stage(stage, stages) {
2319 shader = radv_get_shader(pipeline, stage);
2320 if (!shader)
2321 continue;
2322
2323 need_push_constants |= shader->info.loads_push_constants;
2324 need_push_constants |= shader->info.loads_dynamic_offsets;
2325
2326 uint8_t base = shader->info.base_inline_push_consts;
2327 uint8_t count = shader->info.num_inline_push_consts;
2328
2329 radv_emit_inline_push_consts(cmd_buffer, pipeline, stage,
2330 AC_UD_INLINE_PUSH_CONSTANTS,
2331 count,
2332 (uint32_t *)&cmd_buffer->push_constants[base * 4]);
2333 }
2334
2335 if (need_push_constants) {
2336 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
2337 16 * layout->dynamic_offset_count,
2338 256, &offset, &ptr))
2339 return;
2340
2341 memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
2342 memcpy((char*)ptr + layout->push_constant_size,
2343 descriptors_state->dynamic_buffers,
2344 16 * layout->dynamic_offset_count);
2345
2346 va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2347 va += offset;
2348
2349 ASSERTED unsigned cdw_max =
2350 radeon_check_space(cmd_buffer->device->ws,
2351 cmd_buffer->cs, MESA_SHADER_STAGES * 4);
2352
2353 prev_shader = NULL;
2354 radv_foreach_stage(stage, stages) {
2355 shader = radv_get_shader(pipeline, stage);
2356
2357 /* Avoid redundantly emitting the address for merged stages. */
2358 if (shader && shader != prev_shader) {
2359 radv_emit_userdata_address(cmd_buffer, pipeline, stage,
2360 AC_UD_PUSH_CONSTANTS, va);
2361
2362 prev_shader = shader;
2363 }
2364 }
2365 assert(cmd_buffer->cs->cdw <= cdw_max);
2366 }
2367
2368 cmd_buffer->push_constant_stages &= ~stages;
2369 }
2370
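/**
 * Rebuild the vertex buffer descriptors when the pipeline or the vertex
 * bindings changed and the vertex shader actually uses vertex buffers.
 * Each binding gets a 4-dword buffer descriptor (base address, address high
 * bits + stride, num_records, dst_sel/format) in the upload BO; the table
 * address is then emitted as the AC_UD_VS_VERTEX_BUFFERS user SGPR and
 * queued for L2 prefetch.
 */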
2371 static void
2372 radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer,
2373 bool pipeline_is_dirty)
2374 {
2375 if ((pipeline_is_dirty ||
2376 (cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) &&
2377 cmd_buffer->state.pipeline->num_vertex_bindings &&
2378 radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.vs.has_vertex_buffers) {
2379 unsigned vb_offset;
2380 void *vb_ptr;
2381 uint32_t i = 0;
2382 uint32_t count = cmd_buffer->state.pipeline->num_vertex_bindings;
2383 uint64_t va;
2384
2385 		/* Allocate some descriptor state for vertex buffers. */
2386 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, count * 16, 256,
2387 &vb_offset, &vb_ptr))
2388 return;
2389
2390 for (i = 0; i < count; i++) {
2391 uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
2392 uint32_t offset;
2393 struct radv_buffer *buffer = cmd_buffer->vertex_bindings[i].buffer;
2394 uint32_t stride = cmd_buffer->state.pipeline->binding_stride[i];
2395 unsigned num_records;
2396
2397 if (!buffer)
2398 continue;
2399
2400 va = radv_buffer_get_va(buffer->bo);
2401
2402 offset = cmd_buffer->vertex_bindings[i].offset;
2403 va += offset + buffer->offset;
2404
2405 num_records = buffer->size - offset;
2406 if (cmd_buffer->device->physical_device->rad_info.chip_class != GFX8 && stride)
2407 num_records /= stride;
2408
2409 desc[0] = va;
2410 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
2411 desc[2] = num_records;
2412 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
2413 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
2414 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
2415 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
2416
2417 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
2418 /* OOB_SELECT chooses the out-of-bounds check:
2419 * - 1: index >= NUM_RECORDS (Structured)
2420 * - 3: offset >= NUM_RECORDS (Raw)
2421 */
2422 desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_UINT) |
2423 S_008F0C_OOB_SELECT(stride ? 1 : 3) |
2424 S_008F0C_RESOURCE_LEVEL(1);
2425 } else {
2426 desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) |
2427 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
2428 }
2429 }
2430
2431 va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2432 va += vb_offset;
2433
2434 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
2435 AC_UD_VS_VERTEX_BUFFERS, va);
2436
2437 cmd_buffer->state.vb_va = va;
2438 cmd_buffer->state.vb_size = count * 16;
2439 cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_VBO_DESCRIPTORS;
2440 }
2441 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_VERTEX_BUFFER;
2442 }
2443
2444 static void
2445 radv_emit_streamout_buffers(struct radv_cmd_buffer *cmd_buffer, uint64_t va)
2446 {
2447 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
2448 struct radv_userdata_info *loc;
2449 uint32_t base_reg;
2450
2451 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
2452 if (!radv_get_shader(pipeline, stage))
2453 continue;
2454
2455 loc = radv_lookup_user_sgpr(pipeline, stage,
2456 AC_UD_STREAMOUT_BUFFERS);
2457 if (loc->sgpr_idx == -1)
2458 continue;
2459
2460 base_reg = pipeline->user_data_0[stage];
2461
2462 radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
2463 base_reg + loc->sgpr_idx * 4, va, false);
2464 }
2465
2466 if (radv_pipeline_has_gs_copy_shader(pipeline)) {
2467 loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_STREAMOUT_BUFFERS];
2468 if (loc->sgpr_idx != -1) {
2469 base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
2470
2471 radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
2472 base_reg + loc->sgpr_idx * 4, va, false);
2473 }
2474 }
2475 }
2476
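/**
 * When the streamout bindings changed, build one buffer descriptor per
 * enabled streamout buffer in the upload BO and emit the table address to
 * every stage that uses the AC_UD_STREAMOUT_BUFFERS user SGPR.
 */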
2477 static void
2478 radv_flush_streamout_descriptors(struct radv_cmd_buffer *cmd_buffer)
2479 {
2480 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_STREAMOUT_BUFFER) {
2481 struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
2482 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
2483 unsigned so_offset;
2484 void *so_ptr;
2485 uint64_t va;
2486
2487 /* Allocate some descriptor state for streamout buffers. */
2488 if (!radv_cmd_buffer_upload_alloc(cmd_buffer,
2489 MAX_SO_BUFFERS * 16, 256,
2490 &so_offset, &so_ptr))
2491 return;
2492
2493 for (uint32_t i = 0; i < MAX_SO_BUFFERS; i++) {
2494 struct radv_buffer *buffer = sb[i].buffer;
2495 uint32_t *desc = &((uint32_t *)so_ptr)[i * 4];
2496
2497 if (!(so->enabled_mask & (1 << i)))
2498 continue;
2499
2500 va = radv_buffer_get_va(buffer->bo) + buffer->offset;
2501
2502 va += sb[i].offset;
2503
2504 /* Set the descriptor.
2505 *
2506 * On GFX8, the format must be non-INVALID, otherwise
2507 * the buffer will be considered not bound and store
2508 * instructions will be no-ops.
2509 */
2510 uint32_t size = 0xffffffff;
2511
2512 /* Compute the correct buffer size for NGG streamout
2513 * because it's used to determine the max emit per
2514 * buffer.
2515 */
2516 if (cmd_buffer->device->physical_device->use_ngg_streamout)
2517 size = buffer->size - sb[i].offset;
2518
2519 desc[0] = va;
2520 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
2521 desc[2] = size;
2522 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
2523 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
2524 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
2525 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
2526
2527 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
2528 desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
2529 S_008F0C_OOB_SELECT(3) |
2530 S_008F0C_RESOURCE_LEVEL(1);
2531 } else {
2532 desc[3] |= S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
2533 }
2534 }
2535
2536 va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2537 va += so_offset;
2538
2539 radv_emit_streamout_buffers(cmd_buffer, va);
2540 }
2541
2542 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_STREAMOUT_BUFFER;
2543 }
2544
2545 static void
2546 radv_upload_graphics_shader_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty)
2547 {
2548 radv_flush_vertex_descriptors(cmd_buffer, pipeline_is_dirty);
2549 radv_flush_streamout_descriptors(cmd_buffer);
2550 radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
2551 radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
2552 }
2553
2554 struct radv_draw_info {
2555 /**
2556 * Number of vertices.
2557 */
2558 uint32_t count;
2559
2560 /**
2561 * Index of the first vertex.
2562 */
2563 int32_t vertex_offset;
2564
2565 /**
2566 * First instance id.
2567 */
2568 uint32_t first_instance;
2569
2570 /**
2571 * Number of instances.
2572 */
2573 uint32_t instance_count;
2574
2575 /**
2576 * First index (indexed draws only).
2577 */
2578 uint32_t first_index;
2579
2580 /**
2581 * Whether it's an indexed draw.
2582 */
2583 bool indexed;
2584
2585 /**
2586 * Indirect draw parameters resource.
2587 */
2588 struct radv_buffer *indirect;
2589 uint64_t indirect_offset;
2590 uint32_t stride;
2591
2592 /**
2593 * Draw count parameters resource.
2594 */
2595 struct radv_buffer *count_buffer;
2596 uint64_t count_buffer_offset;
2597
2598 /**
2599 * Stream output parameters resource.
2600 */
2601 struct radv_buffer *strmout_buffer;
2602 uint64_t strmout_buffer_offset;
2603 };
2604
2605 static uint32_t
2606 radv_get_primitive_reset_index(struct radv_cmd_buffer *cmd_buffer)
2607 {
2608 switch (cmd_buffer->state.index_type) {
2609 case V_028A7C_VGT_INDEX_8:
2610 return 0xffu;
2611 case V_028A7C_VGT_INDEX_16:
2612 return 0xffffu;
2613 case V_028A7C_VGT_INDEX_32:
2614 return 0xffffffffu;
2615 default:
2616 unreachable("invalid index type");
2617 }
2618 }
2619
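/**
 * Emit IA_MULTI_VGT_PARAM for the current draw, but only when its value
 * changed. The register lives in different spaces depending on the
 * generation: uconfig on GFX9, indexed context register on GFX7+, plain
 * context register before that.
 */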
2620 static void
2621 si_emit_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer,
2622 bool instanced_draw, bool indirect_draw,
2623 bool count_from_stream_output,
2624 uint32_t draw_vertex_count)
2625 {
2626 struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
2627 struct radv_cmd_state *state = &cmd_buffer->state;
2628 struct radeon_cmdbuf *cs = cmd_buffer->cs;
2629 unsigned ia_multi_vgt_param;
2630
2631 ia_multi_vgt_param =
2632 si_get_ia_multi_vgt_param(cmd_buffer, instanced_draw,
2633 indirect_draw,
2634 count_from_stream_output,
2635 draw_vertex_count);
2636
2637 if (state->last_ia_multi_vgt_param != ia_multi_vgt_param) {
2638 if (info->chip_class == GFX9) {
2639 radeon_set_uconfig_reg_idx(cmd_buffer->device->physical_device,
2640 cs,
2641 R_030960_IA_MULTI_VGT_PARAM,
2642 4, ia_multi_vgt_param);
2643 } else if (info->chip_class >= GFX7) {
2644 radeon_set_context_reg_idx(cs,
2645 R_028AA8_IA_MULTI_VGT_PARAM,
2646 1, ia_multi_vgt_param);
2647 } else {
2648 radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM,
2649 ia_multi_vgt_param);
2650 }
2651 state->last_ia_multi_vgt_param = ia_multi_vgt_param;
2652 }
2653 }
2654
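/**
 * Emit the per-draw registers: IA_MULTI_VGT_PARAM (before GFX10), primitive
 * restart enable and reset index, and, for draws whose vertex count comes
 * from streamout, the opaque draw vertex stride plus a copy of the buffer
 * filled size into VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE.
 */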
2655 static void
2656 radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer,
2657 const struct radv_draw_info *draw_info)
2658 {
2659 struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
2660 struct radv_cmd_state *state = &cmd_buffer->state;
2661 struct radeon_cmdbuf *cs = cmd_buffer->cs;
2662 int32_t primitive_reset_en;
2663
2664 /* Draw state. */
2665 if (info->chip_class < GFX10) {
2666 si_emit_ia_multi_vgt_param(cmd_buffer, draw_info->instance_count > 1,
2667 draw_info->indirect,
2668 !!draw_info->strmout_buffer,
2669 draw_info->indirect ? 0 : draw_info->count);
2670 }
2671
2672 /* Primitive restart. */
2673 primitive_reset_en =
2674 draw_info->indexed && state->pipeline->graphics.prim_restart_enable;
2675
2676 if (primitive_reset_en != state->last_primitive_reset_en) {
2677 state->last_primitive_reset_en = primitive_reset_en;
2678 if (info->chip_class >= GFX9) {
2679 radeon_set_uconfig_reg(cs,
2680 R_03092C_VGT_MULTI_PRIM_IB_RESET_EN,
2681 primitive_reset_en);
2682 } else {
2683 radeon_set_context_reg(cs,
2684 R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
2685 primitive_reset_en);
2686 }
2687 }
2688
2689 if (primitive_reset_en) {
2690 uint32_t primitive_reset_index =
2691 radv_get_primitive_reset_index(cmd_buffer);
2692
2693 if (primitive_reset_index != state->last_primitive_reset_index) {
2694 radeon_set_context_reg(cs,
2695 R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
2696 primitive_reset_index);
2697 state->last_primitive_reset_index = primitive_reset_index;
2698 }
2699 }
2700
2701 if (draw_info->strmout_buffer) {
2702 uint64_t va = radv_buffer_get_va(draw_info->strmout_buffer->bo);
2703
2704 va += draw_info->strmout_buffer->offset +
2705 draw_info->strmout_buffer_offset;
2706
2707 radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
2708 draw_info->stride);
2709
2710 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
2711 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
2712 COPY_DATA_DST_SEL(COPY_DATA_REG) |
2713 COPY_DATA_WR_CONFIRM);
2714 radeon_emit(cs, va);
2715 radeon_emit(cs, va >> 32);
2716 radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
2717 radeon_emit(cs, 0); /* unused */
2718
2719 radv_cs_add_buffer(cmd_buffer->device->ws, cs, draw_info->strmout_buffer->bo);
2720 }
2721 }
2722
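/**
 * Translate a barrier source stage mask into the corresponding partial
 * flush bits (CS, PS or VS partial flush).
 */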
2723 static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer,
2724 VkPipelineStageFlags src_stage_mask)
2725 {
2726 if (src_stage_mask & (VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
2727 VK_PIPELINE_STAGE_TRANSFER_BIT |
2728 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
2729 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
2730 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
2731 }
2732
2733 if (src_stage_mask & (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
2734 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
2735 VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
2736 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
2737 VK_PIPELINE_STAGE_TRANSFER_BIT |
2738 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
2739 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT |
2740 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
2741 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
2742 } else if (src_stage_mask & (VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
2743 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
2744 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
2745 VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
2746 VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
2747 VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
2748 VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT)) {
2749 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
2750 }
2751 }
2752
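/**
 * Translate source access flags into the cache flushes needed to make prior
 * writes visible. CB/DB metadata flushes are skipped when the image has no
 * such metadata.
 */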
2753 static enum radv_cmd_flush_bits
2754 radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer,
2755 VkAccessFlags src_flags,
2756 struct radv_image *image)
2757 {
2758 bool flush_CB_meta = true, flush_DB_meta = true;
2759 enum radv_cmd_flush_bits flush_bits = 0;
2760 uint32_t b;
2761
2762 if (image) {
2763 if (!radv_image_has_CB_metadata(image))
2764 flush_CB_meta = false;
2765 if (!radv_image_has_htile(image))
2766 flush_DB_meta = false;
2767 }
2768
2769 for_each_bit(b, src_flags) {
2770 switch ((VkAccessFlagBits)(1 << b)) {
2771 case VK_ACCESS_SHADER_WRITE_BIT:
2772 case VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT:
2773 case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT:
2774 flush_bits |= RADV_CMD_FLAG_WB_L2;
2775 break;
2776 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
2777 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
2778 if (flush_CB_meta)
2779 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
2780 break;
2781 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
2782 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
2783 if (flush_DB_meta)
2784 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
2785 break;
2786 case VK_ACCESS_TRANSFER_WRITE_BIT:
2787 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
2788 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
2789 RADV_CMD_FLAG_INV_L2;
2790
2791 if (flush_CB_meta)
2792 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
2793 if (flush_DB_meta)
2794 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
2795 break;
2796 default:
2797 break;
2798 }
2799 }
2800 return flush_bits;
2801 }
2802
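/**
 * Translate destination access flags into the cache invalidations needed
 * before the reads. Images known to be shader coherent (single-sample
 * color/depth on GFX9) can skip the L2 invalidation for shader reads.
 */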
2803 static enum radv_cmd_flush_bits
2804 radv_dst_access_flush(struct radv_cmd_buffer *cmd_buffer,
2805 VkAccessFlags dst_flags,
2806 struct radv_image *image)
2807 {
2808 bool flush_CB_meta = true, flush_DB_meta = true;
2809 enum radv_cmd_flush_bits flush_bits = 0;
2810 bool flush_CB = true, flush_DB = true;
2811 bool image_is_coherent = false;
2812 uint32_t b;
2813
2814 if (image) {
2815 if (!(image->usage & VK_IMAGE_USAGE_STORAGE_BIT)) {
2816 flush_CB = false;
2817 flush_DB = false;
2818 }
2819
2820 if (!radv_image_has_CB_metadata(image))
2821 flush_CB_meta = false;
2822 if (!radv_image_has_htile(image))
2823 flush_DB_meta = false;
2824
2825 /* TODO: implement shader coherent for GFX10 */
2826
2827 if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
2828 if (image->info.samples == 1 &&
2829 (image->usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
2830 VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) &&
2831 !vk_format_is_stencil(image->vk_format)) {
2832 /* Single-sample color and single-sample depth
2833 * (not stencil) are coherent with shaders on
2834 * GFX9.
2835 */
2836 image_is_coherent = true;
2837 }
2838 }
2839 }
2840
2841 for_each_bit(b, dst_flags) {
2842 switch ((VkAccessFlagBits)(1 << b)) {
2843 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
2844 case VK_ACCESS_INDEX_READ_BIT:
2845 case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT:
2846 break;
2847 case VK_ACCESS_UNIFORM_READ_BIT:
2848 flush_bits |= RADV_CMD_FLAG_INV_VCACHE | RADV_CMD_FLAG_INV_SCACHE;
2849 break;
2850 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
2851 case VK_ACCESS_TRANSFER_READ_BIT:
2852 case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
2853 flush_bits |= RADV_CMD_FLAG_INV_VCACHE |
2854 RADV_CMD_FLAG_INV_L2;
2855 break;
2856 case VK_ACCESS_SHADER_READ_BIT:
2857 flush_bits |= RADV_CMD_FLAG_INV_VCACHE;
2858 /* Unlike LLVM, ACO uses SMEM for SSBOs and we have to
2859 * invalidate the scalar cache. */
2860 if (cmd_buffer->device->physical_device->use_aco &&
2861 cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8)
2862 flush_bits |= RADV_CMD_FLAG_INV_SCACHE;
2863
2864 if (!image_is_coherent)
2865 flush_bits |= RADV_CMD_FLAG_INV_L2;
2866 break;
2867 case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
2868 if (flush_CB)
2869 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
2870 if (flush_CB_meta)
2871 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
2872 break;
2873 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT:
2874 if (flush_DB)
2875 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
2876 if (flush_DB_meta)
2877 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
2878 break;
2879 default:
2880 break;
2881 }
2882 }
2883 return flush_bits;
2884 }
2885
2886 void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer,
2887 const struct radv_subpass_barrier *barrier)
2888 {
2889 cmd_buffer->state.flush_bits |= radv_src_access_flush(cmd_buffer, barrier->src_access_mask,
2890 NULL);
2891 radv_stage_flush(cmd_buffer, barrier->src_stage_mask);
2892 cmd_buffer->state.flush_bits |= radv_dst_access_flush(cmd_buffer, barrier->dst_access_mask,
2893 NULL);
2894 }
2895
2896 uint32_t
2897 radv_get_subpass_id(struct radv_cmd_buffer *cmd_buffer)
2898 {
2899 struct radv_cmd_state *state = &cmd_buffer->state;
2900 uint32_t subpass_id = state->subpass - state->pass->subpasses;
2901
2902 /* The id of this subpass shouldn't exceed the number of subpasses in
2903 * this render pass minus 1.
2904 */
2905 assert(subpass_id < state->pass->subpass_count);
2906 return subpass_id;
2907 }
2908
2909 static struct radv_sample_locations_state *
2910 radv_get_attachment_sample_locations(struct radv_cmd_buffer *cmd_buffer,
2911 uint32_t att_idx,
2912 bool begin_subpass)
2913 {
2914 struct radv_cmd_state *state = &cmd_buffer->state;
2915 uint32_t subpass_id = radv_get_subpass_id(cmd_buffer);
2916 struct radv_image_view *view = state->attachments[att_idx].iview;
2917
2918 if (view->image->info.samples == 1)
2919 return NULL;
2920
2921 if (state->pass->attachments[att_idx].first_subpass_idx == subpass_id) {
2922 /* Return the initial sample locations if this is the initial
2923 		 * layout transition of the given subpass attachment.
2924 */
2925 if (state->attachments[att_idx].sample_location.count > 0)
2926 return &state->attachments[att_idx].sample_location;
2927 } else {
2928 /* Otherwise return the subpass sample locations if defined. */
2929 if (state->subpass_sample_locs) {
2930 /* Because the driver sets the current subpass before
2931 * initial layout transitions, we should use the sample
2932 * locations from the previous subpass to avoid an
2933 * off-by-one problem. Otherwise, use the sample
2934 * locations for the current subpass for final layout
2935 * transitions.
2936 */
2937 if (begin_subpass)
2938 subpass_id--;
2939
2940 for (uint32_t i = 0; i < state->num_subpass_sample_locs; i++) {
2941 if (state->subpass_sample_locs[i].subpass_idx == subpass_id)
2942 return &state->subpass_sample_locs[i].sample_location;
2943 }
2944 }
2945 }
2946
2947 return NULL;
2948 }
2949
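/**
 * Perform the layout transition for one subpass attachment, handling
 * separate depth/stencil layouts and the multiview layer count, and record
 * the new current layouts in the command buffer state.
 */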
2950 static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer,
2951 struct radv_subpass_attachment att,
2952 bool begin_subpass)
2953 {
2954 unsigned idx = att.attachment;
2955 struct radv_image_view *view = cmd_buffer->state.attachments[idx].iview;
2956 struct radv_sample_locations_state *sample_locs;
2957 VkImageSubresourceRange range;
2958 range.aspectMask = view->aspect_mask;
2959 range.baseMipLevel = view->base_mip;
2960 range.levelCount = 1;
2961 range.baseArrayLayer = view->base_layer;
2962 range.layerCount = cmd_buffer->state.framebuffer->layers;
2963
2964 if (cmd_buffer->state.subpass->view_mask) {
2965 /* If the current subpass uses multiview, the driver might have
2966 * performed a fast color/depth clear to the whole image
2967 * (including all layers). To make sure the driver will
2968 * decompress the image correctly (if needed), we have to
2969 * account for the "real" number of layers. If the view mask is
2970 * sparse, this will decompress more layers than needed.
2971 */
2972 range.layerCount = util_last_bit(cmd_buffer->state.subpass->view_mask);
2973 }
2974
2975 	/* Get the subpass sample locations for the given attachment; if NULL
2976 	 * is returned, the driver will use the default HW locations.
2977 */
2978 sample_locs = radv_get_attachment_sample_locations(cmd_buffer, idx,
2979 begin_subpass);
2980
2981 /* Determine if the subpass uses separate depth/stencil layouts. */
2982 bool uses_separate_depth_stencil_layouts = false;
2983 if ((cmd_buffer->state.attachments[idx].current_layout !=
2984 cmd_buffer->state.attachments[idx].current_stencil_layout) ||
2985 (att.layout != att.stencil_layout)) {
2986 uses_separate_depth_stencil_layouts = true;
2987 }
2988
2989 /* For separate layouts, perform depth and stencil transitions
2990 * separately.
2991 */
2992 if (uses_separate_depth_stencil_layouts &&
2993 (range.aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT |
2994 VK_IMAGE_ASPECT_STENCIL_BIT))) {
2995 /* Depth-only transitions. */
2996 range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
2997 radv_handle_image_transition(cmd_buffer,
2998 view->image,
2999 cmd_buffer->state.attachments[idx].current_layout,
3000 cmd_buffer->state.attachments[idx].current_in_render_loop,
3001 att.layout, att.in_render_loop,
3002 0, 0, &range, sample_locs);
3003
3004 /* Stencil-only transitions. */
3005 range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
3006 radv_handle_image_transition(cmd_buffer,
3007 view->image,
3008 cmd_buffer->state.attachments[idx].current_stencil_layout,
3009 cmd_buffer->state.attachments[idx].current_in_render_loop,
3010 att.stencil_layout, att.in_render_loop,
3011 0, 0, &range, sample_locs);
3012 } else {
3013 radv_handle_image_transition(cmd_buffer,
3014 view->image,
3015 cmd_buffer->state.attachments[idx].current_layout,
3016 cmd_buffer->state.attachments[idx].current_in_render_loop,
3017 att.layout, att.in_render_loop,
3018 0, 0, &range, sample_locs);
3019 }
3020
3021 cmd_buffer->state.attachments[idx].current_layout = att.layout;
3022 cmd_buffer->state.attachments[idx].current_stencil_layout = att.stencil_layout;
3023 cmd_buffer->state.attachments[idx].current_in_render_loop = att.in_render_loop;
3024
3025
3026 }
3027
3028 void
3029 radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer,
3030 const struct radv_subpass *subpass)
3031 {
3032 cmd_buffer->state.subpass = subpass;
3033
3034 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER;
3035 }
3036
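/**
 * Record the custom sample locations provided through
 * VkRenderPassSampleLocationsBeginInfoEXT in the command buffer state: both
 * the per-attachment initial locations and the post-subpass locations.
 */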
3037 static VkResult
3038 radv_cmd_state_setup_sample_locations(struct radv_cmd_buffer *cmd_buffer,
3039 struct radv_render_pass *pass,
3040 const VkRenderPassBeginInfo *info)
3041 {
3042 const struct VkRenderPassSampleLocationsBeginInfoEXT *sample_locs =
3043 vk_find_struct_const(info->pNext,
3044 RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT);
3045 struct radv_cmd_state *state = &cmd_buffer->state;
3046
3047 if (!sample_locs) {
3048 state->subpass_sample_locs = NULL;
3049 return VK_SUCCESS;
3050 }
3051
3052 for (uint32_t i = 0; i < sample_locs->attachmentInitialSampleLocationsCount; i++) {
3053 const VkAttachmentSampleLocationsEXT *att_sample_locs =
3054 &sample_locs->pAttachmentInitialSampleLocations[i];
3055 uint32_t att_idx = att_sample_locs->attachmentIndex;
3056 struct radv_image *image = cmd_buffer->state.attachments[att_idx].iview->image;
3057
3058 assert(vk_format_is_depth_or_stencil(image->vk_format));
3059
3060 /* From the Vulkan spec 1.1.108:
3061 *
3062 * "If the image referenced by the framebuffer attachment at
3063 * index attachmentIndex was not created with
3064 * VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT
3065 * then the values specified in sampleLocationsInfo are
3066 * ignored."
3067 */
3068 if (!(image->flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT))
3069 continue;
3070
3071 const VkSampleLocationsInfoEXT *sample_locs_info =
3072 &att_sample_locs->sampleLocationsInfo;
3073
3074 state->attachments[att_idx].sample_location.per_pixel =
3075 sample_locs_info->sampleLocationsPerPixel;
3076 state->attachments[att_idx].sample_location.grid_size =
3077 sample_locs_info->sampleLocationGridSize;
3078 state->attachments[att_idx].sample_location.count =
3079 sample_locs_info->sampleLocationsCount;
3080 typed_memcpy(&state->attachments[att_idx].sample_location.locations[0],
3081 sample_locs_info->pSampleLocations,
3082 sample_locs_info->sampleLocationsCount);
3083 }
3084
3085 state->subpass_sample_locs = vk_alloc(&cmd_buffer->pool->alloc,
3086 sample_locs->postSubpassSampleLocationsCount *
3087 sizeof(state->subpass_sample_locs[0]),
3088 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3089 if (state->subpass_sample_locs == NULL) {
3090 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3091 return cmd_buffer->record_result;
3092 }
3093
3094 state->num_subpass_sample_locs = sample_locs->postSubpassSampleLocationsCount;
3095
3096 for (uint32_t i = 0; i < sample_locs->postSubpassSampleLocationsCount; i++) {
3097 const VkSubpassSampleLocationsEXT *subpass_sample_locs_info =
3098 &sample_locs->pPostSubpassSampleLocations[i];
3099 const VkSampleLocationsInfoEXT *sample_locs_info =
3100 &subpass_sample_locs_info->sampleLocationsInfo;
3101
3102 state->subpass_sample_locs[i].subpass_idx =
3103 subpass_sample_locs_info->subpassIndex;
3104 state->subpass_sample_locs[i].sample_location.per_pixel =
3105 sample_locs_info->sampleLocationsPerPixel;
3106 state->subpass_sample_locs[i].sample_location.grid_size =
3107 sample_locs_info->sampleLocationGridSize;
3108 state->subpass_sample_locs[i].sample_location.count =
3109 sample_locs_info->sampleLocationsCount;
3110 typed_memcpy(&state->subpass_sample_locs[i].sample_location.locations[0],
3111 sample_locs_info->pSampleLocations,
3112 sample_locs_info->sampleLocationsCount);
3113 }
3114
3115 return VK_SUCCESS;
3116 }
3117
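/**
 * Allocate and initialize the per-attachment command buffer state (pending
 * clear aspects and values, initial layouts, image views and CB/DS surface
 * state) from the render pass and, when provided, the
 * VkRenderPassAttachmentBeginInfoKHR attachment list (imageless
 * framebuffers).
 */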
3118 static VkResult
3119 radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer,
3120 struct radv_render_pass *pass,
3121 const VkRenderPassBeginInfo *info)
3122 {
3123 struct radv_cmd_state *state = &cmd_buffer->state;
3124 const struct VkRenderPassAttachmentBeginInfoKHR *attachment_info = NULL;
3125
3126 if (info) {
3127 attachment_info = vk_find_struct_const(info->pNext,
3128 RENDER_PASS_ATTACHMENT_BEGIN_INFO_KHR);
3129 }
3130
3131
3132 if (pass->attachment_count == 0) {
3133 state->attachments = NULL;
3134 return VK_SUCCESS;
3135 }
3136
3137 state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
3138 pass->attachment_count *
3139 sizeof(state->attachments[0]),
3140 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3141 if (state->attachments == NULL) {
3142 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3143 return cmd_buffer->record_result;
3144 }
3145
3146 for (uint32_t i = 0; i < pass->attachment_count; ++i) {
3147 struct radv_render_pass_attachment *att = &pass->attachments[i];
3148 VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
3149 VkImageAspectFlags clear_aspects = 0;
3150
3151 if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
3152 /* color attachment */
3153 if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
3154 clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
3155 }
3156 } else {
3157 			/* depth/stencil attachment */
3158 if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
3159 att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
3160 clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
3161 if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
3162 att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE)
3163 clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
3164 }
3165 if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
3166 att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
3167 clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
3168 }
3169 }
3170
3171 state->attachments[i].pending_clear_aspects = clear_aspects;
3172 state->attachments[i].cleared_views = 0;
3173 if (clear_aspects && info) {
3174 assert(info->clearValueCount > i);
3175 state->attachments[i].clear_value = info->pClearValues[i];
3176 }
3177
3178 state->attachments[i].current_layout = att->initial_layout;
3179 state->attachments[i].current_stencil_layout = att->stencil_initial_layout;
3180 state->attachments[i].sample_location.count = 0;
3181
3182 struct radv_image_view *iview;
3183 if (attachment_info && attachment_info->attachmentCount > i) {
3184 iview = radv_image_view_from_handle(attachment_info->pAttachments[i]);
3185 } else {
3186 iview = state->framebuffer->attachments[i];
3187 }
3188
3189 state->attachments[i].iview = iview;
3190 if (iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
3191 radv_initialise_ds_surface(cmd_buffer->device, &state->attachments[i].ds, iview);
3192 } else {
3193 radv_initialise_color_surface(cmd_buffer->device, &state->attachments[i].cb, iview);
3194 }
3195 }
3196
3197 return VK_SUCCESS;
3198 }
3199
3200 VkResult radv_AllocateCommandBuffers(
3201 VkDevice _device,
3202 const VkCommandBufferAllocateInfo *pAllocateInfo,
3203 VkCommandBuffer *pCommandBuffers)
3204 {
3205 RADV_FROM_HANDLE(radv_device, device, _device);
3206 RADV_FROM_HANDLE(radv_cmd_pool, pool, pAllocateInfo->commandPool);
3207
3208 VkResult result = VK_SUCCESS;
3209 uint32_t i;
3210
3211 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
3212
3213 if (!list_is_empty(&pool->free_cmd_buffers)) {
3214 struct radv_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct radv_cmd_buffer, pool_link);
3215
3216 list_del(&cmd_buffer->pool_link);
3217 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
3218
3219 result = radv_reset_cmd_buffer(cmd_buffer);
3220 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
3221 cmd_buffer->level = pAllocateInfo->level;
3222
3223 pCommandBuffers[i] = radv_cmd_buffer_to_handle(cmd_buffer);
3224 } else {
3225 result = radv_create_cmd_buffer(device, pool, pAllocateInfo->level,
3226 &pCommandBuffers[i]);
3227 }
3228 if (result != VK_SUCCESS)
3229 break;
3230 }
3231
3232 if (result != VK_SUCCESS) {
3233 radv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
3234 i, pCommandBuffers);
3235
3236 /* From the Vulkan 1.0.66 spec:
3237 *
3238 * "vkAllocateCommandBuffers can be used to create multiple
3239 * command buffers. If the creation of any of those command
3240 * buffers fails, the implementation must destroy all
3241 * successfully created command buffer objects from this
3242 * command, set all entries of the pCommandBuffers array to
3243 * NULL and return the error."
3244 */
3245 memset(pCommandBuffers, 0,
3246 sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
3247 }
3248
3249 return result;
3250 }
3251
3252 void radv_FreeCommandBuffers(
3253 VkDevice device,
3254 VkCommandPool commandPool,
3255 uint32_t commandBufferCount,
3256 const VkCommandBuffer *pCommandBuffers)
3257 {
3258 for (uint32_t i = 0; i < commandBufferCount; i++) {
3259 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
3260
3261 if (cmd_buffer) {
3262 if (cmd_buffer->pool) {
3263 list_del(&cmd_buffer->pool_link);
3264 list_addtail(&cmd_buffer->pool_link, &cmd_buffer->pool->free_cmd_buffers);
3265 } else
3266 radv_cmd_buffer_destroy(cmd_buffer);
3267
3268 }
3269 }
3270 }
3271
3272 VkResult radv_ResetCommandBuffer(
3273 VkCommandBuffer commandBuffer,
3274 VkCommandBufferResetFlags flags)
3275 {
3276 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3277 return radv_reset_cmd_buffer(cmd_buffer);
3278 }
3279
3280 VkResult radv_BeginCommandBuffer(
3281 VkCommandBuffer commandBuffer,
3282 const VkCommandBufferBeginInfo *pBeginInfo)
3283 {
3284 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3285 VkResult result = VK_SUCCESS;
3286
3287 if (cmd_buffer->status != RADV_CMD_BUFFER_STATUS_INITIAL) {
3288 	/* If the command buffer has already been reset with
3289 * vkResetCommandBuffer, no need to do it again.
3290 */
3291 result = radv_reset_cmd_buffer(cmd_buffer);
3292 if (result != VK_SUCCESS)
3293 return result;
3294 }
3295
3296 memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
3297 cmd_buffer->state.last_primitive_reset_en = -1;
3298 cmd_buffer->state.last_index_type = -1;
3299 cmd_buffer->state.last_num_instances = -1;
3300 cmd_buffer->state.last_vertex_offset = -1;
3301 cmd_buffer->state.last_first_instance = -1;
3302 cmd_buffer->state.predication_type = -1;
3303 cmd_buffer->usage_flags = pBeginInfo->flags;
3304
3305 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
3306 (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
3307 assert(pBeginInfo->pInheritanceInfo);
3308 cmd_buffer->state.framebuffer = radv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
3309 cmd_buffer->state.pass = radv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
3310
3311 struct radv_subpass *subpass =
3312 &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
3313
3314 if (cmd_buffer->state.framebuffer) {
3315 result = radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
3316 if (result != VK_SUCCESS)
3317 return result;
3318 }
3319
3320 radv_cmd_buffer_set_subpass(cmd_buffer, subpass);
3321 }
3322
3323 if (unlikely(cmd_buffer->device->trace_bo)) {
3324 struct radv_device *device = cmd_buffer->device;
3325
3326 radv_cs_add_buffer(device->ws, cmd_buffer->cs,
3327 device->trace_bo);
3328
3329 radv_cmd_buffer_trace_emit(cmd_buffer);
3330 }
3331
3332 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_RECORDING;
3333
3334 return result;
3335 }
3336
3337 void radv_CmdBindVertexBuffers(
3338 VkCommandBuffer commandBuffer,
3339 uint32_t firstBinding,
3340 uint32_t bindingCount,
3341 const VkBuffer* pBuffers,
3342 const VkDeviceSize* pOffsets)
3343 {
3344 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3345 struct radv_vertex_binding *vb = cmd_buffer->vertex_bindings;
3346 bool changed = false;
3347
3348 	/* We have to defer setting up vertex buffers since we need the buffer
3349 * stride from the pipeline. */
3350
3351 assert(firstBinding + bindingCount <= MAX_VBS);
3352 for (uint32_t i = 0; i < bindingCount; i++) {
3353 uint32_t idx = firstBinding + i;
3354
3355 if (!changed &&
3356 (vb[idx].buffer != radv_buffer_from_handle(pBuffers[i]) ||
3357 vb[idx].offset != pOffsets[i])) {
3358 changed = true;
3359 }
3360
3361 vb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
3362 vb[idx].offset = pOffsets[i];
3363
3364 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
3365 vb[idx].buffer->bo);
3366 }
3367
3368 if (!changed) {
3369 /* No state changes. */
3370 return;
3371 }
3372
3373 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_VERTEX_BUFFER;
3374 }
3375
3376 static uint32_t
3377 vk_to_index_type(VkIndexType type)
3378 {
3379 switch (type) {
3380 case VK_INDEX_TYPE_UINT8_EXT:
3381 return V_028A7C_VGT_INDEX_8;
3382 case VK_INDEX_TYPE_UINT16:
3383 return V_028A7C_VGT_INDEX_16;
3384 case VK_INDEX_TYPE_UINT32:
3385 return V_028A7C_VGT_INDEX_32;
3386 default:
3387 unreachable("invalid index type");
3388 }
3389 }
3390
3391 static uint32_t
3392 radv_get_vgt_index_size(uint32_t type)
3393 {
3394 switch (type) {
3395 case V_028A7C_VGT_INDEX_8:
3396 return 1;
3397 case V_028A7C_VGT_INDEX_16:
3398 return 2;
3399 case V_028A7C_VGT_INDEX_32:
3400 return 4;
3401 default:
3402 unreachable("invalid index type");
3403 }
3404 }
3405
3406 void radv_CmdBindIndexBuffer(
3407 VkCommandBuffer commandBuffer,
3408 VkBuffer buffer,
3409 VkDeviceSize offset,
3410 VkIndexType indexType)
3411 {
3412 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3413 RADV_FROM_HANDLE(radv_buffer, index_buffer, buffer);
3414
3415 if (cmd_buffer->state.index_buffer == index_buffer &&
3416 cmd_buffer->state.index_offset == offset &&
3417 cmd_buffer->state.index_type == indexType) {
3418 /* No state changes. */
3419 return;
3420 }
3421
3422 cmd_buffer->state.index_buffer = index_buffer;
3423 cmd_buffer->state.index_offset = offset;
3424 cmd_buffer->state.index_type = vk_to_index_type(indexType);
3425 cmd_buffer->state.index_va = radv_buffer_get_va(index_buffer->bo);
3426 cmd_buffer->state.index_va += index_buffer->offset + offset;
3427
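	/* max_index_count bounds indexed draws against the bound range. As an
	 * illustrative example (values assumed, not from the source): binding a
	 * 64 KiB buffer at offset 0 with VK_INDEX_TYPE_UINT16 gives
	 * (65536 - 0) / 2 = 32768 addressable indices.
	 */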
3428 int index_size = radv_get_vgt_index_size(vk_to_index_type(indexType));
3429 cmd_buffer->state.max_index_count = (index_buffer->size - offset) / index_size;
3430 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
3431 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo);
3432 }
3433
3434
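/* Bind a descriptor set and register the buffers it references with the
 * command stream so the kernel keeps them resident; when the global BO list
 * is in use the per-buffer additions are skipped, but the set's own backing
 * BO is still added.
 */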
3435 static void
3436 radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
3437 VkPipelineBindPoint bind_point,
3438 struct radv_descriptor_set *set, unsigned idx)
3439 {
3440 struct radeon_winsys *ws = cmd_buffer->device->ws;
3441
3442 radv_set_descriptor_set(cmd_buffer, bind_point, set, idx);
3443
3444 assert(set);
3445 assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
3446
3447 if (!cmd_buffer->device->use_global_bo_list) {
3448 for (unsigned j = 0; j < set->layout->buffer_count; ++j)
3449 if (set->descriptors[j])
3450 radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j]);
3451 }
3452
3453 	if (set->bo)
3454 radv_cs_add_buffer(ws, cmd_buffer->cs, set->bo);
3455 }
3456
3457 void radv_CmdBindDescriptorSets(
3458 VkCommandBuffer commandBuffer,
3459 VkPipelineBindPoint pipelineBindPoint,
3460 VkPipelineLayout _layout,
3461 uint32_t firstSet,
3462 uint32_t descriptorSetCount,
3463 const VkDescriptorSet* pDescriptorSets,
3464 uint32_t dynamicOffsetCount,
3465 const uint32_t* pDynamicOffsets)
3466 {
3467 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3468 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
3469 unsigned dyn_idx = 0;
3470
3471 const bool no_dynamic_bounds = cmd_buffer->device->instance->debug_flags & RADV_DEBUG_NO_DYNAMIC_BOUNDS;
3472 struct radv_descriptor_state *descriptors_state =
3473 radv_get_descriptors_state(cmd_buffer, pipelineBindPoint);
3474
3475 for (unsigned i = 0; i < descriptorSetCount; ++i) {
3476 unsigned idx = i + firstSet;
3477 RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);
3478
3479 /* If the set is already bound we only need to update the
3480 * (potentially changed) dynamic offsets. */
3481 if (descriptors_state->sets[idx] != set ||
3482 !(descriptors_state->valid & (1u << idx))) {
3483 radv_bind_descriptor_set(cmd_buffer, pipelineBindPoint, set, idx);
3484 }
3485
3486 		for (unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
3487 unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
3488 uint32_t *dst = descriptors_state->dynamic_buffers + idx * 4;
3489 assert(dyn_idx < dynamicOffsetCount);
3490
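			/* Build the 4-dword buffer descriptor for this dynamic binding in
			 * place: dword 0 holds the low VA bits, dword 1 the high VA bits,
			 * dword 2 the range size (or ~0 when RADV_DEBUG_NO_DYNAMIC_BOUNDS
			 * disables bounds checking) and dword 3 the swizzle/format, which
			 * uses the new OOB_SELECT/RESOURCE_LEVEL encoding on GFX10+.
			 */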
3491 struct radv_descriptor_range *range = set->dynamic_descriptors + j;
3492 uint64_t va = range->va + pDynamicOffsets[dyn_idx];
3493 dst[0] = va;
3494 dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
3495 dst[2] = no_dynamic_bounds ? 0xffffffffu : range->size;
3496 dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
3497 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
3498 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
3499 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
3500
3501 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
3502 dst[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
3503 S_008F0C_OOB_SELECT(3) |
3504 S_008F0C_RESOURCE_LEVEL(1);
3505 } else {
3506 dst[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
3507 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
3508 }
3509
3510 cmd_buffer->push_constant_stages |=
3511 set->layout->dynamic_shader_stages;
3512 }
3513 }
3514 }
3515
3516 static bool radv_init_push_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
3517 struct radv_descriptor_set *set,
3518 struct radv_descriptor_set_layout *layout,
3519 VkPipelineBindPoint bind_point)
3520 {
3521 struct radv_descriptor_state *descriptors_state =
3522 radv_get_descriptors_state(cmd_buffer, bind_point);
3523 set->size = layout->size;
3524 set->layout = layout;
3525
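	/* Grow the host-side staging storage geometrically: at least 1 KiB, at
	 * least double the previous capacity, and capped at
	 * 96 * MAX_PUSH_DESCRIPTORS bytes (presumably the largest descriptor
	 * size times the push descriptor limit).
	 */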
3526 if (descriptors_state->push_set.capacity < set->size) {
3527 size_t new_size = MAX2(set->size, 1024);
3528 new_size = MAX2(new_size, 2 * descriptors_state->push_set.capacity);
3529 new_size = MIN2(new_size, 96 * MAX_PUSH_DESCRIPTORS);
3530
3531 free(set->mapped_ptr);
3532 set->mapped_ptr = malloc(new_size);
3533
3534 if (!set->mapped_ptr) {
3535 descriptors_state->push_set.capacity = 0;
3536 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3537 return false;
3538 }
3539
3540 descriptors_state->push_set.capacity = new_size;
3541 }
3542
3543 return true;
3544 }
3545
3546 void radv_meta_push_descriptor_set(
3547 struct radv_cmd_buffer* cmd_buffer,
3548 VkPipelineBindPoint pipelineBindPoint,
3549 VkPipelineLayout _layout,
3550 uint32_t set,
3551 uint32_t descriptorWriteCount,
3552 const VkWriteDescriptorSet* pDescriptorWrites)
3553 {
3554 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
3555 struct radv_descriptor_set *push_set = &cmd_buffer->meta_push_descriptors;
3556 unsigned bo_offset;
3557
3558 assert(set == 0);
3559 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
3560
3561 push_set->size = layout->set[set].layout->size;
3562 push_set->layout = layout->set[set].layout;
3563
3564 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, push_set->size, 32,
3565 &bo_offset,
3566 (void**) &push_set->mapped_ptr))
3567 return;
3568
3569 push_set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
3570 push_set->va += bo_offset;
3571
3572 radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
3573 radv_descriptor_set_to_handle(push_set),
3574 descriptorWriteCount, pDescriptorWrites, 0, NULL);
3575
3576 radv_set_descriptor_set(cmd_buffer, pipelineBindPoint, push_set, set);
3577 }
3578
3579 void radv_CmdPushDescriptorSetKHR(
3580 VkCommandBuffer commandBuffer,
3581 VkPipelineBindPoint pipelineBindPoint,
3582 VkPipelineLayout _layout,
3583 uint32_t set,
3584 uint32_t descriptorWriteCount,
3585 const VkWriteDescriptorSet* pDescriptorWrites)
3586 {
3587 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3588 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
3589 struct radv_descriptor_state *descriptors_state =
3590 radv_get_descriptors_state(cmd_buffer, pipelineBindPoint);
3591 struct radv_descriptor_set *push_set = &descriptors_state->push_set.set;
3592
3593 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
3594
3595 if (!radv_init_push_descriptor_set(cmd_buffer, push_set,
3596 layout->set[set].layout,
3597 pipelineBindPoint))
3598 return;
3599
3600 /* Check that there are no inline uniform block updates when calling vkCmdPushDescriptorSetKHR()
3601 	 * because it is invalid according to the Vulkan spec.
3602 */
3603 for (int i = 0; i < descriptorWriteCount; i++) {
3604 ASSERTED const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
3605 assert(writeset->descriptorType != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT);
3606 }
3607
3608 radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
3609 radv_descriptor_set_to_handle(push_set),
3610 descriptorWriteCount, pDescriptorWrites, 0, NULL);
3611
3612 radv_set_descriptor_set(cmd_buffer, pipelineBindPoint, push_set, set);
3613 descriptors_state->push_dirty = true;
3614 }
3615
3616 void radv_CmdPushDescriptorSetWithTemplateKHR(
3617 VkCommandBuffer commandBuffer,
3618 VkDescriptorUpdateTemplate descriptorUpdateTemplate,
3619 VkPipelineLayout _layout,
3620 uint32_t set,
3621 const void* pData)
3622 {
3623 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3624 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
3625 RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
3626 struct radv_descriptor_state *descriptors_state =
3627 radv_get_descriptors_state(cmd_buffer, templ->bind_point);
3628 struct radv_descriptor_set *push_set = &descriptors_state->push_set.set;
3629
3630 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
3631
3632 if (!radv_init_push_descriptor_set(cmd_buffer, push_set,
3633 layout->set[set].layout,
3634 templ->bind_point))
3635 return;
3636
3637 radv_update_descriptor_set_with_template(cmd_buffer->device, cmd_buffer, push_set,
3638 descriptorUpdateTemplate, pData);
3639
3640 radv_set_descriptor_set(cmd_buffer, templ->bind_point, push_set, set);
3641 descriptors_state->push_dirty = true;
3642 }
3643
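/* Push constants are only staged into a CPU-side shadow buffer here and are
 * uploaded later when constants are flushed for the affected stages.
 * Illustrative application-side usage (a sketch, not part of the driver;
 * 'layout' and 'mvp' are assumed):
 *
 *    vkCmdPushConstants(cmd, layout, VK_SHADER_STAGE_VERTEX_BIT,
 *                       0, sizeof(mvp), &mvp);
 */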
3644 void radv_CmdPushConstants(VkCommandBuffer commandBuffer,
3645 VkPipelineLayout layout,
3646 VkShaderStageFlags stageFlags,
3647 uint32_t offset,
3648 uint32_t size,
3649 const void* pValues)
3650 {
3651 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3652 memcpy(cmd_buffer->push_constants + offset, pValues, size);
3653 cmd_buffer->push_constant_stages |= stageFlags;
3654 }
3655
3656 VkResult radv_EndCommandBuffer(
3657 VkCommandBuffer commandBuffer)
3658 {
3659 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3660
3661 if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) {
3662 if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX6)
3663 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WB_L2;
3664
3665 /* Make sure to sync all pending active queries at the end of
3666 		 * the command buffer.
3667 */
3668 cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits;
3669
3670 /* Since NGG streamout uses GDS, we need to make GDS idle when
3671 * we leave the IB, otherwise another process might overwrite
3672 * it while our shaders are busy.
3673 */
3674 if (cmd_buffer->gds_needed)
3675 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
3676
3677 si_emit_cache_flush(cmd_buffer);
3678 }
3679
3680 /* Make sure CP DMA is idle at the end of IBs because the kernel
3681 * doesn't wait for it.
3682 */
3683 si_cp_dma_wait_for_idle(cmd_buffer);
3684
3685 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
3686 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs);
3687
3688 if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
3689 return vk_error(cmd_buffer->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
3690
3691 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_EXECUTABLE;
3692
3693 return cmd_buffer->record_result;
3694 }
3695
3696 static void
3697 radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer)
3698 {
3699 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
3700
3701 if (!pipeline || pipeline == cmd_buffer->state.emitted_compute_pipeline)
3702 return;
3703
3704 assert(!pipeline->ctx_cs.cdw);
3705
3706 cmd_buffer->state.emitted_compute_pipeline = pipeline;
3707
3708 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, pipeline->cs.cdw);
3709 radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);
3710
3711 cmd_buffer->compute_scratch_size_per_wave_needed = MAX2(cmd_buffer->compute_scratch_size_per_wave_needed,
3712 pipeline->scratch_bytes_per_wave);
3713 cmd_buffer->compute_scratch_waves_wanted = MAX2(cmd_buffer->compute_scratch_waves_wanted,
3714 pipeline->max_waves);
3715
3716 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
3717 pipeline->shaders[MESA_SHADER_COMPUTE]->bo);
3718
3719 if (unlikely(cmd_buffer->device->trace_bo))
3720 radv_save_pipeline(cmd_buffer, pipeline, RING_COMPUTE);
3721 }
3722
3723 static void radv_mark_descriptor_sets_dirty(struct radv_cmd_buffer *cmd_buffer,
3724 VkPipelineBindPoint bind_point)
3725 {
3726 struct radv_descriptor_state *descriptors_state =
3727 radv_get_descriptors_state(cmd_buffer, bind_point);
3728
3729 descriptors_state->dirty |= descriptors_state->valid;
3730 }
3731
3732 void radv_CmdBindPipeline(
3733 VkCommandBuffer commandBuffer,
3734 VkPipelineBindPoint pipelineBindPoint,
3735 VkPipeline _pipeline)
3736 {
3737 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3738 RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
3739
3740 switch (pipelineBindPoint) {
3741 case VK_PIPELINE_BIND_POINT_COMPUTE:
3742 if (cmd_buffer->state.compute_pipeline == pipeline)
3743 return;
3744 radv_mark_descriptor_sets_dirty(cmd_buffer, pipelineBindPoint);
3745
3746 cmd_buffer->state.compute_pipeline = pipeline;
3747 cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
3748 break;
3749 case VK_PIPELINE_BIND_POINT_GRAPHICS:
3750 if (cmd_buffer->state.pipeline == pipeline)
3751 return;
3752 radv_mark_descriptor_sets_dirty(cmd_buffer, pipelineBindPoint);
3753
3754 cmd_buffer->state.pipeline = pipeline;
3755 if (!pipeline)
3756 break;
3757
3758 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
3759 cmd_buffer->push_constant_stages |= pipeline->active_stages;
3760
3761 /* the new vertex shader might not have the same user regs */
3762 cmd_buffer->state.last_first_instance = -1;
3763 cmd_buffer->state.last_vertex_offset = -1;
3764
3765 /* Prefetch all pipeline shaders at first draw time. */
3766 cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_SHADERS;
3767
3768 if ((cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI10 ||
3769 cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI12 ||
3770 cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI14) &&
3771 cmd_buffer->state.emitted_pipeline &&
3772 radv_pipeline_has_ngg(cmd_buffer->state.emitted_pipeline) &&
3773 !radv_pipeline_has_ngg(cmd_buffer->state.pipeline)) {
3774 /* Transitioning from NGG to legacy GS requires
3775 * VGT_FLUSH on Navi10-14. VGT_FLUSH is also emitted
3776 * at the beginning of IBs when legacy GS ring pointers
3777 * are set.
3778 */
3779 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_FLUSH;
3780 }
3781
3782 radv_bind_dynamic_state(cmd_buffer, &pipeline->dynamic_state);
3783 radv_bind_streamout_state(cmd_buffer, pipeline);
3784
3785 if (pipeline->graphics.esgs_ring_size > cmd_buffer->esgs_ring_size_needed)
3786 cmd_buffer->esgs_ring_size_needed = pipeline->graphics.esgs_ring_size;
3787 if (pipeline->graphics.gsvs_ring_size > cmd_buffer->gsvs_ring_size_needed)
3788 cmd_buffer->gsvs_ring_size_needed = pipeline->graphics.gsvs_ring_size;
3789
3790 if (radv_pipeline_has_tess(pipeline))
3791 cmd_buffer->tess_rings_needed = true;
3792 break;
3793 default:
3794 assert(!"invalid bind point");
3795 break;
3796 }
3797 }
3798
3799 void radv_CmdSetViewport(
3800 VkCommandBuffer commandBuffer,
3801 uint32_t firstViewport,
3802 uint32_t viewportCount,
3803 const VkViewport* pViewports)
3804 {
3805 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3806 struct radv_cmd_state *state = &cmd_buffer->state;
3807 ASSERTED const uint32_t total_count = firstViewport + viewportCount;
3808
3809 assert(firstViewport < MAX_VIEWPORTS);
3810 assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);
3811
3812 if (!memcmp(state->dynamic.viewport.viewports + firstViewport,
3813 pViewports, viewportCount * sizeof(*pViewports))) {
3814 return;
3815 }
3816
3817 memcpy(state->dynamic.viewport.viewports + firstViewport, pViewports,
3818 viewportCount * sizeof(*pViewports));
3819
3820 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_VIEWPORT;
3821 }
3822
3823 void radv_CmdSetScissor(
3824 VkCommandBuffer commandBuffer,
3825 uint32_t firstScissor,
3826 uint32_t scissorCount,
3827 const VkRect2D* pScissors)
3828 {
3829 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3830 struct radv_cmd_state *state = &cmd_buffer->state;
3831 ASSERTED const uint32_t total_count = firstScissor + scissorCount;
3832
3833 assert(firstScissor < MAX_SCISSORS);
3834 assert(total_count >= 1 && total_count <= MAX_SCISSORS);
3835
3836 if (!memcmp(state->dynamic.scissor.scissors + firstScissor, pScissors,
3837 scissorCount * sizeof(*pScissors))) {
3838 return;
3839 }
3840
3841 memcpy(state->dynamic.scissor.scissors + firstScissor, pScissors,
3842 scissorCount * sizeof(*pScissors));
3843
3844 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
3845 }
3846
3847 void radv_CmdSetLineWidth(
3848 VkCommandBuffer commandBuffer,
3849 float lineWidth)
3850 {
3851 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3852
3853 if (cmd_buffer->state.dynamic.line_width == lineWidth)
3854 return;
3855
3856 cmd_buffer->state.dynamic.line_width = lineWidth;
3857 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
3858 }
3859
3860 void radv_CmdSetDepthBias(
3861 VkCommandBuffer commandBuffer,
3862 float depthBiasConstantFactor,
3863 float depthBiasClamp,
3864 float depthBiasSlopeFactor)
3865 {
3866 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3867 struct radv_cmd_state *state = &cmd_buffer->state;
3868
3869 if (state->dynamic.depth_bias.bias == depthBiasConstantFactor &&
3870 state->dynamic.depth_bias.clamp == depthBiasClamp &&
3871 state->dynamic.depth_bias.slope == depthBiasSlopeFactor) {
3872 return;
3873 }
3874
3875 state->dynamic.depth_bias.bias = depthBiasConstantFactor;
3876 state->dynamic.depth_bias.clamp = depthBiasClamp;
3877 state->dynamic.depth_bias.slope = depthBiasSlopeFactor;
3878
3879 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
3880 }
3881
3882 void radv_CmdSetBlendConstants(
3883 VkCommandBuffer commandBuffer,
3884 const float blendConstants[4])
3885 {
3886 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3887 struct radv_cmd_state *state = &cmd_buffer->state;
3888
3889 if (!memcmp(state->dynamic.blend_constants, blendConstants, sizeof(float) * 4))
3890 return;
3891
3892 memcpy(state->dynamic.blend_constants, blendConstants, sizeof(float) * 4);
3893
3894 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
3895 }
3896
3897 void radv_CmdSetDepthBounds(
3898 VkCommandBuffer commandBuffer,
3899 float minDepthBounds,
3900 float maxDepthBounds)
3901 {
3902 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3903 struct radv_cmd_state *state = &cmd_buffer->state;
3904
3905 if (state->dynamic.depth_bounds.min == minDepthBounds &&
3906 state->dynamic.depth_bounds.max == maxDepthBounds) {
3907 return;
3908 }
3909
3910 state->dynamic.depth_bounds.min = minDepthBounds;
3911 state->dynamic.depth_bounds.max = maxDepthBounds;
3912
3913 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
3914 }
3915
3916 void radv_CmdSetStencilCompareMask(
3917 VkCommandBuffer commandBuffer,
3918 VkStencilFaceFlags faceMask,
3919 uint32_t compareMask)
3920 {
3921 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3922 struct radv_cmd_state *state = &cmd_buffer->state;
3923 bool front_same = state->dynamic.stencil_compare_mask.front == compareMask;
3924 bool back_same = state->dynamic.stencil_compare_mask.back == compareMask;
3925
3926 if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) &&
3927 (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || back_same)) {
3928 return;
3929 }
3930
3931 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
3932 state->dynamic.stencil_compare_mask.front = compareMask;
3933 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
3934 state->dynamic.stencil_compare_mask.back = compareMask;
3935
3936 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
3937 }
3938
3939 void radv_CmdSetStencilWriteMask(
3940 VkCommandBuffer commandBuffer,
3941 VkStencilFaceFlags faceMask,
3942 uint32_t writeMask)
3943 {
3944 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3945 struct radv_cmd_state *state = &cmd_buffer->state;
3946 bool front_same = state->dynamic.stencil_write_mask.front == writeMask;
3947 bool back_same = state->dynamic.stencil_write_mask.back == writeMask;
3948
3949 if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) &&
3950 (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || back_same)) {
3951 return;
3952 }
3953
3954 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
3955 state->dynamic.stencil_write_mask.front = writeMask;
3956 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
3957 state->dynamic.stencil_write_mask.back = writeMask;
3958
3959 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
3960 }
3961
3962 void radv_CmdSetStencilReference(
3963 VkCommandBuffer commandBuffer,
3964 VkStencilFaceFlags faceMask,
3965 uint32_t reference)
3966 {
3967 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3968 struct radv_cmd_state *state = &cmd_buffer->state;
3969 bool front_same = state->dynamic.stencil_reference.front == reference;
3970 bool back_same = state->dynamic.stencil_reference.back == reference;
3971
3972 if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) &&
3973 (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || back_same)) {
3974 return;
3975 }
3976
3977 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
3978 cmd_buffer->state.dynamic.stencil_reference.front = reference;
3979 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
3980 cmd_buffer->state.dynamic.stencil_reference.back = reference;
3981
3982 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
3983 }
3984
3985 void radv_CmdSetDiscardRectangleEXT(
3986 VkCommandBuffer commandBuffer,
3987 uint32_t firstDiscardRectangle,
3988 uint32_t discardRectangleCount,
3989 const VkRect2D* pDiscardRectangles)
3990 {
3991 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3992 struct radv_cmd_state *state = &cmd_buffer->state;
3993 ASSERTED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount;
3994
3995 assert(firstDiscardRectangle < MAX_DISCARD_RECTANGLES);
3996 assert(total_count >= 1 && total_count <= MAX_DISCARD_RECTANGLES);
3997
3998 if (!memcmp(state->dynamic.discard_rectangle.rectangles + firstDiscardRectangle,
3999 pDiscardRectangles, discardRectangleCount * sizeof(*pDiscardRectangles))) {
4000 return;
4001 }
4002
4003 typed_memcpy(&state->dynamic.discard_rectangle.rectangles[firstDiscardRectangle],
4004 pDiscardRectangles, discardRectangleCount);
4005
4006 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE;
4007 }
4008
4009 void radv_CmdSetSampleLocationsEXT(
4010 VkCommandBuffer commandBuffer,
4011 const VkSampleLocationsInfoEXT* pSampleLocationsInfo)
4012 {
4013 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4014 struct radv_cmd_state *state = &cmd_buffer->state;
4015
4016 assert(pSampleLocationsInfo->sampleLocationsCount <= MAX_SAMPLE_LOCATIONS);
4017
4018 state->dynamic.sample_location.per_pixel = pSampleLocationsInfo->sampleLocationsPerPixel;
4019 state->dynamic.sample_location.grid_size = pSampleLocationsInfo->sampleLocationGridSize;
4020 state->dynamic.sample_location.count = pSampleLocationsInfo->sampleLocationsCount;
4021 typed_memcpy(&state->dynamic.sample_location.locations[0],
4022 pSampleLocationsInfo->pSampleLocations,
4023 pSampleLocationsInfo->sampleLocationsCount);
4024
4025 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS;
4026 }
4027
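/* Replaying secondary command buffers: the primary flushes its own pending
 * caches first, adopts each secondary's worst-case scratch and ring
 * requirements, executes the secondary's command stream via
 * cs_execute_secondary, inherits the last-emitted pipeline/draw state where
 * known, and finally re-dirties pipeline, index buffer, dynamic state and
 * descriptors since a secondary may leave the hardware in a different state.
 */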
4028 void radv_CmdExecuteCommands(
4029 VkCommandBuffer commandBuffer,
4030 uint32_t commandBufferCount,
4031 const VkCommandBuffer* pCmdBuffers)
4032 {
4033 RADV_FROM_HANDLE(radv_cmd_buffer, primary, commandBuffer);
4034
4035 assert(commandBufferCount > 0);
4036
4037 /* Emit pending flushes on primary prior to executing secondary */
4038 si_emit_cache_flush(primary);
4039
4040 for (uint32_t i = 0; i < commandBufferCount; i++) {
4041 RADV_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]);
4042
4043 primary->scratch_size_per_wave_needed = MAX2(primary->scratch_size_per_wave_needed,
4044 secondary->scratch_size_per_wave_needed);
4045 primary->scratch_waves_wanted = MAX2(primary->scratch_waves_wanted,
4046 secondary->scratch_waves_wanted);
4047 primary->compute_scratch_size_per_wave_needed = MAX2(primary->compute_scratch_size_per_wave_needed,
4048 secondary->compute_scratch_size_per_wave_needed);
4049 primary->compute_scratch_waves_wanted = MAX2(primary->compute_scratch_waves_wanted,
4050 secondary->compute_scratch_waves_wanted);
4051
4052 if (secondary->esgs_ring_size_needed > primary->esgs_ring_size_needed)
4053 primary->esgs_ring_size_needed = secondary->esgs_ring_size_needed;
4054 if (secondary->gsvs_ring_size_needed > primary->gsvs_ring_size_needed)
4055 primary->gsvs_ring_size_needed = secondary->gsvs_ring_size_needed;
4056 if (secondary->tess_rings_needed)
4057 primary->tess_rings_needed = true;
4058 if (secondary->sample_positions_needed)
4059 primary->sample_positions_needed = true;
4060
4061 if (!secondary->state.framebuffer &&
4062 (primary->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER)) {
4063 /* Emit the framebuffer state from primary if secondary
4064 * has been recorded without a framebuffer, otherwise
4065 * fast color/depth clears can't work.
4066 */
4067 radv_emit_framebuffer_state(primary);
4068 }
4069
4070 primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs);
4071
4073 /* When the secondary command buffer is compute only we don't
4074 * need to re-emit the current graphics pipeline.
4075 */
4076 if (secondary->state.emitted_pipeline) {
4077 primary->state.emitted_pipeline =
4078 secondary->state.emitted_pipeline;
4079 }
4080
4081 /* When the secondary command buffer is graphics only we don't
4082 * need to re-emit the current compute pipeline.
4083 */
4084 if (secondary->state.emitted_compute_pipeline) {
4085 primary->state.emitted_compute_pipeline =
4086 secondary->state.emitted_compute_pipeline;
4087 }
4088
4089 /* Only re-emit the draw packets when needed. */
4090 if (secondary->state.last_primitive_reset_en != -1) {
4091 primary->state.last_primitive_reset_en =
4092 secondary->state.last_primitive_reset_en;
4093 }
4094
4095 if (secondary->state.last_primitive_reset_index) {
4096 primary->state.last_primitive_reset_index =
4097 secondary->state.last_primitive_reset_index;
4098 }
4099
4100 if (secondary->state.last_ia_multi_vgt_param) {
4101 primary->state.last_ia_multi_vgt_param =
4102 secondary->state.last_ia_multi_vgt_param;
4103 }
4104
4105 primary->state.last_first_instance = secondary->state.last_first_instance;
4106 primary->state.last_num_instances = secondary->state.last_num_instances;
4107 primary->state.last_vertex_offset = secondary->state.last_vertex_offset;
4108
4109 if (secondary->state.last_index_type != -1) {
4110 primary->state.last_index_type =
4111 secondary->state.last_index_type;
4112 }
4113 }
4114
4115 /* After executing commands from secondary buffers we have to dirty
4116 * some states.
4117 */
4118 primary->state.dirty |= RADV_CMD_DIRTY_PIPELINE |
4119 RADV_CMD_DIRTY_INDEX_BUFFER |
4120 RADV_CMD_DIRTY_DYNAMIC_ALL;
4121 radv_mark_descriptor_sets_dirty(primary, VK_PIPELINE_BIND_POINT_GRAPHICS);
4122 radv_mark_descriptor_sets_dirty(primary, VK_PIPELINE_BIND_POINT_COMPUTE);
4123 }
4124
4125 VkResult radv_CreateCommandPool(
4126 VkDevice _device,
4127 const VkCommandPoolCreateInfo* pCreateInfo,
4128 const VkAllocationCallbacks* pAllocator,
4129 VkCommandPool* pCmdPool)
4130 {
4131 RADV_FROM_HANDLE(radv_device, device, _device);
4132 struct radv_cmd_pool *pool;
4133
4134 pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
4135 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4136 if (pool == NULL)
4137 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
4138
4139 if (pAllocator)
4140 pool->alloc = *pAllocator;
4141 else
4142 pool->alloc = device->alloc;
4143
4144 list_inithead(&pool->cmd_buffers);
4145 list_inithead(&pool->free_cmd_buffers);
4146
4147 pool->queue_family_index = pCreateInfo->queueFamilyIndex;
4148
4149 *pCmdPool = radv_cmd_pool_to_handle(pool);
4150
4151 return VK_SUCCESS;
4153 }
4154
4155 void radv_DestroyCommandPool(
4156 VkDevice _device,
4157 VkCommandPool commandPool,
4158 const VkAllocationCallbacks* pAllocator)
4159 {
4160 RADV_FROM_HANDLE(radv_device, device, _device);
4161 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
4162
4163 if (!pool)
4164 return;
4165
4166 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
4167 &pool->cmd_buffers, pool_link) {
4168 radv_cmd_buffer_destroy(cmd_buffer);
4169 }
4170
4171 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
4172 &pool->free_cmd_buffers, pool_link) {
4173 radv_cmd_buffer_destroy(cmd_buffer);
4174 }
4175
4176 vk_free2(&device->alloc, pAllocator, pool);
4177 }
4178
4179 VkResult radv_ResetCommandPool(
4180 VkDevice device,
4181 VkCommandPool commandPool,
4182 VkCommandPoolResetFlags flags)
4183 {
4184 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
4185 VkResult result;
4186
4187 list_for_each_entry(struct radv_cmd_buffer, cmd_buffer,
4188 &pool->cmd_buffers, pool_link) {
4189 result = radv_reset_cmd_buffer(cmd_buffer);
4190 if (result != VK_SUCCESS)
4191 return result;
4192 }
4193
4194 return VK_SUCCESS;
4195 }
4196
4197 void radv_TrimCommandPool(
4198 VkDevice device,
4199 VkCommandPool commandPool,
4200 VkCommandPoolTrimFlags flags)
4201 {
4202 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
4203
4204 if (!pool)
4205 return;
4206
4207 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
4208 &pool->free_cmd_buffers, pool_link) {
4209 radv_cmd_buffer_destroy(cmd_buffer);
4210 }
4211 }
4212
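/* Entering a subpass: emit the subpass start barrier, switch the command
 * buffer to the new subpass, perform the required image layout transitions
 * for every attachment the subpass uses, and then execute any pending
 * load-op clears.
 */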
4213 static void
4214 radv_cmd_buffer_begin_subpass(struct radv_cmd_buffer *cmd_buffer,
4215 uint32_t subpass_id)
4216 {
4217 struct radv_cmd_state *state = &cmd_buffer->state;
4218 struct radv_subpass *subpass = &state->pass->subpasses[subpass_id];
4219
4220 ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
4221 cmd_buffer->cs, 4096);
4222
4223 radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);
4224
4225 radv_cmd_buffer_set_subpass(cmd_buffer, subpass);
4226
4227 for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
4228 const uint32_t a = subpass->attachments[i].attachment;
4229 if (a == VK_ATTACHMENT_UNUSED)
4230 continue;
4231
4232 radv_handle_subpass_image_transition(cmd_buffer,
4233 subpass->attachments[i],
4234 true);
4235 }
4236
4237 radv_cmd_buffer_clear_subpass(cmd_buffer);
4238
4239 assert(cmd_buffer->cs->cdw <= cdw_max);
4240 }
4241
4242 static void
4243 radv_cmd_buffer_end_subpass(struct radv_cmd_buffer *cmd_buffer)
4244 {
4245 struct radv_cmd_state *state = &cmd_buffer->state;
4246 const struct radv_subpass *subpass = state->subpass;
4247 uint32_t subpass_id = radv_get_subpass_id(cmd_buffer);
4248
4249 radv_cmd_buffer_resolve_subpass(cmd_buffer);
4250
4251 for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
4252 const uint32_t a = subpass->attachments[i].attachment;
4253 if (a == VK_ATTACHMENT_UNUSED)
4254 continue;
4255
4256 if (state->pass->attachments[a].last_subpass_idx != subpass_id)
4257 continue;
4258
4259 VkImageLayout layout = state->pass->attachments[a].final_layout;
4260 VkImageLayout stencil_layout = state->pass->attachments[a].stencil_final_layout;
4261 struct radv_subpass_attachment att = { a, layout, stencil_layout };
4262 radv_handle_subpass_image_transition(cmd_buffer, att, false);
4263 }
4264 }
4265
4266 void radv_CmdBeginRenderPass(
4267 VkCommandBuffer commandBuffer,
4268 const VkRenderPassBeginInfo* pRenderPassBegin,
4269 VkSubpassContents contents)
4270 {
4271 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4272 RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass);
4273 RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
4274 VkResult result;
4275
4276 cmd_buffer->state.framebuffer = framebuffer;
4277 cmd_buffer->state.pass = pass;
4278 cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
4279
4280 result = radv_cmd_state_setup_attachments(cmd_buffer, pass, pRenderPassBegin);
4281 if (result != VK_SUCCESS)
4282 return;
4283
4284 result = radv_cmd_state_setup_sample_locations(cmd_buffer, pass, pRenderPassBegin);
4285 if (result != VK_SUCCESS)
4286 return;
4287
4288 radv_cmd_buffer_begin_subpass(cmd_buffer, 0);
4289 }
4290
4291 void radv_CmdBeginRenderPass2KHR(
4292 VkCommandBuffer commandBuffer,
4293 const VkRenderPassBeginInfo* pRenderPassBeginInfo,
4294 const VkSubpassBeginInfoKHR* pSubpassBeginInfo)
4295 {
4296 radv_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
4297 pSubpassBeginInfo->contents);
4298 }
4299
4300 void radv_CmdNextSubpass(
4301 VkCommandBuffer commandBuffer,
4302 VkSubpassContents contents)
4303 {
4304 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4305
4306 uint32_t prev_subpass = radv_get_subpass_id(cmd_buffer);
4307 radv_cmd_buffer_end_subpass(cmd_buffer);
4308 radv_cmd_buffer_begin_subpass(cmd_buffer, prev_subpass + 1);
4309 }
4310
4311 void radv_CmdNextSubpass2KHR(
4312 VkCommandBuffer commandBuffer,
4313 const VkSubpassBeginInfoKHR* pSubpassBeginInfo,
4314 const VkSubpassEndInfoKHR* pSubpassEndInfo)
4315 {
4316 radv_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
4317 }
4318
4319 static void radv_emit_view_index(struct radv_cmd_buffer *cmd_buffer, unsigned index)
4320 {
4321 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
4322 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
4323 if (!radv_get_shader(pipeline, stage))
4324 continue;
4325
4326 struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, AC_UD_VIEW_INDEX);
4327 if (loc->sgpr_idx == -1)
4328 continue;
4329 uint32_t base_reg = pipeline->user_data_0[stage];
4330 radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
4331
4332 }
4333 if (radv_pipeline_has_gs_copy_shader(pipeline)) {
4334 struct radv_userdata_info *loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_VIEW_INDEX];
4335 if (loc->sgpr_idx != -1) {
4336 uint32_t base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
4337 radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
4338 }
4339 }
4340 }
4341
4342 static void
4343 radv_cs_emit_draw_packet(struct radv_cmd_buffer *cmd_buffer,
4344 uint32_t vertex_count,
4345 bool use_opaque)
4346 {
4347 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, cmd_buffer->state.predicating));
4348 radeon_emit(cmd_buffer->cs, vertex_count);
4349 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
4350 S_0287F0_USE_OPAQUE(use_opaque));
4351 }
4352
4353 static void
4354 radv_cs_emit_draw_indexed_packet(struct radv_cmd_buffer *cmd_buffer,
4355 uint64_t index_va,
4356 uint32_t index_count)
4357 {
4358 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, cmd_buffer->state.predicating));
4359 radeon_emit(cmd_buffer->cs, cmd_buffer->state.max_index_count);
4360 radeon_emit(cmd_buffer->cs, index_va);
4361 radeon_emit(cmd_buffer->cs, index_va >> 32);
4362 radeon_emit(cmd_buffer->cs, index_count);
4363 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_DMA);
4364 }
4365
4366 static void
4367 radv_cs_emit_indirect_draw_packet(struct radv_cmd_buffer *cmd_buffer,
4368 bool indexed,
4369 uint32_t draw_count,
4370 uint64_t count_va,
4371 uint32_t stride)
4372 {
4373 struct radeon_cmdbuf *cs = cmd_buffer->cs;
4374 unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
4375 : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
4376 bool draw_id_enable = radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.vs.needs_draw_id;
4377 uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr;
4378 bool predicating = cmd_buffer->state.predicating;
4379 assert(base_reg);
4380
4381 /* just reset draw state for vertex data */
4382 cmd_buffer->state.last_first_instance = -1;
4383 cmd_buffer->state.last_num_instances = -1;
4384 cmd_buffer->state.last_vertex_offset = -1;
4385
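	/* The plain DRAW_INDIRECT/DRAW_INDEX_INDIRECT packet is only usable for
	 * a single draw with no count buffer and no gl_DrawID requirement;
	 * anything else needs the *_MULTI variant, which can fetch the draw
	 * count from memory and update the per-draw user SGPRs between draws.
	 */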
4386 if (draw_count == 1 && !count_va && !draw_id_enable) {
4387 radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT :
4388 PKT3_DRAW_INDIRECT, 3, predicating));
4389 radeon_emit(cs, 0);
4390 radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
4391 radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
4392 radeon_emit(cs, di_src_sel);
4393 } else {
4394 radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
4395 PKT3_DRAW_INDIRECT_MULTI,
4396 8, predicating));
4397 radeon_emit(cs, 0);
4398 radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
4399 radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
4400 radeon_emit(cs, (((base_reg + 8) - SI_SH_REG_OFFSET) >> 2) |
4401 S_2C3_DRAW_INDEX_ENABLE(draw_id_enable) |
4402 S_2C3_COUNT_INDIRECT_ENABLE(!!count_va));
4403 radeon_emit(cs, draw_count); /* count */
4404 radeon_emit(cs, count_va); /* count_addr */
4405 radeon_emit(cs, count_va >> 32);
4406 radeon_emit(cs, stride); /* stride */
4407 radeon_emit(cs, di_src_sel);
4408 }
4409 }
4410
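/* Emit the actual draw packet(s) for a draw call. For indirect draws the
 * indirect buffer address is programmed with SET_BASE first; for direct
 * draws the base vertex/first instance user SGPRs and NUM_INSTANCES are only
 * re-emitted when they changed. When the subpass has a view mask (multiview),
 * the draw is repeated once per active view with the view-index user SGPR
 * updated in between.
 */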
4411 static void
4412 radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer,
4413 const struct radv_draw_info *info)
4414 {
4415 struct radv_cmd_state *state = &cmd_buffer->state;
4416 struct radeon_winsys *ws = cmd_buffer->device->ws;
4417 struct radeon_cmdbuf *cs = cmd_buffer->cs;
4418
4419 if (info->indirect) {
4420 uint64_t va = radv_buffer_get_va(info->indirect->bo);
4421 uint64_t count_va = 0;
4422
4423 va += info->indirect->offset + info->indirect_offset;
4424
4425 radv_cs_add_buffer(ws, cs, info->indirect->bo);
4426
4427 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
4428 radeon_emit(cs, 1);
4429 radeon_emit(cs, va);
4430 radeon_emit(cs, va >> 32);
4431
4432 if (info->count_buffer) {
4433 count_va = radv_buffer_get_va(info->count_buffer->bo);
4434 count_va += info->count_buffer->offset +
4435 info->count_buffer_offset;
4436
4437 radv_cs_add_buffer(ws, cs, info->count_buffer->bo);
4438 }
4439
4440 if (!state->subpass->view_mask) {
4441 radv_cs_emit_indirect_draw_packet(cmd_buffer,
4442 info->indexed,
4443 info->count,
4444 count_va,
4445 info->stride);
4446 } else {
4447 unsigned i;
4448 for_each_bit(i, state->subpass->view_mask) {
4449 radv_emit_view_index(cmd_buffer, i);
4450
4451 radv_cs_emit_indirect_draw_packet(cmd_buffer,
4452 info->indexed,
4453 info->count,
4454 count_va,
4455 info->stride);
4456 }
4457 }
4458 } else {
4459 assert(state->pipeline->graphics.vtx_base_sgpr);
4460
4461 if (info->vertex_offset != state->last_vertex_offset ||
4462 info->first_instance != state->last_first_instance) {
4463 radeon_set_sh_reg_seq(cs, state->pipeline->graphics.vtx_base_sgpr,
4464 state->pipeline->graphics.vtx_emit_num);
4465
4466 radeon_emit(cs, info->vertex_offset);
4467 radeon_emit(cs, info->first_instance);
4468 if (state->pipeline->graphics.vtx_emit_num == 3)
4469 radeon_emit(cs, 0);
4470 state->last_first_instance = info->first_instance;
4471 state->last_vertex_offset = info->vertex_offset;
4472 }
4473
4474 if (state->last_num_instances != info->instance_count) {
4475 radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, false));
4476 radeon_emit(cs, info->instance_count);
4477 state->last_num_instances = info->instance_count;
4478 }
4479
4480 if (info->indexed) {
4481 int index_size = radv_get_vgt_index_size(state->index_type);
4482 uint64_t index_va;
4483
4484 /* Skip draw calls with 0-sized index buffers. They
4485 * cause a hang on some chips, like Navi10-14.
4486 */
4487 if (!cmd_buffer->state.max_index_count)
4488 return;
4489
4490 index_va = state->index_va;
4491 index_va += info->first_index * index_size;
4492
4493 if (!state->subpass->view_mask) {
4494 radv_cs_emit_draw_indexed_packet(cmd_buffer,
4495 index_va,
4496 info->count);
4497 } else {
4498 unsigned i;
4499 for_each_bit(i, state->subpass->view_mask) {
4500 radv_emit_view_index(cmd_buffer, i);
4501
4502 radv_cs_emit_draw_indexed_packet(cmd_buffer,
4503 index_va,
4504 info->count);
4505 }
4506 }
4507 } else {
4508 if (!state->subpass->view_mask) {
4509 radv_cs_emit_draw_packet(cmd_buffer,
4510 info->count,
4511 !!info->strmout_buffer);
4512 } else {
4513 unsigned i;
4514 for_each_bit(i, state->subpass->view_mask) {
4515 radv_emit_view_index(cmd_buffer, i);
4516
4517 radv_cs_emit_draw_packet(cmd_buffer,
4518 info->count,
4519 !!info->strmout_buffer);
4520 }
4521 }
4522 }
4523 }
4524 }
4525
4526 /*
4527  * Vega and Raven have a bug which triggers if there are multiple context
4528 * register contexts active at the same time with different scissor values.
4529 *
4530 * There are two possible workarounds:
4531 * 1) Wait for PS_PARTIAL_FLUSH every time the scissor is changed. That way
4532 * there is only ever 1 active set of scissor values at the same time.
4533 *
4534 * 2) Whenever the hardware switches contexts we have to set the scissor
4535 * registers again even if it is a noop. That way the new context gets
4536 * the correct scissor values.
4537 *
4538 * This implements option 2. radv_need_late_scissor_emission needs to
4539 * return true on affected HW if radv_emit_all_graphics_states sets
4540 * any context registers.
4541 */
4542 static bool radv_need_late_scissor_emission(struct radv_cmd_buffer *cmd_buffer,
4543 const struct radv_draw_info *info)
4544 {
4545 struct radv_cmd_state *state = &cmd_buffer->state;
4546
4547 if (!cmd_buffer->device->physical_device->rad_info.has_gfx9_scissor_bug)
4548 return false;
4549
4550 if (cmd_buffer->state.context_roll_without_scissor_emitted || info->strmout_buffer)
4551 return true;
4552
4553 uint32_t used_states = cmd_buffer->state.pipeline->graphics.needed_dynamic_state | ~RADV_CMD_DIRTY_DYNAMIC_ALL;
4554
4555 /* Index, vertex and streamout buffers don't change context regs, and
4556 * pipeline is already handled.
4557 */
4558 used_states &= ~(RADV_CMD_DIRTY_INDEX_BUFFER |
4559 RADV_CMD_DIRTY_VERTEX_BUFFER |
4560 RADV_CMD_DIRTY_STREAMOUT_BUFFER |
4561 RADV_CMD_DIRTY_PIPELINE);
4562
4563 if (cmd_buffer->state.dirty & used_states)
4564 return true;
4565
4566 uint32_t primitive_reset_index =
4567 radv_get_primitive_reset_index(cmd_buffer);
4568
4569 if (info->indexed && state->pipeline->graphics.prim_restart_enable &&
4570 primitive_reset_index != state->last_primitive_reset_index)
4571 return true;
4572
4573 return false;
4574 }
4575
4576 static void
4577 radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer,
4578 const struct radv_draw_info *info)
4579 {
4580 bool late_scissor_emission;
4581
4582 if ((cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER) ||
4583 cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline)
4584 radv_emit_rbplus_state(cmd_buffer);
4585
4586 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE)
4587 radv_emit_graphics_pipeline(cmd_buffer);
4588
4589 /* This should be before the cmd_buffer->state.dirty is cleared
4590 * (excluding RADV_CMD_DIRTY_PIPELINE) and after
4591 * cmd_buffer->state.context_roll_without_scissor_emitted is set. */
4592 late_scissor_emission =
4593 radv_need_late_scissor_emission(cmd_buffer, info);
4594
4595 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER)
4596 radv_emit_framebuffer_state(cmd_buffer);
4597
4598 if (info->indexed) {
4599 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_INDEX_BUFFER)
4600 radv_emit_index_buffer(cmd_buffer);
4601 } else {
4602 /* On GFX7 and later, non-indexed draws overwrite VGT_INDEX_TYPE,
4603 * so the state must be re-emitted before the next indexed
4604 * draw.
4605 */
4606 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
4607 cmd_buffer->state.last_index_type = -1;
4608 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
4609 }
4610 }
4611
4612 radv_cmd_buffer_flush_dynamic_state(cmd_buffer);
4613
4614 radv_emit_draw_registers(cmd_buffer, info);
4615
4616 if (late_scissor_emission)
4617 radv_emit_scissor(cmd_buffer);
4618 }
4619
4620 static void
4621 radv_draw(struct radv_cmd_buffer *cmd_buffer,
4622 const struct radv_draw_info *info)
4623 {
4624 struct radeon_info *rad_info =
4625 &cmd_buffer->device->physical_device->rad_info;
4626 bool has_prefetch =
4627 cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7;
4628 bool pipeline_is_dirty =
4629 (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
4630 cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline;
4631
4632 ASSERTED unsigned cdw_max =
4633 radeon_check_space(cmd_buffer->device->ws,
4634 cmd_buffer->cs, 4096);
4635
4636 if (likely(!info->indirect)) {
4637 /* GFX6-GFX7 treat instance_count==0 as instance_count==1. There is
4638 * no workaround for indirect draws, but we can at least skip
4639 * direct draws.
4640 */
4641 if (unlikely(!info->instance_count))
4642 return;
4643
4644 /* Handle count == 0. */
4645 if (unlikely(!info->count && !info->strmout_buffer))
4646 return;
4647 }
4648
4649 /* Use optimal packet order based on whether we need to sync the
4650 * pipeline.
4651 */
4652 if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
4653 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
4654 RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
4655 RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) {
4656 /* If we have to wait for idle, set all states first, so that
4657 * all SET packets are processed in parallel with previous draw
4658 * calls. Then upload descriptors, set shader pointers, and
4659 * draw, and prefetch at the end. This ensures that the time
4660 * the CUs are idle is very short. (there are only SET_SH
4661 * packets between the wait and the draw)
4662 */
4663 radv_emit_all_graphics_states(cmd_buffer, info);
4664 si_emit_cache_flush(cmd_buffer);
4665 /* <-- CUs are idle here --> */
4666
4667 radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty);
4668
4669 radv_emit_draw_packets(cmd_buffer, info);
4670 /* <-- CUs are busy here --> */
4671
4672 /* Start prefetches after the draw has been started. Both will
4673 * run in parallel, but starting the draw first is more
4674 * important.
4675 */
4676 if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
4677 radv_emit_prefetch_L2(cmd_buffer,
4678 cmd_buffer->state.pipeline, false);
4679 }
4680 } else {
4681 /* If we don't wait for idle, start prefetches first, then set
4682 * states, and draw at the end.
4683 */
4684 si_emit_cache_flush(cmd_buffer);
4685
4686 if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
4687 /* Only prefetch the vertex shader and VBO descriptors
4688 * in order to start the draw as soon as possible.
4689 */
4690 radv_emit_prefetch_L2(cmd_buffer,
4691 cmd_buffer->state.pipeline, true);
4692 }
4693
4694 radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty);
4695
4696 radv_emit_all_graphics_states(cmd_buffer, info);
4697 radv_emit_draw_packets(cmd_buffer, info);
4698
4699 /* Prefetch the remaining shaders after the draw has been
4700 * started.
4701 */
4702 if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
4703 radv_emit_prefetch_L2(cmd_buffer,
4704 cmd_buffer->state.pipeline, false);
4705 }
4706 }
4707
4708 /* Workaround for a VGT hang when streamout is enabled.
4709 * It must be done after drawing.
4710 */
4711 if (cmd_buffer->state.streamout.streamout_enabled &&
4712 (rad_info->family == CHIP_HAWAII ||
4713 rad_info->family == CHIP_TONGA ||
4714 rad_info->family == CHIP_FIJI)) {
4715 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_STREAMOUT_SYNC;
4716 }
4717
4718 assert(cmd_buffer->cs->cdw <= cdw_max);
4719 radv_cmd_buffer_after_draw(cmd_buffer, RADV_CMD_FLAG_PS_PARTIAL_FLUSH);
4720 }
4721
4722 void radv_CmdDraw(
4723 VkCommandBuffer commandBuffer,
4724 uint32_t vertexCount,
4725 uint32_t instanceCount,
4726 uint32_t firstVertex,
4727 uint32_t firstInstance)
4728 {
4729 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4730 struct radv_draw_info info = {};
4731
4732 info.count = vertexCount;
4733 info.instance_count = instanceCount;
4734 info.first_instance = firstInstance;
4735 info.vertex_offset = firstVertex;
4736
4737 radv_draw(cmd_buffer, &info);
4738 }
4739
4740 void radv_CmdDrawIndexed(
4741 VkCommandBuffer commandBuffer,
4742 uint32_t indexCount,
4743 uint32_t instanceCount,
4744 uint32_t firstIndex,
4745 int32_t vertexOffset,
4746 uint32_t firstInstance)
4747 {
4748 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4749 struct radv_draw_info info = {};
4750
4751 info.indexed = true;
4752 info.count = indexCount;
4753 info.instance_count = instanceCount;
4754 info.first_index = firstIndex;
4755 info.vertex_offset = vertexOffset;
4756 info.first_instance = firstInstance;
4757
4758 radv_draw(cmd_buffer, &info);
4759 }
4760
4761 void radv_CmdDrawIndirect(
4762 VkCommandBuffer commandBuffer,
4763 VkBuffer _buffer,
4764 VkDeviceSize offset,
4765 uint32_t drawCount,
4766 uint32_t stride)
4767 {
4768 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4769 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
4770 struct radv_draw_info info = {};
4771
4772 info.count = drawCount;
4773 info.indirect = buffer;
4774 info.indirect_offset = offset;
4775 info.stride = stride;
4776
4777 radv_draw(cmd_buffer, &info);
4778 }
4779
4780 void radv_CmdDrawIndexedIndirect(
4781 VkCommandBuffer commandBuffer,
4782 VkBuffer _buffer,
4783 VkDeviceSize offset,
4784 uint32_t drawCount,
4785 uint32_t stride)
4786 {
4787 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4788 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
4789 struct radv_draw_info info = {};
4790
4791 info.indexed = true;
4792 info.count = drawCount;
4793 info.indirect = buffer;
4794 info.indirect_offset = offset;
4795 info.stride = stride;
4796
4797 radv_draw(cmd_buffer, &info);
4798 }
4799
4800 void radv_CmdDrawIndirectCountKHR(
4801 VkCommandBuffer commandBuffer,
4802 VkBuffer _buffer,
4803 VkDeviceSize offset,
4804 VkBuffer _countBuffer,
4805 VkDeviceSize countBufferOffset,
4806 uint32_t maxDrawCount,
4807 uint32_t stride)
4808 {
4809 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4810 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
4811 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
4812 struct radv_draw_info info = {};
4813
4814 info.count = maxDrawCount;
4815 info.indirect = buffer;
4816 info.indirect_offset = offset;
4817 info.count_buffer = count_buffer;
4818 info.count_buffer_offset = countBufferOffset;
4819 info.stride = stride;
4820
4821 radv_draw(cmd_buffer, &info);
4822 }
4823
4824 void radv_CmdDrawIndexedIndirectCountKHR(
4825 VkCommandBuffer commandBuffer,
4826 VkBuffer _buffer,
4827 VkDeviceSize offset,
4828 VkBuffer _countBuffer,
4829 VkDeviceSize countBufferOffset,
4830 uint32_t maxDrawCount,
4831 uint32_t stride)
4832 {
4833 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4834 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
4835 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
4836 struct radv_draw_info info = {};
4837
4838 info.indexed = true;
4839 info.count = maxDrawCount;
4840 info.indirect = buffer;
4841 info.indirect_offset = offset;
4842 info.count_buffer = count_buffer;
4843 info.count_buffer_offset = countBufferOffset;
4844 info.stride = stride;
4845
4846 radv_draw(cmd_buffer, &info);
4847 }
4848
4849 struct radv_dispatch_info {
4850 /**
4851 * Determine the layout of the grid (in block units) to be used.
4852 */
4853 uint32_t blocks[3];
4854
4855 /**
4856 * A starting offset for the grid. If unaligned is set, the offset
4857 * must still be aligned.
4858 */
4859 uint32_t offsets[3];
4860 /**
4861 * Whether it's an unaligned compute dispatch.
4862 */
4863 bool unaligned;
4864
4865 /**
4866 * Indirect compute parameters resource.
4867 */
4868 struct radv_buffer *indirect;
4869 uint64_t indirect_offset;
4870 };
4871
4872 static void
4873 radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer,
4874 const struct radv_dispatch_info *info)
4875 {
4876 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
4877 struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
4878 unsigned dispatch_initiator = cmd_buffer->device->dispatch_initiator;
4879 struct radeon_winsys *ws = cmd_buffer->device->ws;
4880 bool predicating = cmd_buffer->state.predicating;
4881 struct radeon_cmdbuf *cs = cmd_buffer->cs;
4882 struct radv_userdata_info *loc;
4883
4884 loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE,
4885 AC_UD_CS_GRID_SIZE);
4886
4887 ASSERTED unsigned cdw_max = radeon_check_space(ws, cs, 25);
4888
4889 if (compute_shader->info.wave_size == 32) {
4890 assert(cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10);
4891 dispatch_initiator |= S_00B800_CS_W32_EN(1);
4892 }
4893
4894 if (info->indirect) {
4895 uint64_t va = radv_buffer_get_va(info->indirect->bo);
4896
4897 va += info->indirect->offset + info->indirect_offset;
4898
4899 radv_cs_add_buffer(ws, cs, info->indirect->bo);
4900
4901 if (loc->sgpr_idx != -1) {
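				/* Copy the three 32-bit grid dimensions from the
				 * indirect buffer into the CS_GRID_SIZE user SGPRs,
				 * so the compute shader can read the dispatch size
				 * (e.g. gl_NumWorkGroups).
				 */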
4902 for (unsigned i = 0; i < 3; ++i) {
4903 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
4904 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
4905 COPY_DATA_DST_SEL(COPY_DATA_REG));
4906 radeon_emit(cs, (va + 4 * i));
4907 radeon_emit(cs, (va + 4 * i) >> 32);
4908 radeon_emit(cs, ((R_00B900_COMPUTE_USER_DATA_0
4909 + loc->sgpr_idx * 4) >> 2) + i);
4910 radeon_emit(cs, 0);
4911 }
4912 }
4913
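		/* On the compute ring (MEC), DISPATCH_INDIRECT carries the
		 * full 64-bit address of the arguments; on the GFX ring the
		 * address is programmed with SET_BASE and the packet only
		 * carries an offset relative to that base (0 here).
		 */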
4914 if (radv_cmd_buffer_uses_mec(cmd_buffer)) {
4915 radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 2, predicating) |
4916 PKT3_SHADER_TYPE_S(1));
4917 radeon_emit(cs, va);
4918 radeon_emit(cs, va >> 32);
4919 radeon_emit(cs, dispatch_initiator);
4920 } else {
4921 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
4922 PKT3_SHADER_TYPE_S(1));
4923 radeon_emit(cs, 1);
4924 radeon_emit(cs, va);
4925 radeon_emit(cs, va >> 32);
4926
4927 radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, predicating) |
4928 PKT3_SHADER_TYPE_S(1));
4929 radeon_emit(cs, 0);
4930 radeon_emit(cs, dispatch_initiator);
4931 }
4932 } else {
4933 unsigned blocks[3] = { info->blocks[0], info->blocks[1], info->blocks[2] };
4934 unsigned offsets[3] = { info->offsets[0], info->offsets[1], info->offsets[2] };
4935
4936 if (info->unaligned) {
4937 unsigned *cs_block_size = compute_shader->info.cs.block_size;
4938 unsigned remainder[3];
4939
4940 			/* If the grid size is already aligned, these remainders
4941 			 * should be an entire block size, not 0.
4942 			 */
4943 remainder[0] = blocks[0] + cs_block_size[0] -
4944 align_u32_npot(blocks[0], cs_block_size[0]);
4945 remainder[1] = blocks[1] + cs_block_size[1] -
4946 align_u32_npot(blocks[1], cs_block_size[1]);
4947 remainder[2] = blocks[2] + cs_block_size[2] -
4948 align_u32_npot(blocks[2], cs_block_size[2]);
4949
4950 blocks[0] = round_up_u32(blocks[0], cs_block_size[0]);
4951 blocks[1] = round_up_u32(blocks[1], cs_block_size[1]);
4952 blocks[2] = round_up_u32(blocks[2], cs_block_size[2]);
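			/* E.g. a 100x1x1 thread dispatch with a 64x1x1 block
			 * size becomes a 2x1x1 grid of groups where the last
			 * group in X only runs NUM_THREAD_PARTIAL = 36 threads,
			 * which PARTIAL_TG_EN below enables.
			 */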
4953
4954 for(unsigned i = 0; i < 3; ++i) {
4955 assert(offsets[i] % cs_block_size[i] == 0);
4956 offsets[i] /= cs_block_size[i];
4957 }
4958
4959 radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
4960 radeon_emit(cs,
4961 S_00B81C_NUM_THREAD_FULL(cs_block_size[0]) |
4962 S_00B81C_NUM_THREAD_PARTIAL(remainder[0]));
4963 radeon_emit(cs,
4964 S_00B81C_NUM_THREAD_FULL(cs_block_size[1]) |
4965 S_00B81C_NUM_THREAD_PARTIAL(remainder[1]));
4966 radeon_emit(cs,
4967 S_00B81C_NUM_THREAD_FULL(cs_block_size[2]) |
4968 S_00B81C_NUM_THREAD_PARTIAL(remainder[2]));
4969
4970 dispatch_initiator |= S_00B800_PARTIAL_TG_EN(1);
4971 }
4972
4973 if (loc->sgpr_idx != -1) {
4974 assert(loc->num_sgprs == 3);
4975
4976 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
4977 loc->sgpr_idx * 4, 3);
4978 radeon_emit(cs, blocks[0]);
4979 radeon_emit(cs, blocks[1]);
4980 radeon_emit(cs, blocks[2]);
4981 }
4982
4983 if (offsets[0] || offsets[1] || offsets[2]) {
4984 radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
4985 radeon_emit(cs, offsets[0]);
4986 radeon_emit(cs, offsets[1]);
4987 radeon_emit(cs, offsets[2]);
4988
4989 /* The blocks in the packet are not counts but end values. */
4990 for (unsigned i = 0; i < 3; ++i)
4991 blocks[i] += offsets[i];
4992 } else {
4993 dispatch_initiator |= S_00B800_FORCE_START_AT_000(1);
4994 }
4995
4996 radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, predicating) |
4997 PKT3_SHADER_TYPE_S(1));
4998 radeon_emit(cs, blocks[0]);
4999 radeon_emit(cs, blocks[1]);
5000 radeon_emit(cs, blocks[2]);
5001 radeon_emit(cs, dispatch_initiator);
5002 }
5003
5004 assert(cmd_buffer->cs->cdw <= cdw_max);
5005 }
5006
5007 static void
5008 radv_upload_compute_shader_descriptors(struct radv_cmd_buffer *cmd_buffer)
5009 {
5010 radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
5011 radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
5012 }
5013
5014 static void
5015 radv_dispatch(struct radv_cmd_buffer *cmd_buffer,
5016 const struct radv_dispatch_info *info)
5017 {
5018 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
5019 bool has_prefetch =
5020 cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7;
5021 bool pipeline_is_dirty = pipeline &&
5022 pipeline != cmd_buffer->state.emitted_compute_pipeline;
5023
5024 if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
5025 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
5026 RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
5027 RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) {
5028 /* If we have to wait for idle, set all states first, so that
5029 * all SET packets are processed in parallel with previous draw
5030 * calls. Then upload descriptors, set shader pointers, and
5031 * dispatch, and prefetch at the end. This ensures that the
5032 		 * time the CUs are idle is very short. (there are only SET_SH
5033 		 * packets between the wait and the dispatch)
5034 */
5035 radv_emit_compute_pipeline(cmd_buffer);
5036 si_emit_cache_flush(cmd_buffer);
5037 /* <-- CUs are idle here --> */
5038
5039 radv_upload_compute_shader_descriptors(cmd_buffer);
5040
5041 radv_emit_dispatch_packets(cmd_buffer, info);
5042 /* <-- CUs are busy here --> */
5043
5044 /* Start prefetches after the dispatch has been started. Both
5045 * will run in parallel, but starting the dispatch first is
5046 * more important.
5047 */
5048 if (has_prefetch && pipeline_is_dirty) {
5049 radv_emit_shader_prefetch(cmd_buffer,
5050 pipeline->shaders[MESA_SHADER_COMPUTE]);
5051 }
5052 } else {
5053 /* If we don't wait for idle, start prefetches first, then set
5054 * states, and dispatch at the end.
5055 */
5056 si_emit_cache_flush(cmd_buffer);
5057
5058 if (has_prefetch && pipeline_is_dirty) {
5059 radv_emit_shader_prefetch(cmd_buffer,
5060 pipeline->shaders[MESA_SHADER_COMPUTE]);
5061 }
5062
5063 radv_upload_compute_shader_descriptors(cmd_buffer);
5064
5065 radv_emit_compute_pipeline(cmd_buffer);
5066 radv_emit_dispatch_packets(cmd_buffer, info);
5067 }
5068
5069 radv_cmd_buffer_after_draw(cmd_buffer, RADV_CMD_FLAG_CS_PARTIAL_FLUSH);
5070 }
5071
5072 void radv_CmdDispatchBase(
5073 VkCommandBuffer commandBuffer,
5074 uint32_t base_x,
5075 uint32_t base_y,
5076 uint32_t base_z,
5077 uint32_t x,
5078 uint32_t y,
5079 uint32_t z)
5080 {
5081 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5082 struct radv_dispatch_info info = {};
5083
5084 info.blocks[0] = x;
5085 info.blocks[1] = y;
5086 info.blocks[2] = z;
5087
5088 info.offsets[0] = base_x;
5089 info.offsets[1] = base_y;
5090 info.offsets[2] = base_z;
5091 radv_dispatch(cmd_buffer, &info);
5092 }
5093
5094 void radv_CmdDispatch(
5095 VkCommandBuffer commandBuffer,
5096 uint32_t x,
5097 uint32_t y,
5098 uint32_t z)
5099 {
5100 radv_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
5101 }
5102
5103 void radv_CmdDispatchIndirect(
5104 VkCommandBuffer commandBuffer,
5105 VkBuffer _buffer,
5106 VkDeviceSize offset)
5107 {
5108 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5109 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
5110 struct radv_dispatch_info info = {};
5111
5112 info.indirect = buffer;
5113 info.indirect_offset = offset;
5114
5115 radv_dispatch(cmd_buffer, &info);
5116 }
5117
5118 void radv_unaligned_dispatch(
5119 struct radv_cmd_buffer *cmd_buffer,
5120 uint32_t x,
5121 uint32_t y,
5122 uint32_t z)
5123 {
5124 struct radv_dispatch_info info = {};
5125
5126 info.blocks[0] = x;
5127 info.blocks[1] = y;
5128 info.blocks[2] = z;
5129 info.unaligned = 1;
5130
5131 radv_dispatch(cmd_buffer, &info);
5132 }
5133
5134 void radv_CmdEndRenderPass(
5135 VkCommandBuffer commandBuffer)
5136 {
5137 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5138
5139 radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier);
5140
5141 radv_cmd_buffer_end_subpass(cmd_buffer);
5142
5143 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
5144 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs);
5145
5146 cmd_buffer->state.pass = NULL;
5147 cmd_buffer->state.subpass = NULL;
5148 cmd_buffer->state.attachments = NULL;
5149 cmd_buffer->state.framebuffer = NULL;
5150 cmd_buffer->state.subpass_sample_locs = NULL;
5151 }
5152
5153 void radv_CmdEndRenderPass2KHR(
5154 VkCommandBuffer commandBuffer,
5155 const VkSubpassEndInfoKHR* pSubpassEndInfo)
5156 {
5157 radv_CmdEndRenderPass(commandBuffer);
5158 }
5159
5160 /*
5161 * For HTILE we have the following interesting clear words:
5162 * 0xfffff30f: Uncompressed, full depth range, for depth+stencil HTILE
5163 * 0xfffc000f: Uncompressed, full depth range, for depth only HTILE.
5164 * 0xfffffff0: Clear depth to 1.0
5165 * 0x00000000: Clear depth to 0.0
5166 */
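/* radv_initialize_htile() below writes one of the two uncompressed
 * (fully expanded) words above so that the HTILE metadata is consistent
 * before the image is used with HTILE compression enabled.
 */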
5167 static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
5168 struct radv_image *image,
5169 const VkImageSubresourceRange *range)
5170 {
5171 assert(range->baseMipLevel == 0);
5172 	assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_MIP_LEVELS);
5173 VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT;
5174 struct radv_cmd_state *state = &cmd_buffer->state;
5175 uint32_t htile_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
5176 VkClearDepthStencilValue value = {};
5177
5178 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
5179 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
5180
5181 state->flush_bits |= radv_clear_htile(cmd_buffer, image, range, htile_value);
5182
5183 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
5184
5185 if (vk_format_is_stencil(image->vk_format))
5186 aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
5187
5188 radv_set_ds_clear_metadata(cmd_buffer, image, range, value, aspects);
5189
5190 if (radv_image_is_tc_compat_htile(image)) {
5191 		 * Initialize the TC-compat metadata value to 0 because by
5192 		 * default DB_Z_INFO.RANGE_PRECISION is set to 1, and we only
5193 		 * need to conditionally update its value when performing
5194 * a fast depth clear.
5195 */
5196 radv_set_tc_compat_zrange_metadata(cmd_buffer, image, range, 0);
5197 }
5198 }
5199
5200 static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
5201 struct radv_image *image,
5202 VkImageLayout src_layout,
5203 bool src_render_loop,
5204 VkImageLayout dst_layout,
5205 bool dst_render_loop,
5206 unsigned src_queue_mask,
5207 unsigned dst_queue_mask,
5208 const VkImageSubresourceRange *range,
5209 struct radv_sample_locations_state *sample_locs)
5210 {
5211 if (!radv_image_has_htile(image))
5212 return;
5213
5214 if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
5215 radv_initialize_htile(cmd_buffer, image, range);
5216 } else if (!radv_layout_is_htile_compressed(image, src_layout, src_render_loop, src_queue_mask) &&
5217 radv_layout_is_htile_compressed(image, dst_layout, dst_render_loop, dst_queue_mask)) {
5218 radv_initialize_htile(cmd_buffer, image, range);
5219 } else if (radv_layout_is_htile_compressed(image, src_layout, src_render_loop, src_queue_mask) &&
5220 !radv_layout_is_htile_compressed(image, dst_layout, dst_render_loop, dst_queue_mask)) {
5221 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
5222 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
5223
5224 radv_decompress_depth_image_inplace(cmd_buffer, image, range,
5225 sample_locs);
5226
5227 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
5228 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
5229 }
5230 }
5231
5232 static void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
5233 struct radv_image *image,
5234 const VkImageSubresourceRange *range,
5235 uint32_t value)
5236 {
5237 struct radv_cmd_state *state = &cmd_buffer->state;
5238
5239 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
5240 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
5241
5242 state->flush_bits |= radv_clear_cmask(cmd_buffer, image, range, value);
5243
5244 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
5245 }
5246
5247 void radv_initialize_fmask(struct radv_cmd_buffer *cmd_buffer,
5248 struct radv_image *image,
5249 const VkImageSubresourceRange *range)
5250 {
5251 struct radv_cmd_state *state = &cmd_buffer->state;
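	/* Clear values that put FMASK into its fully expanded state for
	 * 1, 2, 4 and 8 samples, indexed by log2(sample count).
	 */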
5252 static const uint32_t fmask_clear_values[4] = {
5253 0x00000000,
5254 0x02020202,
5255 0xE4E4E4E4,
5256 0x76543210
5257 };
5258 uint32_t log2_samples = util_logbase2(image->info.samples);
5259 uint32_t value = fmask_clear_values[log2_samples];
5260
5261 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
5262 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
5263
5264 state->flush_bits |= radv_clear_fmask(cmd_buffer, image, range, value);
5265
5266 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
5267 }
5268
5269 void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
5270 struct radv_image *image,
5271 const VkImageSubresourceRange *range, uint32_t value)
5272 {
5273 struct radv_cmd_state *state = &cmd_buffer->state;
5274 unsigned size = 0;
5275
5276 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
5277 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
5278
5279 state->flush_bits |= radv_clear_dcc(cmd_buffer, image, range, value);
5280
5281 if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX8) {
5282 /* When DCC is enabled with mipmaps, some levels might not
5283 * support fast clears and we have to initialize them as "fully
5284 * expanded".
5285 */
5286 /* Compute the size of all fast clearable DCC levels. */
5287 for (unsigned i = 0; i < image->planes[0].surface.num_dcc_levels; i++) {
5288 struct legacy_surf_level *surf_level =
5289 &image->planes[0].surface.u.legacy.level[i];
5290 unsigned dcc_fast_clear_size =
5291 surf_level->dcc_slice_fast_clear_size * image->info.array_size;
5292
5293 if (!dcc_fast_clear_size)
5294 break;
5295
5296 size = surf_level->dcc_offset + dcc_fast_clear_size;
5297 }
5298
5299 /* Initialize the mipmap levels without DCC. */
5300 if (size != image->planes[0].surface.dcc_size) {
5301 state->flush_bits |=
5302 radv_fill_buffer(cmd_buffer, image->bo,
5303 image->offset + image->dcc_offset + size,
5304 image->planes[0].surface.dcc_size - size,
5305 0xffffffff);
5306 }
5307 }
5308
5309 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
5310 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
5311 }
5312
5313 /**
5314 * Initialize DCC/FMASK/CMASK metadata for a color image.
5315 */
5316 static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer,
5317 struct radv_image *image,
5318 VkImageLayout src_layout,
5319 bool src_render_loop,
5320 VkImageLayout dst_layout,
5321 bool dst_render_loop,
5322 unsigned src_queue_mask,
5323 unsigned dst_queue_mask,
5324 const VkImageSubresourceRange *range)
5325 {
5326 if (radv_image_has_cmask(image)) {
5327 uint32_t value = 0xffffffffu; /* Fully expanded mode. */
5328
5329 /* TODO: clarify this. */
5330 if (radv_image_has_fmask(image)) {
5331 value = 0xccccccccu;
5332 }
5333
5334 radv_initialise_cmask(cmd_buffer, image, range, value);
5335 }
5336
5337 if (radv_image_has_fmask(image)) {
5338 radv_initialize_fmask(cmd_buffer, image, range);
5339 }
5340
5341 if (radv_dcc_enabled(image, range->baseMipLevel)) {
5342 uint32_t value = 0xffffffffu; /* Fully expanded mode. */
5343 bool need_decompress_pass = false;
5344
5345 if (radv_layout_dcc_compressed(cmd_buffer->device, image, dst_layout,
5346 dst_render_loop,
5347 dst_queue_mask)) {
5348 value = 0x20202020u;
5349 need_decompress_pass = true;
5350 }
5351
5352 radv_initialize_dcc(cmd_buffer, image, range, value);
5353
5354 radv_update_fce_metadata(cmd_buffer, image, range,
5355 need_decompress_pass);
5356 }
5357
5358 if (radv_image_has_cmask(image) ||
5359 radv_dcc_enabled(image, range->baseMipLevel)) {
5360 uint32_t color_values[2] = {};
5361 radv_set_color_clear_metadata(cmd_buffer, image, range,
5362 color_values);
5363 }
5364 }
5365
5366 /**
5367 * Handle color image transitions for DCC/FMASK/CMASK.
5368 */
5369 static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffer,
5370 struct radv_image *image,
5371 VkImageLayout src_layout,
5372 bool src_render_loop,
5373 VkImageLayout dst_layout,
5374 bool dst_render_loop,
5375 unsigned src_queue_mask,
5376 unsigned dst_queue_mask,
5377 const VkImageSubresourceRange *range)
5378 {
5379 if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
5380 radv_init_color_image_metadata(cmd_buffer, image,
5381 src_layout, src_render_loop,
5382 dst_layout, dst_render_loop,
5383 src_queue_mask, dst_queue_mask,
5384 range);
5385 return;
5386 }
5387
5388 if (radv_dcc_enabled(image, range->baseMipLevel)) {
5389 if (src_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
5390 radv_initialize_dcc(cmd_buffer, image, range, 0xffffffffu);
5391 } else if (radv_layout_dcc_compressed(cmd_buffer->device, image, src_layout, src_render_loop, src_queue_mask) &&
5392 !radv_layout_dcc_compressed(cmd_buffer->device, image, dst_layout, dst_render_loop, dst_queue_mask)) {
5393 radv_decompress_dcc(cmd_buffer, image, range);
5394 } else if (radv_layout_can_fast_clear(image, src_layout, src_render_loop, src_queue_mask) &&
5395 !radv_layout_can_fast_clear(image, dst_layout, dst_render_loop, dst_queue_mask)) {
5396 radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
5397 }
5398 } else if (radv_image_has_cmask(image) || radv_image_has_fmask(image)) {
5399 bool fce_eliminate = false, fmask_expand = false;
5400
5401 if (radv_layout_can_fast_clear(image, src_layout, src_render_loop, src_queue_mask) &&
5402 !radv_layout_can_fast_clear(image, dst_layout, dst_render_loop, dst_queue_mask)) {
5403 fce_eliminate = true;
5404 }
5405
5406 if (radv_image_has_fmask(image)) {
5407 if (src_layout != VK_IMAGE_LAYOUT_GENERAL &&
5408 dst_layout == VK_IMAGE_LAYOUT_GENERAL) {
5409 				/* An FMASK decompress is required before doing
5410 				 * an MSAA decompress using FMASK.
5411 */
5412 fmask_expand = true;
5413 }
5414 }
5415
5416 if (fce_eliminate || fmask_expand)
5417 radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
5418
5419 if (fmask_expand)
5420 radv_expand_fmask_image_inplace(cmd_buffer, image, range);
5421 }
5422 }
5423
5424 static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
5425 struct radv_image *image,
5426 VkImageLayout src_layout,
5427 bool src_render_loop,
5428 VkImageLayout dst_layout,
5429 bool dst_render_loop,
5430 uint32_t src_family,
5431 uint32_t dst_family,
5432 const VkImageSubresourceRange *range,
5433 struct radv_sample_locations_state *sample_locs)
5434 {
5435 if (image->exclusive && src_family != dst_family) {
5436 /* This is an acquire or a release operation and there will be
5437 * a corresponding release/acquire. Do the transition in the
5438 * most flexible queue. */
5439
5440 assert(src_family == cmd_buffer->queue_family_index ||
5441 dst_family == cmd_buffer->queue_family_index);
5442
5443 if (src_family == VK_QUEUE_FAMILY_EXTERNAL ||
5444 src_family == VK_QUEUE_FAMILY_FOREIGN_EXT)
5445 return;
5446
5447 if (cmd_buffer->queue_family_index == RADV_QUEUE_TRANSFER)
5448 return;
5449
5450 if (cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
5451 (src_family == RADV_QUEUE_GENERAL ||
5452 dst_family == RADV_QUEUE_GENERAL))
5453 return;
5454 }
5455
5456 if (src_layout == dst_layout)
5457 return;
5458
5459 unsigned src_queue_mask =
5460 radv_image_queue_family_mask(image, src_family,
5461 cmd_buffer->queue_family_index);
5462 unsigned dst_queue_mask =
5463 radv_image_queue_family_mask(image, dst_family,
5464 cmd_buffer->queue_family_index);
5465
5466 if (vk_format_is_depth(image->vk_format)) {
5467 radv_handle_depth_image_transition(cmd_buffer, image,
5468 src_layout, src_render_loop,
5469 dst_layout, dst_render_loop,
5470 src_queue_mask, dst_queue_mask,
5471 range, sample_locs);
5472 } else {
5473 radv_handle_color_image_transition(cmd_buffer, image,
5474 src_layout, src_render_loop,
5475 dst_layout, dst_render_loop,
5476 src_queue_mask, dst_queue_mask,
5477 range);
5478 }
5479 }
5480
5481 struct radv_barrier_info {
5482 uint32_t eventCount;
5483 const VkEvent *pEvents;
5484 VkPipelineStageFlags srcStageMask;
5485 VkPipelineStageFlags dstStageMask;
5486 };
5487
5488 static void
5489 radv_barrier(struct radv_cmd_buffer *cmd_buffer,
5490 uint32_t memoryBarrierCount,
5491 const VkMemoryBarrier *pMemoryBarriers,
5492 uint32_t bufferMemoryBarrierCount,
5493 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
5494 uint32_t imageMemoryBarrierCount,
5495 const VkImageMemoryBarrier *pImageMemoryBarriers,
5496 const struct radv_barrier_info *info)
5497 {
5498 struct radeon_cmdbuf *cs = cmd_buffer->cs;
5499 enum radv_cmd_flush_bits src_flush_bits = 0;
5500 enum radv_cmd_flush_bits dst_flush_bits = 0;
5501
5502 for (unsigned i = 0; i < info->eventCount; ++i) {
5503 RADV_FROM_HANDLE(radv_event, event, info->pEvents[i]);
5504 uint64_t va = radv_buffer_get_va(event->bo);
5505
5506 radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
5507
5508 ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
5509
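		/* Stall the CP until the event BO contains 1, i.e. until the
		 * matching vkCmdSetEvent() has been executed by the GPU.
		 */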
5510 radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, va, 1, 0xffffffff);
5511 assert(cmd_buffer->cs->cdw <= cdw_max);
5512 }
5513
5514 for (uint32_t i = 0; i < memoryBarrierCount; i++) {
5515 src_flush_bits |= radv_src_access_flush(cmd_buffer, pMemoryBarriers[i].srcAccessMask,
5516 NULL);
5517 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pMemoryBarriers[i].dstAccessMask,
5518 NULL);
5519 }
5520
5521 for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
5522 src_flush_bits |= radv_src_access_flush(cmd_buffer, pBufferMemoryBarriers[i].srcAccessMask,
5523 NULL);
5524 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pBufferMemoryBarriers[i].dstAccessMask,
5525 NULL);
5526 }
5527
5528 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
5529 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
5530
5531 src_flush_bits |= radv_src_access_flush(cmd_buffer, pImageMemoryBarriers[i].srcAccessMask,
5532 image);
5533 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pImageMemoryBarriers[i].dstAccessMask,
5534 image);
5535 }
5536
5537 /* The Vulkan spec 1.1.98 says:
5538 *
5539 * "An execution dependency with only
5540 * VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT in the destination stage mask
5541 * will only prevent that stage from executing in subsequently
5542 * submitted commands. As this stage does not perform any actual
5543 * execution, this is not observable - in effect, it does not delay
5544 * processing of subsequent commands. Similarly an execution dependency
5545 * with only VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT in the source stage mask
5546 * will effectively not wait for any prior commands to complete."
5547 */
5548 if (info->dstStageMask != VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT)
5549 radv_stage_flush(cmd_buffer, info->srcStageMask);
5550 cmd_buffer->state.flush_bits |= src_flush_bits;
5551
5552 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
5553 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
5554
5555 const struct VkSampleLocationsInfoEXT *sample_locs_info =
5556 vk_find_struct_const(pImageMemoryBarriers[i].pNext,
5557 SAMPLE_LOCATIONS_INFO_EXT);
5558 struct radv_sample_locations_state sample_locations = {};
5559
5560 if (sample_locs_info) {
5561 assert(image->flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT);
5562 sample_locations.per_pixel = sample_locs_info->sampleLocationsPerPixel;
5563 sample_locations.grid_size = sample_locs_info->sampleLocationGridSize;
5564 sample_locations.count = sample_locs_info->sampleLocationsCount;
5565 typed_memcpy(&sample_locations.locations[0],
5566 sample_locs_info->pSampleLocations,
5567 sample_locs_info->sampleLocationsCount);
5568 }
5569
5570 radv_handle_image_transition(cmd_buffer, image,
5571 pImageMemoryBarriers[i].oldLayout,
5572 false, /* Outside of a renderpass we are never in a renderloop */
5573 pImageMemoryBarriers[i].newLayout,
5574 false, /* Outside of a renderpass we are never in a renderloop */
5575 pImageMemoryBarriers[i].srcQueueFamilyIndex,
5576 pImageMemoryBarriers[i].dstQueueFamilyIndex,
5577 &pImageMemoryBarriers[i].subresourceRange,
5578 sample_locs_info ? &sample_locations : NULL);
5579 }
5580
5581 /* Make sure CP DMA is idle because the driver might have performed a
5582 * DMA operation for copying or filling buffers/images.
5583 */
5584 if (info->srcStageMask & (VK_PIPELINE_STAGE_TRANSFER_BIT |
5585 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT))
5586 si_cp_dma_wait_for_idle(cmd_buffer);
5587
5588 cmd_buffer->state.flush_bits |= dst_flush_bits;
5589 }
5590
5591 void radv_CmdPipelineBarrier(
5592 VkCommandBuffer commandBuffer,
5593 VkPipelineStageFlags srcStageMask,
5594 VkPipelineStageFlags destStageMask,
5595 VkBool32 byRegion,
5596 uint32_t memoryBarrierCount,
5597 const VkMemoryBarrier* pMemoryBarriers,
5598 uint32_t bufferMemoryBarrierCount,
5599 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
5600 uint32_t imageMemoryBarrierCount,
5601 const VkImageMemoryBarrier* pImageMemoryBarriers)
5602 {
5603 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5604 struct radv_barrier_info info;
5605
5606 info.eventCount = 0;
5607 info.pEvents = NULL;
5608 info.srcStageMask = srcStageMask;
5609 info.dstStageMask = destStageMask;
5610
5611 radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
5612 bufferMemoryBarrierCount, pBufferMemoryBarriers,
5613 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
5614 }
5615
5616
5617 static void write_event(struct radv_cmd_buffer *cmd_buffer,
5618 struct radv_event *event,
5619 VkPipelineStageFlags stageMask,
5620 unsigned value)
5621 {
5622 struct radeon_cmdbuf *cs = cmd_buffer->cs;
5623 uint64_t va = radv_buffer_get_va(event->bo);
5624
5625 si_emit_cache_flush(cmd_buffer);
5626
5627 radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
5628
5629 ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 21);
5630
5631 /* Flags that only require a top-of-pipe event. */
5632 VkPipelineStageFlags top_of_pipe_flags =
5633 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
5634
5635 /* Flags that only require a post-index-fetch event. */
5636 VkPipelineStageFlags post_index_fetch_flags =
5637 top_of_pipe_flags |
5638 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
5639 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
5640
5641 /* Make sure CP DMA is idle because the driver might have performed a
5642 * DMA operation for copying or filling buffers/images.
5643 */
5644 if (stageMask & (VK_PIPELINE_STAGE_TRANSFER_BIT |
5645 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT))
5646 si_cp_dma_wait_for_idle(cmd_buffer);
5647
5648 /* TODO: Emit EOS events for syncing PS/CS stages. */
5649
5650 if (!(stageMask & ~top_of_pipe_flags)) {
5651 /* Just need to sync the PFP engine. */
5652 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
5653 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
5654 S_370_WR_CONFIRM(1) |
5655 S_370_ENGINE_SEL(V_370_PFP));
5656 radeon_emit(cs, va);
5657 radeon_emit(cs, va >> 32);
5658 radeon_emit(cs, value);
5659 } else if (!(stageMask & ~post_index_fetch_flags)) {
5660 /* Sync ME because PFP reads index and indirect buffers. */
5661 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
5662 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
5663 S_370_WR_CONFIRM(1) |
5664 S_370_ENGINE_SEL(V_370_ME));
5665 radeon_emit(cs, va);
5666 radeon_emit(cs, va >> 32);
5667 radeon_emit(cs, value);
5668 } else {
5669 /* Otherwise, sync all prior GPU work using an EOP event. */
5670 si_cs_emit_write_event_eop(cs,
5671 cmd_buffer->device->physical_device->rad_info.chip_class,
5672 radv_cmd_buffer_uses_mec(cmd_buffer),
5673 V_028A90_BOTTOM_OF_PIPE_TS, 0,
5674 EOP_DST_SEL_MEM,
5675 EOP_DATA_SEL_VALUE_32BIT, va, value,
5676 cmd_buffer->gfx9_eop_bug_va);
5677 }
5678
5679 assert(cmd_buffer->cs->cdw <= cdw_max);
5680 }
5681
5682 void radv_CmdSetEvent(VkCommandBuffer commandBuffer,
5683 VkEvent _event,
5684 VkPipelineStageFlags stageMask)
5685 {
5686 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5687 RADV_FROM_HANDLE(radv_event, event, _event);
5688
5689 write_event(cmd_buffer, event, stageMask, 1);
5690 }
5691
5692 void radv_CmdResetEvent(VkCommandBuffer commandBuffer,
5693 VkEvent _event,
5694 VkPipelineStageFlags stageMask)
5695 {
5696 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5697 RADV_FROM_HANDLE(radv_event, event, _event);
5698
5699 write_event(cmd_buffer, event, stageMask, 0);
5700 }
5701
5702 void radv_CmdWaitEvents(VkCommandBuffer commandBuffer,
5703 uint32_t eventCount,
5704 const VkEvent* pEvents,
5705 VkPipelineStageFlags srcStageMask,
5706 VkPipelineStageFlags dstStageMask,
5707 uint32_t memoryBarrierCount,
5708 const VkMemoryBarrier* pMemoryBarriers,
5709 uint32_t bufferMemoryBarrierCount,
5710 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
5711 uint32_t imageMemoryBarrierCount,
5712 const VkImageMemoryBarrier* pImageMemoryBarriers)
5713 {
5714 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5715 struct radv_barrier_info info;
5716
5717 info.eventCount = eventCount;
5718 info.pEvents = pEvents;
5719 	info.srcStageMask = 0;
	info.dstStageMask = dstStageMask;
5720
5721 radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
5722 bufferMemoryBarrierCount, pBufferMemoryBarriers,
5723 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
5724 }
5725
5726
5727 void radv_CmdSetDeviceMask(VkCommandBuffer commandBuffer,
5728 uint32_t deviceMask)
5729 {
5730 /* No-op */
5731 }
5732
5733 /* VK_EXT_conditional_rendering */
5734 void radv_CmdBeginConditionalRenderingEXT(
5735 VkCommandBuffer commandBuffer,
5736 const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin)
5737 {
5738 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5739 RADV_FROM_HANDLE(radv_buffer, buffer, pConditionalRenderingBegin->buffer);
5740 struct radeon_cmdbuf *cs = cmd_buffer->cs;
5741 bool draw_visible = true;
5742 uint64_t pred_value = 0;
5743 uint64_t va, new_va;
5744 unsigned pred_offset;
5745
5746 va = radv_buffer_get_va(buffer->bo) + pConditionalRenderingBegin->offset;
5747
5748 /* By default, if the 32-bit value at offset in buffer memory is zero,
5749 * then the rendering commands are discarded, otherwise they are
5750 * executed as normal. If the inverted flag is set, all commands are
5751 	 * discarded if the value is non-zero.
5752 */
5753 if (pConditionalRenderingBegin->flags &
5754 VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT) {
5755 draw_visible = false;
5756 }
5757
5758 si_emit_cache_flush(cmd_buffer);
5759
5760 /* From the Vulkan spec 1.1.107:
5761 *
5762 * "If the 32-bit value at offset in buffer memory is zero, then the
5763 * rendering commands are discarded, otherwise they are executed as
5764 * normal. If the value of the predicate in buffer memory changes while
5765 * conditional rendering is active, the rendering commands may be
5766 * discarded in an implementation-dependent way. Some implementations
5767 * may latch the value of the predicate upon beginning conditional
5768 * rendering while others may read it before every rendering command."
5769 *
5770 * But, the AMD hardware treats the predicate as a 64-bit value which
5771 	 * means we need a workaround in the driver. Luckily, we are not required
5772 	 * to support the case where the value changes while predication is active.
5773 *
5774 * The workaround is as follows:
5775 	 * 1) allocate a 64-bit value in the upload BO and initialize it to 0
5776 * 2) copy the 32-bit predicate value to the upload BO
5777 * 3) use the new allocated VA address for predication
5778 *
5779 * Based on the conditionalrender demo, it's faster to do the COPY_DATA
5780 * in ME (+ sync PFP) instead of PFP.
5781 */
5782 radv_cmd_buffer_upload_data(cmd_buffer, 8, 16, &pred_value, &pred_offset);
5783
5784 new_va = radv_buffer_get_va(cmd_buffer->upload.upload_bo) + pred_offset;
5785
5786 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
5787 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
5788 COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) |
5789 COPY_DATA_WR_CONFIRM);
5790 radeon_emit(cs, va);
5791 radeon_emit(cs, va >> 32);
5792 radeon_emit(cs, new_va);
5793 radeon_emit(cs, new_va >> 32);
5794
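	/* Stall the PFP until the ME copy above has completed, so the
	 * predicate value is in place before predication is enabled below.
	 */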
5795 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
5796 radeon_emit(cs, 0);
5797
5798 /* Enable predication for this command buffer. */
5799 si_emit_set_predication_state(cmd_buffer, draw_visible, new_va);
5800 cmd_buffer->state.predicating = true;
5801
5802 /* Store conditional rendering user info. */
5803 cmd_buffer->state.predication_type = draw_visible;
5804 cmd_buffer->state.predication_va = new_va;
5805 }
5806
5807 void radv_CmdEndConditionalRenderingEXT(
5808 VkCommandBuffer commandBuffer)
5809 {
5810 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5811
5812 /* Disable predication for this command buffer. */
5813 si_emit_set_predication_state(cmd_buffer, false, 0);
5814 cmd_buffer->state.predicating = false;
5815
5816 /* Reset conditional rendering user info. */
5817 cmd_buffer->state.predication_type = -1;
5818 cmd_buffer->state.predication_va = 0;
5819 }
5820
5821 /* VK_EXT_transform_feedback */
5822 void radv_CmdBindTransformFeedbackBuffersEXT(
5823 VkCommandBuffer commandBuffer,
5824 uint32_t firstBinding,
5825 uint32_t bindingCount,
5826 const VkBuffer* pBuffers,
5827 const VkDeviceSize* pOffsets,
5828 const VkDeviceSize* pSizes)
5829 {
5830 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5831 struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
5832 uint8_t enabled_mask = 0;
5833
5834 assert(firstBinding + bindingCount <= MAX_SO_BUFFERS);
5835 for (uint32_t i = 0; i < bindingCount; i++) {
5836 uint32_t idx = firstBinding + i;
5837
5838 sb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
5839 sb[idx].offset = pOffsets[i];
5840 sb[idx].size = pSizes[i];
5841
5842 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
5843 sb[idx].buffer->bo);
5844
5845 enabled_mask |= 1 << idx;
5846 }
5847
5848 cmd_buffer->state.streamout.enabled_mask |= enabled_mask;
5849
5850 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_STREAMOUT_BUFFER;
5851 }
5852
5853 static void
5854 radv_emit_streamout_enable(struct radv_cmd_buffer *cmd_buffer)
5855 {
5856 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
5857 struct radeon_cmdbuf *cs = cmd_buffer->cs;
5858
5859 radeon_set_context_reg_seq(cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
5860 radeon_emit(cs,
5861 S_028B94_STREAMOUT_0_EN(so->streamout_enabled) |
5862 S_028B94_RAST_STREAM(0) |
5863 S_028B94_STREAMOUT_1_EN(so->streamout_enabled) |
5864 S_028B94_STREAMOUT_2_EN(so->streamout_enabled) |
5865 S_028B94_STREAMOUT_3_EN(so->streamout_enabled));
5866 radeon_emit(cs, so->hw_enabled_mask &
5867 so->enabled_stream_buffers_mask);
5868
5869 cmd_buffer->state.context_roll_without_scissor_emitted = true;
5870 }
5871
5872 static void
5873 radv_set_streamout_enable(struct radv_cmd_buffer *cmd_buffer, bool enable)
5874 {
5875 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
5876 bool old_streamout_enabled = so->streamout_enabled;
5877 uint32_t old_hw_enabled_mask = so->hw_enabled_mask;
5878
5879 so->streamout_enabled = enable;
5880
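	/* VGT_STRMOUT_BUFFER_CONFIG has one 4-bit buffer-enable field per
	 * vertex stream, so replicate the 4-bit buffer mask for all four
	 * streams (e.g. enabled_mask = 0x3 yields hw_enabled_mask = 0x3333).
	 */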
5881 so->hw_enabled_mask = so->enabled_mask |
5882 (so->enabled_mask << 4) |
5883 (so->enabled_mask << 8) |
5884 (so->enabled_mask << 12);
5885
5886 if (!cmd_buffer->device->physical_device->use_ngg_streamout &&
5887 ((old_streamout_enabled != so->streamout_enabled) ||
5888 (old_hw_enabled_mask != so->hw_enabled_mask)))
5889 radv_emit_streamout_enable(cmd_buffer);
5890
5891 if (cmd_buffer->device->physical_device->use_ngg_streamout)
5892 cmd_buffer->gds_needed = true;
5893 }
5894
5895 static void radv_flush_vgt_streamout(struct radv_cmd_buffer *cmd_buffer)
5896 {
5897 struct radeon_cmdbuf *cs = cmd_buffer->cs;
5898 unsigned reg_strmout_cntl;
5899
5900 /* The register is at different places on different ASICs. */
5901 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
5902 reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
5903 radeon_set_uconfig_reg(cs, reg_strmout_cntl, 0);
5904 } else {
5905 reg_strmout_cntl = R_0084FC_CP_STRMOUT_CNTL;
5906 radeon_set_config_reg(cs, reg_strmout_cntl, 0);
5907 }
5908
5909 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
5910 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0));
5911
5912 radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
5913 radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
5914 radeon_emit(cs, reg_strmout_cntl >> 2); /* register */
5915 radeon_emit(cs, 0);
5916 radeon_emit(cs, S_0084FC_OFFSET_UPDATE_DONE(1)); /* reference value */
5917 radeon_emit(cs, S_0084FC_OFFSET_UPDATE_DONE(1)); /* mask */
5918 radeon_emit(cs, 4); /* poll interval */
5919 }
5920
5921 static void
5922 radv_emit_streamout_begin(struct radv_cmd_buffer *cmd_buffer,
5923 uint32_t firstCounterBuffer,
5924 uint32_t counterBufferCount,
5925 const VkBuffer *pCounterBuffers,
5926 const VkDeviceSize *pCounterBufferOffsets)
5927
5928 {
5929 struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
5930 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
5931 struct radeon_cmdbuf *cs = cmd_buffer->cs;
5932 uint32_t i;
5933
5934 radv_flush_vgt_streamout(cmd_buffer);
5935
5936 assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
5937 for_each_bit(i, so->enabled_mask) {
5938 int32_t counter_buffer_idx = i - firstCounterBuffer;
5939 if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
5940 counter_buffer_idx = -1;
5941
5942 /* AMD GCN binds streamout buffers as shader resources.
5943 * VGT only counts primitives and tells the shader through
5944 * SGPRs what to do.
5945 */
5946 radeon_set_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 2);
5947 radeon_emit(cs, sb[i].size >> 2); /* BUFFER_SIZE (in DW) */
5948 radeon_emit(cs, so->stride_in_dw[i]); /* VTX_STRIDE (in DW) */
5949
5950 cmd_buffer->state.context_roll_without_scissor_emitted = true;
5951
5952 if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) {
5953 /* The array of counter buffers is optional. */
5954 RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
5955 uint64_t va = radv_buffer_get_va(buffer->bo);
5956
5957 va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx];
5958
5959 /* Append */
5960 radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
5961 radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
5962 STRMOUT_DATA_TYPE(1) | /* offset in bytes */
5963 STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM)); /* control */
5964 radeon_emit(cs, 0); /* unused */
5965 radeon_emit(cs, 0); /* unused */
5966 radeon_emit(cs, va); /* src address lo */
5967 radeon_emit(cs, va >> 32); /* src address hi */
5968
5969 radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
5970 } else {
5971 /* Start from the beginning. */
5972 radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
5973 radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
5974 STRMOUT_DATA_TYPE(1) | /* offset in bytes */
5975 STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET)); /* control */
5976 radeon_emit(cs, 0); /* unused */
5977 radeon_emit(cs, 0); /* unused */
5978 radeon_emit(cs, 0); /* unused */
5979 radeon_emit(cs, 0); /* unused */
5980 }
5981 }
5982
5983 radv_set_streamout_enable(cmd_buffer, true);
5984 }
5985
5986 static void
5987 gfx10_emit_streamout_begin(struct radv_cmd_buffer *cmd_buffer,
5988 uint32_t firstCounterBuffer,
5989 uint32_t counterBufferCount,
5990 const VkBuffer *pCounterBuffers,
5991 const VkDeviceSize *pCounterBufferOffsets)
5992 {
5993 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
5994 unsigned last_target = util_last_bit(so->enabled_mask) - 1;
5995 struct radeon_cmdbuf *cs = cmd_buffer->cs;
5996 uint32_t i;
5997
5998 assert(cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10);
5999 assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
6000
6001 /* Sync because the next streamout operation will overwrite GDS and we
6002 * have to make sure it's idle.
6003 * TODO: Improve by tracking if there is a streamout operation in
6004 * flight.
6005 */
6006 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
6007 si_emit_cache_flush(cmd_buffer);
6008
6009 for_each_bit(i, so->enabled_mask) {
6010 int32_t counter_buffer_idx = i - firstCounterBuffer;
6011 if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
6012 counter_buffer_idx = -1;
6013
6014 bool append = counter_buffer_idx >= 0 &&
6015 pCounterBuffers && pCounterBuffers[counter_buffer_idx];
6016 uint64_t va = 0;
6017
6018 if (append) {
6019 RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
6020
6021 va += radv_buffer_get_va(buffer->bo);
6022 va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx];
6023
6024 radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
6025 }
6026
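		/* Load the 32-bit "buffer filled size" for target i into GDS,
		 * either from the counter buffer (append) or as the immediate
		 * value 0 (start from the beginning). Only the last transfer
		 * requests CP_SYNC and a write confirmation so the copies can
		 * be issued back to back.
		 */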
6027 radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
6028 radeon_emit(cs, S_411_SRC_SEL(append ? V_411_SRC_ADDR_TC_L2 : V_411_DATA) |
6029 S_411_DST_SEL(V_411_GDS) |
6030 S_411_CP_SYNC(i == last_target));
6031 radeon_emit(cs, va);
6032 radeon_emit(cs, va >> 32);
6033 radeon_emit(cs, 4 * i); /* destination in GDS */
6034 radeon_emit(cs, 0);
6035 radeon_emit(cs, S_414_BYTE_COUNT_GFX9(4) |
6036 S_414_DISABLE_WR_CONFIRM_GFX9(i != last_target));
6037 }
6038
6039 radv_set_streamout_enable(cmd_buffer, true);
6040 }
6041
6042 void radv_CmdBeginTransformFeedbackEXT(
6043 VkCommandBuffer commandBuffer,
6044 uint32_t firstCounterBuffer,
6045 uint32_t counterBufferCount,
6046 const VkBuffer* pCounterBuffers,
6047 const VkDeviceSize* pCounterBufferOffsets)
6048 {
6049 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
6050
6051 if (cmd_buffer->device->physical_device->use_ngg_streamout) {
6052 gfx10_emit_streamout_begin(cmd_buffer,
6053 firstCounterBuffer, counterBufferCount,
6054 pCounterBuffers, pCounterBufferOffsets);
6055 } else {
6056 radv_emit_streamout_begin(cmd_buffer,
6057 firstCounterBuffer, counterBufferCount,
6058 pCounterBuffers, pCounterBufferOffsets);
6059 }
6060 }
6061
6062 static void
6063 radv_emit_streamout_end(struct radv_cmd_buffer *cmd_buffer,
6064 uint32_t firstCounterBuffer,
6065 uint32_t counterBufferCount,
6066 const VkBuffer *pCounterBuffers,
6067 const VkDeviceSize *pCounterBufferOffsets)
6068 {
6069 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
6070 struct radeon_cmdbuf *cs = cmd_buffer->cs;
6071 uint32_t i;
6072
6073 radv_flush_vgt_streamout(cmd_buffer);
6074
6075 assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
6076 for_each_bit(i, so->enabled_mask) {
6077 int32_t counter_buffer_idx = i - firstCounterBuffer;
6078 if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
6079 counter_buffer_idx = -1;
6080
6081 if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) {
6082 			/* The array of counter buffers is optional. */
6083 RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
6084 uint64_t va = radv_buffer_get_va(buffer->bo);
6085
6086 va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx];
6087
6088 radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
6089 radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
6090 STRMOUT_DATA_TYPE(1) | /* offset in bytes */
6091 STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
6092 STRMOUT_STORE_BUFFER_FILLED_SIZE); /* control */
6093 radeon_emit(cs, va); /* dst address lo */
6094 radeon_emit(cs, va >> 32); /* dst address hi */
6095 radeon_emit(cs, 0); /* unused */
6096 radeon_emit(cs, 0); /* unused */
6097
6098 radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
6099 }
6100
6101 /* Deactivate transform feedback by zeroing the buffer size.
6102 * The counters (primitives generated, primitives emitted) may
6103 		 * be enabled even if there is no buffer bound. This ensures
6104 * that the primitives-emitted query won't increment.
6105 */
6106 radeon_set_context_reg(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 0);
6107
6108 cmd_buffer->state.context_roll_without_scissor_emitted = true;
6109 }
6110
6111 radv_set_streamout_enable(cmd_buffer, false);
6112 }
6113
6114 static void
6115 gfx10_emit_streamout_end(struct radv_cmd_buffer *cmd_buffer,
6116 uint32_t firstCounterBuffer,
6117 uint32_t counterBufferCount,
6118 const VkBuffer *pCounterBuffers,
6119 const VkDeviceSize *pCounterBufferOffsets)
6120 {
6121 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
6122 struct radeon_cmdbuf *cs = cmd_buffer->cs;
6123 uint32_t i;
6124
6125 assert(cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10);
6126 assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
6127
6128 for_each_bit(i, so->enabled_mask) {
6129 int32_t counter_buffer_idx = i - firstCounterBuffer;
6130 if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
6131 counter_buffer_idx = -1;
6132
6133 if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) {
6134 			/* The array of counter buffers is optional. */
6135 RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
6136 uint64_t va = radv_buffer_get_va(buffer->bo);
6137
6138 va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx];
6139
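			/* Flush the GDS counter for target i back to the counter
			 * buffer with an end-of-pipe PS_DONE event, so the final
			 * filled size can be read back later (e.g. by a subsequent
			 * vkCmdBeginTransformFeedbackEXT).
			 */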
6140 si_cs_emit_write_event_eop(cs,
6141 cmd_buffer->device->physical_device->rad_info.chip_class,
6142 radv_cmd_buffer_uses_mec(cmd_buffer),
6143 V_028A90_PS_DONE, 0,
6144 EOP_DST_SEL_TC_L2,
6145 EOP_DATA_SEL_GDS,
6146 va, EOP_DATA_GDS(i, 1), 0);
6147
6148 radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
6149 }
6150 }
6151
6152 radv_set_streamout_enable(cmd_buffer, false);
6153 }
6154
6155 void radv_CmdEndTransformFeedbackEXT(
6156 VkCommandBuffer commandBuffer,
6157 uint32_t firstCounterBuffer,
6158 uint32_t counterBufferCount,
6159 const VkBuffer* pCounterBuffers,
6160 const VkDeviceSize* pCounterBufferOffsets)
6161 {
6162 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
6163
6164 if (cmd_buffer->device->physical_device->use_ngg_streamout) {
6165 gfx10_emit_streamout_end(cmd_buffer,
6166 firstCounterBuffer, counterBufferCount,
6167 pCounterBuffers, pCounterBufferOffsets);
6168 } else {
6169 radv_emit_streamout_end(cmd_buffer,
6170 firstCounterBuffer, counterBufferCount,
6171 pCounterBuffers, pCounterBufferOffsets);
6172 }
6173 }
6174
6175 void radv_CmdDrawIndirectByteCountEXT(
6176 VkCommandBuffer commandBuffer,
6177 uint32_t instanceCount,
6178 uint32_t firstInstance,
6179 VkBuffer _counterBuffer,
6180 VkDeviceSize counterBufferOffset,
6181 uint32_t counterOffset,
6182 uint32_t vertexStride)
6183 {
6184 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
6185 RADV_FROM_HANDLE(radv_buffer, counterBuffer, _counterBuffer);
6186 struct radv_draw_info info = {};
6187
6188 info.instance_count = instanceCount;
6189 info.first_instance = firstInstance;
6190 info.strmout_buffer = counterBuffer;
6191 info.strmout_buffer_offset = counterBufferOffset;
6192 info.stride = vertexStride;
6193
6194 radv_draw(cmd_buffer, &info);
6195 }
6196
6197 /* VK_AMD_buffer_marker */
6198 void radv_CmdWriteBufferMarkerAMD(
6199 VkCommandBuffer commandBuffer,
6200 VkPipelineStageFlagBits pipelineStage,
6201 VkBuffer dstBuffer,
6202 VkDeviceSize dstOffset,
6203 uint32_t marker)
6204 {
6205 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
6206 RADV_FROM_HANDLE(radv_buffer, buffer, dstBuffer);
6207 struct radeon_cmdbuf *cs = cmd_buffer->cs;
6208 uint64_t va = radv_buffer_get_va(buffer->bo) + dstOffset;
6209
6210 si_emit_cache_flush(cmd_buffer);
6211
6212 ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 12);
6213
6214 if (!(pipelineStage & ~VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT)) {
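		/* Top-of-pipe only: write the marker immediately from the CP
		 * without waiting for previous work to complete.
		 */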
6215 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
6216 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) |
6217 COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) |
6218 COPY_DATA_WR_CONFIRM);
6219 radeon_emit(cs, marker);
6220 radeon_emit(cs, 0);
6221 radeon_emit(cs, va);
6222 radeon_emit(cs, va >> 32);
6223 } else {
6224 si_cs_emit_write_event_eop(cs,
6225 cmd_buffer->device->physical_device->rad_info.chip_class,
6226 radv_cmd_buffer_uses_mec(cmd_buffer),
6227 V_028A90_BOTTOM_OF_PIPE_TS, 0,
6228 EOP_DST_SEL_MEM,
6229 EOP_DATA_SEL_VALUE_32BIT,
6230 va, marker,
6231 cmd_buffer->gfx9_eop_bug_va);
6232 }
6233
6234 assert(cmd_buffer->cs->cdw <= cdw_max);
6235 }