radv: initialize FMASK for images in fully expanded mode
[mesa.git] / src/amd/vulkan/radv_cmd_buffer.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 #include "radv_private.h"
29 #include "radv_radeon_winsys.h"
30 #include "radv_shader.h"
31 #include "radv_cs.h"
32 #include "sid.h"
33 #include "gfx9d.h"
34 #include "vk_format.h"
35 #include "radv_debug.h"
36 #include "radv_meta.h"
37
38 #include "ac_debug.h"
39
40 enum {
41 RADV_PREFETCH_VBO_DESCRIPTORS = (1 << 0),
42 RADV_PREFETCH_VS = (1 << 1),
43 RADV_PREFETCH_TCS = (1 << 2),
44 RADV_PREFETCH_TES = (1 << 3),
45 RADV_PREFETCH_GS = (1 << 4),
46 RADV_PREFETCH_PS = (1 << 5),
47 RADV_PREFETCH_SHADERS = (RADV_PREFETCH_VS |
48 RADV_PREFETCH_TCS |
49 RADV_PREFETCH_TES |
50 RADV_PREFETCH_GS |
51 RADV_PREFETCH_PS)
52 };
53
54 static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
55 struct radv_image *image,
56 VkImageLayout src_layout,
57 VkImageLayout dst_layout,
58 uint32_t src_family,
59 uint32_t dst_family,
60 const VkImageSubresourceRange *range);
61
62 const struct radv_dynamic_state default_dynamic_state = {
63 .viewport = {
64 .count = 0,
65 },
66 .scissor = {
67 .count = 0,
68 },
69 .line_width = 1.0f,
70 .depth_bias = {
71 .bias = 0.0f,
72 .clamp = 0.0f,
73 .slope = 0.0f,
74 },
75 .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
76 .depth_bounds = {
77 .min = 0.0f,
78 .max = 1.0f,
79 },
80 .stencil_compare_mask = {
81 .front = ~0u,
82 .back = ~0u,
83 },
84 .stencil_write_mask = {
85 .front = ~0u,
86 .back = ~0u,
87 },
88 .stencil_reference = {
89 .front = 0u,
90 .back = 0u,
91 },
92 };
93
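/**
 * Copy the pipeline's dynamic state into the command buffer and mark only the
 * values that actually changed as dirty, so unchanged registers are not
 * re-emitted on the next flush.
 */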
94 static void
95 radv_bind_dynamic_state(struct radv_cmd_buffer *cmd_buffer,
96 const struct radv_dynamic_state *src)
97 {
98 struct radv_dynamic_state *dest = &cmd_buffer->state.dynamic;
99 uint32_t copy_mask = src->mask;
100 uint32_t dest_mask = 0;
101
102 /* Make sure to copy the number of viewports/scissors because they can
103 * only be specified at pipeline creation time.
104 */
105 dest->viewport.count = src->viewport.count;
106 dest->scissor.count = src->scissor.count;
107 dest->discard_rectangle.count = src->discard_rectangle.count;
108
109 if (copy_mask & RADV_DYNAMIC_VIEWPORT) {
110 if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
111 src->viewport.count * sizeof(VkViewport))) {
112 typed_memcpy(dest->viewport.viewports,
113 src->viewport.viewports,
114 src->viewport.count);
115 dest_mask |= RADV_DYNAMIC_VIEWPORT;
116 }
117 }
118
119 if (copy_mask & RADV_DYNAMIC_SCISSOR) {
120 if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
121 src->scissor.count * sizeof(VkRect2D))) {
122 typed_memcpy(dest->scissor.scissors,
123 src->scissor.scissors, src->scissor.count);
124 dest_mask |= RADV_DYNAMIC_SCISSOR;
125 }
126 }
127
128 if (copy_mask & RADV_DYNAMIC_LINE_WIDTH) {
129 if (dest->line_width != src->line_width) {
130 dest->line_width = src->line_width;
131 dest_mask |= RADV_DYNAMIC_LINE_WIDTH;
132 }
133 }
134
135 if (copy_mask & RADV_DYNAMIC_DEPTH_BIAS) {
136 if (memcmp(&dest->depth_bias, &src->depth_bias,
137 sizeof(src->depth_bias))) {
138 dest->depth_bias = src->depth_bias;
139 dest_mask |= RADV_DYNAMIC_DEPTH_BIAS;
140 }
141 }
142
143 if (copy_mask & RADV_DYNAMIC_BLEND_CONSTANTS) {
144 if (memcmp(&dest->blend_constants, &src->blend_constants,
145 sizeof(src->blend_constants))) {
146 typed_memcpy(dest->blend_constants,
147 src->blend_constants, 4);
148 dest_mask |= RADV_DYNAMIC_BLEND_CONSTANTS;
149 }
150 }
151
152 if (copy_mask & RADV_DYNAMIC_DEPTH_BOUNDS) {
153 if (memcmp(&dest->depth_bounds, &src->depth_bounds,
154 sizeof(src->depth_bounds))) {
155 dest->depth_bounds = src->depth_bounds;
156 dest_mask |= RADV_DYNAMIC_DEPTH_BOUNDS;
157 }
158 }
159
160 if (copy_mask & RADV_DYNAMIC_STENCIL_COMPARE_MASK) {
161 if (memcmp(&dest->stencil_compare_mask,
162 &src->stencil_compare_mask,
163 sizeof(src->stencil_compare_mask))) {
164 dest->stencil_compare_mask = src->stencil_compare_mask;
165 dest_mask |= RADV_DYNAMIC_STENCIL_COMPARE_MASK;
166 }
167 }
168
169 if (copy_mask & RADV_DYNAMIC_STENCIL_WRITE_MASK) {
170 if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
171 sizeof(src->stencil_write_mask))) {
172 dest->stencil_write_mask = src->stencil_write_mask;
173 dest_mask |= RADV_DYNAMIC_STENCIL_WRITE_MASK;
174 }
175 }
176
177 if (copy_mask & RADV_DYNAMIC_STENCIL_REFERENCE) {
178 if (memcmp(&dest->stencil_reference, &src->stencil_reference,
179 sizeof(src->stencil_reference))) {
180 dest->stencil_reference = src->stencil_reference;
181 dest_mask |= RADV_DYNAMIC_STENCIL_REFERENCE;
182 }
183 }
184
185 if (copy_mask & RADV_DYNAMIC_DISCARD_RECTANGLE) {
186 if (memcmp(&dest->discard_rectangle.rectangles, &src->discard_rectangle.rectangles,
187 src->discard_rectangle.count * sizeof(VkRect2D))) {
188 typed_memcpy(dest->discard_rectangle.rectangles,
189 src->discard_rectangle.rectangles,
190 src->discard_rectangle.count);
191 dest_mask |= RADV_DYNAMIC_DISCARD_RECTANGLE;
192 }
193 }
194
195 cmd_buffer->state.dirty |= dest_mask;
196 }
197
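/**
 * Copy the transform feedback buffer strides and the enabled-buffer mask from
 * the pipeline's streamout shader, if it has one.
 */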
198 static void
199 radv_bind_streamout_state(struct radv_cmd_buffer *cmd_buffer,
200 struct radv_pipeline *pipeline)
201 {
202 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
203 struct radv_shader_info *info;
204
205 if (!pipeline->streamout_shader)
206 return;
207
208 info = &pipeline->streamout_shader->info.info;
209 for (int i = 0; i < MAX_SO_BUFFERS; i++)
210 so->stride_in_dw[i] = info->so.strides[i];
211
212 so->enabled_stream_buffers_mask = info->so.enabled_stream_buffers_mask;
213 }
214
215 bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer)
216 {
217 return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
218 cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
219 }
220
221 enum ring_type radv_queue_family_to_ring(int f) {
222 switch (f) {
223 case RADV_QUEUE_GENERAL:
224 return RING_GFX;
225 case RADV_QUEUE_COMPUTE:
226 return RING_COMPUTE;
227 case RADV_QUEUE_TRANSFER:
228 return RING_DMA;
229 default:
230 unreachable("Unknown queue family");
231 }
232 }
233
234 static VkResult radv_create_cmd_buffer(
235 struct radv_device * device,
236 struct radv_cmd_pool * pool,
237 VkCommandBufferLevel level,
238 VkCommandBuffer* pCommandBuffer)
239 {
240 struct radv_cmd_buffer *cmd_buffer;
241 unsigned ring;
242 cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
243 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
244 if (cmd_buffer == NULL)
245 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
246
247 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
248 cmd_buffer->device = device;
249 cmd_buffer->pool = pool;
250 cmd_buffer->level = level;
251
252 if (pool) {
253 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
254 cmd_buffer->queue_family_index = pool->queue_family_index;
255
256 } else {
257 /* Init the pool_link so we can safely call list_del when we destroy
258 * the command buffer
259 */
260 list_inithead(&cmd_buffer->pool_link);
261 cmd_buffer->queue_family_index = RADV_QUEUE_GENERAL;
262 }
263
264 ring = radv_queue_family_to_ring(cmd_buffer->queue_family_index);
265
266 cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
267 if (!cmd_buffer->cs) {
268 vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
269 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
270 }
271
272 *pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);
273
274 list_inithead(&cmd_buffer->upload.list);
275
276 return VK_SUCCESS;
277 }
278
279 static void
280 radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer)
281 {
282 list_del(&cmd_buffer->pool_link);
283
284 list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
285 &cmd_buffer->upload.list, list) {
286 cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
287 list_del(&up->list);
288 free(up);
289 }
290
291 if (cmd_buffer->upload.upload_bo)
292 cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
293 cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
294
295 for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
296 free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
297
298 vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
299 }
300
301 static VkResult
302 radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
303 {
304
305 cmd_buffer->device->ws->cs_reset(cmd_buffer->cs);
306
307 list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
308 &cmd_buffer->upload.list, list) {
309 cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
310 list_del(&up->list);
311 free(up);
312 }
313
314 cmd_buffer->push_constant_stages = 0;
315 cmd_buffer->scratch_size_needed = 0;
316 cmd_buffer->compute_scratch_size_needed = 0;
317 cmd_buffer->esgs_ring_size_needed = 0;
318 cmd_buffer->gsvs_ring_size_needed = 0;
319 cmd_buffer->tess_rings_needed = false;
320 cmd_buffer->sample_positions_needed = false;
321
322 if (cmd_buffer->upload.upload_bo)
323 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
324 cmd_buffer->upload.upload_bo);
325 cmd_buffer->upload.offset = 0;
326
327 cmd_buffer->record_result = VK_SUCCESS;
328
329 for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
330 cmd_buffer->descriptors[i].dirty = 0;
331 cmd_buffer->descriptors[i].valid = 0;
332 cmd_buffer->descriptors[i].push_dirty = false;
333 }
334
335 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
336 unsigned num_db = cmd_buffer->device->physical_device->rad_info.num_render_backends;
337 unsigned eop_bug_offset;
338 void *fence_ptr;
339
340 radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 0,
341 &cmd_buffer->gfx9_fence_offset,
342 &fence_ptr);
343 cmd_buffer->gfx9_fence_bo = cmd_buffer->upload.upload_bo;
344
345 /* Allocate a buffer for the EOP bug on GFX9. */
346 radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 0,
347 &eop_bug_offset, &fence_ptr);
348 cmd_buffer->gfx9_eop_bug_va =
349 radv_buffer_get_va(cmd_buffer->upload.upload_bo);
350 cmd_buffer->gfx9_eop_bug_va += eop_bug_offset;
351 }
352
353 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_INITIAL;
354
355 return cmd_buffer->record_result;
356 }
357
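/**
 * Replace the upload buffer with a larger BO (at least 16 KiB and at least
 * twice the current size). The old BO is kept on the upload list so it stays
 * alive until the command buffer is reset or destroyed.
 */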
358 static bool
359 radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer,
360 uint64_t min_needed)
361 {
362 uint64_t new_size;
363 struct radeon_winsys_bo *bo;
364 struct radv_cmd_buffer_upload *upload;
365 struct radv_device *device = cmd_buffer->device;
366
367 new_size = MAX2(min_needed, 16 * 1024);
368 new_size = MAX2(new_size, 2 * cmd_buffer->upload.size);
369
370 bo = device->ws->buffer_create(device->ws,
371 new_size, 4096,
372 RADEON_DOMAIN_GTT,
373 RADEON_FLAG_CPU_ACCESS|
374 RADEON_FLAG_NO_INTERPROCESS_SHARING |
375 RADEON_FLAG_32BIT);
376
377 if (!bo) {
378 cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
379 return false;
380 }
381
382 radv_cs_add_buffer(device->ws, cmd_buffer->cs, bo);
383 if (cmd_buffer->upload.upload_bo) {
384 upload = malloc(sizeof(*upload));
385
386 if (!upload) {
387 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
388 device->ws->buffer_destroy(bo);
389 return false;
390 }
391
392 memcpy(upload, &cmd_buffer->upload, sizeof(*upload));
393 list_add(&upload->list, &cmd_buffer->upload.list);
394 }
395
396 cmd_buffer->upload.upload_bo = bo;
397 cmd_buffer->upload.size = new_size;
398 cmd_buffer->upload.offset = 0;
399 cmd_buffer->upload.map = device->ws->buffer_map(cmd_buffer->upload.upload_bo);
400
401 if (!cmd_buffer->upload.map) {
402 cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
403 return false;
404 }
405
406 return true;
407 }
408
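/**
 * Suballocate "size" bytes with the requested alignment from the upload
 * buffer, growing it if needed. Returns the offset inside the upload BO and
 * a CPU pointer to the mapped range.
 */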
409 bool
410 radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer,
411 unsigned size,
412 unsigned alignment,
413 unsigned *out_offset,
414 void **ptr)
415 {
416 uint64_t offset = align(cmd_buffer->upload.offset, alignment);
417 if (offset + size > cmd_buffer->upload.size) {
418 if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size))
419 return false;
420 offset = 0;
421 }
422
423 *out_offset = offset;
424 *ptr = cmd_buffer->upload.map + offset;
425
426 cmd_buffer->upload.offset = offset + size;
427 return true;
428 }
429
430 bool
431 radv_cmd_buffer_upload_data(struct radv_cmd_buffer *cmd_buffer,
432 unsigned size, unsigned alignment,
433 const void *data, unsigned *out_offset)
434 {
435 uint8_t *ptr;
436
437 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size, alignment,
438 out_offset, (void **)&ptr))
439 return false;
440
441 if (ptr)
442 memcpy(ptr, data, size);
443
444 return true;
445 }
446
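/**
 * Emit a WRITE_DATA packet that makes the ME write "count" dwords to the
 * given GPU virtual address, with write confirmation.
 */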
447 static void
448 radv_emit_write_data_packet(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
449 unsigned count, const uint32_t *data)
450 {
451 struct radeon_cmdbuf *cs = cmd_buffer->cs;
452
453 radeon_check_space(cmd_buffer->device->ws, cs, 4 + count);
454
455 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
456 radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
457 S_370_WR_CONFIRM(1) |
458 S_370_ENGINE_SEL(V_370_ME));
459 radeon_emit(cs, va);
460 radeon_emit(cs, va >> 32);
461 radeon_emit_array(cs, data, count);
462 }
463
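/**
 * Bump the command buffer's trace id, write it to the device trace BO and
 * emit a NOP trace point. Only used when tracing is enabled for GPU hang
 * debugging.
 */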
464 void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer)
465 {
466 struct radv_device *device = cmd_buffer->device;
467 struct radeon_cmdbuf *cs = cmd_buffer->cs;
468 uint64_t va;
469
470 va = radv_buffer_get_va(device->trace_bo);
471 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
472 va += 4;
473
474 ++cmd_buffer->state.trace_id;
475 radv_emit_write_data_packet(cmd_buffer, va, 1,
476 &cmd_buffer->state.trace_id);
477
478 radeon_check_space(cmd_buffer->device->ws, cs, 2);
479
480 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
481 radeon_emit(cs, AC_ENCODE_TRACE_POINT(cmd_buffer->state.trace_id));
482 }
483
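/**
 * With RADV_DEBUG=syncshaders, force the graphics or compute engine to idle
 * after every draw or dispatch; also emit a trace point when the device
 * trace BO is present.
 */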
484 static void
485 radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer,
486 enum radv_cmd_flush_bits flags)
487 {
488 if (cmd_buffer->device->instance->debug_flags & RADV_DEBUG_SYNC_SHADERS) {
489 uint32_t *ptr = NULL;
490 uint64_t va = 0;
491
492 assert(flags & (RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
493 RADV_CMD_FLAG_CS_PARTIAL_FLUSH));
494
495 if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
496 va = radv_buffer_get_va(cmd_buffer->gfx9_fence_bo) +
497 cmd_buffer->gfx9_fence_offset;
498 ptr = &cmd_buffer->gfx9_fence_idx;
499 }
500
501 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 4);
502
503 /* Force wait for graphics or compute engines to be idle. */
504 si_cs_emit_cache_flush(cmd_buffer->cs,
505 cmd_buffer->device->physical_device->rad_info.chip_class,
506 ptr, va,
507 radv_cmd_buffer_uses_mec(cmd_buffer),
508 flags, cmd_buffer->gfx9_eop_bug_va);
509 }
510
511 if (unlikely(cmd_buffer->device->trace_bo))
512 radv_cmd_buffer_trace_emit(cmd_buffer);
513 }
514
515 static void
516 radv_save_pipeline(struct radv_cmd_buffer *cmd_buffer,
517 struct radv_pipeline *pipeline, enum ring_type ring)
518 {
519 struct radv_device *device = cmd_buffer->device;
520 uint32_t data[2];
521 uint64_t va;
522
523 va = radv_buffer_get_va(device->trace_bo);
524
525 switch (ring) {
526 case RING_GFX:
527 va += 8;
528 break;
529 case RING_COMPUTE:
530 va += 16;
531 break;
532 default:
533 assert(!"invalid ring type");
534 }
535
536 data[0] = (uintptr_t)pipeline;
537 data[1] = (uintptr_t)pipeline >> 32;
538
539 radv_emit_write_data_packet(cmd_buffer, va, 2, data);
540 }
541
542 void radv_set_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
543 VkPipelineBindPoint bind_point,
544 struct radv_descriptor_set *set,
545 unsigned idx)
546 {
547 struct radv_descriptor_state *descriptors_state =
548 radv_get_descriptors_state(cmd_buffer, bind_point);
549
550 descriptors_state->sets[idx] = set;
551
552 descriptors_state->valid |= (1u << idx); /* active descriptors */
553 descriptors_state->dirty |= (1u << idx);
554 }
555
556 static void
557 radv_save_descriptors(struct radv_cmd_buffer *cmd_buffer,
558 VkPipelineBindPoint bind_point)
559 {
560 struct radv_descriptor_state *descriptors_state =
561 radv_get_descriptors_state(cmd_buffer, bind_point);
562 struct radv_device *device = cmd_buffer->device;
563 uint32_t data[MAX_SETS * 2] = {};
564 uint64_t va;
565 unsigned i;
566 va = radv_buffer_get_va(device->trace_bo) + 24;
567
568 for_each_bit(i, descriptors_state->valid) {
569 struct radv_descriptor_set *set = descriptors_state->sets[i];
570 data[i * 2] = (uintptr_t)set;
571 data[i * 2 + 1] = (uintptr_t)set >> 32;
572 }
573
574 radv_emit_write_data_packet(cmd_buffer, va, MAX_SETS * 2, data);
575 }
576
577 struct radv_userdata_info *
578 radv_lookup_user_sgpr(struct radv_pipeline *pipeline,
579 gl_shader_stage stage,
580 int idx)
581 {
582 struct radv_shader_variant *shader = radv_get_shader(pipeline, stage);
583 return &shader->info.user_sgprs_locs.shader_data[idx];
584 }
585
586 static void
587 radv_emit_userdata_address(struct radv_cmd_buffer *cmd_buffer,
588 struct radv_pipeline *pipeline,
589 gl_shader_stage stage,
590 int idx, uint64_t va)
591 {
592 struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
593 uint32_t base_reg = pipeline->user_data_0[stage];
594 if (loc->sgpr_idx == -1)
595 return;
596
597 assert(loc->num_sgprs == 1);
598 assert(!loc->indirect);
599
600 radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
601 base_reg + loc->sgpr_idx * 4, va, false);
602 }
603
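/**
 * Emit the user SGPR pointers for all descriptor sets that are both dirty
 * and valid for the given shader stage, batching consecutive sets into a
 * single packet.
 */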
604 static void
605 radv_emit_descriptor_pointers(struct radv_cmd_buffer *cmd_buffer,
606 struct radv_pipeline *pipeline,
607 struct radv_descriptor_state *descriptors_state,
608 gl_shader_stage stage)
609 {
610 struct radv_device *device = cmd_buffer->device;
611 struct radeon_cmdbuf *cs = cmd_buffer->cs;
612 uint32_t sh_base = pipeline->user_data_0[stage];
613 struct radv_userdata_locations *locs =
614 &pipeline->shaders[stage]->info.user_sgprs_locs;
615 unsigned mask = locs->descriptor_sets_enabled;
616
617 mask &= descriptors_state->dirty & descriptors_state->valid;
618
619 while (mask) {
620 int start, count;
621
622 u_bit_scan_consecutive_range(&mask, &start, &count);
623
624 struct radv_userdata_info *loc = &locs->descriptor_sets[start];
625 unsigned sh_offset = sh_base + loc->sgpr_idx * 4;
626
627 radv_emit_shader_pointer_head(cs, sh_offset, count, true);
628 for (int i = 0; i < count; i++) {
629 struct radv_descriptor_set *set =
630 descriptors_state->sets[start + i];
631
632 radv_emit_shader_pointer_body(device, cs, set->va, true);
633 }
634 }
635 }
636
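/**
 * Emit the MSAA registers and default sample locations when the sample count
 * differs from the previously bound pipeline, and flush DFSM where it is
 * allowed (GFX9).
 */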
637 static void
638 radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer,
639 struct radv_pipeline *pipeline)
640 {
641 int num_samples = pipeline->graphics.ms.num_samples;
642 struct radv_multisample_state *ms = &pipeline->graphics.ms;
643 struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;
644
645 if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.needs_sample_positions)
646 cmd_buffer->sample_positions_needed = true;
647
648 if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples)
649 return;
650
651 radeon_set_context_reg_seq(cmd_buffer->cs, R_028BDC_PA_SC_LINE_CNTL, 2);
652 radeon_emit(cmd_buffer->cs, ms->pa_sc_line_cntl);
653 radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_config);
654
655 radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0, ms->pa_sc_mode_cntl_0);
656
657 radv_cayman_emit_msaa_sample_locs(cmd_buffer->cs, num_samples);
658
659 /* GFX9: Flush DFSM when the AA mode changes. */
660 if (cmd_buffer->device->dfsm_allowed) {
661 radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
662 radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
663 }
664 }
665
666 static void
667 radv_emit_shader_prefetch(struct radv_cmd_buffer *cmd_buffer,
668 struct radv_shader_variant *shader)
669 {
670 uint64_t va;
671
672 if (!shader)
673 return;
674
675 va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
676
677 si_cp_dma_prefetch(cmd_buffer, va, shader->code_size);
678 }
679
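/**
 * Prefetch shader binaries and the vertex buffer descriptors into L2 with CP
 * DMA. With vertex_stage_only set, only the VS and the VBO descriptors are
 * prefetched so the first draw can start as early as possible.
 */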
680 static void
681 radv_emit_prefetch_L2(struct radv_cmd_buffer *cmd_buffer,
682 struct radv_pipeline *pipeline,
683 bool vertex_stage_only)
684 {
685 struct radv_cmd_state *state = &cmd_buffer->state;
686 uint32_t mask = state->prefetch_L2_mask;
687
688 if (vertex_stage_only) {
689 /* Fast prefetch path for starting draws as soon as possible.
690 */
691 mask = state->prefetch_L2_mask & (RADV_PREFETCH_VS |
692 RADV_PREFETCH_VBO_DESCRIPTORS);
693 }
694
695 if (mask & RADV_PREFETCH_VS)
696 radv_emit_shader_prefetch(cmd_buffer,
697 pipeline->shaders[MESA_SHADER_VERTEX]);
698
699 if (mask & RADV_PREFETCH_VBO_DESCRIPTORS)
700 si_cp_dma_prefetch(cmd_buffer, state->vb_va, state->vb_size);
701
702 if (mask & RADV_PREFETCH_TCS)
703 radv_emit_shader_prefetch(cmd_buffer,
704 pipeline->shaders[MESA_SHADER_TESS_CTRL]);
705
706 if (mask & RADV_PREFETCH_TES)
707 radv_emit_shader_prefetch(cmd_buffer,
708 pipeline->shaders[MESA_SHADER_TESS_EVAL]);
709
710 if (mask & RADV_PREFETCH_GS) {
711 radv_emit_shader_prefetch(cmd_buffer,
712 pipeline->shaders[MESA_SHADER_GEOMETRY]);
713 radv_emit_shader_prefetch(cmd_buffer, pipeline->gs_copy_shader);
714 }
715
716 if (mask & RADV_PREFETCH_PS)
717 radv_emit_shader_prefetch(cmd_buffer,
718 pipeline->shaders[MESA_SHADER_FRAGMENT]);
719
720 state->prefetch_L2_mask &= ~mask;
721 }
722
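/**
 * Program the RB+ registers (SX_PS_DOWNCONVERT, SX_BLEND_OPT_EPSILON and
 * SX_BLEND_OPT_CONTROL) from the bound pipeline's color export formats and
 * the current subpass attachments.
 */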
723 static void
724 radv_emit_rbplus_state(struct radv_cmd_buffer *cmd_buffer)
725 {
726 if (!cmd_buffer->device->physical_device->rbplus_allowed)
727 return;
728
729 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
730 struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
731 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
732
733 unsigned sx_ps_downconvert = 0;
734 unsigned sx_blend_opt_epsilon = 0;
735 unsigned sx_blend_opt_control = 0;
736
737 for (unsigned i = 0; i < subpass->color_count; ++i) {
738 if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
739 sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
740 sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
741 continue;
742 }
743
744 int idx = subpass->color_attachments[i].attachment;
745 struct radv_color_buffer_info *cb = &framebuffer->attachments[idx].cb;
746
747 unsigned format = G_028C70_FORMAT(cb->cb_color_info);
748 unsigned swap = G_028C70_COMP_SWAP(cb->cb_color_info);
749 uint32_t spi_format = (pipeline->graphics.col_format >> (i * 4)) & 0xf;
750 uint32_t colormask = (pipeline->graphics.cb_target_mask >> (i * 4)) & 0xf;
751
752 bool has_alpha, has_rgb;
753
754 /* Set if RGB and A are present. */
755 has_alpha = !G_028C74_FORCE_DST_ALPHA_1(cb->cb_color_attrib);
756
757 if (format == V_028C70_COLOR_8 ||
758 format == V_028C70_COLOR_16 ||
759 format == V_028C70_COLOR_32)
760 has_rgb = !has_alpha;
761 else
762 has_rgb = true;
763
764 /* Check the colormask and export format. */
765 if (!(colormask & 0x7))
766 has_rgb = false;
767 if (!(colormask & 0x8))
768 has_alpha = false;
769
770 if (spi_format == V_028714_SPI_SHADER_ZERO) {
771 has_rgb = false;
772 has_alpha = false;
773 }
774
775 /* Disable value checking for disabled channels. */
776 if (!has_rgb)
777 sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
778 if (!has_alpha)
779 sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
780
781 /* Enable down-conversion for 32bpp and smaller formats. */
782 switch (format) {
783 case V_028C70_COLOR_8:
784 case V_028C70_COLOR_8_8:
785 case V_028C70_COLOR_8_8_8_8:
786 /* For 1 and 2-channel formats, use the superset thereof. */
787 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR ||
788 spi_format == V_028714_SPI_SHADER_UINT16_ABGR ||
789 spi_format == V_028714_SPI_SHADER_SINT16_ABGR) {
790 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_8_8_8_8 << (i * 4);
791 sx_blend_opt_epsilon |= V_028758_8BIT_FORMAT << (i * 4);
792 }
793 break;
794
795 case V_028C70_COLOR_5_6_5:
796 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
797 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_5_6_5 << (i * 4);
798 sx_blend_opt_epsilon |= V_028758_6BIT_FORMAT << (i * 4);
799 }
800 break;
801
802 case V_028C70_COLOR_1_5_5_5:
803 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
804 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_1_5_5_5 << (i * 4);
805 sx_blend_opt_epsilon |= V_028758_5BIT_FORMAT << (i * 4);
806 }
807 break;
808
809 case V_028C70_COLOR_4_4_4_4:
810 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
811 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_4_4_4_4 << (i * 4);
812 sx_blend_opt_epsilon |= V_028758_4BIT_FORMAT << (i * 4);
813 }
814 break;
815
816 case V_028C70_COLOR_32:
817 if (swap == V_028C70_SWAP_STD &&
818 spi_format == V_028714_SPI_SHADER_32_R)
819 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_R << (i * 4);
820 else if (swap == V_028C70_SWAP_ALT_REV &&
821 spi_format == V_028714_SPI_SHADER_32_AR)
822 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_A << (i * 4);
823 break;
824
825 case V_028C70_COLOR_16:
826 case V_028C70_COLOR_16_16:
827 /* For 1-channel formats, use the superset thereof. */
828 if (spi_format == V_028714_SPI_SHADER_UNORM16_ABGR ||
829 spi_format == V_028714_SPI_SHADER_SNORM16_ABGR ||
830 spi_format == V_028714_SPI_SHADER_UINT16_ABGR ||
831 spi_format == V_028714_SPI_SHADER_SINT16_ABGR) {
832 if (swap == V_028C70_SWAP_STD ||
833 swap == V_028C70_SWAP_STD_REV)
834 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_16_16_GR << (i * 4);
835 else
836 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_16_16_AR << (i * 4);
837 }
838 break;
839
840 case V_028C70_COLOR_10_11_11:
841 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
842 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_10_11_11 << (i * 4);
843 sx_blend_opt_epsilon |= V_028758_11BIT_FORMAT << (i * 4);
844 }
845 break;
846
847 case V_028C70_COLOR_2_10_10_10:
848 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
849 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_2_10_10_10 << (i * 4);
850 sx_blend_opt_epsilon |= V_028758_10BIT_FORMAT << (i * 4);
851 }
852 break;
853 }
854 }
855
856 for (unsigned i = subpass->color_count; i < 8; ++i) {
857 sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
858 sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
859 }
860 radeon_set_context_reg_seq(cmd_buffer->cs, R_028754_SX_PS_DOWNCONVERT, 3);
861 radeon_emit(cmd_buffer->cs, sx_ps_downconvert);
862 radeon_emit(cmd_buffer->cs, sx_blend_opt_epsilon);
863 radeon_emit(cmd_buffer->cs, sx_blend_opt_control);
864 }
865
866 static void
867 radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer)
868 {
869 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
870
871 if (!pipeline || cmd_buffer->state.emitted_pipeline == pipeline)
872 return;
873
874 radv_update_multisample_state(cmd_buffer, pipeline);
875
876 cmd_buffer->scratch_size_needed =
877 MAX2(cmd_buffer->scratch_size_needed,
878 pipeline->max_waves * pipeline->scratch_bytes_per_wave);
879
880 if (!cmd_buffer->state.emitted_pipeline ||
881 cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband !=
882 pipeline->graphics.can_use_guardband)
883 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
884
885 radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);
886
887 for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) {
888 if (!pipeline->shaders[i])
889 continue;
890
891 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
892 pipeline->shaders[i]->bo);
893 }
894
895 if (radv_pipeline_has_gs(pipeline))
896 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
897 pipeline->gs_copy_shader->bo);
898
899 if (unlikely(cmd_buffer->device->trace_bo))
900 radv_save_pipeline(cmd_buffer, pipeline, RING_GFX);
901
902 cmd_buffer->state.emitted_pipeline = pipeline;
903
904 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_PIPELINE;
905 }
906
907 static void
908 radv_emit_viewport(struct radv_cmd_buffer *cmd_buffer)
909 {
910 si_write_viewport(cmd_buffer->cs, 0, cmd_buffer->state.dynamic.viewport.count,
911 cmd_buffer->state.dynamic.viewport.viewports);
912 }
913
914 static void
915 radv_emit_scissor(struct radv_cmd_buffer *cmd_buffer)
916 {
917 uint32_t count = cmd_buffer->state.dynamic.scissor.count;
918
919 si_write_scissors(cmd_buffer->cs, 0, count,
920 cmd_buffer->state.dynamic.scissor.scissors,
921 cmd_buffer->state.dynamic.viewport.viewports,
922 cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband);
923 }
924
925 static void
926 radv_emit_discard_rectangle(struct radv_cmd_buffer *cmd_buffer)
927 {
928 if (!cmd_buffer->state.dynamic.discard_rectangle.count)
929 return;
930
931 radeon_set_context_reg_seq(cmd_buffer->cs, R_028210_PA_SC_CLIPRECT_0_TL,
932 cmd_buffer->state.dynamic.discard_rectangle.count * 2);
933 for (unsigned i = 0; i < cmd_buffer->state.dynamic.discard_rectangle.count; ++i) {
934 VkRect2D rect = cmd_buffer->state.dynamic.discard_rectangle.rectangles[i];
935 radeon_emit(cmd_buffer->cs, S_028210_TL_X(rect.offset.x) | S_028210_TL_Y(rect.offset.y));
936 radeon_emit(cmd_buffer->cs, S_028214_BR_X(rect.offset.x + rect.extent.width) |
937 S_028214_BR_Y(rect.offset.y + rect.extent.height));
938 }
939 }
940
941 static void
942 radv_emit_line_width(struct radv_cmd_buffer *cmd_buffer)
943 {
944 unsigned width = cmd_buffer->state.dynamic.line_width * 8;
945
946 radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL,
947 S_028A08_WIDTH(CLAMP(width, 0, 0xFFF)));
948 }
949
950 static void
951 radv_emit_blend_constants(struct radv_cmd_buffer *cmd_buffer)
952 {
953 struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
954
955 radeon_set_context_reg_seq(cmd_buffer->cs, R_028414_CB_BLEND_RED, 4);
956 radeon_emit_array(cmd_buffer->cs, (uint32_t *)d->blend_constants, 4);
957 }
958
959 static void
960 radv_emit_stencil(struct radv_cmd_buffer *cmd_buffer)
961 {
962 struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
963
964 radeon_set_context_reg_seq(cmd_buffer->cs,
965 R_028430_DB_STENCILREFMASK, 2);
966 radeon_emit(cmd_buffer->cs,
967 S_028430_STENCILTESTVAL(d->stencil_reference.front) |
968 S_028430_STENCILMASK(d->stencil_compare_mask.front) |
969 S_028430_STENCILWRITEMASK(d->stencil_write_mask.front) |
970 S_028430_STENCILOPVAL(1));
971 radeon_emit(cmd_buffer->cs,
972 S_028434_STENCILTESTVAL_BF(d->stencil_reference.back) |
973 S_028434_STENCILMASK_BF(d->stencil_compare_mask.back) |
974 S_028434_STENCILWRITEMASK_BF(d->stencil_write_mask.back) |
975 S_028434_STENCILOPVAL_BF(1));
976 }
977
978 static void
979 radv_emit_depth_bounds(struct radv_cmd_buffer *cmd_buffer)
980 {
981 struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
982
983 radeon_set_context_reg(cmd_buffer->cs, R_028020_DB_DEPTH_BOUNDS_MIN,
984 fui(d->depth_bounds.min));
985 radeon_set_context_reg(cmd_buffer->cs, R_028024_DB_DEPTH_BOUNDS_MAX,
986 fui(d->depth_bounds.max));
987 }
988
989 static void
990 radv_emit_depth_bias(struct radv_cmd_buffer *cmd_buffer)
991 {
992 struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
993 unsigned slope = fui(d->depth_bias.slope * 16.0f);
994 unsigned bias = fui(d->depth_bias.bias * cmd_buffer->state.offset_scale);
995
996
997 radeon_set_context_reg_seq(cmd_buffer->cs,
998 R_028B7C_PA_SU_POLY_OFFSET_CLAMP, 5);
999 radeon_emit(cmd_buffer->cs, fui(d->depth_bias.clamp)); /* CLAMP */
1000 radeon_emit(cmd_buffer->cs, slope); /* FRONT SCALE */
1001 radeon_emit(cmd_buffer->cs, bias); /* FRONT OFFSET */
1002 radeon_emit(cmd_buffer->cs, slope); /* BACK SCALE */
1003 radeon_emit(cmd_buffer->cs, bias); /* BACK OFFSET */
1004 }
1005
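/**
 * Emit the CB_COLOR* state for one color attachment, with DCC disabled when
 * the image layout does not allow compressed access.
 */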
1006 static void
1007 radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
1008 int index,
1009 struct radv_attachment_info *att,
1010 struct radv_image *image,
1011 VkImageLayout layout)
1012 {
1013 bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI;
1014 struct radv_color_buffer_info *cb = &att->cb;
1015 uint32_t cb_color_info = cb->cb_color_info;
1016
1017 if (!radv_layout_dcc_compressed(image, layout,
1018 radv_image_queue_family_mask(image,
1019 cmd_buffer->queue_family_index,
1020 cmd_buffer->queue_family_index))) {
1021 cb_color_info &= C_028C70_DCC_ENABLE;
1022 }
1023
1024 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
1025 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
1026 radeon_emit(cmd_buffer->cs, cb->cb_color_base);
1027 radeon_emit(cmd_buffer->cs, S_028C64_BASE_256B(cb->cb_color_base >> 32));
1028 radeon_emit(cmd_buffer->cs, cb->cb_color_attrib2);
1029 radeon_emit(cmd_buffer->cs, cb->cb_color_view);
1030 radeon_emit(cmd_buffer->cs, cb_color_info);
1031 radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
1032 radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
1033 radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
1034 radeon_emit(cmd_buffer->cs, S_028C80_BASE_256B(cb->cb_color_cmask >> 32));
1035 radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
1036 radeon_emit(cmd_buffer->cs, S_028C88_BASE_256B(cb->cb_color_fmask >> 32));
1037
1038 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, 2);
1039 radeon_emit(cmd_buffer->cs, cb->cb_dcc_base);
1040 radeon_emit(cmd_buffer->cs, S_028C98_BASE_256B(cb->cb_dcc_base >> 32));
1041
1042 radeon_set_context_reg(cmd_buffer->cs, R_0287A0_CB_MRT0_EPITCH + index * 4,
1043 S_0287A0_EPITCH(att->attachment->image->surface.u.gfx9.surf.epitch));
1044 } else {
1045 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
1046 radeon_emit(cmd_buffer->cs, cb->cb_color_base);
1047 radeon_emit(cmd_buffer->cs, cb->cb_color_pitch);
1048 radeon_emit(cmd_buffer->cs, cb->cb_color_slice);
1049 radeon_emit(cmd_buffer->cs, cb->cb_color_view);
1050 radeon_emit(cmd_buffer->cs, cb_color_info);
1051 radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
1052 radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
1053 radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
1054 radeon_emit(cmd_buffer->cs, cb->cb_color_cmask_slice);
1055 radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
1056 radeon_emit(cmd_buffer->cs, cb->cb_color_fmask_slice);
1057
1058 if (is_vi) { /* DCC BASE */
1059 radeon_set_context_reg(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, cb->cb_dcc_base);
1060 }
1061 }
1062
1063 if (radv_image_has_dcc(image)) {
1064 /* Drawing with DCC enabled also compresses colorbuffers. */
1065 radv_update_dcc_metadata(cmd_buffer, image, true);
1066 }
1067 }
1068
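/**
 * For TC-compatible HTILE images, re-emit DB_Z_INFO with ZRANGE_PRECISION
 * cleared to work around the TC-compat bug. When the last depth clear value
 * is not known at record time, the write is made conditional on the image's
 * tc_compat_zrange metadata using COND_EXEC.
 */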
1069 static void
1070 radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer,
1071 struct radv_ds_buffer_info *ds,
1072 struct radv_image *image, VkImageLayout layout,
1073 bool requires_cond_exec)
1074 {
1075 uint32_t db_z_info = ds->db_z_info;
1076 uint32_t db_z_info_reg;
1077
1078 if (!radv_image_is_tc_compat_htile(image))
1079 return;
1080
1081 if (!radv_layout_has_htile(image, layout,
1082 radv_image_queue_family_mask(image,
1083 cmd_buffer->queue_family_index,
1084 cmd_buffer->queue_family_index))) {
1085 db_z_info &= C_028040_TILE_SURFACE_ENABLE;
1086 }
1087
1088 db_z_info &= C_028040_ZRANGE_PRECISION;
1089
1090 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
1091 db_z_info_reg = R_028038_DB_Z_INFO;
1092 } else {
1093 db_z_info_reg = R_028040_DB_Z_INFO;
1094 }
1095
1096 /* When we don't know the last fast clear value, we need to emit a
1097 * conditional packet that will eventually skip the following
1098 * SET_CONTEXT_REG packet.
1099 */
1100 if (requires_cond_exec) {
1101 uint64_t va = radv_buffer_get_va(image->bo);
1102 va += image->offset + image->tc_compat_zrange_offset;
1103
1104 radeon_emit(cmd_buffer->cs, PKT3(PKT3_COND_EXEC, 3, 0));
1105 radeon_emit(cmd_buffer->cs, va);
1106 radeon_emit(cmd_buffer->cs, va >> 32);
1107 radeon_emit(cmd_buffer->cs, 0);
1108 radeon_emit(cmd_buffer->cs, 3); /* SET_CONTEXT_REG size */
1109 }
1110
1111 radeon_set_context_reg(cmd_buffer->cs, db_z_info_reg, db_z_info);
1112 }
1113
1114 static void
1115 radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer,
1116 struct radv_ds_buffer_info *ds,
1117 struct radv_image *image,
1118 VkImageLayout layout)
1119 {
1120 uint32_t db_z_info = ds->db_z_info;
1121 uint32_t db_stencil_info = ds->db_stencil_info;
1122
1123 if (!radv_layout_has_htile(image, layout,
1124 radv_image_queue_family_mask(image,
1125 cmd_buffer->queue_family_index,
1126 cmd_buffer->queue_family_index))) {
1127 db_z_info &= C_028040_TILE_SURFACE_ENABLE;
1128 db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
1129 }
1130
1131 radeon_set_context_reg(cmd_buffer->cs, R_028008_DB_DEPTH_VIEW, ds->db_depth_view);
1132 radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface);
1133
1134
1135 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
1136 radeon_set_context_reg_seq(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, 3);
1137 radeon_emit(cmd_buffer->cs, ds->db_htile_data_base);
1138 radeon_emit(cmd_buffer->cs, S_028018_BASE_HI(ds->db_htile_data_base >> 32));
1139 radeon_emit(cmd_buffer->cs, ds->db_depth_size);
1140
1141 radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 10);
1142 radeon_emit(cmd_buffer->cs, db_z_info); /* DB_Z_INFO */
1143 radeon_emit(cmd_buffer->cs, db_stencil_info); /* DB_STENCIL_INFO */
1144 radeon_emit(cmd_buffer->cs, ds->db_z_read_base); /* DB_Z_READ_BASE */
1145 radeon_emit(cmd_buffer->cs, S_028044_BASE_HI(ds->db_z_read_base >> 32)); /* DB_Z_READ_BASE_HI */
1146 radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base); /* DB_STENCIL_READ_BASE */
1147 radeon_emit(cmd_buffer->cs, S_02804C_BASE_HI(ds->db_stencil_read_base >> 32)); /* DB_STENCIL_READ_BASE_HI */
1148 radeon_emit(cmd_buffer->cs, ds->db_z_write_base); /* DB_Z_WRITE_BASE */
1149 radeon_emit(cmd_buffer->cs, S_028054_BASE_HI(ds->db_z_write_base >> 32)); /* DB_Z_WRITE_BASE_HI */
1150 radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base); /* DB_STENCIL_WRITE_BASE */
1151 radeon_emit(cmd_buffer->cs, S_02805C_BASE_HI(ds->db_stencil_write_base >> 32)); /* DB_STENCIL_WRITE_BASE_HI */
1152
1153 radeon_set_context_reg_seq(cmd_buffer->cs, R_028068_DB_Z_INFO2, 2);
1154 radeon_emit(cmd_buffer->cs, ds->db_z_info2);
1155 radeon_emit(cmd_buffer->cs, ds->db_stencil_info2);
1156 } else {
1157 radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base);
1158
1159 radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 9);
1160 radeon_emit(cmd_buffer->cs, ds->db_depth_info); /* R_02803C_DB_DEPTH_INFO */
1161 radeon_emit(cmd_buffer->cs, db_z_info); /* R_028040_DB_Z_INFO */
1162 radeon_emit(cmd_buffer->cs, db_stencil_info); /* R_028044_DB_STENCIL_INFO */
1163 radeon_emit(cmd_buffer->cs, ds->db_z_read_base); /* R_028048_DB_Z_READ_BASE */
1164 radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base); /* R_02804C_DB_STENCIL_READ_BASE */
1165 radeon_emit(cmd_buffer->cs, ds->db_z_write_base); /* R_028050_DB_Z_WRITE_BASE */
1166 radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base); /* R_028054_DB_STENCIL_WRITE_BASE */
1167 radeon_emit(cmd_buffer->cs, ds->db_depth_size); /* R_028058_DB_DEPTH_SIZE */
1168 radeon_emit(cmd_buffer->cs, ds->db_depth_slice); /* R_02805C_DB_DEPTH_SLICE */
1169
1170 }
1171
1172 /* Update the ZRANGE_PRECISION value for the TC-compat bug. */
1173 radv_update_zrange_precision(cmd_buffer, ds, image, layout, true);
1174
1175 radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
1176 ds->pa_su_poly_offset_db_fmt_cntl);
1177 }
1178
1179 /**
1180 * Update the fast clear depth/stencil values if the image is bound as a
1181 * depth/stencil buffer.
1182 */
1183 static void
1184 radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer,
1185 struct radv_image *image,
1186 VkClearDepthStencilValue ds_clear_value,
1187 VkImageAspectFlags aspects)
1188 {
1189 struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
1190 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
1191 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1192 struct radv_attachment_info *att;
1193 uint32_t att_idx;
1194
1195 if (!framebuffer || !subpass)
1196 return;
1197
1198 att_idx = subpass->depth_stencil_attachment.attachment;
1199 if (att_idx == VK_ATTACHMENT_UNUSED)
1200 return;
1201
1202 att = &framebuffer->attachments[att_idx];
1203 if (att->attachment->image != image)
1204 return;
1205
1206 radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 2);
1207 radeon_emit(cs, ds_clear_value.stencil);
1208 radeon_emit(cs, fui(ds_clear_value.depth));
1209
1210 /* Update the ZRANGE_PRECISION value for the TC-compat bug. This is
1211 * only needed when clearing Z to 0.0.
1212 */
1213 if ((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
1214 ds_clear_value.depth == 0.0) {
1215 VkImageLayout layout = subpass->depth_stencil_attachment.layout;
1216
1217 radv_update_zrange_precision(cmd_buffer, &att->ds, image,
1218 layout, false);
1219 }
1220 }
1221
1222 /**
1223 * Set the clear depth/stencil values to the image's metadata.
1224 */
1225 static void
1226 radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1227 struct radv_image *image,
1228 VkClearDepthStencilValue ds_clear_value,
1229 VkImageAspectFlags aspects)
1230 {
1231 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1232 uint64_t va = radv_buffer_get_va(image->bo);
1233 unsigned reg_offset = 0, reg_count = 0;
1234
1235 va += image->offset + image->clear_value_offset;
1236
1237 if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
1238 ++reg_count;
1239 } else {
1240 ++reg_offset;
1241 va += 4;
1242 }
1243 if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
1244 ++reg_count;
1245
1246 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
1247 radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
1248 S_370_WR_CONFIRM(1) |
1249 S_370_ENGINE_SEL(V_370_PFP));
1250 radeon_emit(cs, va);
1251 radeon_emit(cs, va >> 32);
1252 if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
1253 radeon_emit(cs, ds_clear_value.stencil);
1254 if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
1255 radeon_emit(cs, fui(ds_clear_value.depth));
1256 }
1257
1258 /**
1259 * Update the TC-compat metadata value for this image.
1260 */
1261 static void
1262 radv_set_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer,
1263 struct radv_image *image,
1264 uint32_t value)
1265 {
1266 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1267 uint64_t va = radv_buffer_get_va(image->bo);
1268 va += image->offset + image->tc_compat_zrange_offset;
1269
1270 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
1271 radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
1272 S_370_WR_CONFIRM(1) |
1273 S_370_ENGINE_SEL(V_370_PFP));
1274 radeon_emit(cs, va);
1275 radeon_emit(cs, va >> 32);
1276 radeon_emit(cs, value);
1277 }
1278
1279 static void
1280 radv_update_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer,
1281 struct radv_image *image,
1282 VkClearDepthStencilValue ds_clear_value)
1283 {
1284 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1285 uint64_t va = radv_buffer_get_va(image->bo);
1286 va += image->offset + image->tc_compat_zrange_offset;
1287 uint32_t cond_val;
1288
1289 /* Conditionally set DB_Z_INFO.ZRANGE_PRECISION to 0 when the last
1290 * depth clear value is 0.0f.
1291 */
1292 cond_val = ds_clear_value.depth == 0.0f ? UINT_MAX : 0;
1293
1294 radv_set_tc_compat_zrange_metadata(cmd_buffer, image, cond_val);
1295 }
1296
1297 /**
1298 * Update the clear depth/stencil values for this image.
1299 */
1300 void
1301 radv_update_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1302 struct radv_image *image,
1303 VkClearDepthStencilValue ds_clear_value,
1304 VkImageAspectFlags aspects)
1305 {
1306 assert(radv_image_has_htile(image));
1307
1308 radv_set_ds_clear_metadata(cmd_buffer, image, ds_clear_value, aspects);
1309
1310 if (radv_image_is_tc_compat_htile(image) &&
1311 (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
1312 radv_update_tc_compat_zrange_metadata(cmd_buffer, image,
1313 ds_clear_value);
1314 }
1315
1316 radv_update_bound_fast_clear_ds(cmd_buffer, image, ds_clear_value,
1317 aspects);
1318 }
1319
1320 /**
1321 * Load the clear depth/stencil values from the image's metadata.
1322 */
1323 static void
1324 radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1325 struct radv_image *image)
1326 {
1327 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1328 VkImageAspectFlags aspects = vk_format_aspects(image->vk_format);
1329 uint64_t va = radv_buffer_get_va(image->bo);
1330 unsigned reg_offset = 0, reg_count = 0;
1331
1332 va += image->offset + image->clear_value_offset;
1333
1334 if (!radv_image_has_htile(image))
1335 return;
1336
1337 if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
1338 ++reg_count;
1339 } else {
1340 ++reg_offset;
1341 va += 4;
1342 }
1343 if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
1344 ++reg_count;
1345
1346 uint32_t reg = R_028028_DB_STENCIL_CLEAR + 4 * reg_offset;
1347
1348 if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) {
1349 radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, 0));
1350 radeon_emit(cs, va);
1351 radeon_emit(cs, va >> 32);
1352 radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
1353 radeon_emit(cs, reg_count);
1354 } else {
1355 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1356 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
1357 COPY_DATA_DST_SEL(COPY_DATA_REG) |
1358 (reg_count == 2 ? COPY_DATA_COUNT_SEL : 0));
1359 radeon_emit(cs, va);
1360 radeon_emit(cs, va >> 32);
1361 radeon_emit(cs, reg >> 2);
1362 radeon_emit(cs, 0);
1363
1364 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
1365 radeon_emit(cs, 0);
1366 }
1367 }
1368
1369 /*
1370 * With DCC some colors don't require CMASK elimination before being
1371 * used as a texture. This sets a predicate value to determine if the
1372 * cmask eliminate is required.
1373 */
1374 void
1375 radv_update_fce_metadata(struct radv_cmd_buffer *cmd_buffer,
1376 struct radv_image *image, bool value)
1377 {
1378 uint64_t pred_val = value;
1379 uint64_t va = radv_buffer_get_va(image->bo);
1380 va += image->offset + image->fce_pred_offset;
1381
1382 assert(radv_image_has_dcc(image));
1383
1384 radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
1385 radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
1386 S_370_WR_CONFIRM(1) |
1387 S_370_ENGINE_SEL(V_370_PFP));
1388 radeon_emit(cmd_buffer->cs, va);
1389 radeon_emit(cmd_buffer->cs, va >> 32);
1390 radeon_emit(cmd_buffer->cs, pred_val);
1391 radeon_emit(cmd_buffer->cs, pred_val >> 32);
1392 }
1393
1394 /**
1395 * Update the DCC predicate to reflect the compression state.
1396 */
1397 void
1398 radv_update_dcc_metadata(struct radv_cmd_buffer *cmd_buffer,
1399 struct radv_image *image, bool value)
1400 {
1401 uint64_t pred_val = value;
1402 uint64_t va = radv_buffer_get_va(image->bo);
1403 va += image->offset + image->dcc_pred_offset;
1404
1405 assert(radv_image_has_dcc(image));
1406
1407 radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
1408 radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
1409 S_370_WR_CONFIRM(1) |
1410 S_370_ENGINE_SEL(V_370_PFP));
1411 radeon_emit(cmd_buffer->cs, va);
1412 radeon_emit(cmd_buffer->cs, va >> 32);
1413 radeon_emit(cmd_buffer->cs, pred_val);
1414 radeon_emit(cmd_buffer->cs, pred_val >> 32);
1415 }
1416
1417 /**
1418 * Update the fast clear color values if the image is bound as a color buffer.
1419 */
1420 static void
1421 radv_update_bound_fast_clear_color(struct radv_cmd_buffer *cmd_buffer,
1422 struct radv_image *image,
1423 int cb_idx,
1424 uint32_t color_values[2])
1425 {
1426 struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
1427 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
1428 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1429 struct radv_attachment_info *att;
1430 uint32_t att_idx;
1431
1432 if (!framebuffer || !subpass)
1433 return;
1434
1435 att_idx = subpass->color_attachments[cb_idx].attachment;
1436 if (att_idx == VK_ATTACHMENT_UNUSED)
1437 return;
1438
1439 att = &framebuffer->attachments[att_idx];
1440 if (att->attachment->image != image)
1441 return;
1442
1443 radeon_set_context_reg_seq(cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c, 2);
1444 radeon_emit(cs, color_values[0]);
1445 radeon_emit(cs, color_values[1]);
1446 }
1447
1448 /**
1449 * Set the clear color values to the image's metadata.
1450 */
1451 static void
1452 radv_set_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1453 struct radv_image *image,
1454 uint32_t color_values[2])
1455 {
1456 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1457 uint64_t va = radv_buffer_get_va(image->bo);
1458
1459 va += image->offset + image->clear_value_offset;
1460
1461 assert(radv_image_has_cmask(image) || radv_image_has_dcc(image));
1462
1463 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0));
1464 radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
1465 S_370_WR_CONFIRM(1) |
1466 S_370_ENGINE_SEL(V_370_PFP));
1467 radeon_emit(cs, va);
1468 radeon_emit(cs, va >> 32);
1469 radeon_emit(cs, color_values[0]);
1470 radeon_emit(cs, color_values[1]);
1471 }
1472
1473 /**
1474 * Update the clear color values for this image.
1475 */
1476 void
1477 radv_update_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1478 struct radv_image *image,
1479 int cb_idx,
1480 uint32_t color_values[2])
1481 {
1482 assert(radv_image_has_cmask(image) || radv_image_has_dcc(image));
1483
1484 radv_set_color_clear_metadata(cmd_buffer, image, color_values);
1485
1486 radv_update_bound_fast_clear_color(cmd_buffer, image, cb_idx,
1487 color_values);
1488 }
1489
1490 /**
1491 * Load the clear color values from the image's metadata.
1492 */
1493 static void
1494 radv_load_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1495 struct radv_image *image,
1496 int cb_idx)
1497 {
1498 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1499 uint64_t va = radv_buffer_get_va(image->bo);
1500
1501 va += image->offset + image->clear_value_offset;
1502
1503 if (!radv_image_has_cmask(image) && !radv_image_has_dcc(image))
1504 return;
1505
1506 uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c;
1507
1508 if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) {
1509 radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, cmd_buffer->state.predicating));
1510 radeon_emit(cs, va);
1511 radeon_emit(cs, va >> 32);
1512 radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
1513 radeon_emit(cs, 2);
1514 } else {
1515 /* TODO: Figure out how to use LOAD_CONTEXT_REG on SI/CIK. */
1516 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
1517 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
1518 COPY_DATA_DST_SEL(COPY_DATA_REG) |
1519 COPY_DATA_COUNT_SEL);
1520 radeon_emit(cs, va);
1521 radeon_emit(cs, va >> 32);
1522 radeon_emit(cs, reg >> 2);
1523 radeon_emit(cs, 0);
1524
1525 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
1526 radeon_emit(cs, 0);
1527 }
1528 }
1529
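/**
 * Emit the color and depth/stencil surface state for all attachments of the
 * current subpass, load the fast clear values from the image metadata and
 * program the DCC overwrite combiner watermark.
 */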
1530 static void
1531 radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
1532 {
1533 int i;
1534 struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
1535 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
1536 unsigned num_bpp64_colorbufs = 0;
1537
1538 /* this may happen for inherited secondary recording */
1539 if (!framebuffer)
1540 return;
1541
1542 for (i = 0; i < 8; ++i) {
1543 if (i >= subpass->color_count || subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
1544 radeon_set_context_reg(cmd_buffer->cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
1545 S_028C70_FORMAT(V_028C70_COLOR_INVALID));
1546 continue;
1547 }
1548
1549 int idx = subpass->color_attachments[i].attachment;
1550 struct radv_attachment_info *att = &framebuffer->attachments[idx];
1551 struct radv_image *image = att->attachment->image;
1552 VkImageLayout layout = subpass->color_attachments[i].layout;
1553
1554 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
1555
1556 assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
1557 radv_emit_fb_color_state(cmd_buffer, i, att, image, layout);
1558
1559 radv_load_color_clear_metadata(cmd_buffer, image, i);
1560
1561 if (image->surface.bpe >= 8)
1562 num_bpp64_colorbufs++;
1563 }
1564
1565 if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
1566 int idx = subpass->depth_stencil_attachment.attachment;
1567 VkImageLayout layout = subpass->depth_stencil_attachment.layout;
1568 struct radv_attachment_info *att = &framebuffer->attachments[idx];
1569 struct radv_image *image = att->attachment->image;
1570 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
1571 MAYBE_UNUSED uint32_t queue_mask = radv_image_queue_family_mask(image,
1572 cmd_buffer->queue_family_index,
1573 cmd_buffer->queue_family_index);
1574 /* We currently don't support writing decompressed HTILE */
1575 assert(radv_layout_has_htile(image, layout, queue_mask) ==
1576 radv_layout_is_htile_compressed(image, layout, queue_mask));
1577
1578 radv_emit_fb_ds_state(cmd_buffer, &att->ds, image, layout);
1579
1580 if (att->ds.offset_scale != cmd_buffer->state.offset_scale) {
1581 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
1582 cmd_buffer->state.offset_scale = att->ds.offset_scale;
1583 }
1584 radv_load_ds_clear_metadata(cmd_buffer, image);
1585 } else {
1586 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
1587 radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 2);
1588 else
1589 radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2);
1590
1591 radeon_emit(cmd_buffer->cs, S_028040_FORMAT(V_028040_Z_INVALID)); /* DB_Z_INFO */
1592 radeon_emit(cmd_buffer->cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* DB_STENCIL_INFO */
1593 }
1594 radeon_set_context_reg(cmd_buffer->cs, R_028208_PA_SC_WINDOW_SCISSOR_BR,
1595 S_028208_BR_X(framebuffer->width) |
1596 S_028208_BR_Y(framebuffer->height));
1597
1598 if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) {
1599 uint8_t watermark = 4; /* Default value for VI. */
1600
1601 /* For optimal DCC performance. */
1602 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
1603 if (num_bpp64_colorbufs >= 5) {
1604 watermark = 8;
1605 } else {
1606 watermark = 6;
1607 }
1608 }
1609
1610 radeon_set_context_reg(cmd_buffer->cs, R_028424_CB_DCC_CONTROL,
1611 S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(1) |
1612 S_028424_OVERWRITE_COMBINER_WATERMARK(watermark));
1613 }
1614
1615 if (cmd_buffer->device->dfsm_allowed) {
1616 radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1617 radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
1618 }
1619
1620 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_FRAMEBUFFER;
1621 }
1622
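/**
 * Emit the index type (only when it changed), the index buffer address and
 * the maximum index count.
 */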
1623 static void
1624 radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer)
1625 {
1626 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1627 struct radv_cmd_state *state = &cmd_buffer->state;
1628
1629 if (state->index_type != state->last_index_type) {
1630 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
1631 radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE,
1632 2, state->index_type);
1633 } else {
1634 radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
1635 radeon_emit(cs, state->index_type);
1636 }
1637
1638 state->last_index_type = state->index_type;
1639 }
1640
1641 radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
1642 radeon_emit(cs, state->index_va);
1643 radeon_emit(cs, state->index_va >> 32);
1644
1645 radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
1646 radeon_emit(cs, state->max_index_count);
1647
1648 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_INDEX_BUFFER;
1649 }
1650
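/**
 * Program DB_COUNT_CONTROL for the current occlusion query state and toggle
 * out-of-order rasterization, which is disabled while perfect occlusion
 * queries are active if the bound pipeline requires it.
 */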
1651 void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
1652 {
1653 bool has_perfect_queries = cmd_buffer->state.perfect_occlusion_queries_enabled;
1654 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
1655 uint32_t pa_sc_mode_cntl_1 =
1656 pipeline ? pipeline->graphics.ms.pa_sc_mode_cntl_1 : 0;
1657 uint32_t db_count_control;
1658
1659 if(!cmd_buffer->state.active_occlusion_queries) {
1660 if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
1661 if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
1662 pipeline->graphics.disable_out_of_order_rast_for_occlusion &&
1663 has_perfect_queries) {
1664 /* Re-enable out-of-order rasterization if the
1665 				 * bound pipeline supports it and if it has
1666 * been disabled before starting any perfect
1667 * occlusion queries.
1668 */
1669 radeon_set_context_reg(cmd_buffer->cs,
1670 R_028A4C_PA_SC_MODE_CNTL_1,
1671 pa_sc_mode_cntl_1);
1672 }
1673 }
1674 db_count_control = S_028004_ZPASS_INCREMENT_DISABLE(1);
1675 } else {
1676 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
1677 uint32_t sample_rate = subpass ? util_logbase2(subpass->max_sample_count) : 0;
1678
1679 if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
1680 db_count_control =
1681 S_028004_PERFECT_ZPASS_COUNTS(has_perfect_queries) |
1682 S_028004_SAMPLE_RATE(sample_rate) |
1683 S_028004_ZPASS_ENABLE(1) |
1684 S_028004_SLICE_EVEN_ENABLE(1) |
1685 S_028004_SLICE_ODD_ENABLE(1);
1686
1687 if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
1688 pipeline->graphics.disable_out_of_order_rast_for_occlusion &&
1689 has_perfect_queries) {
1690 /* If the bound pipeline has enabled
1691 * out-of-order rasterization, we should
1692 * disable it before starting any perfect
1693 * occlusion queries.
1694 */
1695 pa_sc_mode_cntl_1 &= C_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE;
1696
1697 radeon_set_context_reg(cmd_buffer->cs,
1698 R_028A4C_PA_SC_MODE_CNTL_1,
1699 pa_sc_mode_cntl_1);
1700 }
1701 } else {
1702 db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
1703 S_028004_SAMPLE_RATE(sample_rate);
1704 }
1705 }
1706
1707 radeon_set_context_reg(cmd_buffer->cs, R_028004_DB_COUNT_CONTROL, db_count_control);
1708 }
1709
1710 static void
1711 radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer)
1712 {
1713 uint32_t states = cmd_buffer->state.dirty & cmd_buffer->state.emitted_pipeline->graphics.needed_dynamic_state;
1714
1715 if (states & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
1716 radv_emit_viewport(cmd_buffer);
1717
1718 if (states & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR | RADV_CMD_DIRTY_DYNAMIC_VIEWPORT) &&
1719 !cmd_buffer->device->physical_device->has_scissor_bug)
1720 radv_emit_scissor(cmd_buffer);
1721
1722 if (states & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)
1723 radv_emit_line_width(cmd_buffer);
1724
1725 if (states & RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS)
1726 radv_emit_blend_constants(cmd_buffer);
1727
1728 if (states & (RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE |
1729 RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
1730 RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK))
1731 radv_emit_stencil(cmd_buffer);
1732
1733 if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS)
1734 radv_emit_depth_bounds(cmd_buffer);
1735
1736 if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)
1737 radv_emit_depth_bias(cmd_buffer);
1738
1739 if (states & RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE)
1740 radv_emit_discard_rectangle(cmd_buffer);
1741
1742 cmd_buffer->state.dirty &= ~states;
1743 }
1744
1745 static void
1746 radv_flush_push_descriptors(struct radv_cmd_buffer *cmd_buffer,
1747 VkPipelineBindPoint bind_point)
1748 {
1749 struct radv_descriptor_state *descriptors_state =
1750 radv_get_descriptors_state(cmd_buffer, bind_point);
1751 struct radv_descriptor_set *set = &descriptors_state->push_set.set;
1752 unsigned bo_offset;
1753
1754 if (!radv_cmd_buffer_upload_data(cmd_buffer, set->size, 32,
1755 set->mapped_ptr,
1756 &bo_offset))
1757 return;
1758
1759 set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
1760 set->va += bo_offset;
1761 }
1762
1763 static void
1764 radv_flush_indirect_descriptor_sets(struct radv_cmd_buffer *cmd_buffer,
1765 VkPipelineBindPoint bind_point)
1766 {
1767 struct radv_descriptor_state *descriptors_state =
1768 radv_get_descriptors_state(cmd_buffer, bind_point);
1769 uint32_t size = MAX_SETS * 4;
1770 uint32_t offset;
1771 void *ptr;
1772
1773 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size,
1774 256, &offset, &ptr))
1775 return;
1776
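	/* Write one dword per set holding the lower 32 bits of the set's VA;
	 * the upper bits are presumably implied by where descriptor memory
	 * is allocated.
	 */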
1777 for (unsigned i = 0; i < MAX_SETS; i++) {
1778 uint32_t *uptr = ((uint32_t *)ptr) + i;
1779 uint64_t set_va = 0;
1780 struct radv_descriptor_set *set = descriptors_state->sets[i];
1781 if (descriptors_state->valid & (1u << i))
1782 set_va = set->va;
1783 uptr[0] = set_va & 0xffffffff;
1784 }
1785
1786 uint64_t va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
1787 va += offset;
1788
1789 if (cmd_buffer->state.pipeline) {
1790 if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX])
1791 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
1792 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1793
1794 if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_FRAGMENT])
1795 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_FRAGMENT,
1796 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1797
1798 if (radv_pipeline_has_gs(cmd_buffer->state.pipeline))
1799 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
1800 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1801
1802 if (radv_pipeline_has_tess(cmd_buffer->state.pipeline))
1803 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_TESS_CTRL,
1804 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1805
1806 if (radv_pipeline_has_tess(cmd_buffer->state.pipeline))
1807 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_TESS_EVAL,
1808 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1809 }
1810
1811 if (cmd_buffer->state.compute_pipeline)
1812 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.compute_pipeline, MESA_SHADER_COMPUTE,
1813 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1814 }
1815
1816 static void
1817 radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer,
1818 VkShaderStageFlags stages)
1819 {
1820 VkPipelineBindPoint bind_point = stages & VK_SHADER_STAGE_COMPUTE_BIT ?
1821 VK_PIPELINE_BIND_POINT_COMPUTE :
1822 VK_PIPELINE_BIND_POINT_GRAPHICS;
1823 struct radv_descriptor_state *descriptors_state =
1824 radv_get_descriptors_state(cmd_buffer, bind_point);
1825 struct radv_cmd_state *state = &cmd_buffer->state;
1826 bool flush_indirect_descriptors;
1827
1828 if (!descriptors_state->dirty)
1829 return;
1830
1831 if (descriptors_state->push_dirty)
1832 radv_flush_push_descriptors(cmd_buffer, bind_point);
1833
1834 flush_indirect_descriptors =
1835 (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS &&
1836 state->pipeline && state->pipeline->need_indirect_descriptor_sets) ||
1837 (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE &&
1838 state->compute_pipeline && state->compute_pipeline->need_indirect_descriptor_sets);
1839
1840 if (flush_indirect_descriptors)
1841 radv_flush_indirect_descriptor_sets(cmd_buffer, bind_point);
1842
1843 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
1844 cmd_buffer->cs,
1845 MAX_SETS * MESA_SHADER_STAGES * 4);
1846
1847 if (cmd_buffer->state.pipeline) {
1848 radv_foreach_stage(stage, stages) {
1849 if (!cmd_buffer->state.pipeline->shaders[stage])
1850 continue;
1851
1852 radv_emit_descriptor_pointers(cmd_buffer,
1853 cmd_buffer->state.pipeline,
1854 descriptors_state, stage);
1855 }
1856 }
1857
1858 if (cmd_buffer->state.compute_pipeline &&
1859 (stages & VK_SHADER_STAGE_COMPUTE_BIT)) {
1860 radv_emit_descriptor_pointers(cmd_buffer,
1861 cmd_buffer->state.compute_pipeline,
1862 descriptors_state,
1863 MESA_SHADER_COMPUTE);
1864 }
1865
1866 descriptors_state->dirty = 0;
1867 descriptors_state->push_dirty = false;
1868
1869 assert(cmd_buffer->cs->cdw <= cdw_max);
1870
1871 if (unlikely(cmd_buffer->device->trace_bo))
1872 radv_save_descriptors(cmd_buffer, bind_point);
1873 }
1874
1875 static void
1876 radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
1877 VkShaderStageFlags stages)
1878 {
1879 struct radv_pipeline *pipeline = stages & VK_SHADER_STAGE_COMPUTE_BIT
1880 ? cmd_buffer->state.compute_pipeline
1881 : cmd_buffer->state.pipeline;
1882 VkPipelineBindPoint bind_point = stages & VK_SHADER_STAGE_COMPUTE_BIT ?
1883 VK_PIPELINE_BIND_POINT_COMPUTE :
1884 VK_PIPELINE_BIND_POINT_GRAPHICS;
1885 struct radv_descriptor_state *descriptors_state =
1886 radv_get_descriptors_state(cmd_buffer, bind_point);
1887 struct radv_pipeline_layout *layout = pipeline->layout;
1888 struct radv_shader_variant *shader, *prev_shader;
1889 unsigned offset;
1890 void *ptr;
1891 uint64_t va;
1892
1893 stages &= cmd_buffer->push_constant_stages;
1894 if (!stages ||
1895 (!layout->push_constant_size && !layout->dynamic_offset_count))
1896 return;
1897
1898 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
1899 16 * layout->dynamic_offset_count,
1900 256, &offset, &ptr))
1901 return;
1902
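	/* The upload holds the raw push constants followed by one 16-byte
	 * descriptor per dynamic buffer; shaders presumably reach both
	 * through the AC_UD_PUSH_CONSTANTS pointer emitted below.
	 */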
1903 memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
1904 memcpy((char*)ptr + layout->push_constant_size,
1905 descriptors_state->dynamic_buffers,
1906 16 * layout->dynamic_offset_count);
1907
1908 va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
1909 va += offset;
1910
1911 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
1912 cmd_buffer->cs, MESA_SHADER_STAGES * 4);
1913
1914 prev_shader = NULL;
1915 radv_foreach_stage(stage, stages) {
1916 shader = radv_get_shader(pipeline, stage);
1917
1918 /* Avoid redundantly emitting the address for merged stages. */
1919 if (shader && shader != prev_shader) {
1920 radv_emit_userdata_address(cmd_buffer, pipeline, stage,
1921 AC_UD_PUSH_CONSTANTS, va);
1922
1923 prev_shader = shader;
1924 }
1925 }
1926
1927 cmd_buffer->push_constant_stages &= ~stages;
1928 assert(cmd_buffer->cs->cdw <= cdw_max);
1929 }
1930
1931 static void
1932 radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer,
1933 bool pipeline_is_dirty)
1934 {
1935 if ((pipeline_is_dirty ||
1936 (cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) &&
1937 cmd_buffer->state.pipeline->vertex_elements.count &&
1938 radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.has_vertex_buffers) {
1939 struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements;
1940 unsigned vb_offset;
1941 void *vb_ptr;
1942 uint32_t i = 0;
1943 uint32_t count = velems->count;
1944 uint64_t va;
1945
1946 		/* Allocate some descriptor state for vertex buffers. */
1947 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, count * 16, 256,
1948 &vb_offset, &vb_ptr))
1949 return;
1950
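		/* Each binding gets a 4-dword buffer resource descriptor: base
		 * address, stride, record count and the format word from the
		 * pipeline's vertex elements.
		 */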
1951 for (i = 0; i < count; i++) {
1952 uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
1953 uint32_t offset;
1954 int vb = velems->binding[i];
1955 struct radv_buffer *buffer = cmd_buffer->vertex_bindings[vb].buffer;
1956 uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb];
1957
1958 va = radv_buffer_get_va(buffer->bo);
1959
1960 offset = cmd_buffer->vertex_bindings[vb].offset + velems->offset[i];
1961 va += offset + buffer->offset;
1962 desc[0] = va;
1963 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
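			/* The record count is a vertex count on SI/CIK when a
			 * stride is set and a byte size otherwise, presumably
			 * to match how each generation bounds-checks vertex
			 * fetches.
			 */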
1964 if (cmd_buffer->device->physical_device->rad_info.chip_class <= CIK && stride)
1965 desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1;
1966 else
1967 desc[2] = buffer->size - offset;
1968 desc[3] = velems->rsrc_word3[i];
1969 }
1970
1971 va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
1972 va += vb_offset;
1973
1974 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
1975 AC_UD_VS_VERTEX_BUFFERS, va);
1976
1977 cmd_buffer->state.vb_va = va;
1978 cmd_buffer->state.vb_size = count * 16;
1979 cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_VBO_DESCRIPTORS;
1980 }
1981 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_VERTEX_BUFFER;
1982 }
1983
1984 static void
1985 radv_emit_streamout_buffers(struct radv_cmd_buffer *cmd_buffer, uint64_t va)
1986 {
1987 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
1988 struct radv_userdata_info *loc;
1989 uint32_t base_reg;
1990
1991 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
1992 if (!radv_get_shader(pipeline, stage))
1993 continue;
1994
1995 loc = radv_lookup_user_sgpr(pipeline, stage,
1996 AC_UD_STREAMOUT_BUFFERS);
1997 if (loc->sgpr_idx == -1)
1998 continue;
1999
2000 base_reg = pipeline->user_data_0[stage];
2001
2002 radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
2003 base_reg + loc->sgpr_idx * 4, va, false);
2004 }
2005
2006 if (pipeline->gs_copy_shader) {
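		/* The GS copy shader runs on the hardware VS stage, so its
		 * streamout pointer lives in the VS user data registers.
		 */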
2007 loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_STREAMOUT_BUFFERS];
2008 if (loc->sgpr_idx != -1) {
2009 base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
2010
2011 radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
2012 base_reg + loc->sgpr_idx * 4, va, false);
2013 }
2014 }
2015 }
2016
2017 static void
2018 radv_flush_streamout_descriptors(struct radv_cmd_buffer *cmd_buffer)
2019 {
2020 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_STREAMOUT_BUFFER) {
2021 struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
2022 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
2023 unsigned so_offset;
2024 void *so_ptr;
2025 uint64_t va;
2026
2027 /* Allocate some descriptor state for streamout buffers. */
2028 if (!radv_cmd_buffer_upload_alloc(cmd_buffer,
2029 MAX_SO_BUFFERS * 16, 256,
2030 &so_offset, &so_ptr))
2031 return;
2032
2033 for (uint32_t i = 0; i < MAX_SO_BUFFERS; i++) {
2034 struct radv_buffer *buffer = sb[i].buffer;
2035 uint32_t *desc = &((uint32_t *)so_ptr)[i * 4];
2036
2037 if (!(so->enabled_mask & (1 << i)))
2038 continue;
2039
2040 va = radv_buffer_get_va(buffer->bo) + buffer->offset;
2041
2042 va += sb[i].offset;
2043
2044 /* Set the descriptor.
2045 *
2046 * On VI, the format must be non-INVALID, otherwise
2047 * the buffer will be considered not bound and store
2048 * instructions will be no-ops.
2049 */
2050 desc[0] = va;
2051 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
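			/* The size field is left at the maximum; the real
			 * buffer size is presumably enforced through the
			 * streamout registers rather than the descriptor
			 * bounds check.
			 */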
2052 desc[2] = 0xffffffff;
2053 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
2054 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
2055 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
2056 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
2057 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
2058 }
2059
2060 va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2061 va += so_offset;
2062
2063 radv_emit_streamout_buffers(cmd_buffer, va);
2064 }
2065
2066 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_STREAMOUT_BUFFER;
2067 }
2068
2069 static void
2070 radv_upload_graphics_shader_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty)
2071 {
2072 radv_flush_vertex_descriptors(cmd_buffer, pipeline_is_dirty);
2073 radv_flush_streamout_descriptors(cmd_buffer);
2074 radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
2075 radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
2076 }
2077
2078 static void
2079 radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, bool indexed_draw,
2080 bool instanced_draw, bool indirect_draw,
2081 uint32_t draw_vertex_count)
2082 {
2083 struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
2084 struct radv_cmd_state *state = &cmd_buffer->state;
2085 struct radeon_cmdbuf *cs = cmd_buffer->cs;
2086 uint32_t ia_multi_vgt_param;
2087 int32_t primitive_reset_en;
2088
2089 /* Draw state. */
2090 ia_multi_vgt_param =
2091 si_get_ia_multi_vgt_param(cmd_buffer, instanced_draw,
2092 indirect_draw, draw_vertex_count);
2093
2094 if (state->last_ia_multi_vgt_param != ia_multi_vgt_param) {
2095 if (info->chip_class >= GFX9) {
2096 radeon_set_uconfig_reg_idx(cs,
2097 R_030960_IA_MULTI_VGT_PARAM,
2098 4, ia_multi_vgt_param);
2099 } else if (info->chip_class >= CIK) {
2100 radeon_set_context_reg_idx(cs,
2101 R_028AA8_IA_MULTI_VGT_PARAM,
2102 1, ia_multi_vgt_param);
2103 } else {
2104 radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM,
2105 ia_multi_vgt_param);
2106 }
2107 state->last_ia_multi_vgt_param = ia_multi_vgt_param;
2108 }
2109
2110 /* Primitive restart. */
2111 primitive_reset_en =
2112 indexed_draw && state->pipeline->graphics.prim_restart_enable;
2113
2114 if (primitive_reset_en != state->last_primitive_reset_en) {
2115 state->last_primitive_reset_en = primitive_reset_en;
2116 if (info->chip_class >= GFX9) {
2117 radeon_set_uconfig_reg(cs,
2118 R_03092C_VGT_MULTI_PRIM_IB_RESET_EN,
2119 primitive_reset_en);
2120 } else {
2121 radeon_set_context_reg(cs,
2122 R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
2123 primitive_reset_en);
2124 }
2125 }
2126
2127 if (primitive_reset_en) {
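		/* The restart index is all ones for the bound index size:
		 * 0xffff for 16-bit indices, 0xffffffff for 32-bit indices.
		 */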
2128 uint32_t primitive_reset_index =
2129 state->index_type ? 0xffffffffu : 0xffffu;
2130
2131 if (primitive_reset_index != state->last_primitive_reset_index) {
2132 radeon_set_context_reg(cs,
2133 R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
2134 primitive_reset_index);
2135 state->last_primitive_reset_index = primitive_reset_index;
2136 }
2137 }
2138 }
2139
2140 static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer,
2141 VkPipelineStageFlags src_stage_mask)
2142 {
2143 if (src_stage_mask & (VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
2144 VK_PIPELINE_STAGE_TRANSFER_BIT |
2145 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
2146 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
2147 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
2148 }
2149
2150 if (src_stage_mask & (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
2151 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
2152 VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
2153 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
2154 VK_PIPELINE_STAGE_TRANSFER_BIT |
2155 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
2156 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT |
2157 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
2158 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
2159 } else if (src_stage_mask & (VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
2160 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
2161 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
2162 VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
2163 VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
2164 VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
2165 VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT)) {
2166 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
2167 }
2168 }
2169
2170 static enum radv_cmd_flush_bits
2171 radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer,
2172 VkAccessFlags src_flags,
2173 struct radv_image *image)
2174 {
2175 bool flush_CB_meta = true, flush_DB_meta = true;
2176 enum radv_cmd_flush_bits flush_bits = 0;
2177 uint32_t b;
2178
2179 if (image) {
2180 if (!radv_image_has_CB_metadata(image))
2181 flush_CB_meta = false;
2182 if (!radv_image_has_htile(image))
2183 flush_DB_meta = false;
2184 }
2185
2186 for_each_bit(b, src_flags) {
2187 switch ((VkAccessFlagBits)(1 << b)) {
2188 case VK_ACCESS_SHADER_WRITE_BIT:
2189 case VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT:
2190 case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT:
2191 flush_bits |= RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
2192 break;
2193 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
2194 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
2195 if (flush_CB_meta)
2196 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
2197 break;
2198 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
2199 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
2200 if (flush_DB_meta)
2201 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
2202 break;
2203 case VK_ACCESS_TRANSFER_WRITE_BIT:
2204 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
2205 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
2206 RADV_CMD_FLAG_INV_GLOBAL_L2;
2207
2208 if (flush_CB_meta)
2209 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
2210 if (flush_DB_meta)
2211 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
2212 break;
2213 default:
2214 break;
2215 }
2216 }
2217 return flush_bits;
2218 }
2219
2220 static enum radv_cmd_flush_bits
2221 radv_dst_access_flush(struct radv_cmd_buffer *cmd_buffer,
2222 VkAccessFlags dst_flags,
2223 struct radv_image *image)
2224 {
2225 bool flush_CB_meta = true, flush_DB_meta = true;
2226 enum radv_cmd_flush_bits flush_bits = 0;
2227 bool flush_CB = true, flush_DB = true;
2228 bool image_is_coherent = false;
2229 uint32_t b;
2230
2231 if (image) {
2232 if (!(image->usage & VK_IMAGE_USAGE_STORAGE_BIT)) {
2233 flush_CB = false;
2234 flush_DB = false;
2235 }
2236
2237 if (!radv_image_has_CB_metadata(image))
2238 flush_CB_meta = false;
2239 if (!radv_image_has_htile(image))
2240 flush_DB_meta = false;
2241
2242 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
2243 if (image->info.samples == 1 &&
2244 (image->usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
2245 VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) &&
2246 !vk_format_is_stencil(image->vk_format)) {
2247 /* Single-sample color and single-sample depth
2248 * (not stencil) are coherent with shaders on
2249 * GFX9.
2250 */
2251 image_is_coherent = true;
2252 }
2253 }
2254 }
2255
2256 for_each_bit(b, dst_flags) {
2257 switch ((VkAccessFlagBits)(1 << b)) {
2258 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
2259 case VK_ACCESS_INDEX_READ_BIT:
2260 case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT:
2261 break;
2262 case VK_ACCESS_UNIFORM_READ_BIT:
2263 flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 | RADV_CMD_FLAG_INV_SMEM_L1;
2264 break;
2265 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
2266 case VK_ACCESS_TRANSFER_READ_BIT:
2267 case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
2268 flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 |
2269 RADV_CMD_FLAG_INV_GLOBAL_L2;
2270 break;
2271 case VK_ACCESS_SHADER_READ_BIT:
2272 flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1;
2273
2274 if (!image_is_coherent)
2275 flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2;
2276 break;
2277 case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
2278 if (flush_CB)
2279 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
2280 if (flush_CB_meta)
2281 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
2282 break;
2283 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT:
2284 if (flush_DB)
2285 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
2286 if (flush_DB_meta)
2287 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
2288 break;
2289 default:
2290 break;
2291 }
2292 }
2293 return flush_bits;
2294 }
2295
2296 void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer,
2297 const struct radv_subpass_barrier *barrier)
2298 {
2299 cmd_buffer->state.flush_bits |= radv_src_access_flush(cmd_buffer, barrier->src_access_mask,
2300 NULL);
2301 radv_stage_flush(cmd_buffer, barrier->src_stage_mask);
2302 cmd_buffer->state.flush_bits |= radv_dst_access_flush(cmd_buffer, barrier->dst_access_mask,
2303 NULL);
2304 }
2305
2306 static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer,
2307 struct radv_subpass_attachment att)
2308 {
2309 unsigned idx = att.attachment;
2310 struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment;
2311 VkImageSubresourceRange range;
2312 range.aspectMask = 0;
2313 range.baseMipLevel = view->base_mip;
2314 range.levelCount = 1;
2315 range.baseArrayLayer = view->base_layer;
2316 range.layerCount = cmd_buffer->state.framebuffer->layers;
2317
2318 if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
2319 /* If the current subpass uses multiview, the driver might have
2320 * performed a fast color/depth clear to the whole image
2321 * (including all layers). To make sure the driver will
2322 * decompress the image correctly (if needed), we have to
2323 * account for the "real" number of layers. If the view mask is
2324 * sparse, this will decompress more layers than needed.
2325 */
2326 range.layerCount = util_last_bit(cmd_buffer->state.subpass->view_mask);
2327 }
2328
2329 radv_handle_image_transition(cmd_buffer,
2330 view->image,
2331 cmd_buffer->state.attachments[idx].current_layout,
2332 att.layout, 0, 0, &range);
2333
2334 cmd_buffer->state.attachments[idx].current_layout = att.layout;
2335
2336
2337 }
2338
2339 void
2340 radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer,
2341 const struct radv_subpass *subpass, bool transitions)
2342 {
2343 if (transitions) {
2344 radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);
2345
2346 for (unsigned i = 0; i < subpass->color_count; ++i) {
2347 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
2348 radv_handle_subpass_image_transition(cmd_buffer,
2349 subpass->color_attachments[i]);
2350 }
2351
2352 for (unsigned i = 0; i < subpass->input_count; ++i) {
2353 radv_handle_subpass_image_transition(cmd_buffer,
2354 subpass->input_attachments[i]);
2355 }
2356
2357 if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
2358 radv_handle_subpass_image_transition(cmd_buffer,
2359 subpass->depth_stencil_attachment);
2360 }
2361 }
2362
2363 cmd_buffer->state.subpass = subpass;
2364
2365 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER;
2366 }
2367
2368 static VkResult
2369 radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer,
2370 struct radv_render_pass *pass,
2371 const VkRenderPassBeginInfo *info)
2372 {
2373 struct radv_cmd_state *state = &cmd_buffer->state;
2374
2375 if (pass->attachment_count == 0) {
2376 state->attachments = NULL;
2377 return VK_SUCCESS;
2378 }
2379
2380 state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
2381 pass->attachment_count *
2382 sizeof(state->attachments[0]),
2383 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2384 if (state->attachments == NULL) {
2385 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
2386 return cmd_buffer->record_result;
2387 }
2388
2389 for (uint32_t i = 0; i < pass->attachment_count; ++i) {
2390 struct radv_render_pass_attachment *att = &pass->attachments[i];
2391 VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
2392 VkImageAspectFlags clear_aspects = 0;
2393
2394 if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
2395 /* color attachment */
2396 if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2397 clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
2398 }
2399 } else {
2400 /* depthstencil attachment */
2401 if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
2402 att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2403 clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
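				/* If depth is cleared and stencil is
				 * DONT_CARE, clear stencil as well;
				 * presumably a combined clear is cheaper
				 * than preserving stencil.
				 */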
2404 if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
2405 att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE)
2406 clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
2407 }
2408 if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
2409 att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2410 clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
2411 }
2412 }
2413
2414 state->attachments[i].pending_clear_aspects = clear_aspects;
2415 state->attachments[i].cleared_views = 0;
2416 if (clear_aspects && info) {
2417 assert(info->clearValueCount > i);
2418 state->attachments[i].clear_value = info->pClearValues[i];
2419 }
2420
2421 state->attachments[i].current_layout = att->initial_layout;
2422 }
2423
2424 return VK_SUCCESS;
2425 }
2426
2427 VkResult radv_AllocateCommandBuffers(
2428 VkDevice _device,
2429 const VkCommandBufferAllocateInfo *pAllocateInfo,
2430 VkCommandBuffer *pCommandBuffers)
2431 {
2432 RADV_FROM_HANDLE(radv_device, device, _device);
2433 RADV_FROM_HANDLE(radv_cmd_pool, pool, pAllocateInfo->commandPool);
2434
2435 VkResult result = VK_SUCCESS;
2436 uint32_t i;
2437
2438 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
2439
2440 if (!list_empty(&pool->free_cmd_buffers)) {
2441 struct radv_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct radv_cmd_buffer, pool_link);
2442
2443 list_del(&cmd_buffer->pool_link);
2444 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
2445
2446 result = radv_reset_cmd_buffer(cmd_buffer);
2447 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
2448 cmd_buffer->level = pAllocateInfo->level;
2449
2450 pCommandBuffers[i] = radv_cmd_buffer_to_handle(cmd_buffer);
2451 } else {
2452 result = radv_create_cmd_buffer(device, pool, pAllocateInfo->level,
2453 &pCommandBuffers[i]);
2454 }
2455 if (result != VK_SUCCESS)
2456 break;
2457 }
2458
2459 if (result != VK_SUCCESS) {
2460 radv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
2461 i, pCommandBuffers);
2462
2463 /* From the Vulkan 1.0.66 spec:
2464 *
2465 * "vkAllocateCommandBuffers can be used to create multiple
2466 * command buffers. If the creation of any of those command
2467 * buffers fails, the implementation must destroy all
2468 * successfully created command buffer objects from this
2469 * command, set all entries of the pCommandBuffers array to
2470 * NULL and return the error."
2471 */
2472 memset(pCommandBuffers, 0,
2473 sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
2474 }
2475
2476 return result;
2477 }
2478
2479 void radv_FreeCommandBuffers(
2480 VkDevice device,
2481 VkCommandPool commandPool,
2482 uint32_t commandBufferCount,
2483 const VkCommandBuffer *pCommandBuffers)
2484 {
2485 for (uint32_t i = 0; i < commandBufferCount; i++) {
2486 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
2487
2488 if (cmd_buffer) {
2489 if (cmd_buffer->pool) {
2490 list_del(&cmd_buffer->pool_link);
2491 list_addtail(&cmd_buffer->pool_link, &cmd_buffer->pool->free_cmd_buffers);
2492 } else
2493 radv_cmd_buffer_destroy(cmd_buffer);
2494
2495 }
2496 }
2497 }
2498
2499 VkResult radv_ResetCommandBuffer(
2500 VkCommandBuffer commandBuffer,
2501 VkCommandBufferResetFlags flags)
2502 {
2503 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2504 return radv_reset_cmd_buffer(cmd_buffer);
2505 }
2506
2507 VkResult radv_BeginCommandBuffer(
2508 VkCommandBuffer commandBuffer,
2509 const VkCommandBufferBeginInfo *pBeginInfo)
2510 {
2511 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2512 VkResult result = VK_SUCCESS;
2513
2514 if (cmd_buffer->status != RADV_CMD_BUFFER_STATUS_INITIAL) {
2515 		/* If the command buffer has already been reset with
2516 * vkResetCommandBuffer, no need to do it again.
2517 */
2518 result = radv_reset_cmd_buffer(cmd_buffer);
2519 if (result != VK_SUCCESS)
2520 return result;
2521 }
2522
2523 memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
2524 cmd_buffer->state.last_primitive_reset_en = -1;
2525 cmd_buffer->state.last_index_type = -1;
2526 cmd_buffer->state.last_num_instances = -1;
2527 cmd_buffer->state.last_vertex_offset = -1;
2528 cmd_buffer->state.last_first_instance = -1;
2529 cmd_buffer->state.predication_type = -1;
2530 cmd_buffer->usage_flags = pBeginInfo->flags;
2531
2532 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
2533 (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
2534 assert(pBeginInfo->pInheritanceInfo);
2535 cmd_buffer->state.framebuffer = radv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
2536 cmd_buffer->state.pass = radv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
2537
2538 struct radv_subpass *subpass =
2539 &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
2540
2541 result = radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
2542 if (result != VK_SUCCESS)
2543 return result;
2544
2545 radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
2546 }
2547
2548 if (unlikely(cmd_buffer->device->trace_bo)) {
2549 struct radv_device *device = cmd_buffer->device;
2550
2551 radv_cs_add_buffer(device->ws, cmd_buffer->cs,
2552 device->trace_bo);
2553
2554 radv_cmd_buffer_trace_emit(cmd_buffer);
2555 }
2556
2557 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_RECORDING;
2558
2559 return result;
2560 }
2561
2562 void radv_CmdBindVertexBuffers(
2563 VkCommandBuffer commandBuffer,
2564 uint32_t firstBinding,
2565 uint32_t bindingCount,
2566 const VkBuffer* pBuffers,
2567 const VkDeviceSize* pOffsets)
2568 {
2569 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2570 struct radv_vertex_binding *vb = cmd_buffer->vertex_bindings;
2571 bool changed = false;
2572
2573 	/* We have to defer setting up vertex buffers since we need the buffer
2574 * stride from the pipeline. */
2575
2576 assert(firstBinding + bindingCount <= MAX_VBS);
2577 for (uint32_t i = 0; i < bindingCount; i++) {
2578 uint32_t idx = firstBinding + i;
2579
2580 if (!changed &&
2581 (vb[idx].buffer != radv_buffer_from_handle(pBuffers[i]) ||
2582 vb[idx].offset != pOffsets[i])) {
2583 changed = true;
2584 }
2585
2586 vb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
2587 vb[idx].offset = pOffsets[i];
2588
2589 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
2590 vb[idx].buffer->bo);
2591 }
2592
2593 if (!changed) {
2594 /* No state changes. */
2595 return;
2596 }
2597
2598 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_VERTEX_BUFFER;
2599 }
2600
2601 void radv_CmdBindIndexBuffer(
2602 VkCommandBuffer commandBuffer,
2603 VkBuffer buffer,
2604 VkDeviceSize offset,
2605 VkIndexType indexType)
2606 {
2607 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2608 RADV_FROM_HANDLE(radv_buffer, index_buffer, buffer);
2609
2610 if (cmd_buffer->state.index_buffer == index_buffer &&
2611 cmd_buffer->state.index_offset == offset &&
2612 cmd_buffer->state.index_type == indexType) {
2613 /* No state changes. */
2614 return;
2615 }
2616
2617 cmd_buffer->state.index_buffer = index_buffer;
2618 cmd_buffer->state.index_offset = offset;
2619 cmd_buffer->state.index_type = indexType; /* vk matches hw */
2620 cmd_buffer->state.index_va = radv_buffer_get_va(index_buffer->bo);
2621 cmd_buffer->state.index_va += index_buffer->offset + offset;
2622
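	/* VK_INDEX_TYPE_UINT16 is 0 and VK_INDEX_TYPE_UINT32 is 1, so the
	 * shift converts the remaining buffer size to a maximum index count
	 * (2 or 4 bytes per index).
	 */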
2623 int index_size_shift = cmd_buffer->state.index_type ? 2 : 1;
2624 cmd_buffer->state.max_index_count = (index_buffer->size - offset) >> index_size_shift;
2625 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
2626 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo);
2627 }
2628
2629
2630 static void
2631 radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
2632 VkPipelineBindPoint bind_point,
2633 struct radv_descriptor_set *set, unsigned idx)
2634 {
2635 struct radeon_winsys *ws = cmd_buffer->device->ws;
2636
2637 radv_set_descriptor_set(cmd_buffer, bind_point, set, idx);
2638
2639 assert(set);
2640 assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
2641
2642 if (!cmd_buffer->device->use_global_bo_list) {
2643 for (unsigned j = 0; j < set->layout->buffer_count; ++j)
2644 if (set->descriptors[j])
2645 radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j]);
2646 }
2647
2648 if(set->bo)
2649 radv_cs_add_buffer(ws, cmd_buffer->cs, set->bo);
2650 }
2651
2652 void radv_CmdBindDescriptorSets(
2653 VkCommandBuffer commandBuffer,
2654 VkPipelineBindPoint pipelineBindPoint,
2655 VkPipelineLayout _layout,
2656 uint32_t firstSet,
2657 uint32_t descriptorSetCount,
2658 const VkDescriptorSet* pDescriptorSets,
2659 uint32_t dynamicOffsetCount,
2660 const uint32_t* pDynamicOffsets)
2661 {
2662 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2663 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2664 unsigned dyn_idx = 0;
2665
2666 const bool no_dynamic_bounds = cmd_buffer->device->instance->debug_flags & RADV_DEBUG_NO_DYNAMIC_BOUNDS;
2667 struct radv_descriptor_state *descriptors_state =
2668 radv_get_descriptors_state(cmd_buffer, pipelineBindPoint);
2669
2670 for (unsigned i = 0; i < descriptorSetCount; ++i) {
2671 unsigned idx = i + firstSet;
2672 RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);
2673 radv_bind_descriptor_set(cmd_buffer, pipelineBindPoint, set, idx);
2674
2675 for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
2676 unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
2677 uint32_t *dst = descriptors_state->dynamic_buffers + idx * 4;
2678 assert(dyn_idx < dynamicOffsetCount);
2679
2680 struct radv_descriptor_range *range = set->dynamic_descriptors + j;
2681 uint64_t va = range->va + pDynamicOffsets[dyn_idx];
2682 dst[0] = va;
2683 dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
2684 dst[2] = no_dynamic_bounds ? 0xffffffffu : range->size;
2685 dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
2686 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
2687 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
2688 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
2689 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
2690 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
2691 cmd_buffer->push_constant_stages |=
2692 set->layout->dynamic_shader_stages;
2693 }
2694 }
2695 }
2696
2697 static bool radv_init_push_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
2698 struct radv_descriptor_set *set,
2699 struct radv_descriptor_set_layout *layout,
2700 VkPipelineBindPoint bind_point)
2701 {
2702 struct radv_descriptor_state *descriptors_state =
2703 radv_get_descriptors_state(cmd_buffer, bind_point);
2704 set->size = layout->size;
2705 set->layout = layout;
2706
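	/* Grow the host-side push descriptor storage geometrically (at least
	 * 1 KiB, at least doubled) and cap it at what is presumably the
	 * worst-case size of a full push descriptor set.
	 */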
2707 if (descriptors_state->push_set.capacity < set->size) {
2708 size_t new_size = MAX2(set->size, 1024);
2709 new_size = MAX2(new_size, 2 * descriptors_state->push_set.capacity);
2710 new_size = MIN2(new_size, 96 * MAX_PUSH_DESCRIPTORS);
2711
2712 free(set->mapped_ptr);
2713 set->mapped_ptr = malloc(new_size);
2714
2715 if (!set->mapped_ptr) {
2716 descriptors_state->push_set.capacity = 0;
2717 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
2718 return false;
2719 }
2720
2721 descriptors_state->push_set.capacity = new_size;
2722 }
2723
2724 return true;
2725 }
2726
2727 void radv_meta_push_descriptor_set(
2728 struct radv_cmd_buffer* cmd_buffer,
2729 VkPipelineBindPoint pipelineBindPoint,
2730 VkPipelineLayout _layout,
2731 uint32_t set,
2732 uint32_t descriptorWriteCount,
2733 const VkWriteDescriptorSet* pDescriptorWrites)
2734 {
2735 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2736 struct radv_descriptor_set *push_set = &cmd_buffer->meta_push_descriptors;
2737 unsigned bo_offset;
2738
2739 assert(set == 0);
2740 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
2741
2742 push_set->size = layout->set[set].layout->size;
2743 push_set->layout = layout->set[set].layout;
2744
2745 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, push_set->size, 32,
2746 &bo_offset,
2747 (void**) &push_set->mapped_ptr))
2748 return;
2749
2750 push_set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2751 push_set->va += bo_offset;
2752
2753 radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
2754 radv_descriptor_set_to_handle(push_set),
2755 descriptorWriteCount, pDescriptorWrites, 0, NULL);
2756
2757 radv_set_descriptor_set(cmd_buffer, pipelineBindPoint, push_set, set);
2758 }
2759
2760 void radv_CmdPushDescriptorSetKHR(
2761 VkCommandBuffer commandBuffer,
2762 VkPipelineBindPoint pipelineBindPoint,
2763 VkPipelineLayout _layout,
2764 uint32_t set,
2765 uint32_t descriptorWriteCount,
2766 const VkWriteDescriptorSet* pDescriptorWrites)
2767 {
2768 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2769 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2770 struct radv_descriptor_state *descriptors_state =
2771 radv_get_descriptors_state(cmd_buffer, pipelineBindPoint);
2772 struct radv_descriptor_set *push_set = &descriptors_state->push_set.set;
2773
2774 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
2775
2776 if (!radv_init_push_descriptor_set(cmd_buffer, push_set,
2777 layout->set[set].layout,
2778 pipelineBindPoint))
2779 return;
2780
2781 radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
2782 radv_descriptor_set_to_handle(push_set),
2783 descriptorWriteCount, pDescriptorWrites, 0, NULL);
2784
2785 radv_set_descriptor_set(cmd_buffer, pipelineBindPoint, push_set, set);
2786 descriptors_state->push_dirty = true;
2787 }
2788
2789 void radv_CmdPushDescriptorSetWithTemplateKHR(
2790 VkCommandBuffer commandBuffer,
2791 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
2792 VkPipelineLayout _layout,
2793 uint32_t set,
2794 const void* pData)
2795 {
2796 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2797 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2798 RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
2799 struct radv_descriptor_state *descriptors_state =
2800 radv_get_descriptors_state(cmd_buffer, templ->bind_point);
2801 struct radv_descriptor_set *push_set = &descriptors_state->push_set.set;
2802
2803 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
2804
2805 if (!radv_init_push_descriptor_set(cmd_buffer, push_set,
2806 layout->set[set].layout,
2807 templ->bind_point))
2808 return;
2809
2810 radv_update_descriptor_set_with_template(cmd_buffer->device, cmd_buffer, push_set,
2811 descriptorUpdateTemplate, pData);
2812
2813 radv_set_descriptor_set(cmd_buffer, templ->bind_point, push_set, set);
2814 descriptors_state->push_dirty = true;
2815 }
2816
2817 void radv_CmdPushConstants(VkCommandBuffer commandBuffer,
2818 VkPipelineLayout layout,
2819 VkShaderStageFlags stageFlags,
2820 uint32_t offset,
2821 uint32_t size,
2822 const void* pValues)
2823 {
2824 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2825 memcpy(cmd_buffer->push_constants + offset, pValues, size);
2826 cmd_buffer->push_constant_stages |= stageFlags;
2827 }
2828
2829 VkResult radv_EndCommandBuffer(
2830 VkCommandBuffer commandBuffer)
2831 {
2832 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2833
2834 if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) {
2835 if (cmd_buffer->device->physical_device->rad_info.chip_class == SI)
2836 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
2837 si_emit_cache_flush(cmd_buffer);
2838 }
2839
2840 /* Make sure CP DMA is idle at the end of IBs because the kernel
2841 * doesn't wait for it.
2842 */
2843 si_cp_dma_wait_for_idle(cmd_buffer);
2844
2845 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
2846
2847 if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
2848 return vk_error(cmd_buffer->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2849
2850 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_EXECUTABLE;
2851
2852 return cmd_buffer->record_result;
2853 }
2854
2855 static void
2856 radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer)
2857 {
2858 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
2859
2860 if (!pipeline || pipeline == cmd_buffer->state.emitted_compute_pipeline)
2861 return;
2862
2863 cmd_buffer->state.emitted_compute_pipeline = pipeline;
2864
2865 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, pipeline->cs.cdw);
2866 radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);
2867
2868 cmd_buffer->compute_scratch_size_needed =
2869 MAX2(cmd_buffer->compute_scratch_size_needed,
2870 pipeline->max_waves * pipeline->scratch_bytes_per_wave);
2871
2872 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
2873 pipeline->shaders[MESA_SHADER_COMPUTE]->bo);
2874
2875 if (unlikely(cmd_buffer->device->trace_bo))
2876 radv_save_pipeline(cmd_buffer, pipeline, RING_COMPUTE);
2877 }
2878
2879 static void radv_mark_descriptor_sets_dirty(struct radv_cmd_buffer *cmd_buffer,
2880 VkPipelineBindPoint bind_point)
2881 {
2882 struct radv_descriptor_state *descriptors_state =
2883 radv_get_descriptors_state(cmd_buffer, bind_point);
2884
2885 descriptors_state->dirty |= descriptors_state->valid;
2886 }
2887
2888 void radv_CmdBindPipeline(
2889 VkCommandBuffer commandBuffer,
2890 VkPipelineBindPoint pipelineBindPoint,
2891 VkPipeline _pipeline)
2892 {
2893 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2894 RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
2895
2896 switch (pipelineBindPoint) {
2897 case VK_PIPELINE_BIND_POINT_COMPUTE:
2898 if (cmd_buffer->state.compute_pipeline == pipeline)
2899 return;
2900 radv_mark_descriptor_sets_dirty(cmd_buffer, pipelineBindPoint);
2901
2902 cmd_buffer->state.compute_pipeline = pipeline;
2903 cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
2904 break;
2905 case VK_PIPELINE_BIND_POINT_GRAPHICS:
2906 if (cmd_buffer->state.pipeline == pipeline)
2907 return;
2908 radv_mark_descriptor_sets_dirty(cmd_buffer, pipelineBindPoint);
2909
2910 cmd_buffer->state.pipeline = pipeline;
2911 if (!pipeline)
2912 break;
2913
2914 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
2915 cmd_buffer->push_constant_stages |= pipeline->active_stages;
2916
2917 		/* The new vertex shader might not have the same user regs. */
2918 cmd_buffer->state.last_first_instance = -1;
2919 cmd_buffer->state.last_vertex_offset = -1;
2920
2921 /* Prefetch all pipeline shaders at first draw time. */
2922 cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_SHADERS;
2923
2924 radv_bind_dynamic_state(cmd_buffer, &pipeline->dynamic_state);
2925 radv_bind_streamout_state(cmd_buffer, pipeline);
2926
2927 if (pipeline->graphics.esgs_ring_size > cmd_buffer->esgs_ring_size_needed)
2928 cmd_buffer->esgs_ring_size_needed = pipeline->graphics.esgs_ring_size;
2929 if (pipeline->graphics.gsvs_ring_size > cmd_buffer->gsvs_ring_size_needed)
2930 cmd_buffer->gsvs_ring_size_needed = pipeline->graphics.gsvs_ring_size;
2931
2932 if (radv_pipeline_has_tess(pipeline))
2933 cmd_buffer->tess_rings_needed = true;
2934 break;
2935 default:
2936 assert(!"invalid bind point");
2937 break;
2938 }
2939 }
2940
2941 void radv_CmdSetViewport(
2942 VkCommandBuffer commandBuffer,
2943 uint32_t firstViewport,
2944 uint32_t viewportCount,
2945 const VkViewport* pViewports)
2946 {
2947 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2948 struct radv_cmd_state *state = &cmd_buffer->state;
2949 MAYBE_UNUSED const uint32_t total_count = firstViewport + viewportCount;
2950
2951 assert(firstViewport < MAX_VIEWPORTS);
2952 assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);
2953
2954 memcpy(state->dynamic.viewport.viewports + firstViewport, pViewports,
2955 viewportCount * sizeof(*pViewports));
2956
2957 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_VIEWPORT;
2958 }
2959
2960 void radv_CmdSetScissor(
2961 VkCommandBuffer commandBuffer,
2962 uint32_t firstScissor,
2963 uint32_t scissorCount,
2964 const VkRect2D* pScissors)
2965 {
2966 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2967 struct radv_cmd_state *state = &cmd_buffer->state;
2968 MAYBE_UNUSED const uint32_t total_count = firstScissor + scissorCount;
2969
2970 assert(firstScissor < MAX_SCISSORS);
2971 assert(total_count >= 1 && total_count <= MAX_SCISSORS);
2972
2973 memcpy(state->dynamic.scissor.scissors + firstScissor, pScissors,
2974 scissorCount * sizeof(*pScissors));
2975
2976 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
2977 }
2978
2979 void radv_CmdSetLineWidth(
2980 VkCommandBuffer commandBuffer,
2981 float lineWidth)
2982 {
2983 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2984 cmd_buffer->state.dynamic.line_width = lineWidth;
2985 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
2986 }
2987
2988 void radv_CmdSetDepthBias(
2989 VkCommandBuffer commandBuffer,
2990 float depthBiasConstantFactor,
2991 float depthBiasClamp,
2992 float depthBiasSlopeFactor)
2993 {
2994 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2995
2996 cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
2997 cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
2998 cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;
2999
3000 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
3001 }
3002
3003 void radv_CmdSetBlendConstants(
3004 VkCommandBuffer commandBuffer,
3005 const float blendConstants[4])
3006 {
3007 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3008
3009 memcpy(cmd_buffer->state.dynamic.blend_constants,
3010 blendConstants, sizeof(float) * 4);
3011
3012 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
3013 }
3014
3015 void radv_CmdSetDepthBounds(
3016 VkCommandBuffer commandBuffer,
3017 float minDepthBounds,
3018 float maxDepthBounds)
3019 {
3020 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3021
3022 cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
3023 cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;
3024
3025 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
3026 }
3027
3028 void radv_CmdSetStencilCompareMask(
3029 VkCommandBuffer commandBuffer,
3030 VkStencilFaceFlags faceMask,
3031 uint32_t compareMask)
3032 {
3033 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3034
3035 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
3036 cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
3037 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
3038 cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;
3039
3040 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
3041 }
3042
3043 void radv_CmdSetStencilWriteMask(
3044 VkCommandBuffer commandBuffer,
3045 VkStencilFaceFlags faceMask,
3046 uint32_t writeMask)
3047 {
3048 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3049
3050 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
3051 cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
3052 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
3053 cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;
3054
3055 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
3056 }
3057
3058 void radv_CmdSetStencilReference(
3059 VkCommandBuffer commandBuffer,
3060 VkStencilFaceFlags faceMask,
3061 uint32_t reference)
3062 {
3063 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3064
3065 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
3066 cmd_buffer->state.dynamic.stencil_reference.front = reference;
3067 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
3068 cmd_buffer->state.dynamic.stencil_reference.back = reference;
3069
3070 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
3071 }
3072
3073 void radv_CmdSetDiscardRectangleEXT(
3074 VkCommandBuffer commandBuffer,
3075 uint32_t firstDiscardRectangle,
3076 uint32_t discardRectangleCount,
3077 const VkRect2D* pDiscardRectangles)
3078 {
3079 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3080 struct radv_cmd_state *state = &cmd_buffer->state;
3081 MAYBE_UNUSED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount;
3082
3083 assert(firstDiscardRectangle < MAX_DISCARD_RECTANGLES);
3084 assert(total_count >= 1 && total_count <= MAX_DISCARD_RECTANGLES);
3085
3086 typed_memcpy(&state->dynamic.discard_rectangle.rectangles[firstDiscardRectangle],
3087 pDiscardRectangles, discardRectangleCount);
3088
3089 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE;
3090 }
3091
3092 void radv_CmdExecuteCommands(
3093 VkCommandBuffer commandBuffer,
3094 uint32_t commandBufferCount,
3095 const VkCommandBuffer* pCmdBuffers)
3096 {
3097 RADV_FROM_HANDLE(radv_cmd_buffer, primary, commandBuffer);
3098
3099 assert(commandBufferCount > 0);
3100
3101 /* Emit pending flushes on primary prior to executing secondary */
3102 si_emit_cache_flush(primary);
3103
3104 for (uint32_t i = 0; i < commandBufferCount; i++) {
3105 RADV_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]);
3106
3107 primary->scratch_size_needed = MAX2(primary->scratch_size_needed,
3108 secondary->scratch_size_needed);
3109 primary->compute_scratch_size_needed = MAX2(primary->compute_scratch_size_needed,
3110 secondary->compute_scratch_size_needed);
3111
3112 if (secondary->esgs_ring_size_needed > primary->esgs_ring_size_needed)
3113 primary->esgs_ring_size_needed = secondary->esgs_ring_size_needed;
3114 if (secondary->gsvs_ring_size_needed > primary->gsvs_ring_size_needed)
3115 primary->gsvs_ring_size_needed = secondary->gsvs_ring_size_needed;
3116 if (secondary->tess_rings_needed)
3117 primary->tess_rings_needed = true;
3118 if (secondary->sample_positions_needed)
3119 primary->sample_positions_needed = true;
3120
3121 primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs);
3122
3123
3124 		/* When the secondary command buffer is compute-only, we don't
3125 * need to re-emit the current graphics pipeline.
3126 */
3127 if (secondary->state.emitted_pipeline) {
3128 primary->state.emitted_pipeline =
3129 secondary->state.emitted_pipeline;
3130 }
3131
3132 		/* When the secondary command buffer is graphics-only, we don't
3133 * need to re-emit the current compute pipeline.
3134 */
3135 if (secondary->state.emitted_compute_pipeline) {
3136 primary->state.emitted_compute_pipeline =
3137 secondary->state.emitted_compute_pipeline;
3138 }
3139
3140 /* Only re-emit the draw packets when needed. */
3141 if (secondary->state.last_primitive_reset_en != -1) {
3142 primary->state.last_primitive_reset_en =
3143 secondary->state.last_primitive_reset_en;
3144 }
3145
3146 if (secondary->state.last_primitive_reset_index) {
3147 primary->state.last_primitive_reset_index =
3148 secondary->state.last_primitive_reset_index;
3149 }
3150
3151 if (secondary->state.last_ia_multi_vgt_param) {
3152 primary->state.last_ia_multi_vgt_param =
3153 secondary->state.last_ia_multi_vgt_param;
3154 }
3155
3156 primary->state.last_first_instance = secondary->state.last_first_instance;
3157 primary->state.last_num_instances = secondary->state.last_num_instances;
3158 primary->state.last_vertex_offset = secondary->state.last_vertex_offset;
3159
3160 if (secondary->state.last_index_type != -1) {
3161 primary->state.last_index_type =
3162 secondary->state.last_index_type;
3163 }
3164 }
3165
3166 	/* After executing commands from secondary buffers, we have to mark
3167 	 * some states as dirty.
3168 */
3169 primary->state.dirty |= RADV_CMD_DIRTY_PIPELINE |
3170 RADV_CMD_DIRTY_INDEX_BUFFER |
3171 RADV_CMD_DIRTY_DYNAMIC_ALL;
3172 radv_mark_descriptor_sets_dirty(primary, VK_PIPELINE_BIND_POINT_GRAPHICS);
3173 radv_mark_descriptor_sets_dirty(primary, VK_PIPELINE_BIND_POINT_COMPUTE);
3174 }
3175
3176 VkResult radv_CreateCommandPool(
3177 VkDevice _device,
3178 const VkCommandPoolCreateInfo* pCreateInfo,
3179 const VkAllocationCallbacks* pAllocator,
3180 VkCommandPool* pCmdPool)
3181 {
3182 RADV_FROM_HANDLE(radv_device, device, _device);
3183 struct radv_cmd_pool *pool;
3184
3185 pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
3186 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3187 if (pool == NULL)
3188 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
3189
3190 if (pAllocator)
3191 pool->alloc = *pAllocator;
3192 else
3193 pool->alloc = device->alloc;
3194
3195 list_inithead(&pool->cmd_buffers);
3196 list_inithead(&pool->free_cmd_buffers);
3197
3198 pool->queue_family_index = pCreateInfo->queueFamilyIndex;
3199
3200 *pCmdPool = radv_cmd_pool_to_handle(pool);
3201
3202 return VK_SUCCESS;
3203
3204 }
3205
3206 void radv_DestroyCommandPool(
3207 VkDevice _device,
3208 VkCommandPool commandPool,
3209 const VkAllocationCallbacks* pAllocator)
3210 {
3211 RADV_FROM_HANDLE(radv_device, device, _device);
3212 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
3213
3214 if (!pool)
3215 return;
3216
3217 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
3218 &pool->cmd_buffers, pool_link) {
3219 radv_cmd_buffer_destroy(cmd_buffer);
3220 }
3221
3222 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
3223 &pool->free_cmd_buffers, pool_link) {
3224 radv_cmd_buffer_destroy(cmd_buffer);
3225 }
3226
3227 vk_free2(&device->alloc, pAllocator, pool);
3228 }
3229
3230 VkResult radv_ResetCommandPool(
3231 VkDevice device,
3232 VkCommandPool commandPool,
3233 VkCommandPoolResetFlags flags)
3234 {
3235 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
3236 VkResult result;
3237
3238 list_for_each_entry(struct radv_cmd_buffer, cmd_buffer,
3239 &pool->cmd_buffers, pool_link) {
3240 result = radv_reset_cmd_buffer(cmd_buffer);
3241 if (result != VK_SUCCESS)
3242 return result;
3243 }
3244
3245 return VK_SUCCESS;
3246 }
3247
3248 void radv_TrimCommandPool(
3249 VkDevice device,
3250 VkCommandPool commandPool,
3251 VkCommandPoolTrimFlagsKHR flags)
3252 {
3253 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
3254
3255 if (!pool)
3256 return;
3257
3258 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
3259 &pool->free_cmd_buffers, pool_link) {
3260 radv_cmd_buffer_destroy(cmd_buffer);
3261 }
3262 }
3263
3264 void radv_CmdBeginRenderPass(
3265 VkCommandBuffer commandBuffer,
3266 const VkRenderPassBeginInfo* pRenderPassBegin,
3267 VkSubpassContents contents)
3268 {
3269 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3270 RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass);
3271 RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
3272
3273 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
3274 cmd_buffer->cs, 2048);
3275 MAYBE_UNUSED VkResult result;
3276
3277 cmd_buffer->state.framebuffer = framebuffer;
3278 cmd_buffer->state.pass = pass;
3279 cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
3280
3281 result = radv_cmd_state_setup_attachments(cmd_buffer, pass, pRenderPassBegin);
3282 if (result != VK_SUCCESS)
3283 return;
3284
3285 radv_cmd_buffer_set_subpass(cmd_buffer, pass->subpasses, true);
3286 assert(cmd_buffer->cs->cdw <= cdw_max);
3287
3288 radv_cmd_buffer_clear_subpass(cmd_buffer);
3289 }
3290
3291 void radv_CmdBeginRenderPass2KHR(
3292 VkCommandBuffer commandBuffer,
3293 const VkRenderPassBeginInfo* pRenderPassBeginInfo,
3294 const VkSubpassBeginInfoKHR* pSubpassBeginInfo)
3295 {
3296 radv_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
3297 pSubpassBeginInfo->contents);
3298 }
3299
3300 void radv_CmdNextSubpass(
3301 VkCommandBuffer commandBuffer,
3302 VkSubpassContents contents)
3303 {
3304 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3305
3306 radv_cmd_buffer_resolve_subpass(cmd_buffer);
3307
3308 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs,
3309 2048);
3310
3311 radv_cmd_buffer_set_subpass(cmd_buffer, cmd_buffer->state.subpass + 1, true);
3312 radv_cmd_buffer_clear_subpass(cmd_buffer);
3313 }
3314
3315 void radv_CmdNextSubpass2KHR(
3316 VkCommandBuffer commandBuffer,
3317 const VkSubpassBeginInfoKHR* pSubpassBeginInfo,
3318 const VkSubpassEndInfoKHR* pSubpassEndInfo)
3319 {
3320 radv_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
3321 }
3322
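/* For multiview, write the current view index into the view-index user
 * SGPR of every active shader stage (and into the GS copy shader, which
 * runs on the VS user-data registers), so that the draw below can be
 * replayed once per bit set in the subpass view mask.
 */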
3323 static void radv_emit_view_index(struct radv_cmd_buffer *cmd_buffer, unsigned index)
3324 {
3325 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
3326 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
3327 if (!radv_get_shader(pipeline, stage))
3328 continue;
3329
3330 struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, AC_UD_VIEW_INDEX);
3331 if (loc->sgpr_idx == -1)
3332 continue;
3333 uint32_t base_reg = pipeline->user_data_0[stage];
3334 radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
3335
3336 }
3337 if (pipeline->gs_copy_shader) {
3338 struct radv_userdata_info *loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_VIEW_INDEX];
3339 if (loc->sgpr_idx != -1) {
3340 uint32_t base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
3341 radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
3342 }
3343 }
3344 }
3345
3346 static void
3347 radv_cs_emit_draw_packet(struct radv_cmd_buffer *cmd_buffer,
3348 uint32_t vertex_count,
3349 bool use_opaque)
3350 {
3351 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, cmd_buffer->state.predicating));
3352 radeon_emit(cmd_buffer->cs, vertex_count);
3353 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
3354 S_0287F0_USE_OPAQUE(use_opaque));
3355 }
3356
3357 static void
3358 radv_cs_emit_draw_indexed_packet(struct radv_cmd_buffer *cmd_buffer,
3359 uint64_t index_va,
3360 uint32_t index_count)
3361 {
3362 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, cmd_buffer->state.predicating));
3363 radeon_emit(cmd_buffer->cs, cmd_buffer->state.max_index_count);
3364 radeon_emit(cmd_buffer->cs, index_va);
3365 radeon_emit(cmd_buffer->cs, index_va >> 32);
3366 radeon_emit(cmd_buffer->cs, index_count);
3367 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_DMA);
3368 }
3369
3370 static void
3371 radv_cs_emit_indirect_draw_packet(struct radv_cmd_buffer *cmd_buffer,
3372 bool indexed,
3373 uint32_t draw_count,
3374 uint64_t count_va,
3375 uint32_t stride)
3376 {
3377 struct radeon_cmdbuf *cs = cmd_buffer->cs;
3378 unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
3379 : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
3380 bool draw_id_enable = radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.needs_draw_id;
3381 uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr;
3382 bool predicating = cmd_buffer->state.predicating;
3383 assert(base_reg);
3384
3385 	/* Reset the tracked vertex/instance draw state: for indirect draws the
	 * CP programs the vertex-offset/first-instance user SGPRs (and the
	 * instance count) from the indirect buffer, so the values cached here
	 * become stale. */
3386 cmd_buffer->state.last_first_instance = -1;
3387 cmd_buffer->state.last_num_instances = -1;
3388 cmd_buffer->state.last_vertex_offset = -1;
3389
3390 if (draw_count == 1 && !count_va && !draw_id_enable) {
3391 radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT :
3392 PKT3_DRAW_INDIRECT, 3, predicating));
3393 radeon_emit(cs, 0);
3394 radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
3395 radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
3396 radeon_emit(cs, di_src_sel);
3397 } else {
3398 radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
3399 PKT3_DRAW_INDIRECT_MULTI,
3400 8, predicating));
3401 radeon_emit(cs, 0);
3402 radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
3403 radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
3404 radeon_emit(cs, (((base_reg + 8) - SI_SH_REG_OFFSET) >> 2) |
3405 S_2C3_DRAW_INDEX_ENABLE(draw_id_enable) |
3406 S_2C3_COUNT_INDIRECT_ENABLE(!!count_va));
3407 radeon_emit(cs, draw_count); /* count */
3408 radeon_emit(cs, count_va); /* count_addr */
3409 radeon_emit(cs, count_va >> 32);
3410 radeon_emit(cs, stride); /* stride */
3411 radeon_emit(cs, di_src_sel);
3412 }
3413 }
3414
3415 struct radv_draw_info {
3416 /**
3417 * Number of vertices.
3418 */
3419 uint32_t count;
3420
3421 /**
3422 * Index of the first vertex.
3423 */
3424 int32_t vertex_offset;
3425
3426 /**
3427 * First instance id.
3428 */
3429 uint32_t first_instance;
3430
3431 /**
3432 * Number of instances.
3433 */
3434 uint32_t instance_count;
3435
3436 /**
3437 * First index (indexed draws only).
3438 */
3439 uint32_t first_index;
3440
3441 /**
3442 * Whether it's an indexed draw.
3443 */
3444 bool indexed;
3445
3446 /**
3447 * Indirect draw parameters resource.
3448 */
3449 struct radv_buffer *indirect;
3450 uint64_t indirect_offset;
3451 uint32_t stride;
3452
3453 /**
3454 * Draw count parameters resource.
3455 */
3456 struct radv_buffer *count_buffer;
3457 uint64_t count_buffer_offset;
3458
3459 /**
3460 * Stream output parameters resource.
3461 */
3462 struct radv_buffer *strmout_buffer;
3463 uint64_t strmout_buffer_offset;
3464 };
3465
3466 static void
3467 radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer,
3468 const struct radv_draw_info *info)
3469 {
3470 struct radv_cmd_state *state = &cmd_buffer->state;
3471 struct radeon_winsys *ws = cmd_buffer->device->ws;
3472 struct radeon_cmdbuf *cs = cmd_buffer->cs;
3473
3474 if (info->strmout_buffer) {
3475 uint64_t va = radv_buffer_get_va(info->strmout_buffer->bo);
3476
3477 va += info->strmout_buffer->offset +
3478 info->strmout_buffer_offset;
3479
3480 radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
3481 info->stride);
3482
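		/* Copy the BufferFilledSize counter from memory into
		 * VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE. Together with the
		 * vertex stride programmed above and USE_OPAQUE on the draw
		 * packet, the VGT derives the vertex count from the number of
		 * bytes written by the previous transform feedback pass.
		 */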
3483 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
3484 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
3485 COPY_DATA_DST_SEL(COPY_DATA_REG) |
3486 COPY_DATA_WR_CONFIRM);
3487 radeon_emit(cs, va);
3488 radeon_emit(cs, va >> 32);
3489 radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
3490 radeon_emit(cs, 0); /* unused */
3491
3492 radv_cs_add_buffer(ws, cs, info->strmout_buffer->bo);
3493 }
3494
3495 if (info->indirect) {
3496 uint64_t va = radv_buffer_get_va(info->indirect->bo);
3497 uint64_t count_va = 0;
3498
3499 va += info->indirect->offset + info->indirect_offset;
3500
3501 radv_cs_add_buffer(ws, cs, info->indirect->bo);
3502
3503 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
3504 radeon_emit(cs, 1);
3505 radeon_emit(cs, va);
3506 radeon_emit(cs, va >> 32);
3507
3508 if (info->count_buffer) {
3509 count_va = radv_buffer_get_va(info->count_buffer->bo);
3510 count_va += info->count_buffer->offset +
3511 info->count_buffer_offset;
3512
3513 radv_cs_add_buffer(ws, cs, info->count_buffer->bo);
3514 }
3515
3516 if (!state->subpass->view_mask) {
3517 radv_cs_emit_indirect_draw_packet(cmd_buffer,
3518 info->indexed,
3519 info->count,
3520 count_va,
3521 info->stride);
3522 } else {
3523 unsigned i;
3524 for_each_bit(i, state->subpass->view_mask) {
3525 radv_emit_view_index(cmd_buffer, i);
3526
3527 radv_cs_emit_indirect_draw_packet(cmd_buffer,
3528 info->indexed,
3529 info->count,
3530 count_va,
3531 info->stride);
3532 }
3533 }
3534 } else {
3535 assert(state->pipeline->graphics.vtx_base_sgpr);
3536
3537 if (info->vertex_offset != state->last_vertex_offset ||
3538 info->first_instance != state->last_first_instance) {
3539 radeon_set_sh_reg_seq(cs, state->pipeline->graphics.vtx_base_sgpr,
3540 state->pipeline->graphics.vtx_emit_num);
3541
3542 radeon_emit(cs, info->vertex_offset);
3543 radeon_emit(cs, info->first_instance);
3544 if (state->pipeline->graphics.vtx_emit_num == 3)
3545 radeon_emit(cs, 0);
3546 state->last_first_instance = info->first_instance;
3547 state->last_vertex_offset = info->vertex_offset;
3548 }
3549
3550 if (state->last_num_instances != info->instance_count) {
3551 radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, false));
3552 radeon_emit(cs, info->instance_count);
3553 state->last_num_instances = info->instance_count;
3554 }
3555
3556 if (info->indexed) {
3557 int index_size = state->index_type ? 4 : 2;
3558 uint64_t index_va;
3559
3560 index_va = state->index_va;
3561 index_va += info->first_index * index_size;
3562
3563 if (!state->subpass->view_mask) {
3564 radv_cs_emit_draw_indexed_packet(cmd_buffer,
3565 index_va,
3566 info->count);
3567 } else {
3568 unsigned i;
3569 for_each_bit(i, state->subpass->view_mask) {
3570 radv_emit_view_index(cmd_buffer, i);
3571
3572 radv_cs_emit_draw_indexed_packet(cmd_buffer,
3573 index_va,
3574 info->count);
3575 }
3576 }
3577 } else {
3578 if (!state->subpass->view_mask) {
3579 radv_cs_emit_draw_packet(cmd_buffer,
3580 info->count,
3581 !!info->strmout_buffer);
3582 } else {
3583 unsigned i;
3584 for_each_bit(i, state->subpass->view_mask) {
3585 radv_emit_view_index(cmd_buffer, i);
3586
3587 radv_cs_emit_draw_packet(cmd_buffer,
3588 info->count,
3589 !!info->strmout_buffer);
3590 }
3591 }
3592 }
3593 }
3594 }
3595
3596 /*
3597  * Vega and Raven have a bug which triggers if there are multiple hardware
3598  * contexts active at the same time with different scissor register values.
3599 *
3600 * There are two possible workarounds:
3601 * 1) Wait for PS_PARTIAL_FLUSH every time the scissor is changed. That way
3602 * there is only ever 1 active set of scissor values at the same time.
3603 *
3604 * 2) Whenever the hardware switches contexts we have to set the scissor
3605 * registers again even if it is a noop. That way the new context gets
3606 * the correct scissor values.
3607 *
3608 * This implements option 2. radv_need_late_scissor_emission needs to
3609 * return true on affected HW if radv_emit_all_graphics_states sets
3610 * any context registers.
3611 */
3612 static bool radv_need_late_scissor_emission(struct radv_cmd_buffer *cmd_buffer,
3613 bool indexed_draw)
3614 {
3615 struct radv_cmd_state *state = &cmd_buffer->state;
3616
3617 if (!cmd_buffer->device->physical_device->has_scissor_bug)
3618 return false;
3619
3620 uint32_t used_states = cmd_buffer->state.pipeline->graphics.needed_dynamic_state | ~RADV_CMD_DIRTY_DYNAMIC_ALL;
3621
3622 /* Index, vertex and streamout buffers don't change context regs, and
3623 * pipeline is handled later.
3624 */
3625 used_states &= ~(RADV_CMD_DIRTY_INDEX_BUFFER |
3626 RADV_CMD_DIRTY_VERTEX_BUFFER |
3627 RADV_CMD_DIRTY_STREAMOUT_BUFFER |
3628 RADV_CMD_DIRTY_PIPELINE);
3629
3630 	/* Assume all state changes except the ones masked off above can imply context rolls. */
3631 if (cmd_buffer->state.dirty & used_states)
3632 return true;
3633
3634 if (cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline)
3635 return true;
3636
3637 if (indexed_draw && state->pipeline->graphics.prim_restart_enable &&
3638 (state->index_type ? 0xffffffffu : 0xffffu) != state->last_primitive_reset_index)
3639 return true;
3640
3641 return false;
3642 }
3643
3644 static void
3645 radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer,
3646 const struct radv_draw_info *info)
3647 {
3648 bool late_scissor_emission = radv_need_late_scissor_emission(cmd_buffer, info->indexed);
3649
3650 if ((cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER) ||
3651 cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline)
3652 radv_emit_rbplus_state(cmd_buffer);
3653
3654 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE)
3655 radv_emit_graphics_pipeline(cmd_buffer);
3656
3657 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER)
3658 radv_emit_framebuffer_state(cmd_buffer);
3659
3660 if (info->indexed) {
3661 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_INDEX_BUFFER)
3662 radv_emit_index_buffer(cmd_buffer);
3663 } else {
3664 /* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
3665 * so the state must be re-emitted before the next indexed
3666 * draw.
3667 */
3668 if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
3669 cmd_buffer->state.last_index_type = -1;
3670 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
3671 }
3672 }
3673
3674 radv_cmd_buffer_flush_dynamic_state(cmd_buffer);
3675
3676 radv_emit_draw_registers(cmd_buffer, info->indexed,
3677 info->instance_count > 1, info->indirect,
3678 info->indirect ? 0 : info->count);
3679
3680 if (late_scissor_emission)
3681 radv_emit_scissor(cmd_buffer);
3682 }
3683
3684 static void
3685 radv_draw(struct radv_cmd_buffer *cmd_buffer,
3686 const struct radv_draw_info *info)
3687 {
3688 struct radeon_info *rad_info =
3689 &cmd_buffer->device->physical_device->rad_info;
3690 bool has_prefetch =
3691 cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
3692 bool pipeline_is_dirty =
3693 (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
3694 cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline;
3695
3696 MAYBE_UNUSED unsigned cdw_max =
3697 radeon_check_space(cmd_buffer->device->ws,
3698 cmd_buffer->cs, 4096);
3699
3700 /* Use optimal packet order based on whether we need to sync the
3701 * pipeline.
3702 */
3703 if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
3704 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
3705 RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
3706 RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) {
3707 /* If we have to wait for idle, set all states first, so that
3708 * all SET packets are processed in parallel with previous draw
3709 * calls. Then upload descriptors, set shader pointers, and
3710 * draw, and prefetch at the end. This ensures that the time
3711 * the CUs are idle is very short. (there are only SET_SH
3712 * packets between the wait and the draw)
3713 */
3714 radv_emit_all_graphics_states(cmd_buffer, info);
3715 si_emit_cache_flush(cmd_buffer);
3716 /* <-- CUs are idle here --> */
3717
3718 radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty);
3719
3720 radv_emit_draw_packets(cmd_buffer, info);
3721 /* <-- CUs are busy here --> */
3722
3723 /* Start prefetches after the draw has been started. Both will
3724 * run in parallel, but starting the draw first is more
3725 * important.
3726 */
3727 if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
3728 radv_emit_prefetch_L2(cmd_buffer,
3729 cmd_buffer->state.pipeline, false);
3730 }
3731 } else {
3732 /* If we don't wait for idle, start prefetches first, then set
3733 * states, and draw at the end.
3734 */
3735 si_emit_cache_flush(cmd_buffer);
3736
3737 if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
3738 /* Only prefetch the vertex shader and VBO descriptors
3739 * in order to start the draw as soon as possible.
3740 */
3741 radv_emit_prefetch_L2(cmd_buffer,
3742 cmd_buffer->state.pipeline, true);
3743 }
3744
3745 radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty);
3746
3747 radv_emit_all_graphics_states(cmd_buffer, info);
3748 radv_emit_draw_packets(cmd_buffer, info);
3749
3750 /* Prefetch the remaining shaders after the draw has been
3751 * started.
3752 */
3753 if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
3754 radv_emit_prefetch_L2(cmd_buffer,
3755 cmd_buffer->state.pipeline, false);
3756 }
3757 }
3758
3759 /* Workaround for a VGT hang when streamout is enabled.
3760 * It must be done after drawing.
3761 */
3762 if (cmd_buffer->state.streamout.streamout_enabled &&
3763 (rad_info->family == CHIP_HAWAII ||
3764 rad_info->family == CHIP_TONGA ||
3765 rad_info->family == CHIP_FIJI)) {
3766 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_STREAMOUT_SYNC;
3767 }
3768
3769 assert(cmd_buffer->cs->cdw <= cdw_max);
3770 radv_cmd_buffer_after_draw(cmd_buffer, RADV_CMD_FLAG_PS_PARTIAL_FLUSH);
3771 }
3772
3773 void radv_CmdDraw(
3774 VkCommandBuffer commandBuffer,
3775 uint32_t vertexCount,
3776 uint32_t instanceCount,
3777 uint32_t firstVertex,
3778 uint32_t firstInstance)
3779 {
3780 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3781 struct radv_draw_info info = {};
3782
3783 info.count = vertexCount;
3784 info.instance_count = instanceCount;
3785 info.first_instance = firstInstance;
3786 info.vertex_offset = firstVertex;
3787
3788 radv_draw(cmd_buffer, &info);
3789 }
3790
3791 void radv_CmdDrawIndexed(
3792 VkCommandBuffer commandBuffer,
3793 uint32_t indexCount,
3794 uint32_t instanceCount,
3795 uint32_t firstIndex,
3796 int32_t vertexOffset,
3797 uint32_t firstInstance)
3798 {
3799 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3800 struct radv_draw_info info = {};
3801
3802 info.indexed = true;
3803 info.count = indexCount;
3804 info.instance_count = instanceCount;
3805 info.first_index = firstIndex;
3806 info.vertex_offset = vertexOffset;
3807 info.first_instance = firstInstance;
3808
3809 radv_draw(cmd_buffer, &info);
3810 }
3811
3812 void radv_CmdDrawIndirect(
3813 VkCommandBuffer commandBuffer,
3814 VkBuffer _buffer,
3815 VkDeviceSize offset,
3816 uint32_t drawCount,
3817 uint32_t stride)
3818 {
3819 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3820 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3821 struct radv_draw_info info = {};
3822
3823 info.count = drawCount;
3824 info.indirect = buffer;
3825 info.indirect_offset = offset;
3826 info.stride = stride;
3827
3828 radv_draw(cmd_buffer, &info);
3829 }
3830
3831 void radv_CmdDrawIndexedIndirect(
3832 VkCommandBuffer commandBuffer,
3833 VkBuffer _buffer,
3834 VkDeviceSize offset,
3835 uint32_t drawCount,
3836 uint32_t stride)
3837 {
3838 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3839 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3840 struct radv_draw_info info = {};
3841
3842 info.indexed = true;
3843 info.count = drawCount;
3844 info.indirect = buffer;
3845 info.indirect_offset = offset;
3846 info.stride = stride;
3847
3848 radv_draw(cmd_buffer, &info);
3849 }
3850
3851 void radv_CmdDrawIndirectCountAMD(
3852 VkCommandBuffer commandBuffer,
3853 VkBuffer _buffer,
3854 VkDeviceSize offset,
3855 VkBuffer _countBuffer,
3856 VkDeviceSize countBufferOffset,
3857 uint32_t maxDrawCount,
3858 uint32_t stride)
3859 {
3860 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3861 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3862 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
3863 struct radv_draw_info info = {};
3864
3865 info.count = maxDrawCount;
3866 info.indirect = buffer;
3867 info.indirect_offset = offset;
3868 info.count_buffer = count_buffer;
3869 info.count_buffer_offset = countBufferOffset;
3870 info.stride = stride;
3871
3872 radv_draw(cmd_buffer, &info);
3873 }
3874
3875 void radv_CmdDrawIndexedIndirectCountAMD(
3876 VkCommandBuffer commandBuffer,
3877 VkBuffer _buffer,
3878 VkDeviceSize offset,
3879 VkBuffer _countBuffer,
3880 VkDeviceSize countBufferOffset,
3881 uint32_t maxDrawCount,
3882 uint32_t stride)
3883 {
3884 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3885 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3886 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
3887 struct radv_draw_info info = {};
3888
3889 info.indexed = true;
3890 info.count = maxDrawCount;
3891 info.indirect = buffer;
3892 info.indirect_offset = offset;
3893 info.count_buffer = count_buffer;
3894 info.count_buffer_offset = countBufferOffset;
3895 info.stride = stride;
3896
3897 radv_draw(cmd_buffer, &info);
3898 }
3899
3900 void radv_CmdDrawIndirectCountKHR(
3901 VkCommandBuffer commandBuffer,
3902 VkBuffer _buffer,
3903 VkDeviceSize offset,
3904 VkBuffer _countBuffer,
3905 VkDeviceSize countBufferOffset,
3906 uint32_t maxDrawCount,
3907 uint32_t stride)
3908 {
3909 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3910 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3911 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
3912 struct radv_draw_info info = {};
3913
3914 info.count = maxDrawCount;
3915 info.indirect = buffer;
3916 info.indirect_offset = offset;
3917 info.count_buffer = count_buffer;
3918 info.count_buffer_offset = countBufferOffset;
3919 info.stride = stride;
3920
3921 radv_draw(cmd_buffer, &info);
3922 }
3923
3924 void radv_CmdDrawIndexedIndirectCountKHR(
3925 VkCommandBuffer commandBuffer,
3926 VkBuffer _buffer,
3927 VkDeviceSize offset,
3928 VkBuffer _countBuffer,
3929 VkDeviceSize countBufferOffset,
3930 uint32_t maxDrawCount,
3931 uint32_t stride)
3932 {
3933 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3934 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3935 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
3936 struct radv_draw_info info = {};
3937
3938 info.indexed = true;
3939 info.count = maxDrawCount;
3940 info.indirect = buffer;
3941 info.indirect_offset = offset;
3942 info.count_buffer = count_buffer;
3943 info.count_buffer_offset = countBufferOffset;
3944 info.stride = stride;
3945
3946 radv_draw(cmd_buffer, &info);
3947 }
3948
3949 struct radv_dispatch_info {
3950 /**
3951 	 * The dimensions of the dispatch grid, in block (workgroup) units.
3952 */
3953 uint32_t blocks[3];
3954
3955 /**
3956 	 * A starting offset for the grid. Even if unaligned is set, the
3957 	 * offsets must still be aligned to the block size.
3958 */
3959 uint32_t offsets[3];
3960 /**
3961 * Whether it's an unaligned compute dispatch.
3962 */
3963 bool unaligned;
3964
3965 /**
3966 * Indirect compute parameters resource.
3967 */
3968 struct radv_buffer *indirect;
3969 uint64_t indirect_offset;
3970 };
3971
3972 static void
3973 radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer,
3974 const struct radv_dispatch_info *info)
3975 {
3976 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
3977 struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
3978 unsigned dispatch_initiator = cmd_buffer->device->dispatch_initiator;
3979 struct radeon_winsys *ws = cmd_buffer->device->ws;
3980 bool predicating = cmd_buffer->state.predicating;
3981 struct radeon_cmdbuf *cs = cmd_buffer->cs;
3982 struct radv_userdata_info *loc;
3983
3984 loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE,
3985 AC_UD_CS_GRID_SIZE);
3986
3987 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(ws, cs, 25);
3988
3989 if (info->indirect) {
3990 uint64_t va = radv_buffer_get_va(info->indirect->bo);
3991
3992 va += info->indirect->offset + info->indirect_offset;
3993
3994 radv_cs_add_buffer(ws, cs, info->indirect->bo);
3995
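		/* If the compute shader reads the grid size (e.g. for
		 * gl_NumWorkGroups), copy the three dispatch dimensions from
		 * the indirect buffer into the grid-size user SGPRs so the
		 * shader sees the same values the CP uses below.
		 */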
3996 if (loc->sgpr_idx != -1) {
3997 for (unsigned i = 0; i < 3; ++i) {
3998 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
3999 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
4000 COPY_DATA_DST_SEL(COPY_DATA_REG));
4001 radeon_emit(cs, (va + 4 * i));
4002 radeon_emit(cs, (va + 4 * i) >> 32);
4003 radeon_emit(cs, ((R_00B900_COMPUTE_USER_DATA_0
4004 + loc->sgpr_idx * 4) >> 2) + i);
4005 radeon_emit(cs, 0);
4006 }
4007 }
4008
4009 if (radv_cmd_buffer_uses_mec(cmd_buffer)) {
4010 radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 2, predicating) |
4011 PKT3_SHADER_TYPE_S(1));
4012 radeon_emit(cs, va);
4013 radeon_emit(cs, va >> 32);
4014 radeon_emit(cs, dispatch_initiator);
4015 } else {
4016 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
4017 PKT3_SHADER_TYPE_S(1));
4018 radeon_emit(cs, 1);
4019 radeon_emit(cs, va);
4020 radeon_emit(cs, va >> 32);
4021
4022 radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, predicating) |
4023 PKT3_SHADER_TYPE_S(1));
4024 radeon_emit(cs, 0);
4025 radeon_emit(cs, dispatch_initiator);
4026 }
4027 } else {
4028 unsigned blocks[3] = { info->blocks[0], info->blocks[1], info->blocks[2] };
4029 unsigned offsets[3] = { info->offsets[0], info->offsets[1], info->offsets[2] };
4030
4031 if (info->unaligned) {
4032 unsigned *cs_block_size = compute_shader->info.cs.block_size;
4033 unsigned remainder[3];
4034
4035 /* If aligned, these should be an entire block size,
4036 * not 0.
4037 */
4038 remainder[0] = blocks[0] + cs_block_size[0] -
4039 align_u32_npot(blocks[0], cs_block_size[0]);
4040 remainder[1] = blocks[1] + cs_block_size[1] -
4041 align_u32_npot(blocks[1], cs_block_size[1]);
4042 remainder[2] = blocks[2] + cs_block_size[2] -
4043 align_u32_npot(blocks[2], cs_block_size[2]);
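			/* Example with hypothetical numbers: blocks = 10 threads,
			 * block size = 8: align_u32_npot(10, 8) = 16, so
			 * remainder = 10 + 8 - 16 = 2 threads in the last,
			 * partial group. If blocks = 16 (already aligned),
			 * remainder = 16 + 8 - 16 = 8, i.e. a full block rather
			 * than 0, as required by NUM_THREAD_PARTIAL below.
			 */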
4044
4045 blocks[0] = round_up_u32(blocks[0], cs_block_size[0]);
4046 blocks[1] = round_up_u32(blocks[1], cs_block_size[1]);
4047 blocks[2] = round_up_u32(blocks[2], cs_block_size[2]);
4048
4049 for(unsigned i = 0; i < 3; ++i) {
4050 assert(offsets[i] % cs_block_size[i] == 0);
4051 offsets[i] /= cs_block_size[i];
4052 }
4053
4054 radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
4055 radeon_emit(cs,
4056 S_00B81C_NUM_THREAD_FULL(cs_block_size[0]) |
4057 S_00B81C_NUM_THREAD_PARTIAL(remainder[0]));
4058 radeon_emit(cs,
4059 S_00B81C_NUM_THREAD_FULL(cs_block_size[1]) |
4060 S_00B81C_NUM_THREAD_PARTIAL(remainder[1]));
4061 radeon_emit(cs,
4062 S_00B81C_NUM_THREAD_FULL(cs_block_size[2]) |
4063 S_00B81C_NUM_THREAD_PARTIAL(remainder[2]));
4064
4065 dispatch_initiator |= S_00B800_PARTIAL_TG_EN(1);
4066 }
4067
4068 if (loc->sgpr_idx != -1) {
4069 assert(!loc->indirect);
4070 assert(loc->num_sgprs == 3);
4071
4072 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
4073 loc->sgpr_idx * 4, 3);
4074 radeon_emit(cs, blocks[0]);
4075 radeon_emit(cs, blocks[1]);
4076 radeon_emit(cs, blocks[2]);
4077 }
4078
4079 if (offsets[0] || offsets[1] || offsets[2]) {
4080 radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
4081 radeon_emit(cs, offsets[0]);
4082 radeon_emit(cs, offsets[1]);
4083 radeon_emit(cs, offsets[2]);
4084
4085 /* The blocks in the packet are not counts but end values. */
4086 for (unsigned i = 0; i < 3; ++i)
4087 blocks[i] += offsets[i];
4088 } else {
4089 dispatch_initiator |= S_00B800_FORCE_START_AT_000(1);
4090 }
4091
4092 radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, predicating) |
4093 PKT3_SHADER_TYPE_S(1));
4094 radeon_emit(cs, blocks[0]);
4095 radeon_emit(cs, blocks[1]);
4096 radeon_emit(cs, blocks[2]);
4097 radeon_emit(cs, dispatch_initiator);
4098 }
4099
4100 assert(cmd_buffer->cs->cdw <= cdw_max);
4101 }
4102
4103 static void
4104 radv_upload_compute_shader_descriptors(struct radv_cmd_buffer *cmd_buffer)
4105 {
4106 radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
4107 radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
4108 }
4109
4110 static void
4111 radv_dispatch(struct radv_cmd_buffer *cmd_buffer,
4112 const struct radv_dispatch_info *info)
4113 {
4114 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
4115 bool has_prefetch =
4116 cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
4117 bool pipeline_is_dirty = pipeline &&
4118 pipeline != cmd_buffer->state.emitted_compute_pipeline;
4119
4120 if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
4121 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
4122 RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
4123 RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) {
4124 /* If we have to wait for idle, set all states first, so that
4125 * all SET packets are processed in parallel with previous draw
4126 * calls. Then upload descriptors, set shader pointers, and
4127 * dispatch, and prefetch at the end. This ensures that the
4128 * time the CUs are idle is very short. (there are only SET_SH
4129 * packets between the wait and the draw)
4130 */
4131 radv_emit_compute_pipeline(cmd_buffer);
4132 si_emit_cache_flush(cmd_buffer);
4133 /* <-- CUs are idle here --> */
4134
4135 radv_upload_compute_shader_descriptors(cmd_buffer);
4136
4137 radv_emit_dispatch_packets(cmd_buffer, info);
4138 /* <-- CUs are busy here --> */
4139
4140 /* Start prefetches after the dispatch has been started. Both
4141 * will run in parallel, but starting the dispatch first is
4142 * more important.
4143 */
4144 if (has_prefetch && pipeline_is_dirty) {
4145 radv_emit_shader_prefetch(cmd_buffer,
4146 pipeline->shaders[MESA_SHADER_COMPUTE]);
4147 }
4148 } else {
4149 /* If we don't wait for idle, start prefetches first, then set
4150 * states, and dispatch at the end.
4151 */
4152 si_emit_cache_flush(cmd_buffer);
4153
4154 if (has_prefetch && pipeline_is_dirty) {
4155 radv_emit_shader_prefetch(cmd_buffer,
4156 pipeline->shaders[MESA_SHADER_COMPUTE]);
4157 }
4158
4159 radv_upload_compute_shader_descriptors(cmd_buffer);
4160
4161 radv_emit_compute_pipeline(cmd_buffer);
4162 radv_emit_dispatch_packets(cmd_buffer, info);
4163 }
4164
4165 radv_cmd_buffer_after_draw(cmd_buffer, RADV_CMD_FLAG_CS_PARTIAL_FLUSH);
4166 }
4167
4168 void radv_CmdDispatchBase(
4169 VkCommandBuffer commandBuffer,
4170 uint32_t base_x,
4171 uint32_t base_y,
4172 uint32_t base_z,
4173 uint32_t x,
4174 uint32_t y,
4175 uint32_t z)
4176 {
4177 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4178 struct radv_dispatch_info info = {};
4179
4180 info.blocks[0] = x;
4181 info.blocks[1] = y;
4182 info.blocks[2] = z;
4183
4184 info.offsets[0] = base_x;
4185 info.offsets[1] = base_y;
4186 info.offsets[2] = base_z;
4187 radv_dispatch(cmd_buffer, &info);
4188 }
4189
4190 void radv_CmdDispatch(
4191 VkCommandBuffer commandBuffer,
4192 uint32_t x,
4193 uint32_t y,
4194 uint32_t z)
4195 {
4196 radv_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
4197 }
4198
4199 void radv_CmdDispatchIndirect(
4200 VkCommandBuffer commandBuffer,
4201 VkBuffer _buffer,
4202 VkDeviceSize offset)
4203 {
4204 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4205 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
4206 struct radv_dispatch_info info = {};
4207
4208 info.indirect = buffer;
4209 info.indirect_offset = offset;
4210
4211 radv_dispatch(cmd_buffer, &info);
4212 }
4213
4214 void radv_unaligned_dispatch(
4215 struct radv_cmd_buffer *cmd_buffer,
4216 uint32_t x,
4217 uint32_t y,
4218 uint32_t z)
4219 {
4220 struct radv_dispatch_info info = {};
4221
4222 info.blocks[0] = x;
4223 info.blocks[1] = y;
4224 info.blocks[2] = z;
4225 info.unaligned = 1;
4226
4227 radv_dispatch(cmd_buffer, &info);
4228 }
4229
4230 void radv_CmdEndRenderPass(
4231 VkCommandBuffer commandBuffer)
4232 {
4233 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4234
4235 radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier);
4236
4237 radv_cmd_buffer_resolve_subpass(cmd_buffer);
4238
4239 for (unsigned i = 0; i < cmd_buffer->state.framebuffer->attachment_count; ++i) {
4240 VkImageLayout layout = cmd_buffer->state.pass->attachments[i].final_layout;
4241 radv_handle_subpass_image_transition(cmd_buffer,
4242 (struct radv_subpass_attachment){i, layout});
4243 }
4244
4245 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
4246
4247 cmd_buffer->state.pass = NULL;
4248 cmd_buffer->state.subpass = NULL;
4249 cmd_buffer->state.attachments = NULL;
4250 cmd_buffer->state.framebuffer = NULL;
4251 }
4252
4253 void radv_CmdEndRenderPass2KHR(
4254 VkCommandBuffer commandBuffer,
4255 const VkSubpassEndInfoKHR* pSubpassEndInfo)
4256 {
4257 radv_CmdEndRenderPass(commandBuffer);
4258 }
4259
4260 /*
4261 * For HTILE we have the following interesting clear words:
4262 * 0xfffff30f: Uncompressed, full depth range, for depth+stencil HTILE
4263 * 0xfffc000f: Uncompressed, full depth range, for depth only HTILE.
4264 * 0xfffffff0: Clear depth to 1.0
4265 * 0x00000000: Clear depth to 0.0
4266 */
4267 static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
4268 struct radv_image *image,
4269 const VkImageSubresourceRange *range,
4270 uint32_t clear_word)
4271 {
4272 assert(range->baseMipLevel == 0);
4273 	assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_MIP_LEVELS);
4274 unsigned layer_count = radv_get_layerCount(image, range);
4275 uint64_t size = image->surface.htile_slice_size * layer_count;
4276 VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT;
4277 uint64_t offset = image->offset + image->htile_offset +
4278 image->surface.htile_slice_size * range->baseArrayLayer;
4279 struct radv_cmd_state *state = &cmd_buffer->state;
4280 VkClearDepthStencilValue value = {};
4281
4282 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
4283 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
4284
4285 state->flush_bits |= radv_fill_buffer(cmd_buffer, image->bo, offset,
4286 size, clear_word);
4287
4288 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
4289
4290 if (vk_format_is_stencil(image->vk_format))
4291 aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
4292
4293 radv_set_ds_clear_metadata(cmd_buffer, image, value, aspects);
4294
4295 if (radv_image_is_tc_compat_htile(image)) {
4296 		/* Initialize the TC-compat metadata value to 0 because by
4297 		 * default DB_Z_INFO.RANGE_PRECISION is set to 1, and we only
4298 		 * have to conditionally update its value when performing
4299 * a fast depth clear.
4300 */
4301 radv_set_tc_compat_zrange_metadata(cmd_buffer, image, 0);
4302 }
4303 }
4304
4305 static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
4306 struct radv_image *image,
4307 VkImageLayout src_layout,
4308 VkImageLayout dst_layout,
4309 unsigned src_queue_mask,
4310 unsigned dst_queue_mask,
4311 const VkImageSubresourceRange *range)
4312 {
4313 if (!radv_image_has_htile(image))
4314 return;
4315
4316 if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
4317 radv_layout_has_htile(image, dst_layout, dst_queue_mask)) {
4318 /* TODO: merge with the clear if applicable */
4319 radv_initialize_htile(cmd_buffer, image, range, 0);
4320 } else if (!radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
4321 radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
4322 uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
4323 radv_initialize_htile(cmd_buffer, image, range, clear_value);
4324 } else if (radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
4325 !radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
4326 VkImageSubresourceRange local_range = *range;
4327 local_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
4328 local_range.baseMipLevel = 0;
4329 local_range.levelCount = 1;
4330
4331 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
4332 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
4333
4334 radv_decompress_depth_image_inplace(cmd_buffer, image, &local_range);
4335
4336 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
4337 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
4338 }
4339 }
4340
4341 static void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
4342 struct radv_image *image, uint32_t value)
4343 {
4344 struct radv_cmd_state *state = &cmd_buffer->state;
4345
4346 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
4347 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
4348
4349 state->flush_bits |= radv_clear_cmask(cmd_buffer, image, value);
4350
4351 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
4352 }
4353
4354 void radv_initialize_fmask(struct radv_cmd_buffer *cmd_buffer,
4355 struct radv_image *image)
4356 {
4357 struct radv_cmd_state *state = &cmd_buffer->state;
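	/* FMASK clear values that correspond to the fully expanded state,
	 * indexed by log2(sample count). In expanded mode every fragment slot
	 * points at its own sample (for 8x MSAA each nibble i of 0x76543210 is
	 * i, for 4x each 2-bit field of 0xE4 is its own index), which tells the
	 * hardware that the color data is not compressed.
	 */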
4358 static const uint32_t fmask_clear_values[4] = {
4359 0x00000000,
4360 0x02020202,
4361 0xE4E4E4E4,
4362 0x76543210
4363 };
4364 uint32_t log2_samples = util_logbase2(image->info.samples);
4365 uint32_t value = fmask_clear_values[log2_samples];
4366
4367 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
4368 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
4369
4370 state->flush_bits |= radv_clear_fmask(cmd_buffer, image, value);
4371
4372 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
4373 }
4374
4375 void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
4376 struct radv_image *image, uint32_t value)
4377 {
4378 struct radv_cmd_state *state = &cmd_buffer->state;
4379
4380 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
4381 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
4382
4383 state->flush_bits |= radv_clear_dcc(cmd_buffer, image, value);
4384
4385 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
4386 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
4387 }
4388
4389 /**
4390 * Initialize DCC/FMASK/CMASK metadata for a color image.
4391 */
4392 static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer,
4393 struct radv_image *image,
4394 VkImageLayout src_layout,
4395 VkImageLayout dst_layout,
4396 unsigned src_queue_mask,
4397 unsigned dst_queue_mask)
4398 {
4399 if (radv_image_has_cmask(image)) {
4400 uint32_t value = 0xffffffffu; /* Fully expanded mode. */
4401
4402 /* TODO: clarify this. */
4403 if (radv_image_has_fmask(image)) {
4404 value = 0xccccccccu;
4405 }
4406
4407 radv_initialise_cmask(cmd_buffer, image, value);
4408 }
4409
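	/* The FMASK metadata is undefined right after allocation; initialize it
	 * to the fully expanded pattern so that later accesses and FMASK
	 * decompression operate on well-defined metadata.
	 */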
4410 if (radv_image_has_fmask(image)) {
4411 radv_initialize_fmask(cmd_buffer, image);
4412 }
4413
4414 if (radv_image_has_dcc(image)) {
4415 uint32_t value = 0xffffffffu; /* Fully expanded mode. */
4416 bool need_decompress_pass = false;
4417
4418 if (radv_layout_dcc_compressed(image, dst_layout,
4419 dst_queue_mask)) {
4420 value = 0x20202020u;
4421 need_decompress_pass = true;
4422 }
4423
4424 radv_initialize_dcc(cmd_buffer, image, value);
4425
4426 radv_update_fce_metadata(cmd_buffer, image,
4427 need_decompress_pass);
4428 }
4429
4430 if (radv_image_has_cmask(image) || radv_image_has_dcc(image)) {
4431 uint32_t color_values[2] = {};
4432 radv_set_color_clear_metadata(cmd_buffer, image, color_values);
4433 }
4434 }
4435
4436 /**
4437 * Handle color image transitions for DCC/FMASK/CMASK.
4438 */
4439 static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffer,
4440 struct radv_image *image,
4441 VkImageLayout src_layout,
4442 VkImageLayout dst_layout,
4443 unsigned src_queue_mask,
4444 unsigned dst_queue_mask,
4445 const VkImageSubresourceRange *range)
4446 {
4447 if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
4448 radv_init_color_image_metadata(cmd_buffer, image,
4449 src_layout, dst_layout,
4450 src_queue_mask, dst_queue_mask);
4451 return;
4452 }
4453
4454 if (radv_image_has_dcc(image)) {
4455 if (src_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
4456 radv_initialize_dcc(cmd_buffer, image, 0xffffffffu);
4457 } else if (radv_layout_dcc_compressed(image, src_layout, src_queue_mask) &&
4458 !radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask)) {
4459 radv_decompress_dcc(cmd_buffer, image, range);
4460 } else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
4461 !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
4462 radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
4463 }
4464 } else if (radv_image_has_cmask(image) || radv_image_has_fmask(image)) {
4465 if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
4466 !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
4467 radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
4468 }
4469 }
4470 }
4471
4472 static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
4473 struct radv_image *image,
4474 VkImageLayout src_layout,
4475 VkImageLayout dst_layout,
4476 uint32_t src_family,
4477 uint32_t dst_family,
4478 const VkImageSubresourceRange *range)
4479 {
4480 if (image->exclusive && src_family != dst_family) {
4481 /* This is an acquire or a release operation and there will be
4482 * a corresponding release/acquire. Do the transition in the
4483 * most flexible queue. */
4484
4485 assert(src_family == cmd_buffer->queue_family_index ||
4486 dst_family == cmd_buffer->queue_family_index);
4487
4488 if (cmd_buffer->queue_family_index == RADV_QUEUE_TRANSFER)
4489 return;
4490
4491 if (cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
4492 (src_family == RADV_QUEUE_GENERAL ||
4493 dst_family == RADV_QUEUE_GENERAL))
4494 return;
4495 }
4496
4497 unsigned src_queue_mask =
4498 radv_image_queue_family_mask(image, src_family,
4499 cmd_buffer->queue_family_index);
4500 unsigned dst_queue_mask =
4501 radv_image_queue_family_mask(image, dst_family,
4502 cmd_buffer->queue_family_index);
4503
4504 if (vk_format_is_depth(image->vk_format)) {
4505 radv_handle_depth_image_transition(cmd_buffer, image,
4506 src_layout, dst_layout,
4507 src_queue_mask, dst_queue_mask,
4508 range);
4509 } else {
4510 radv_handle_color_image_transition(cmd_buffer, image,
4511 src_layout, dst_layout,
4512 src_queue_mask, dst_queue_mask,
4513 range);
4514 }
4515 }
4516
4517 struct radv_barrier_info {
4518 uint32_t eventCount;
4519 const VkEvent *pEvents;
4520 VkPipelineStageFlags srcStageMask;
4521 };
4522
4523 static void
4524 radv_barrier(struct radv_cmd_buffer *cmd_buffer,
4525 uint32_t memoryBarrierCount,
4526 const VkMemoryBarrier *pMemoryBarriers,
4527 uint32_t bufferMemoryBarrierCount,
4528 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
4529 uint32_t imageMemoryBarrierCount,
4530 const VkImageMemoryBarrier *pImageMemoryBarriers,
4531 const struct radv_barrier_info *info)
4532 {
4533 struct radeon_cmdbuf *cs = cmd_buffer->cs;
4534 enum radv_cmd_flush_bits src_flush_bits = 0;
4535 enum radv_cmd_flush_bits dst_flush_bits = 0;
4536
4537 for (unsigned i = 0; i < info->eventCount; ++i) {
4538 RADV_FROM_HANDLE(radv_event, event, info->pEvents[i]);
4539 uint64_t va = radv_buffer_get_va(event->bo);
4540
4541 radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
4542
4543 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
4544
4545 radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, va, 1, 0xffffffff);
4546 assert(cmd_buffer->cs->cdw <= cdw_max);
4547 }
4548
4549 for (uint32_t i = 0; i < memoryBarrierCount; i++) {
4550 src_flush_bits |= radv_src_access_flush(cmd_buffer, pMemoryBarriers[i].srcAccessMask,
4551 NULL);
4552 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pMemoryBarriers[i].dstAccessMask,
4553 NULL);
4554 }
4555
4556 for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
4557 src_flush_bits |= radv_src_access_flush(cmd_buffer, pBufferMemoryBarriers[i].srcAccessMask,
4558 NULL);
4559 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pBufferMemoryBarriers[i].dstAccessMask,
4560 NULL);
4561 }
4562
4563 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
4564 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
4565
4566 src_flush_bits |= radv_src_access_flush(cmd_buffer, pImageMemoryBarriers[i].srcAccessMask,
4567 image);
4568 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pImageMemoryBarriers[i].dstAccessMask,
4569 image);
4570 }
4571
4572 radv_stage_flush(cmd_buffer, info->srcStageMask);
4573 cmd_buffer->state.flush_bits |= src_flush_bits;
4574
4575 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
4576 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
4577 radv_handle_image_transition(cmd_buffer, image,
4578 pImageMemoryBarriers[i].oldLayout,
4579 pImageMemoryBarriers[i].newLayout,
4580 pImageMemoryBarriers[i].srcQueueFamilyIndex,
4581 pImageMemoryBarriers[i].dstQueueFamilyIndex,
4582 &pImageMemoryBarriers[i].subresourceRange);
4583 }
4584
4585 /* Make sure CP DMA is idle because the driver might have performed a
4586 * DMA operation for copying or filling buffers/images.
4587 */
4588 if (info->srcStageMask & (VK_PIPELINE_STAGE_TRANSFER_BIT |
4589 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT))
4590 si_cp_dma_wait_for_idle(cmd_buffer);
4591
4592 cmd_buffer->state.flush_bits |= dst_flush_bits;
4593 }
4594
4595 void radv_CmdPipelineBarrier(
4596 VkCommandBuffer commandBuffer,
4597 VkPipelineStageFlags srcStageMask,
4598 VkPipelineStageFlags destStageMask,
4599 VkBool32 byRegion,
4600 uint32_t memoryBarrierCount,
4601 const VkMemoryBarrier* pMemoryBarriers,
4602 uint32_t bufferMemoryBarrierCount,
4603 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
4604 uint32_t imageMemoryBarrierCount,
4605 const VkImageMemoryBarrier* pImageMemoryBarriers)
4606 {
4607 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4608 struct radv_barrier_info info;
4609
4610 info.eventCount = 0;
4611 info.pEvents = NULL;
4612 info.srcStageMask = srcStageMask;
4613
4614 radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
4615 bufferMemoryBarrierCount, pBufferMemoryBarriers,
4616 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
4617 }
4618
4619
4620 static void write_event(struct radv_cmd_buffer *cmd_buffer,
4621 struct radv_event *event,
4622 VkPipelineStageFlags stageMask,
4623 unsigned value)
4624 {
4625 struct radeon_cmdbuf *cs = cmd_buffer->cs;
4626 uint64_t va = radv_buffer_get_va(event->bo);
4627
4628 si_emit_cache_flush(cmd_buffer);
4629
4630 radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
4631
4632 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 18);
4633
4634 /* Flags that only require a top-of-pipe event. */
4635 VkPipelineStageFlags top_of_pipe_flags =
4636 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
4637
4638 /* Flags that only require a post-index-fetch event. */
4639 VkPipelineStageFlags post_index_fetch_flags =
4640 top_of_pipe_flags |
4641 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
4642 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
4643
4644 /* Make sure CP DMA is idle because the driver might have performed a
4645 * DMA operation for copying or filling buffers/images.
4646 */
4647 if (stageMask & (VK_PIPELINE_STAGE_TRANSFER_BIT |
4648 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT))
4649 si_cp_dma_wait_for_idle(cmd_buffer);
4650
4651 /* TODO: Emit EOS events for syncing PS/CS stages. */
4652
4653 if (!(stageMask & ~top_of_pipe_flags)) {
4654 /* Just need to sync the PFP engine. */
4655 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
4656 radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
4657 S_370_WR_CONFIRM(1) |
4658 S_370_ENGINE_SEL(V_370_PFP));
4659 radeon_emit(cs, va);
4660 radeon_emit(cs, va >> 32);
4661 radeon_emit(cs, value);
4662 } else if (!(stageMask & ~post_index_fetch_flags)) {
4663 /* Sync ME because PFP reads index and indirect buffers. */
4664 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
4665 radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
4666 S_370_WR_CONFIRM(1) |
4667 S_370_ENGINE_SEL(V_370_ME));
4668 radeon_emit(cs, va);
4669 radeon_emit(cs, va >> 32);
4670 radeon_emit(cs, value);
4671 } else {
4672 /* Otherwise, sync all prior GPU work using an EOP event. */
4673 si_cs_emit_write_event_eop(cs,
4674 cmd_buffer->device->physical_device->rad_info.chip_class,
4675 radv_cmd_buffer_uses_mec(cmd_buffer),
4676 V_028A90_BOTTOM_OF_PIPE_TS, 0,
4677 EOP_DATA_SEL_VALUE_32BIT, va, 2, value,
4678 cmd_buffer->gfx9_eop_bug_va);
4679 }
4680
4681 assert(cmd_buffer->cs->cdw <= cdw_max);
4682 }
4683
4684 void radv_CmdSetEvent(VkCommandBuffer commandBuffer,
4685 VkEvent _event,
4686 VkPipelineStageFlags stageMask)
4687 {
4688 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4689 RADV_FROM_HANDLE(radv_event, event, _event);
4690
4691 write_event(cmd_buffer, event, stageMask, 1);
4692 }
4693
4694 void radv_CmdResetEvent(VkCommandBuffer commandBuffer,
4695 VkEvent _event,
4696 VkPipelineStageFlags stageMask)
4697 {
4698 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4699 RADV_FROM_HANDLE(radv_event, event, _event);
4700
4701 write_event(cmd_buffer, event, stageMask, 0);
4702 }
4703
4704 void radv_CmdWaitEvents(VkCommandBuffer commandBuffer,
4705 uint32_t eventCount,
4706 const VkEvent* pEvents,
4707 VkPipelineStageFlags srcStageMask,
4708 VkPipelineStageFlags dstStageMask,
4709 uint32_t memoryBarrierCount,
4710 const VkMemoryBarrier* pMemoryBarriers,
4711 uint32_t bufferMemoryBarrierCount,
4712 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
4713 uint32_t imageMemoryBarrierCount,
4714 const VkImageMemoryBarrier* pImageMemoryBarriers)
4715 {
4716 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4717 struct radv_barrier_info info;
4718
4719 info.eventCount = eventCount;
4720 info.pEvents = pEvents;
4721 info.srcStageMask = 0;
4722
4723 radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
4724 bufferMemoryBarrierCount, pBufferMemoryBarriers,
4725 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
4726 }
4727
4728
4729 void radv_CmdSetDeviceMask(VkCommandBuffer commandBuffer,
4730 uint32_t deviceMask)
4731 {
4732 /* No-op */
4733 }
4734
4735 /* VK_EXT_conditional_rendering */
4736 void radv_CmdBeginConditionalRenderingEXT(
4737 VkCommandBuffer commandBuffer,
4738 const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin)
4739 {
4740 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4741 RADV_FROM_HANDLE(radv_buffer, buffer, pConditionalRenderingBegin->buffer);
4742 bool draw_visible = true;
4743 uint64_t va;
4744
4745 va = radv_buffer_get_va(buffer->bo) + pConditionalRenderingBegin->offset;
4746
4747 /* By default, if the 32-bit value at offset in buffer memory is zero,
4748 * then the rendering commands are discarded, otherwise they are
4749 * executed as normal. If the inverted flag is set, all commands are
4750 	 * discarded if the value is non-zero.
4751 */
4752 if (pConditionalRenderingBegin->flags &
4753 VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT) {
4754 draw_visible = false;
4755 }
4756
4757 /* Enable predication for this command buffer. */
4758 si_emit_set_predication_state(cmd_buffer, draw_visible, va);
4759 cmd_buffer->state.predicating = true;
4760
4761 /* Store conditional rendering user info. */
4762 cmd_buffer->state.predication_type = draw_visible;
4763 cmd_buffer->state.predication_va = va;
4764 }
4765
4766 void radv_CmdEndConditionalRenderingEXT(
4767 VkCommandBuffer commandBuffer)
4768 {
4769 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4770
4771 /* Disable predication for this command buffer. */
4772 si_emit_set_predication_state(cmd_buffer, false, 0);
4773 cmd_buffer->state.predicating = false;
4774
4775 /* Reset conditional rendering user info. */
4776 cmd_buffer->state.predication_type = -1;
4777 cmd_buffer->state.predication_va = 0;
4778 }
4779
4780 /* VK_EXT_transform_feedback */
4781 void radv_CmdBindTransformFeedbackBuffersEXT(
4782 VkCommandBuffer commandBuffer,
4783 uint32_t firstBinding,
4784 uint32_t bindingCount,
4785 const VkBuffer* pBuffers,
4786 const VkDeviceSize* pOffsets,
4787 const VkDeviceSize* pSizes)
4788 {
4789 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4790 struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
4791 uint8_t enabled_mask = 0;
4792
4793 assert(firstBinding + bindingCount <= MAX_SO_BUFFERS);
4794 for (uint32_t i = 0; i < bindingCount; i++) {
4795 uint32_t idx = firstBinding + i;
4796
4797 sb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
4798 sb[idx].offset = pOffsets[i];
4799 sb[idx].size = pSizes[i];
4800
4801 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
4802 sb[idx].buffer->bo);
4803
4804 enabled_mask |= 1 << idx;
4805 }
4806
4807 cmd_buffer->state.streamout.enabled_mask = enabled_mask;
4808
4809 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_STREAMOUT_BUFFER;
4810 }
4811
4812 static void
4813 radv_emit_streamout_enable(struct radv_cmd_buffer *cmd_buffer)
4814 {
4815 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
4816 struct radeon_cmdbuf *cs = cmd_buffer->cs;
4817
4818 radeon_set_context_reg_seq(cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
4819 radeon_emit(cs,
4820 S_028B94_STREAMOUT_0_EN(so->streamout_enabled) |
4821 S_028B94_RAST_STREAM(0) |
4822 S_028B94_STREAMOUT_1_EN(so->streamout_enabled) |
4823 S_028B94_STREAMOUT_2_EN(so->streamout_enabled) |
4824 S_028B94_STREAMOUT_3_EN(so->streamout_enabled));
4825 radeon_emit(cs, so->hw_enabled_mask &
4826 so->enabled_stream_buffers_mask);
4827 }
4828
4829 static void
4830 radv_set_streamout_enable(struct radv_cmd_buffer *cmd_buffer, bool enable)
4831 {
4832 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
4833 bool old_streamout_enabled = so->streamout_enabled;
4834 uint32_t old_hw_enabled_mask = so->hw_enabled_mask;
4835
4836 so->streamout_enabled = enable;
4837
4838 so->hw_enabled_mask = so->enabled_mask |
4839 (so->enabled_mask << 4) |
4840 (so->enabled_mask << 8) |
4841 (so->enabled_mask << 12);
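	/* VGT_STRMOUT_BUFFER_CONFIG has one 4-bit buffer-enable group per
	 * stream, so replicate the 4-bit buffer mask into all four groups; the
	 * per-stream masking is applied with enabled_stream_buffers_mask when
	 * the register is emitted.
	 */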
4842
4843 if ((old_streamout_enabled != so->streamout_enabled) ||
4844 (old_hw_enabled_mask != so->hw_enabled_mask))
4845 radv_emit_streamout_enable(cmd_buffer);
4846 }
4847
4848 static void radv_flush_vgt_streamout(struct radv_cmd_buffer *cmd_buffer)
4849 {
4850 struct radeon_cmdbuf *cs = cmd_buffer->cs;
4851 unsigned reg_strmout_cntl;
4852
4853 /* The register is at different places on different ASICs. */
4854 if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
4855 reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
4856 radeon_set_uconfig_reg(cs, reg_strmout_cntl, 0);
4857 } else {
4858 reg_strmout_cntl = R_0084FC_CP_STRMOUT_CNTL;
4859 radeon_set_config_reg(cs, reg_strmout_cntl, 0);
4860 }
4861
4862 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
4863 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0));
4864
4865 radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
4866 radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
4867 radeon_emit(cs, reg_strmout_cntl >> 2); /* register */
4868 radeon_emit(cs, 0);
4869 radeon_emit(cs, S_0084FC_OFFSET_UPDATE_DONE(1)); /* reference value */
4870 radeon_emit(cs, S_0084FC_OFFSET_UPDATE_DONE(1)); /* mask */
4871 radeon_emit(cs, 4); /* poll interval */
4872 }
4873
4874 void radv_CmdBeginTransformFeedbackEXT(
4875 VkCommandBuffer commandBuffer,
4876 uint32_t firstCounterBuffer,
4877 uint32_t counterBufferCount,
4878 const VkBuffer* pCounterBuffers,
4879 const VkDeviceSize* pCounterBufferOffsets)
4880 {
4881 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4882 struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
4883 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
4884 struct radeon_cmdbuf *cs = cmd_buffer->cs;
4885 uint32_t i;
4886
4887 radv_flush_vgt_streamout(cmd_buffer);
4888
4889 assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
4890 for_each_bit(i, so->enabled_mask) {
4891 int32_t counter_buffer_idx = i - firstCounterBuffer;
4892 if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
4893 counter_buffer_idx = -1;
4894
4895 /* SI binds streamout buffers as shader resources.
4896 * VGT only counts primitives and tells the shader through
4897 * SGPRs what to do.
4898 */
4899 radeon_set_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 2);
4900 radeon_emit(cs, sb[i].size >> 2); /* BUFFER_SIZE (in DW) */
4901 radeon_emit(cs, so->stride_in_dw[i]); /* VTX_STRIDE (in DW) */
4902
4903 if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) {
4904 /* The array of counter buffers is optional. */
4905 RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
4906 uint64_t va = radv_buffer_get_va(buffer->bo);
4907
4908 va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx];
4909
4910 /* Append */
4911 radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
4912 radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
4913 STRMOUT_DATA_TYPE(1) | /* offset in bytes */
4914 STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM)); /* control */
4915 radeon_emit(cs, 0); /* unused */
4916 radeon_emit(cs, 0); /* unused */
4917 radeon_emit(cs, va); /* src address lo */
4918 radeon_emit(cs, va >> 32); /* src address hi */
4919
4920 radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
4921 } else {
4922 /* Start from the beginning. */
4923 radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
4924 radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
4925 STRMOUT_DATA_TYPE(1) | /* offset in bytes */
4926 STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET)); /* control */
4927 radeon_emit(cs, 0); /* unused */
4928 radeon_emit(cs, 0); /* unused */
4929 radeon_emit(cs, 0); /* unused */
4930 radeon_emit(cs, 0); /* unused */
4931 }
4932 }
4933
4934 radv_set_streamout_enable(cmd_buffer, true);
4935 }
4936
4937 void radv_CmdEndTransformFeedbackEXT(
4938 VkCommandBuffer commandBuffer,
4939 uint32_t firstCounterBuffer,
4940 uint32_t counterBufferCount,
4941 const VkBuffer* pCounterBuffers,
4942 const VkDeviceSize* pCounterBufferOffsets)
4943 {
4944 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4945 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
4946 struct radeon_cmdbuf *cs = cmd_buffer->cs;
4947 uint32_t i;
4948
4949 radv_flush_vgt_streamout(cmd_buffer);
4950
4951 assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
4952 for_each_bit(i, so->enabled_mask) {
4953 int32_t counter_buffer_idx = i - firstCounterBuffer;
4954 if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
4955 counter_buffer_idx = -1;
4956
4957 if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) {
4958 			/* The array of counter buffers is optional. */
4959 RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
4960 uint64_t va = radv_buffer_get_va(buffer->bo);
4961
4962 va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx];
4963
4964 radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
4965 radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
4966 STRMOUT_DATA_TYPE(1) | /* offset in bytes */
4967 STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
4968 STRMOUT_STORE_BUFFER_FILLED_SIZE); /* control */
4969 radeon_emit(cs, va); /* dst address lo */
4970 radeon_emit(cs, va >> 32); /* dst address hi */
4971 radeon_emit(cs, 0); /* unused */
4972 radeon_emit(cs, 0); /* unused */
4973
4974 radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
4975 }
4976
4977 /* Deactivate transform feedback by zeroing the buffer size.
4978 * The counters (primitives generated, primitives emitted) may
4979 		 * be enabled even if there is no buffer bound. This ensures
4980 * that the primitives-emitted query won't increment.
4981 */
4982 radeon_set_context_reg(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 0);
4983 }
4984
4985 radv_set_streamout_enable(cmd_buffer, false);
4986 }
4987
4988 void radv_CmdDrawIndirectByteCountEXT(
4989 VkCommandBuffer commandBuffer,
4990 uint32_t instanceCount,
4991 uint32_t firstInstance,
4992 VkBuffer _counterBuffer,
4993 VkDeviceSize counterBufferOffset,
4994 uint32_t counterOffset,
4995 uint32_t vertexStride)
4996 {
4997 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4998 RADV_FROM_HANDLE(radv_buffer, counterBuffer, _counterBuffer);
4999 struct radv_draw_info info = {};
5000
5001 info.instance_count = instanceCount;
5002 info.first_instance = firstInstance;
5003 info.strmout_buffer = counterBuffer;
5004 info.strmout_buffer_offset = counterBufferOffset;
5005 info.stride = vertexStride;
5006
5007 radv_draw(cmd_buffer, &info);
5008 }