radv/winsys: Set winsys bo priority on creation.
[mesa.git] / src/amd/vulkan/radv_cmd_buffer.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 #include "radv_private.h"
29 #include "radv_radeon_winsys.h"
30 #include "radv_shader.h"
31 #include "radv_cs.h"
32 #include "sid.h"
33 #include "gfx9d.h"
34 #include "vk_format.h"
35 #include "radv_debug.h"
36 #include "radv_meta.h"
37
38 #include "ac_debug.h"
39
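/* Bits tracked in cmd_buffer->state.prefetch_L2_mask: things that still need
 * to be prefetched into L2 before draws can use them (see
 * radv_emit_prefetch_L2()).
 */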
40 enum {
41 RADV_PREFETCH_VBO_DESCRIPTORS = (1 << 0),
42 RADV_PREFETCH_VS = (1 << 1),
43 RADV_PREFETCH_TCS = (1 << 2),
44 RADV_PREFETCH_TES = (1 << 3),
45 RADV_PREFETCH_GS = (1 << 4),
46 RADV_PREFETCH_PS = (1 << 5),
47 RADV_PREFETCH_SHADERS = (RADV_PREFETCH_VS |
48 RADV_PREFETCH_TCS |
49 RADV_PREFETCH_TES |
50 RADV_PREFETCH_GS |
51 RADV_PREFETCH_PS)
52 };
53
54 static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
55 struct radv_image *image,
56 VkImageLayout src_layout,
57 VkImageLayout dst_layout,
58 uint32_t src_family,
59 uint32_t dst_family,
60 const VkImageSubresourceRange *range);
61
62 const struct radv_dynamic_state default_dynamic_state = {
63 .viewport = {
64 .count = 0,
65 },
66 .scissor = {
67 .count = 0,
68 },
69 .line_width = 1.0f,
70 .depth_bias = {
71 .bias = 0.0f,
72 .clamp = 0.0f,
73 .slope = 0.0f,
74 },
75 .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
76 .depth_bounds = {
77 .min = 0.0f,
78 .max = 1.0f,
79 },
80 .stencil_compare_mask = {
81 .front = ~0u,
82 .back = ~0u,
83 },
84 .stencil_write_mask = {
85 .front = ~0u,
86 .back = ~0u,
87 },
88 .stencil_reference = {
89 .front = 0u,
90 .back = 0u,
91 },
92 };
93
94 static void
95 radv_bind_dynamic_state(struct radv_cmd_buffer *cmd_buffer,
96 const struct radv_dynamic_state *src)
97 {
98 struct radv_dynamic_state *dest = &cmd_buffer->state.dynamic;
99 uint32_t copy_mask = src->mask;
100 uint32_t dest_mask = 0;
101
102 /* Make sure to copy the number of viewports/scissors because they can
103 * only be specified at pipeline creation time.
104 */
105 dest->viewport.count = src->viewport.count;
106 dest->scissor.count = src->scissor.count;
107 dest->discard_rectangle.count = src->discard_rectangle.count;
108
109 if (copy_mask & RADV_DYNAMIC_VIEWPORT) {
110 if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
111 src->viewport.count * sizeof(VkViewport))) {
112 typed_memcpy(dest->viewport.viewports,
113 src->viewport.viewports,
114 src->viewport.count);
115 dest_mask |= RADV_DYNAMIC_VIEWPORT;
116 }
117 }
118
119 if (copy_mask & RADV_DYNAMIC_SCISSOR) {
120 if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
121 src->scissor.count * sizeof(VkRect2D))) {
122 typed_memcpy(dest->scissor.scissors,
123 src->scissor.scissors, src->scissor.count);
124 dest_mask |= RADV_DYNAMIC_SCISSOR;
125 }
126 }
127
128 if (copy_mask & RADV_DYNAMIC_LINE_WIDTH) {
129 if (dest->line_width != src->line_width) {
130 dest->line_width = src->line_width;
131 dest_mask |= RADV_DYNAMIC_LINE_WIDTH;
132 }
133 }
134
135 if (copy_mask & RADV_DYNAMIC_DEPTH_BIAS) {
136 if (memcmp(&dest->depth_bias, &src->depth_bias,
137 sizeof(src->depth_bias))) {
138 dest->depth_bias = src->depth_bias;
139 dest_mask |= RADV_DYNAMIC_DEPTH_BIAS;
140 }
141 }
142
143 if (copy_mask & RADV_DYNAMIC_BLEND_CONSTANTS) {
144 if (memcmp(&dest->blend_constants, &src->blend_constants,
145 sizeof(src->blend_constants))) {
146 typed_memcpy(dest->blend_constants,
147 src->blend_constants, 4);
148 dest_mask |= RADV_DYNAMIC_BLEND_CONSTANTS;
149 }
150 }
151
152 if (copy_mask & RADV_DYNAMIC_DEPTH_BOUNDS) {
153 if (memcmp(&dest->depth_bounds, &src->depth_bounds,
154 sizeof(src->depth_bounds))) {
155 dest->depth_bounds = src->depth_bounds;
156 dest_mask |= RADV_DYNAMIC_DEPTH_BOUNDS;
157 }
158 }
159
160 if (copy_mask & RADV_DYNAMIC_STENCIL_COMPARE_MASK) {
161 if (memcmp(&dest->stencil_compare_mask,
162 &src->stencil_compare_mask,
163 sizeof(src->stencil_compare_mask))) {
164 dest->stencil_compare_mask = src->stencil_compare_mask;
165 dest_mask |= RADV_DYNAMIC_STENCIL_COMPARE_MASK;
166 }
167 }
168
169 if (copy_mask & RADV_DYNAMIC_STENCIL_WRITE_MASK) {
170 if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
171 sizeof(src->stencil_write_mask))) {
172 dest->stencil_write_mask = src->stencil_write_mask;
173 dest_mask |= RADV_DYNAMIC_STENCIL_WRITE_MASK;
174 }
175 }
176
177 if (copy_mask & RADV_DYNAMIC_STENCIL_REFERENCE) {
178 if (memcmp(&dest->stencil_reference, &src->stencil_reference,
179 sizeof(src->stencil_reference))) {
180 dest->stencil_reference = src->stencil_reference;
181 dest_mask |= RADV_DYNAMIC_STENCIL_REFERENCE;
182 }
183 }
184
185 if (copy_mask & RADV_DYNAMIC_DISCARD_RECTANGLE) {
186 if (memcmp(&dest->discard_rectangle.rectangles, &src->discard_rectangle.rectangles,
187 src->discard_rectangle.count * sizeof(VkRect2D))) {
188 typed_memcpy(dest->discard_rectangle.rectangles,
189 src->discard_rectangle.rectangles,
190 src->discard_rectangle.count);
191 dest_mask |= RADV_DYNAMIC_DISCARD_RECTANGLE;
192 }
193 }
194
195 cmd_buffer->state.dirty |= dest_mask;
196 }
197
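/* Copy the streamout strides and the enabled-buffer mask from the pipeline's
 * streamout shader into the command buffer state.
 */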
198 static void
199 radv_bind_streamout_state(struct radv_cmd_buffer *cmd_buffer,
200 struct radv_pipeline *pipeline)
201 {
202 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
203 struct radv_shader_info *info;
204
205 if (!pipeline->streamout_shader)
206 return;
207
208 info = &pipeline->streamout_shader->info.info;
209 for (int i = 0; i < MAX_SO_BUFFERS; i++)
210 so->stride_in_dw[i] = info->so.strides[i];
211
212 so->enabled_stream_buffers_mask = info->so.enabled_stream_buffers_mask;
213 }
214
215 bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer)
216 {
217 return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
218 cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
219 }
220
221 enum ring_type radv_queue_family_to_ring(int f) {
222 switch (f) {
223 case RADV_QUEUE_GENERAL:
224 return RING_GFX;
225 case RADV_QUEUE_COMPUTE:
226 return RING_COMPUTE;
227 case RADV_QUEUE_TRANSFER:
228 return RING_DMA;
229 default:
230 unreachable("Unknown queue family");
231 }
232 }
233
234 static VkResult radv_create_cmd_buffer(
235 struct radv_device * device,
236 struct radv_cmd_pool * pool,
237 VkCommandBufferLevel level,
238 VkCommandBuffer* pCommandBuffer)
239 {
240 struct radv_cmd_buffer *cmd_buffer;
241 unsigned ring;
242 cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
243 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
244 if (cmd_buffer == NULL)
245 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
246
247 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
248 cmd_buffer->device = device;
249 cmd_buffer->pool = pool;
250 cmd_buffer->level = level;
251
252 if (pool) {
253 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
254 cmd_buffer->queue_family_index = pool->queue_family_index;
255
256 } else {
257 /* Init the pool_link so we can safely call list_del when we destroy
258 * the command buffer
259 */
260 list_inithead(&cmd_buffer->pool_link);
261 cmd_buffer->queue_family_index = RADV_QUEUE_GENERAL;
262 }
263
264 ring = radv_queue_family_to_ring(cmd_buffer->queue_family_index);
265
266 cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
267 if (!cmd_buffer->cs) {
268 vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
269 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
270 }
271
272 *pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);
273
274 list_inithead(&cmd_buffer->upload.list);
275
276 return VK_SUCCESS;
277 }
278
279 static void
280 radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer)
281 {
282 list_del(&cmd_buffer->pool_link);
283
284 list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
285 &cmd_buffer->upload.list, list) {
286 cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
287 list_del(&up->list);
288 free(up);
289 }
290
291 if (cmd_buffer->upload.upload_bo)
292 cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
293 cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
294
295 for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
296 free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
297
298 vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
299 }
300
301 static VkResult
302 radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
303 {
304
305 cmd_buffer->device->ws->cs_reset(cmd_buffer->cs);
306
307 list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
308 &cmd_buffer->upload.list, list) {
309 cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
310 list_del(&up->list);
311 free(up);
312 }
313
314 cmd_buffer->push_constant_stages = 0;
315 cmd_buffer->scratch_size_needed = 0;
316 cmd_buffer->compute_scratch_size_needed = 0;
317 cmd_buffer->esgs_ring_size_needed = 0;
318 cmd_buffer->gsvs_ring_size_needed = 0;
319 cmd_buffer->tess_rings_needed = false;
320 cmd_buffer->sample_positions_needed = false;
321
322 if (cmd_buffer->upload.upload_bo)
323 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
324 cmd_buffer->upload.upload_bo);
325 cmd_buffer->upload.offset = 0;
326
327 cmd_buffer->record_result = VK_SUCCESS;
328
329 for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
330 cmd_buffer->descriptors[i].dirty = 0;
331 cmd_buffer->descriptors[i].valid = 0;
332 cmd_buffer->descriptors[i].push_dirty = false;
333 }
334
335 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 &&
336 cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) {
337 unsigned num_db = cmd_buffer->device->physical_device->rad_info.num_render_backends;
338 unsigned fence_offset, eop_bug_offset;
339 void *fence_ptr;
340
341 radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 0, &fence_offset,
342 &fence_ptr);
343 cmd_buffer->gfx9_fence_va =
344 radv_buffer_get_va(cmd_buffer->upload.upload_bo);
345 cmd_buffer->gfx9_fence_va += fence_offset;
346
347 /* Allocate a buffer for the EOP bug on GFX9. */
348 radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 0,
349 &eop_bug_offset, &fence_ptr);
350 cmd_buffer->gfx9_eop_bug_va =
351 radv_buffer_get_va(cmd_buffer->upload.upload_bo);
352 cmd_buffer->gfx9_eop_bug_va += eop_bug_offset;
353 }
354
355 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_INITIAL;
356
357 return cmd_buffer->record_result;
358 }
359
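/* Grow the upload buffer: the new BO is at least 16 KiB and at least twice
 * the previous size. The old BO is kept on the upload list so data already
 * referenced by the CS stays valid until the command buffer is reset or
 * destroyed.
 */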
360 static bool
361 radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer,
362 uint64_t min_needed)
363 {
364 uint64_t new_size;
365 struct radeon_winsys_bo *bo;
366 struct radv_cmd_buffer_upload *upload;
367 struct radv_device *device = cmd_buffer->device;
368
369 new_size = MAX2(min_needed, 16 * 1024);
370 new_size = MAX2(new_size, 2 * cmd_buffer->upload.size);
371
372 bo = device->ws->buffer_create(device->ws,
373 new_size, 4096,
374 RADEON_DOMAIN_GTT,
375 RADEON_FLAG_CPU_ACCESS|
376 RADEON_FLAG_NO_INTERPROCESS_SHARING |
377 RADEON_FLAG_32BIT,
378 RADV_BO_PRIORITY_UPLOAD_BUFFER);
379
380 if (!bo) {
381 cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
382 return false;
383 }
384
385 radv_cs_add_buffer(device->ws, cmd_buffer->cs, bo);
386 if (cmd_buffer->upload.upload_bo) {
387 upload = malloc(sizeof(*upload));
388
389 if (!upload) {
390 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
391 device->ws->buffer_destroy(bo);
392 return false;
393 }
394
395 memcpy(upload, &cmd_buffer->upload, sizeof(*upload));
396 list_add(&upload->list, &cmd_buffer->upload.list);
397 }
398
399 cmd_buffer->upload.upload_bo = bo;
400 cmd_buffer->upload.size = new_size;
401 cmd_buffer->upload.offset = 0;
402 cmd_buffer->upload.map = device->ws->buffer_map(cmd_buffer->upload.upload_bo);
403
404 if (!cmd_buffer->upload.map) {
405 cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
406 return false;
407 }
408
409 return true;
410 }
411
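/* Suballocate "size" bytes aligned to "alignment" from the upload buffer,
 * growing it if necessary. Returns the offset inside upload.upload_bo and a
 * CPU pointer to the mapped region.
 */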
412 bool
413 radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer,
414 unsigned size,
415 unsigned alignment,
416 unsigned *out_offset,
417 void **ptr)
418 {
419 uint64_t offset = align(cmd_buffer->upload.offset, alignment);
420 if (offset + size > cmd_buffer->upload.size) {
421 if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size))
422 return false;
423 offset = 0;
424 }
425
426 *out_offset = offset;
427 *ptr = cmd_buffer->upload.map + offset;
428
429 cmd_buffer->upload.offset = offset + size;
430 return true;
431 }
432
433 bool
434 radv_cmd_buffer_upload_data(struct radv_cmd_buffer *cmd_buffer,
435 unsigned size, unsigned alignment,
436 const void *data, unsigned *out_offset)
437 {
438 uint8_t *ptr;
439
440 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size, alignment,
441 out_offset, (void **)&ptr))
442 return false;
443
444 if (ptr)
445 memcpy(ptr, data, size);
446
447 return true;
448 }
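
/* Illustrative usage sketch (the local names below are hypothetical, not part
 * of this file): upload some CPU data and compute its GPU VA.
 *
 *	unsigned offset;
 *	if (radv_cmd_buffer_upload_data(cmd_buffer, size, 256, data, &offset)) {
 *		uint64_t va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
 *		va += offset;
 *		... emit packets that reference va ...
 *	}
 */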
449
450 static void
451 radv_emit_write_data_packet(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
452 unsigned count, const uint32_t *data)
453 {
454 struct radeon_cmdbuf *cs = cmd_buffer->cs;
455
456 radeon_check_space(cmd_buffer->device->ws, cs, 4 + count);
457
458 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
459 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
460 S_370_WR_CONFIRM(1) |
461 S_370_ENGINE_SEL(V_370_ME));
462 radeon_emit(cs, va);
463 radeon_emit(cs, va >> 32);
464 radeon_emit_array(cs, data, count);
465 }
466
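/* Debug helper: bump and write a trace id into the device trace BO (primary
 * and secondary command buffers use separate slots), then emit a NOP trace
 * point, so a GPU hang can be narrowed down to the last executed command.
 * Only used when device->trace_bo is allocated.
 */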
467 void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer)
468 {
469 struct radv_device *device = cmd_buffer->device;
470 struct radeon_cmdbuf *cs = cmd_buffer->cs;
471 uint64_t va;
472
473 va = radv_buffer_get_va(device->trace_bo);
474 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
475 va += 4;
476
477 ++cmd_buffer->state.trace_id;
478 radv_emit_write_data_packet(cmd_buffer, va, 1,
479 &cmd_buffer->state.trace_id);
480
481 radeon_check_space(cmd_buffer->device->ws, cs, 2);
482
483 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
484 radeon_emit(cs, AC_ENCODE_TRACE_POINT(cmd_buffer->state.trace_id));
485 }
486
487 static void
488 radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer,
489 enum radv_cmd_flush_bits flags)
490 {
491 if (cmd_buffer->device->instance->debug_flags & RADV_DEBUG_SYNC_SHADERS) {
492 assert(flags & (RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
493 RADV_CMD_FLAG_CS_PARTIAL_FLUSH));
494
495 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 4);
496
497 /* Force wait for graphics or compute engines to be idle. */
498 si_cs_emit_cache_flush(cmd_buffer->cs,
499 cmd_buffer->device->physical_device->rad_info.chip_class,
500 &cmd_buffer->gfx9_fence_idx,
501 cmd_buffer->gfx9_fence_va,
502 radv_cmd_buffer_uses_mec(cmd_buffer),
503 flags, cmd_buffer->gfx9_eop_bug_va);
504 }
505
506 if (unlikely(cmd_buffer->device->trace_bo))
507 radv_cmd_buffer_trace_emit(cmd_buffer);
508 }
509
510 static void
511 radv_save_pipeline(struct radv_cmd_buffer *cmd_buffer,
512 struct radv_pipeline *pipeline, enum ring_type ring)
513 {
514 struct radv_device *device = cmd_buffer->device;
515 uint32_t data[2];
516 uint64_t va;
517
518 va = radv_buffer_get_va(device->trace_bo);
519
520 switch (ring) {
521 case RING_GFX:
522 va += 8;
523 break;
524 case RING_COMPUTE:
525 va += 16;
526 break;
527 default:
528 assert(!"invalid ring type");
529 }
530
531 data[0] = (uintptr_t)pipeline;
532 data[1] = (uintptr_t)pipeline >> 32;
533
534 radv_emit_write_data_packet(cmd_buffer, va, 2, data);
535 }
536
537 void radv_set_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
538 VkPipelineBindPoint bind_point,
539 struct radv_descriptor_set *set,
540 unsigned idx)
541 {
542 struct radv_descriptor_state *descriptors_state =
543 radv_get_descriptors_state(cmd_buffer, bind_point);
544
545 descriptors_state->sets[idx] = set;
546
547 descriptors_state->valid |= (1u << idx); /* active descriptors */
548 descriptors_state->dirty |= (1u << idx);
549 }
550
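/* Debug helper: when the device trace BO is present, write the CPU pointers
 * of all currently valid descriptor sets into it, after the trace ids and
 * saved pipeline pointers.
 */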
551 static void
552 radv_save_descriptors(struct radv_cmd_buffer *cmd_buffer,
553 VkPipelineBindPoint bind_point)
554 {
555 struct radv_descriptor_state *descriptors_state =
556 radv_get_descriptors_state(cmd_buffer, bind_point);
557 struct radv_device *device = cmd_buffer->device;
558 uint32_t data[MAX_SETS * 2] = {};
559 uint64_t va;
560 unsigned i;
561 va = radv_buffer_get_va(device->trace_bo) + 24;
562
563 for_each_bit(i, descriptors_state->valid) {
564 struct radv_descriptor_set *set = descriptors_state->sets[i];
565 data[i * 2] = (uintptr_t)set;
566 data[i * 2 + 1] = (uintptr_t)set >> 32;
567 }
568
569 radv_emit_write_data_packet(cmd_buffer, va, MAX_SETS * 2, data);
570 }
571
572 struct radv_userdata_info *
573 radv_lookup_user_sgpr(struct radv_pipeline *pipeline,
574 gl_shader_stage stage,
575 int idx)
576 {
577 struct radv_shader_variant *shader = radv_get_shader(pipeline, stage);
578 return &shader->info.user_sgprs_locs.shader_data[idx];
579 }
580
581 static void
582 radv_emit_userdata_address(struct radv_cmd_buffer *cmd_buffer,
583 struct radv_pipeline *pipeline,
584 gl_shader_stage stage,
585 int idx, uint64_t va)
586 {
587 struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
588 uint32_t base_reg = pipeline->user_data_0[stage];
589 if (loc->sgpr_idx == -1)
590 return;
591
592 assert(loc->num_sgprs == 1);
593
594 radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
595 base_reg + loc->sgpr_idx * 4, va, false);
596 }
597
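/* Emit user SGPR pointers for all descriptor sets that are both dirty and
 * valid for this stage, packing consecutive SGPR slots into a single
 * SET_SH_REG packet.
 */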
598 static void
599 radv_emit_descriptor_pointers(struct radv_cmd_buffer *cmd_buffer,
600 struct radv_pipeline *pipeline,
601 struct radv_descriptor_state *descriptors_state,
602 gl_shader_stage stage)
603 {
604 struct radv_device *device = cmd_buffer->device;
605 struct radeon_cmdbuf *cs = cmd_buffer->cs;
606 uint32_t sh_base = pipeline->user_data_0[stage];
607 struct radv_userdata_locations *locs =
608 &pipeline->shaders[stage]->info.user_sgprs_locs;
609 unsigned mask = locs->descriptor_sets_enabled;
610
611 mask &= descriptors_state->dirty & descriptors_state->valid;
612
613 while (mask) {
614 int start, count;
615
616 u_bit_scan_consecutive_range(&mask, &start, &count);
617
618 struct radv_userdata_info *loc = &locs->descriptor_sets[start];
619 unsigned sh_offset = sh_base + loc->sgpr_idx * 4;
620
621 radv_emit_shader_pointer_head(cs, sh_offset, count, true);
622 for (int i = 0; i < count; i++) {
623 struct radv_descriptor_set *set =
624 descriptors_state->sets[start + i];
625
626 radv_emit_shader_pointer_body(device, cs, set->va, true);
627 }
628 }
629 }
630
631 static void
632 radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer,
633 struct radv_pipeline *pipeline)
634 {
635 int num_samples = pipeline->graphics.ms.num_samples;
636 struct radv_multisample_state *ms = &pipeline->graphics.ms;
637 struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;
638
639 if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.needs_sample_positions)
640 cmd_buffer->sample_positions_needed = true;
641
642 if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples)
643 return;
644
645 radeon_set_context_reg_seq(cmd_buffer->cs, R_028BDC_PA_SC_LINE_CNTL, 2);
646 radeon_emit(cmd_buffer->cs, ms->pa_sc_line_cntl);
647 radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_config);
648
649 radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0, ms->pa_sc_mode_cntl_0);
650
651 radv_cayman_emit_msaa_sample_locs(cmd_buffer->cs, num_samples);
652
653 /* GFX9: Flush DFSM when the AA mode changes. */
654 if (cmd_buffer->device->dfsm_allowed) {
655 radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
656 radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
657 }
658
659 cmd_buffer->state.context_roll_without_scissor_emitted = true;
660 }
661
662 static void
663 radv_emit_shader_prefetch(struct radv_cmd_buffer *cmd_buffer,
664 struct radv_shader_variant *shader)
665 {
666 uint64_t va;
667
668 if (!shader)
669 return;
670
671 va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
672
673 si_cp_dma_prefetch(cmd_buffer, va, shader->code_size);
674 }
675
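/* Prefetch shader binaries and the vertex buffer descriptors into L2 via CP
 * DMA. With vertex_stage_only, only the VS binary and the VBO descriptors
 * are prefetched so the first draw can start as early as possible; the other
 * bits remain set in prefetch_L2_mask and are handled later.
 */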
676 static void
677 radv_emit_prefetch_L2(struct radv_cmd_buffer *cmd_buffer,
678 struct radv_pipeline *pipeline,
679 bool vertex_stage_only)
680 {
681 struct radv_cmd_state *state = &cmd_buffer->state;
682 uint32_t mask = state->prefetch_L2_mask;
683
684 if (vertex_stage_only) {
685 /* Fast prefetch path for starting draws as soon as possible.
686 */
687 mask = state->prefetch_L2_mask & (RADV_PREFETCH_VS |
688 RADV_PREFETCH_VBO_DESCRIPTORS);
689 }
690
691 if (mask & RADV_PREFETCH_VS)
692 radv_emit_shader_prefetch(cmd_buffer,
693 pipeline->shaders[MESA_SHADER_VERTEX]);
694
695 if (mask & RADV_PREFETCH_VBO_DESCRIPTORS)
696 si_cp_dma_prefetch(cmd_buffer, state->vb_va, state->vb_size);
697
698 if (mask & RADV_PREFETCH_TCS)
699 radv_emit_shader_prefetch(cmd_buffer,
700 pipeline->shaders[MESA_SHADER_TESS_CTRL]);
701
702 if (mask & RADV_PREFETCH_TES)
703 radv_emit_shader_prefetch(cmd_buffer,
704 pipeline->shaders[MESA_SHADER_TESS_EVAL]);
705
706 if (mask & RADV_PREFETCH_GS) {
707 radv_emit_shader_prefetch(cmd_buffer,
708 pipeline->shaders[MESA_SHADER_GEOMETRY]);
709 radv_emit_shader_prefetch(cmd_buffer, pipeline->gs_copy_shader);
710 }
711
712 if (mask & RADV_PREFETCH_PS)
713 radv_emit_shader_prefetch(cmd_buffer,
714 pipeline->shaders[MESA_SHADER_FRAGMENT]);
715
716 state->prefetch_L2_mask &= ~mask;
717 }
718
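/* Program the RB+ registers (SX_PS_DOWNCONVERT, SX_BLEND_OPT_EPSILON and
 * SX_BLEND_OPT_CONTROL) from the color attachment formats, the PS export
 * formats and the color write masks of the current pipeline/subpass.
 */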
719 static void
720 radv_emit_rbplus_state(struct radv_cmd_buffer *cmd_buffer)
721 {
722 if (!cmd_buffer->device->physical_device->rbplus_allowed)
723 return;
724
725 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
726 struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
727 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
728
729 unsigned sx_ps_downconvert = 0;
730 unsigned sx_blend_opt_epsilon = 0;
731 unsigned sx_blend_opt_control = 0;
732
733 for (unsigned i = 0; i < subpass->color_count; ++i) {
734 if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
735 sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
736 sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
737 continue;
738 }
739
740 int idx = subpass->color_attachments[i].attachment;
741 struct radv_color_buffer_info *cb = &framebuffer->attachments[idx].cb;
742
743 unsigned format = G_028C70_FORMAT(cb->cb_color_info);
744 unsigned swap = G_028C70_COMP_SWAP(cb->cb_color_info);
745 uint32_t spi_format = (pipeline->graphics.col_format >> (i * 4)) & 0xf;
746 uint32_t colormask = (pipeline->graphics.cb_target_mask >> (i * 4)) & 0xf;
747
748 bool has_alpha, has_rgb;
749
750 /* Set if RGB and A are present. */
751 has_alpha = !G_028C74_FORCE_DST_ALPHA_1(cb->cb_color_attrib);
752
753 if (format == V_028C70_COLOR_8 ||
754 format == V_028C70_COLOR_16 ||
755 format == V_028C70_COLOR_32)
756 has_rgb = !has_alpha;
757 else
758 has_rgb = true;
759
760 /* Check the colormask and export format. */
761 if (!(colormask & 0x7))
762 has_rgb = false;
763 if (!(colormask & 0x8))
764 has_alpha = false;
765
766 if (spi_format == V_028714_SPI_SHADER_ZERO) {
767 has_rgb = false;
768 has_alpha = false;
769 }
770
771 /* Disable value checking for disabled channels. */
772 if (!has_rgb)
773 sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
774 if (!has_alpha)
775 sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
776
777 /* Enable down-conversion for 32bpp and smaller formats. */
778 switch (format) {
779 case V_028C70_COLOR_8:
780 case V_028C70_COLOR_8_8:
781 case V_028C70_COLOR_8_8_8_8:
782 /* For 1 and 2-channel formats, use the superset thereof. */
783 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR ||
784 spi_format == V_028714_SPI_SHADER_UINT16_ABGR ||
785 spi_format == V_028714_SPI_SHADER_SINT16_ABGR) {
786 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_8_8_8_8 << (i * 4);
787 sx_blend_opt_epsilon |= V_028758_8BIT_FORMAT << (i * 4);
788 }
789 break;
790
791 case V_028C70_COLOR_5_6_5:
792 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
793 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_5_6_5 << (i * 4);
794 sx_blend_opt_epsilon |= V_028758_6BIT_FORMAT << (i * 4);
795 }
796 break;
797
798 case V_028C70_COLOR_1_5_5_5:
799 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
800 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_1_5_5_5 << (i * 4);
801 sx_blend_opt_epsilon |= V_028758_5BIT_FORMAT << (i * 4);
802 }
803 break;
804
805 case V_028C70_COLOR_4_4_4_4:
806 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
807 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_4_4_4_4 << (i * 4);
808 sx_blend_opt_epsilon |= V_028758_4BIT_FORMAT << (i * 4);
809 }
810 break;
811
812 case V_028C70_COLOR_32:
813 if (swap == V_028C70_SWAP_STD &&
814 spi_format == V_028714_SPI_SHADER_32_R)
815 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_R << (i * 4);
816 else if (swap == V_028C70_SWAP_ALT_REV &&
817 spi_format == V_028714_SPI_SHADER_32_AR)
818 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_A << (i * 4);
819 break;
820
821 case V_028C70_COLOR_16:
822 case V_028C70_COLOR_16_16:
823 /* For 1-channel formats, use the superset thereof. */
824 if (spi_format == V_028714_SPI_SHADER_UNORM16_ABGR ||
825 spi_format == V_028714_SPI_SHADER_SNORM16_ABGR ||
826 spi_format == V_028714_SPI_SHADER_UINT16_ABGR ||
827 spi_format == V_028714_SPI_SHADER_SINT16_ABGR) {
828 if (swap == V_028C70_SWAP_STD ||
829 swap == V_028C70_SWAP_STD_REV)
830 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_16_16_GR << (i * 4);
831 else
832 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_16_16_AR << (i * 4);
833 }
834 break;
835
836 case V_028C70_COLOR_10_11_11:
837 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
838 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_10_11_11 << (i * 4);
839 sx_blend_opt_epsilon |= V_028758_11BIT_FORMAT << (i * 4);
840 }
841 break;
842
843 case V_028C70_COLOR_2_10_10_10:
844 if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
845 sx_ps_downconvert |= V_028754_SX_RT_EXPORT_2_10_10_10 << (i * 4);
846 sx_blend_opt_epsilon |= V_028758_10BIT_FORMAT << (i * 4);
847 }
848 break;
849 }
850 }
851
852 for (unsigned i = subpass->color_count; i < 8; ++i) {
853 sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
854 sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
855 }
856 /* TODO: avoid redundantly setting context registers */
857 radeon_set_context_reg_seq(cmd_buffer->cs, R_028754_SX_PS_DOWNCONVERT, 3);
858 radeon_emit(cmd_buffer->cs, sx_ps_downconvert);
859 radeon_emit(cmd_buffer->cs, sx_blend_opt_epsilon);
860 radeon_emit(cmd_buffer->cs, sx_blend_opt_control);
861
862 cmd_buffer->state.context_roll_without_scissor_emitted = true;
863 }
864
865 static void
866 radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer)
867 {
868 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
869
870 if (!pipeline || cmd_buffer->state.emitted_pipeline == pipeline)
871 return;
872
873 radv_update_multisample_state(cmd_buffer, pipeline);
874
875 cmd_buffer->scratch_size_needed =
876 MAX2(cmd_buffer->scratch_size_needed,
877 pipeline->max_waves * pipeline->scratch_bytes_per_wave);
878
879 if (!cmd_buffer->state.emitted_pipeline ||
880 cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband !=
881 pipeline->graphics.can_use_guardband)
882 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
883
884 radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);
885
886 if (!cmd_buffer->state.emitted_pipeline ||
887 cmd_buffer->state.emitted_pipeline->ctx_cs.cdw != pipeline->ctx_cs.cdw ||
888 cmd_buffer->state.emitted_pipeline->ctx_cs_hash != pipeline->ctx_cs_hash ||
889 memcmp(cmd_buffer->state.emitted_pipeline->ctx_cs.buf,
890 pipeline->ctx_cs.buf, pipeline->ctx_cs.cdw * 4)) {
891 radeon_emit_array(cmd_buffer->cs, pipeline->ctx_cs.buf, pipeline->ctx_cs.cdw);
892 cmd_buffer->state.context_roll_without_scissor_emitted = true;
893 }
894
895 for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) {
896 if (!pipeline->shaders[i])
897 continue;
898
899 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
900 pipeline->shaders[i]->bo);
901 }
902
903 if (radv_pipeline_has_gs(pipeline))
904 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
905 pipeline->gs_copy_shader->bo);
906
907 if (unlikely(cmd_buffer->device->trace_bo))
908 radv_save_pipeline(cmd_buffer, pipeline, RING_GFX);
909
910 cmd_buffer->state.emitted_pipeline = pipeline;
911
912 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_PIPELINE;
913 }
914
915 static void
916 radv_emit_viewport(struct radv_cmd_buffer *cmd_buffer)
917 {
918 si_write_viewport(cmd_buffer->cs, 0, cmd_buffer->state.dynamic.viewport.count,
919 cmd_buffer->state.dynamic.viewport.viewports);
920 }
921
922 static void
923 radv_emit_scissor(struct radv_cmd_buffer *cmd_buffer)
924 {
925 uint32_t count = cmd_buffer->state.dynamic.scissor.count;
926
927 si_write_scissors(cmd_buffer->cs, 0, count,
928 cmd_buffer->state.dynamic.scissor.scissors,
929 cmd_buffer->state.dynamic.viewport.viewports,
930 cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband);
931
932 cmd_buffer->state.context_roll_without_scissor_emitted = false;
933 }
934
935 static void
936 radv_emit_discard_rectangle(struct radv_cmd_buffer *cmd_buffer)
937 {
938 if (!cmd_buffer->state.dynamic.discard_rectangle.count)
939 return;
940
941 radeon_set_context_reg_seq(cmd_buffer->cs, R_028210_PA_SC_CLIPRECT_0_TL,
942 cmd_buffer->state.dynamic.discard_rectangle.count * 2);
943 for (unsigned i = 0; i < cmd_buffer->state.dynamic.discard_rectangle.count; ++i) {
944 VkRect2D rect = cmd_buffer->state.dynamic.discard_rectangle.rectangles[i];
945 radeon_emit(cmd_buffer->cs, S_028210_TL_X(rect.offset.x) | S_028210_TL_Y(rect.offset.y));
946 radeon_emit(cmd_buffer->cs, S_028214_BR_X(rect.offset.x + rect.extent.width) |
947 S_028214_BR_Y(rect.offset.y + rect.extent.height));
948 }
949 }
950
951 static void
952 radv_emit_line_width(struct radv_cmd_buffer *cmd_buffer)
953 {
954 unsigned width = cmd_buffer->state.dynamic.line_width * 8;
955
956 radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL,
957 S_028A08_WIDTH(CLAMP(width, 0, 0xFFF)));
958 }
959
960 static void
961 radv_emit_blend_constants(struct radv_cmd_buffer *cmd_buffer)
962 {
963 struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
964
965 radeon_set_context_reg_seq(cmd_buffer->cs, R_028414_CB_BLEND_RED, 4);
966 radeon_emit_array(cmd_buffer->cs, (uint32_t *)d->blend_constants, 4);
967 }
968
969 static void
970 radv_emit_stencil(struct radv_cmd_buffer *cmd_buffer)
971 {
972 struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
973
974 radeon_set_context_reg_seq(cmd_buffer->cs,
975 R_028430_DB_STENCILREFMASK, 2);
976 radeon_emit(cmd_buffer->cs,
977 S_028430_STENCILTESTVAL(d->stencil_reference.front) |
978 S_028430_STENCILMASK(d->stencil_compare_mask.front) |
979 S_028430_STENCILWRITEMASK(d->stencil_write_mask.front) |
980 S_028430_STENCILOPVAL(1));
981 radeon_emit(cmd_buffer->cs,
982 S_028434_STENCILTESTVAL_BF(d->stencil_reference.back) |
983 S_028434_STENCILMASK_BF(d->stencil_compare_mask.back) |
984 S_028434_STENCILWRITEMASK_BF(d->stencil_write_mask.back) |
985 S_028434_STENCILOPVAL_BF(1));
986 }
987
988 static void
989 radv_emit_depth_bounds(struct radv_cmd_buffer *cmd_buffer)
990 {
991 struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
992
993 radeon_set_context_reg(cmd_buffer->cs, R_028020_DB_DEPTH_BOUNDS_MIN,
994 fui(d->depth_bounds.min));
995 radeon_set_context_reg(cmd_buffer->cs, R_028024_DB_DEPTH_BOUNDS_MAX,
996 fui(d->depth_bounds.max));
997 }
998
999 static void
1000 radv_emit_depth_bias(struct radv_cmd_buffer *cmd_buffer)
1001 {
1002 struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
1003 unsigned slope = fui(d->depth_bias.slope * 16.0f);
1004 unsigned bias = fui(d->depth_bias.bias * cmd_buffer->state.offset_scale);
1005
1006
1007 radeon_set_context_reg_seq(cmd_buffer->cs,
1008 R_028B7C_PA_SU_POLY_OFFSET_CLAMP, 5);
1009 radeon_emit(cmd_buffer->cs, fui(d->depth_bias.clamp)); /* CLAMP */
1010 radeon_emit(cmd_buffer->cs, slope); /* FRONT SCALE */
1011 radeon_emit(cmd_buffer->cs, bias); /* FRONT OFFSET */
1012 radeon_emit(cmd_buffer->cs, slope); /* BACK SCALE */
1013 radeon_emit(cmd_buffer->cs, bias); /* BACK OFFSET */
1014 }
1015
1016 static void
1017 radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
1018 int index,
1019 struct radv_attachment_info *att,
1020 struct radv_image *image,
1021 VkImageLayout layout)
1022 {
1023 bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI;
1024 struct radv_color_buffer_info *cb = &att->cb;
1025 uint32_t cb_color_info = cb->cb_color_info;
1026
1027 if (!radv_layout_dcc_compressed(image, layout,
1028 radv_image_queue_family_mask(image,
1029 cmd_buffer->queue_family_index,
1030 cmd_buffer->queue_family_index))) {
1031 cb_color_info &= C_028C70_DCC_ENABLE;
1032 }
1033
1034 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
1035 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
1036 radeon_emit(cmd_buffer->cs, cb->cb_color_base);
1037 radeon_emit(cmd_buffer->cs, S_028C64_BASE_256B(cb->cb_color_base >> 32));
1038 radeon_emit(cmd_buffer->cs, cb->cb_color_attrib2);
1039 radeon_emit(cmd_buffer->cs, cb->cb_color_view);
1040 radeon_emit(cmd_buffer->cs, cb_color_info);
1041 radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
1042 radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
1043 radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
1044 radeon_emit(cmd_buffer->cs, S_028C80_BASE_256B(cb->cb_color_cmask >> 32));
1045 radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
1046 radeon_emit(cmd_buffer->cs, S_028C88_BASE_256B(cb->cb_color_fmask >> 32));
1047
1048 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, 2);
1049 radeon_emit(cmd_buffer->cs, cb->cb_dcc_base);
1050 radeon_emit(cmd_buffer->cs, S_028C98_BASE_256B(cb->cb_dcc_base >> 32));
1051
1052 radeon_set_context_reg(cmd_buffer->cs, R_0287A0_CB_MRT0_EPITCH + index * 4,
1053 S_0287A0_EPITCH(att->attachment->image->surface.u.gfx9.surf.epitch));
1054 } else {
1055 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
1056 radeon_emit(cmd_buffer->cs, cb->cb_color_base);
1057 radeon_emit(cmd_buffer->cs, cb->cb_color_pitch);
1058 radeon_emit(cmd_buffer->cs, cb->cb_color_slice);
1059 radeon_emit(cmd_buffer->cs, cb->cb_color_view);
1060 radeon_emit(cmd_buffer->cs, cb_color_info);
1061 radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
1062 radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
1063 radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
1064 radeon_emit(cmd_buffer->cs, cb->cb_color_cmask_slice);
1065 radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
1066 radeon_emit(cmd_buffer->cs, cb->cb_color_fmask_slice);
1067
1068 if (is_vi) { /* DCC BASE */
1069 radeon_set_context_reg(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, cb->cb_dcc_base);
1070 }
1071 }
1072
1073 if (radv_image_has_dcc(image)) {
1074 /* Drawing with DCC enabled also compresses colorbuffers. */
1075 radv_update_dcc_metadata(cmd_buffer, image, true);
1076 }
1077 }
1078
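/* TC-compatible HTILE workaround: ZRANGE_PRECISION has to be 0 when the last
 * fast depth clear value was 0.0f. When that value is not known at emit
 * time, a COND_EXEC packet predicated on the image's tc_compat_zrange
 * metadata decides whether the following SET_CONTEXT_REG is executed.
 */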
1079 static void
1080 radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer,
1081 struct radv_ds_buffer_info *ds,
1082 struct radv_image *image, VkImageLayout layout,
1083 bool requires_cond_exec)
1084 {
1085 uint32_t db_z_info = ds->db_z_info;
1086 uint32_t db_z_info_reg;
1087
1088 if (!radv_image_is_tc_compat_htile(image))
1089 return;
1090
1091 if (!radv_layout_has_htile(image, layout,
1092 radv_image_queue_family_mask(image,
1093 cmd_buffer->queue_family_index,
1094 cmd_buffer->queue_family_index))) {
1095 db_z_info &= C_028040_TILE_SURFACE_ENABLE;
1096 }
1097
1098 db_z_info &= C_028040_ZRANGE_PRECISION;
1099
1100 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
1101 db_z_info_reg = R_028038_DB_Z_INFO;
1102 } else {
1103 db_z_info_reg = R_028040_DB_Z_INFO;
1104 }
1105
1106 /* When we don't know the last fast clear value we need to emit a
1107 * conditional packet that will eventually skip the following
1108 * SET_CONTEXT_REG packet.
1109 */
1110 if (requires_cond_exec) {
1111 uint64_t va = radv_buffer_get_va(image->bo);
1112 va += image->offset + image->tc_compat_zrange_offset;
1113
1114 radeon_emit(cmd_buffer->cs, PKT3(PKT3_COND_EXEC, 3, 0));
1115 radeon_emit(cmd_buffer->cs, va);
1116 radeon_emit(cmd_buffer->cs, va >> 32);
1117 radeon_emit(cmd_buffer->cs, 0);
1118 radeon_emit(cmd_buffer->cs, 3); /* SET_CONTEXT_REG size */
1119 }
1120
1121 radeon_set_context_reg(cmd_buffer->cs, db_z_info_reg, db_z_info);
1122 }
1123
1124 static void
1125 radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer,
1126 struct radv_ds_buffer_info *ds,
1127 struct radv_image *image,
1128 VkImageLayout layout)
1129 {
1130 uint32_t db_z_info = ds->db_z_info;
1131 uint32_t db_stencil_info = ds->db_stencil_info;
1132
1133 if (!radv_layout_has_htile(image, layout,
1134 radv_image_queue_family_mask(image,
1135 cmd_buffer->queue_family_index,
1136 cmd_buffer->queue_family_index))) {
1137 db_z_info &= C_028040_TILE_SURFACE_ENABLE;
1138 db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
1139 }
1140
1141 radeon_set_context_reg(cmd_buffer->cs, R_028008_DB_DEPTH_VIEW, ds->db_depth_view);
1142 radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface);
1143
1144
1145 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
1146 radeon_set_context_reg_seq(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, 3);
1147 radeon_emit(cmd_buffer->cs, ds->db_htile_data_base);
1148 radeon_emit(cmd_buffer->cs, S_028018_BASE_HI(ds->db_htile_data_base >> 32));
1149 radeon_emit(cmd_buffer->cs, ds->db_depth_size);
1150
1151 radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 10);
1152 radeon_emit(cmd_buffer->cs, db_z_info); /* DB_Z_INFO */
1153 radeon_emit(cmd_buffer->cs, db_stencil_info); /* DB_STENCIL_INFO */
1154 radeon_emit(cmd_buffer->cs, ds->db_z_read_base); /* DB_Z_READ_BASE */
1155 radeon_emit(cmd_buffer->cs, S_028044_BASE_HI(ds->db_z_read_base >> 32)); /* DB_Z_READ_BASE_HI */
1156 radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base); /* DB_STENCIL_READ_BASE */
1157 radeon_emit(cmd_buffer->cs, S_02804C_BASE_HI(ds->db_stencil_read_base >> 32)); /* DB_STENCIL_READ_BASE_HI */
1158 radeon_emit(cmd_buffer->cs, ds->db_z_write_base); /* DB_Z_WRITE_BASE */
1159 radeon_emit(cmd_buffer->cs, S_028054_BASE_HI(ds->db_z_write_base >> 32)); /* DB_Z_WRITE_BASE_HI */
1160 radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base); /* DB_STENCIL_WRITE_BASE */
1161 radeon_emit(cmd_buffer->cs, S_02805C_BASE_HI(ds->db_stencil_write_base >> 32)); /* DB_STENCIL_WRITE_BASE_HI */
1162
1163 radeon_set_context_reg_seq(cmd_buffer->cs, R_028068_DB_Z_INFO2, 2);
1164 radeon_emit(cmd_buffer->cs, ds->db_z_info2);
1165 radeon_emit(cmd_buffer->cs, ds->db_stencil_info2);
1166 } else {
1167 radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base);
1168
1169 radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 9);
1170 radeon_emit(cmd_buffer->cs, ds->db_depth_info); /* R_02803C_DB_DEPTH_INFO */
1171 radeon_emit(cmd_buffer->cs, db_z_info); /* R_028040_DB_Z_INFO */
1172 radeon_emit(cmd_buffer->cs, db_stencil_info); /* R_028044_DB_STENCIL_INFO */
1173 radeon_emit(cmd_buffer->cs, ds->db_z_read_base); /* R_028048_DB_Z_READ_BASE */
1174 radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base); /* R_02804C_DB_STENCIL_READ_BASE */
1175 radeon_emit(cmd_buffer->cs, ds->db_z_write_base); /* R_028050_DB_Z_WRITE_BASE */
1176 radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base); /* R_028054_DB_STENCIL_WRITE_BASE */
1177 radeon_emit(cmd_buffer->cs, ds->db_depth_size); /* R_028058_DB_DEPTH_SIZE */
1178 radeon_emit(cmd_buffer->cs, ds->db_depth_slice); /* R_02805C_DB_DEPTH_SLICE */
1179
1180 }
1181
1182 /* Update the ZRANGE_PRECISION value for the TC-compat bug. */
1183 radv_update_zrange_precision(cmd_buffer, ds, image, layout, true);
1184
1185 radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
1186 ds->pa_su_poly_offset_db_fmt_cntl);
1187 }
1188
1189 /**
1190 * Update the fast clear depth/stencil values if the image is bound as a
1191 * depth/stencil buffer.
1192 */
1193 static void
1194 radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer,
1195 struct radv_image *image,
1196 VkClearDepthStencilValue ds_clear_value,
1197 VkImageAspectFlags aspects)
1198 {
1199 struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
1200 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
1201 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1202 struct radv_attachment_info *att;
1203 uint32_t att_idx;
1204
1205 if (!framebuffer || !subpass)
1206 return;
1207
1208 att_idx = subpass->depth_stencil_attachment.attachment;
1209 if (att_idx == VK_ATTACHMENT_UNUSED)
1210 return;
1211
1212 att = &framebuffer->attachments[att_idx];
1213 if (att->attachment->image != image)
1214 return;
1215
1216 radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 2);
1217 radeon_emit(cs, ds_clear_value.stencil);
1218 radeon_emit(cs, fui(ds_clear_value.depth));
1219
1220 /* Update the ZRANGE_PRECISION value for the TC-compat bug. This is
1221 * only needed when clearing Z to 0.0.
1222 */
1223 if ((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
1224 ds_clear_value.depth == 0.0) {
1225 VkImageLayout layout = subpass->depth_stencil_attachment.layout;
1226
1227 radv_update_zrange_precision(cmd_buffer, &att->ds, image,
1228 layout, false);
1229 }
1230
1231 cmd_buffer->state.context_roll_without_scissor_emitted = true;
1232 }
1233
1234 /**
1235 * Set the clear depth/stencil values to the image's metadata.
1236 */
1237 static void
1238 radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1239 struct radv_image *image,
1240 VkClearDepthStencilValue ds_clear_value,
1241 VkImageAspectFlags aspects)
1242 {
1243 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1244 uint64_t va = radv_buffer_get_va(image->bo);
1245 unsigned reg_offset = 0, reg_count = 0;
1246
1247 va += image->offset + image->clear_value_offset;
1248
1249 if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
1250 ++reg_count;
1251 } else {
1252 ++reg_offset;
1253 va += 4;
1254 }
1255 if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
1256 ++reg_count;
1257
1258 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
1259 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
1260 S_370_WR_CONFIRM(1) |
1261 S_370_ENGINE_SEL(V_370_PFP));
1262 radeon_emit(cs, va);
1263 radeon_emit(cs, va >> 32);
1264 if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
1265 radeon_emit(cs, ds_clear_value.stencil);
1266 if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
1267 radeon_emit(cs, fui(ds_clear_value.depth));
1268 }
1269
1270 /**
1271 * Update the TC-compat metadata value for this image.
1272 */
1273 static void
1274 radv_set_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer,
1275 struct radv_image *image,
1276 uint32_t value)
1277 {
1278 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1279 uint64_t va = radv_buffer_get_va(image->bo);
1280 va += image->offset + image->tc_compat_zrange_offset;
1281
1282 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
1283 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
1284 S_370_WR_CONFIRM(1) |
1285 S_370_ENGINE_SEL(V_370_PFP));
1286 radeon_emit(cs, va);
1287 radeon_emit(cs, va >> 32);
1288 radeon_emit(cs, value);
1289 }
1290
1291 static void
1292 radv_update_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer,
1293 struct radv_image *image,
1294 VkClearDepthStencilValue ds_clear_value)
1295 {
1296 uint64_t va = radv_buffer_get_va(image->bo);
1297 va += image->offset + image->tc_compat_zrange_offset;
1298 uint32_t cond_val;
1299
1300 /* Conditionally set DB_Z_INFO.ZRANGE_PRECISION to 0 when the last
1301 * depth clear value is 0.0f.
1302 */
1303 cond_val = ds_clear_value.depth == 0.0f ? UINT_MAX : 0;
1304
1305 radv_set_tc_compat_zrange_metadata(cmd_buffer, image, cond_val);
1306 }
1307
1308 /**
1309 * Update the clear depth/stencil values for this image.
1310 */
1311 void
1312 radv_update_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1313 struct radv_image *image,
1314 VkClearDepthStencilValue ds_clear_value,
1315 VkImageAspectFlags aspects)
1316 {
1317 assert(radv_image_has_htile(image));
1318
1319 radv_set_ds_clear_metadata(cmd_buffer, image, ds_clear_value, aspects);
1320
1321 if (radv_image_is_tc_compat_htile(image) &&
1322 (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
1323 radv_update_tc_compat_zrange_metadata(cmd_buffer, image,
1324 ds_clear_value);
1325 }
1326
1327 radv_update_bound_fast_clear_ds(cmd_buffer, image, ds_clear_value,
1328 aspects);
1329 }
1330
1331 /**
1332 * Load the clear depth/stencil values from the image's metadata.
1333 */
1334 static void
1335 radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1336 struct radv_image *image)
1337 {
1338 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1339 VkImageAspectFlags aspects = vk_format_aspects(image->vk_format);
1340 uint64_t va = radv_buffer_get_va(image->bo);
1341 unsigned reg_offset = 0, reg_count = 0;
1342
1343 va += image->offset + image->clear_value_offset;
1344
1345 if (!radv_image_has_htile(image))
1346 return;
1347
1348 if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
1349 ++reg_count;
1350 } else {
1351 ++reg_offset;
1352 va += 4;
1353 }
1354 if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
1355 ++reg_count;
1356
1357 uint32_t reg = R_028028_DB_STENCIL_CLEAR + 4 * reg_offset;
1358
1359 if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) {
1360 radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, 0));
1361 radeon_emit(cs, va);
1362 radeon_emit(cs, va >> 32);
1363 radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
1364 radeon_emit(cs, reg_count);
1365 } else {
1366 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1367 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
1368 COPY_DATA_DST_SEL(COPY_DATA_REG) |
1369 (reg_count == 2 ? COPY_DATA_COUNT_SEL : 0));
1370 radeon_emit(cs, va);
1371 radeon_emit(cs, va >> 32);
1372 radeon_emit(cs, reg >> 2);
1373 radeon_emit(cs, 0);
1374
1375 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
1376 radeon_emit(cs, 0);
1377 }
1378 }
1379
1380 /*
1381  * With DCC, some clear colors don't require a CMASK (fast clear)
1382  * eliminate before the image is used as a texture. This sets a predicate
1383  * value that determines whether the eliminate pass is required.
1384 */
1385 void
1386 radv_update_fce_metadata(struct radv_cmd_buffer *cmd_buffer,
1387 struct radv_image *image, bool value)
1388 {
1389 uint64_t pred_val = value;
1390 uint64_t va = radv_buffer_get_va(image->bo);
1391 va += image->offset + image->fce_pred_offset;
1392
1393 assert(radv_image_has_dcc(image));
1394
1395 radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
1396 radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM) |
1397 S_370_WR_CONFIRM(1) |
1398 S_370_ENGINE_SEL(V_370_PFP));
1399 radeon_emit(cmd_buffer->cs, va);
1400 radeon_emit(cmd_buffer->cs, va >> 32);
1401 radeon_emit(cmd_buffer->cs, pred_val);
1402 radeon_emit(cmd_buffer->cs, pred_val >> 32);
1403 }
1404
1405 /**
1406 * Update the DCC predicate to reflect the compression state.
1407 */
1408 void
1409 radv_update_dcc_metadata(struct radv_cmd_buffer *cmd_buffer,
1410 struct radv_image *image, bool value)
1411 {
1412 uint64_t pred_val = value;
1413 uint64_t va = radv_buffer_get_va(image->bo);
1414 va += image->offset + image->dcc_pred_offset;
1415
1416 assert(radv_image_has_dcc(image));
1417
1418 radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
1419 radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM) |
1420 S_370_WR_CONFIRM(1) |
1421 S_370_ENGINE_SEL(V_370_PFP));
1422 radeon_emit(cmd_buffer->cs, va);
1423 radeon_emit(cmd_buffer->cs, va >> 32);
1424 radeon_emit(cmd_buffer->cs, pred_val);
1425 radeon_emit(cmd_buffer->cs, pred_val >> 32);
1426 }
1427
1428 /**
1429 * Update the fast clear color values if the image is bound as a color buffer.
1430 */
1431 static void
1432 radv_update_bound_fast_clear_color(struct radv_cmd_buffer *cmd_buffer,
1433 struct radv_image *image,
1434 int cb_idx,
1435 uint32_t color_values[2])
1436 {
1437 struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
1438 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
1439 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1440 struct radv_attachment_info *att;
1441 uint32_t att_idx;
1442
1443 if (!framebuffer || !subpass)
1444 return;
1445
1446 att_idx = subpass->color_attachments[cb_idx].attachment;
1447 if (att_idx == VK_ATTACHMENT_UNUSED)
1448 return;
1449
1450 att = &framebuffer->attachments[att_idx];
1451 if (att->attachment->image != image)
1452 return;
1453
1454 radeon_set_context_reg_seq(cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c, 2);
1455 radeon_emit(cs, color_values[0]);
1456 radeon_emit(cs, color_values[1]);
1457
1458 cmd_buffer->state.context_roll_without_scissor_emitted = true;
1459 }
1460
1461 /**
1462 * Set the clear color values to the image's metadata.
1463 */
1464 static void
1465 radv_set_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1466 struct radv_image *image,
1467 uint32_t color_values[2])
1468 {
1469 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1470 uint64_t va = radv_buffer_get_va(image->bo);
1471
1472 va += image->offset + image->clear_value_offset;
1473
1474 assert(radv_image_has_cmask(image) || radv_image_has_dcc(image));
1475
1476 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0));
1477 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
1478 S_370_WR_CONFIRM(1) |
1479 S_370_ENGINE_SEL(V_370_PFP));
1480 radeon_emit(cs, va);
1481 radeon_emit(cs, va >> 32);
1482 radeon_emit(cs, color_values[0]);
1483 radeon_emit(cs, color_values[1]);
1484 }
1485
1486 /**
1487 * Update the clear color values for this image.
1488 */
1489 void
1490 radv_update_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1491 struct radv_image *image,
1492 int cb_idx,
1493 uint32_t color_values[2])
1494 {
1495 assert(radv_image_has_cmask(image) || radv_image_has_dcc(image));
1496
1497 radv_set_color_clear_metadata(cmd_buffer, image, color_values);
1498
1499 radv_update_bound_fast_clear_color(cmd_buffer, image, cb_idx,
1500 color_values);
1501 }
1502
1503 /**
1504 * Load the clear color values from the image's metadata.
1505 */
1506 static void
1507 radv_load_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1508 struct radv_image *image,
1509 int cb_idx)
1510 {
1511 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1512 uint64_t va = radv_buffer_get_va(image->bo);
1513
1514 va += image->offset + image->clear_value_offset;
1515
1516 if (!radv_image_has_cmask(image) && !radv_image_has_dcc(image))
1517 return;
1518
1519 uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c;
1520
1521 if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) {
1522 radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, cmd_buffer->state.predicating));
1523 radeon_emit(cs, va);
1524 radeon_emit(cs, va >> 32);
1525 radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
1526 radeon_emit(cs, 2);
1527 } else {
1528 /* TODO: Figure out how to use LOAD_CONTEXT_REG on SI/CIK. */
1529 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
1530 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
1531 COPY_DATA_DST_SEL(COPY_DATA_REG) |
1532 COPY_DATA_COUNT_SEL);
1533 radeon_emit(cs, va);
1534 radeon_emit(cs, va >> 32);
1535 radeon_emit(cs, reg >> 2);
1536 radeon_emit(cs, 0);
1537
1538 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
1539 radeon_emit(cs, 0);
1540 }
1541 }
1542
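/* Emit the color and depth/stencil surface state for all attachments of the
 * current subpass, load their fast-clear values from image metadata, and
 * program the window scissor and the DCC overwrite-combiner watermark.
 */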
1543 static void
1544 radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
1545 {
1546 int i;
1547 struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
1548 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
1549 unsigned num_bpp64_colorbufs = 0;
1550
1551 /* this may happen for inherited secondary recording */
1552 if (!framebuffer)
1553 return;
1554
1555 for (i = 0; i < 8; ++i) {
1556 if (i >= subpass->color_count || subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
1557 radeon_set_context_reg(cmd_buffer->cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
1558 S_028C70_FORMAT(V_028C70_COLOR_INVALID));
1559 continue;
1560 }
1561
1562 int idx = subpass->color_attachments[i].attachment;
1563 struct radv_attachment_info *att = &framebuffer->attachments[idx];
1564 struct radv_image *image = att->attachment->image;
1565 VkImageLayout layout = subpass->color_attachments[i].layout;
1566
1567 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
1568
1569 assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
1570 radv_emit_fb_color_state(cmd_buffer, i, att, image, layout);
1571
1572 radv_load_color_clear_metadata(cmd_buffer, image, i);
1573
1574 if (image->surface.bpe >= 8)
1575 num_bpp64_colorbufs++;
1576 }
1577
1578 if(subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
1579 int idx = subpass->depth_stencil_attachment.attachment;
1580 VkImageLayout layout = subpass->depth_stencil_attachment.layout;
1581 struct radv_attachment_info *att = &framebuffer->attachments[idx];
1582 struct radv_image *image = att->attachment->image;
1583 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
1584 MAYBE_UNUSED uint32_t queue_mask = radv_image_queue_family_mask(image,
1585 cmd_buffer->queue_family_index,
1586 cmd_buffer->queue_family_index);
1587 /* We currently don't support writing decompressed HTILE */
1588 assert(radv_layout_has_htile(image, layout, queue_mask) ==
1589 radv_layout_is_htile_compressed(image, layout, queue_mask));
1590
1591 radv_emit_fb_ds_state(cmd_buffer, &att->ds, image, layout);
1592
1593 if (att->ds.offset_scale != cmd_buffer->state.offset_scale) {
1594 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
1595 cmd_buffer->state.offset_scale = att->ds.offset_scale;
1596 }
1597 radv_load_ds_clear_metadata(cmd_buffer, image);
1598 } else {
1599 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
1600 radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 2);
1601 else
1602 radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2);
1603
1604 radeon_emit(cmd_buffer->cs, S_028040_FORMAT(V_028040_Z_INVALID)); /* DB_Z_INFO */
1605 radeon_emit(cmd_buffer->cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* DB_STENCIL_INFO */
1606 }
1607 radeon_set_context_reg(cmd_buffer->cs, R_028208_PA_SC_WINDOW_SCISSOR_BR,
1608 S_028208_BR_X(framebuffer->width) |
1609 S_028208_BR_Y(framebuffer->height));
1610
1611 if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) {
1612 uint8_t watermark = 4; /* Default value for VI. */
1613
1614 /* For optimal DCC performance. */
1615 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
1616 if (num_bpp64_colorbufs >= 5) {
1617 watermark = 8;
1618 } else {
1619 watermark = 6;
1620 }
1621 }
1622
1623 radeon_set_context_reg(cmd_buffer->cs, R_028424_CB_DCC_CONTROL,
1624 S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(1) |
1625 S_028424_OVERWRITE_COMBINER_WATERMARK(watermark));
1626 }
1627
1628 if (cmd_buffer->device->dfsm_allowed) {
1629 radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1630 radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
1631 }
1632
1633 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_FRAMEBUFFER;
1634 }
1635
1636 static void
1637 radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer)
1638 {
1639 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1640 struct radv_cmd_state *state = &cmd_buffer->state;
1641
1642 if (state->index_type != state->last_index_type) {
1643 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
1644 radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE,
1645 2, state->index_type);
1646 } else {
1647 radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
1648 radeon_emit(cs, state->index_type);
1649 }
1650
1651 state->last_index_type = state->index_type;
1652 }
1653
1654 radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
1655 radeon_emit(cs, state->index_va);
1656 radeon_emit(cs, state->index_va >> 32);
1657
1658 radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
1659 radeon_emit(cs, state->max_index_count);
1660
1661 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_INDEX_BUFFER;
1662 }
1663
1664 void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
1665 {
1666 bool has_perfect_queries = cmd_buffer->state.perfect_occlusion_queries_enabled;
1667 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
1668 uint32_t pa_sc_mode_cntl_1 =
1669 pipeline ? pipeline->graphics.ms.pa_sc_mode_cntl_1 : 0;
1670 uint32_t db_count_control;
1671
1672 if (!cmd_buffer->state.active_occlusion_queries) {
1673 if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
1674 if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
1675 pipeline->graphics.disable_out_of_order_rast_for_occlusion &&
1676 has_perfect_queries) {
1677 /* Re-enable out-of-order rasterization if the
1678 * bound pipeline supports it and if it has
1679 * been disabled before starting any perfect
1680 * occlusion queries.
1681 */
1682 radeon_set_context_reg(cmd_buffer->cs,
1683 R_028A4C_PA_SC_MODE_CNTL_1,
1684 pa_sc_mode_cntl_1);
1685 }
1686 }
1687 db_count_control = S_028004_ZPASS_INCREMENT_DISABLE(1);
1688 } else {
1689 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
1690 uint32_t sample_rate = subpass ? util_logbase2(subpass->max_sample_count) : 0;
1691
1692 if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
1693 db_count_control =
1694 S_028004_PERFECT_ZPASS_COUNTS(has_perfect_queries) |
1695 S_028004_SAMPLE_RATE(sample_rate) |
1696 S_028004_ZPASS_ENABLE(1) |
1697 S_028004_SLICE_EVEN_ENABLE(1) |
1698 S_028004_SLICE_ODD_ENABLE(1);
1699
1700 if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
1701 pipeline->graphics.disable_out_of_order_rast_for_occlusion &&
1702 has_perfect_queries) {
1703 /* If the bound pipeline has enabled
1704 * out-of-order rasterization, we should
1705 * disable it before starting any perfect
1706 * occlusion queries.
1707 */
1708 pa_sc_mode_cntl_1 &= C_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE;
1709
1710 radeon_set_context_reg(cmd_buffer->cs,
1711 R_028A4C_PA_SC_MODE_CNTL_1,
1712 pa_sc_mode_cntl_1);
1713 }
1714 } else {
1715 db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
1716 S_028004_SAMPLE_RATE(sample_rate);
1717 }
1718 }
1719
1720 radeon_set_context_reg(cmd_buffer->cs, R_028004_DB_COUNT_CONTROL, db_count_control);
1721
1722 cmd_buffer->state.context_roll_without_scissor_emitted = true;
1723 }
1724
1725 static void
1726 radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer)
1727 {
1728 uint32_t states = cmd_buffer->state.dirty & cmd_buffer->state.emitted_pipeline->graphics.needed_dynamic_state;
1729
1730 if (states & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
1731 radv_emit_viewport(cmd_buffer);
1732
1733 if (states & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR | RADV_CMD_DIRTY_DYNAMIC_VIEWPORT) &&
1734 !cmd_buffer->device->physical_device->has_scissor_bug)
1735 radv_emit_scissor(cmd_buffer);
1736
1737 if (states & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)
1738 radv_emit_line_width(cmd_buffer);
1739
1740 if (states & RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS)
1741 radv_emit_blend_constants(cmd_buffer);
1742
1743 if (states & (RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE |
1744 RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
1745 RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK))
1746 radv_emit_stencil(cmd_buffer);
1747
1748 if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS)
1749 radv_emit_depth_bounds(cmd_buffer);
1750
1751 if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)
1752 radv_emit_depth_bias(cmd_buffer);
1753
1754 if (states & RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE)
1755 radv_emit_discard_rectangle(cmd_buffer);
1756
1757 cmd_buffer->state.dirty &= ~states;
1758 }
1759
1760 static void
1761 radv_flush_push_descriptors(struct radv_cmd_buffer *cmd_buffer,
1762 VkPipelineBindPoint bind_point)
1763 {
1764 struct radv_descriptor_state *descriptors_state =
1765 radv_get_descriptors_state(cmd_buffer, bind_point);
1766 struct radv_descriptor_set *set = &descriptors_state->push_set.set;
1767 unsigned bo_offset;
1768
1769 if (!radv_cmd_buffer_upload_data(cmd_buffer, set->size, 32,
1770 set->mapped_ptr,
1771 &bo_offset))
1772 return;
1773
1774 set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
1775 set->va += bo_offset;
1776 }
1777
1778 static void
1779 radv_flush_indirect_descriptor_sets(struct radv_cmd_buffer *cmd_buffer,
1780 VkPipelineBindPoint bind_point)
1781 {
1782 struct radv_descriptor_state *descriptors_state =
1783 radv_get_descriptors_state(cmd_buffer, bind_point);
1784 uint32_t size = MAX_SETS * 4;
1785 uint32_t offset;
1786 void *ptr;
1787
1788 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size,
1789 256, &offset, &ptr))
1790 return;
1791
1792 for (unsigned i = 0; i < MAX_SETS; i++) {
1793 uint32_t *uptr = ((uint32_t *)ptr) + i;
1794 uint64_t set_va = 0;
1795 struct radv_descriptor_set *set = descriptors_state->sets[i];
1796 if (descriptors_state->valid & (1u << i))
1797 set_va = set->va;
1798 uptr[0] = set_va & 0xffffffff;
1799 }
1800
1801 uint64_t va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
1802 va += offset;
1803
1804 if (cmd_buffer->state.pipeline) {
1805 if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX])
1806 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
1807 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1808
1809 if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_FRAGMENT])
1810 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_FRAGMENT,
1811 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1812
1813 if (radv_pipeline_has_gs(cmd_buffer->state.pipeline))
1814 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
1815 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1816
1817 if (radv_pipeline_has_tess(cmd_buffer->state.pipeline))
1818 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_TESS_CTRL,
1819 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1820
1821 if (radv_pipeline_has_tess(cmd_buffer->state.pipeline))
1822 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_TESS_EVAL,
1823 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1824 }
1825
1826 if (cmd_buffer->state.compute_pipeline)
1827 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.compute_pipeline, MESA_SHADER_COMPUTE,
1828 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1829 }
1830
1831 static void
1832 radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer,
1833 VkShaderStageFlags stages)
1834 {
1835 VkPipelineBindPoint bind_point = stages & VK_SHADER_STAGE_COMPUTE_BIT ?
1836 VK_PIPELINE_BIND_POINT_COMPUTE :
1837 VK_PIPELINE_BIND_POINT_GRAPHICS;
1838 struct radv_descriptor_state *descriptors_state =
1839 radv_get_descriptors_state(cmd_buffer, bind_point);
1840 struct radv_cmd_state *state = &cmd_buffer->state;
1841 bool flush_indirect_descriptors;
1842
1843 if (!descriptors_state->dirty)
1844 return;
1845
1846 if (descriptors_state->push_dirty)
1847 radv_flush_push_descriptors(cmd_buffer, bind_point);
1848
1849 flush_indirect_descriptors =
1850 (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS &&
1851 state->pipeline && state->pipeline->need_indirect_descriptor_sets) ||
1852 (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE &&
1853 state->compute_pipeline && state->compute_pipeline->need_indirect_descriptor_sets);
1854
1855 if (flush_indirect_descriptors)
1856 radv_flush_indirect_descriptor_sets(cmd_buffer, bind_point);
1857
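/* Descriptive note: reserve worst-case command space up front, allowing up
 * to 4 dwords of SH register writes per descriptor set and shader stage.
 * The assert after the emit loops checks that this estimate was large
 * enough.
 */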
1858 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
1859 cmd_buffer->cs,
1860 MAX_SETS * MESA_SHADER_STAGES * 4);
1861
1862 if (cmd_buffer->state.pipeline) {
1863 radv_foreach_stage(stage, stages) {
1864 if (!cmd_buffer->state.pipeline->shaders[stage])
1865 continue;
1866
1867 radv_emit_descriptor_pointers(cmd_buffer,
1868 cmd_buffer->state.pipeline,
1869 descriptors_state, stage);
1870 }
1871 }
1872
1873 if (cmd_buffer->state.compute_pipeline &&
1874 (stages & VK_SHADER_STAGE_COMPUTE_BIT)) {
1875 radv_emit_descriptor_pointers(cmd_buffer,
1876 cmd_buffer->state.compute_pipeline,
1877 descriptors_state,
1878 MESA_SHADER_COMPUTE);
1879 }
1880
1881 descriptors_state->dirty = 0;
1882 descriptors_state->push_dirty = false;
1883
1884 assert(cmd_buffer->cs->cdw <= cdw_max);
1885
1886 if (unlikely(cmd_buffer->device->trace_bo))
1887 radv_save_descriptors(cmd_buffer, bind_point);
1888 }
1889
1890 static void
1891 radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
1892 VkShaderStageFlags stages)
1893 {
1894 struct radv_pipeline *pipeline = stages & VK_SHADER_STAGE_COMPUTE_BIT
1895 ? cmd_buffer->state.compute_pipeline
1896 : cmd_buffer->state.pipeline;
1897 VkPipelineBindPoint bind_point = stages & VK_SHADER_STAGE_COMPUTE_BIT ?
1898 VK_PIPELINE_BIND_POINT_COMPUTE :
1899 VK_PIPELINE_BIND_POINT_GRAPHICS;
1900 struct radv_descriptor_state *descriptors_state =
1901 radv_get_descriptors_state(cmd_buffer, bind_point);
1902 struct radv_pipeline_layout *layout = pipeline->layout;
1903 struct radv_shader_variant *shader, *prev_shader;
1904 unsigned offset;
1905 void *ptr;
1906 uint64_t va;
1907
1908 stages &= cmd_buffer->push_constant_stages;
1909 if (!stages ||
1910 (!layout->push_constant_size && !layout->dynamic_offset_count))
1911 return;
1912
1913 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
1914 16 * layout->dynamic_offset_count,
1915 256, &offset, &ptr))
1916 return;
1917
1918 memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
1919 memcpy((char*)ptr + layout->push_constant_size,
1920 descriptors_state->dynamic_buffers,
1921 16 * layout->dynamic_offset_count);
1922
1923 va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
1924 va += offset;
1925
1926 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
1927 cmd_buffer->cs, MESA_SHADER_STAGES * 4);
1928
1929 prev_shader = NULL;
1930 radv_foreach_stage(stage, stages) {
1931 shader = radv_get_shader(pipeline, stage);
1932
1933 /* Avoid redundantly emitting the address for merged stages. */
1934 if (shader && shader != prev_shader) {
1935 radv_emit_userdata_address(cmd_buffer, pipeline, stage,
1936 AC_UD_PUSH_CONSTANTS, va);
1937
1938 prev_shader = shader;
1939 }
1940 }
1941
1942 cmd_buffer->push_constant_stages &= ~stages;
1943 assert(cmd_buffer->cs->cdw <= cdw_max);
1944 }
1945
1946 static void
1947 radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer,
1948 bool pipeline_is_dirty)
1949 {
1950 if ((pipeline_is_dirty ||
1951 (cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) &&
1952 cmd_buffer->state.pipeline->vertex_elements.count &&
1953 radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.has_vertex_buffers) {
1954 struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements;
1955 unsigned vb_offset;
1956 void *vb_ptr;
1957 uint32_t i = 0;
1958 uint32_t count = velems->count;
1959 uint64_t va;
1960
1961 /* Allocate some descriptor state for vertex buffers. */
1962 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, count * 16, 256,
1963 &vb_offset, &vb_ptr))
1964 return;
1965
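/* Each bound vertex buffer is described by a 4-dword GCN buffer resource
 * built below: dword0 holds the low 32 bits of the VA, dword1 the high VA
 * bits plus the binding stride, dword2 the number of records (or the raw
 * size in bytes when the stride is zero or the chip is newer than CIK),
 * and dword3 the dst_sel/format word precomputed in rsrc_word3.
 */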
1966 for (i = 0; i < count; i++) {
1967 uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
1968 uint32_t offset;
1969 int vb = velems->binding[i];
1970 struct radv_buffer *buffer = cmd_buffer->vertex_bindings[vb].buffer;
1971 uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb];
1972
1973 va = radv_buffer_get_va(buffer->bo);
1974
1975 offset = cmd_buffer->vertex_bindings[vb].offset + velems->offset[i];
1976 va += offset + buffer->offset;
1977 desc[0] = va;
1978 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
1979 if (cmd_buffer->device->physical_device->rad_info.chip_class <= CIK && stride)
1980 desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1;
1981 else
1982 desc[2] = buffer->size - offset;
1983 desc[3] = velems->rsrc_word3[i];
1984 }
1985
1986 va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
1987 va += vb_offset;
1988
1989 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
1990 AC_UD_VS_VERTEX_BUFFERS, va);
1991
1992 cmd_buffer->state.vb_va = va;
1993 cmd_buffer->state.vb_size = count * 16;
1994 cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_VBO_DESCRIPTORS;
1995 }
1996 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_VERTEX_BUFFER;
1997 }
1998
1999 static void
2000 radv_emit_streamout_buffers(struct radv_cmd_buffer *cmd_buffer, uint64_t va)
2001 {
2002 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
2003 struct radv_userdata_info *loc;
2004 uint32_t base_reg;
2005
2006 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
2007 if (!radv_get_shader(pipeline, stage))
2008 continue;
2009
2010 loc = radv_lookup_user_sgpr(pipeline, stage,
2011 AC_UD_STREAMOUT_BUFFERS);
2012 if (loc->sgpr_idx == -1)
2013 continue;
2014
2015 base_reg = pipeline->user_data_0[stage];
2016
2017 radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
2018 base_reg + loc->sgpr_idx * 4, va, false);
2019 }
2020
2021 if (pipeline->gs_copy_shader) {
2022 loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_STREAMOUT_BUFFERS];
2023 if (loc->sgpr_idx != -1) {
2024 base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
2025
2026 radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
2027 base_reg + loc->sgpr_idx * 4, va, false);
2028 }
2029 }
2030 }
2031
2032 static void
2033 radv_flush_streamout_descriptors(struct radv_cmd_buffer *cmd_buffer)
2034 {
2035 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_STREAMOUT_BUFFER) {
2036 struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
2037 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
2038 unsigned so_offset;
2039 void *so_ptr;
2040 uint64_t va;
2041
2042 /* Allocate some descriptor state for streamout buffers. */
2043 if (!radv_cmd_buffer_upload_alloc(cmd_buffer,
2044 MAX_SO_BUFFERS * 16, 256,
2045 &so_offset, &so_ptr))
2046 return;
2047
2048 for (uint32_t i = 0; i < MAX_SO_BUFFERS; i++) {
2049 struct radv_buffer *buffer = sb[i].buffer;
2050 uint32_t *desc = &((uint32_t *)so_ptr)[i * 4];
2051
2052 if (!(so->enabled_mask & (1 << i)))
2053 continue;
2054
2055 va = radv_buffer_get_va(buffer->bo) + buffer->offset;
2056
2057 va += sb[i].offset;
2058
2059 /* Set the descriptor.
2060 *
2061 * On VI, the format must be non-INVALID, otherwise
2062 * the buffer will be considered not bound and store
2063 * instructions will be no-ops.
2064 */
2065 desc[0] = va;
2066 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
2067 desc[2] = 0xffffffff;
2068 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
2069 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
2070 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
2071 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
2072 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
2073 }
2074
2075 va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2076 va += so_offset;
2077
2078 radv_emit_streamout_buffers(cmd_buffer, va);
2079 }
2080
2081 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_STREAMOUT_BUFFER;
2082 }
2083
2084 static void
2085 radv_upload_graphics_shader_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty)
2086 {
2087 radv_flush_vertex_descriptors(cmd_buffer, pipeline_is_dirty);
2088 radv_flush_streamout_descriptors(cmd_buffer);
2089 radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
2090 radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
2091 }
2092
2093 struct radv_draw_info {
2094 /**
2095 * Number of vertices.
2096 */
2097 uint32_t count;
2098
2099 /**
2100 * Index of the first vertex.
2101 */
2102 int32_t vertex_offset;
2103
2104 /**
2105 * First instance id.
2106 */
2107 uint32_t first_instance;
2108
2109 /**
2110 * Number of instances.
2111 */
2112 uint32_t instance_count;
2113
2114 /**
2115 * First index (indexed draws only).
2116 */
2117 uint32_t first_index;
2118
2119 /**
2120 * Whether it's an indexed draw.
2121 */
2122 bool indexed;
2123
2124 /**
2125 * Indirect draw parameters resource.
2126 */
2127 struct radv_buffer *indirect;
2128 uint64_t indirect_offset;
2129 uint32_t stride;
2130
2131 /**
2132 * Draw count parameters resource.
2133 */
2134 struct radv_buffer *count_buffer;
2135 uint64_t count_buffer_offset;
2136
2137 /**
2138 * Stream output parameters resource.
2139 */
2140 struct radv_buffer *strmout_buffer;
2141 uint64_t strmout_buffer_offset;
2142 };
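/* Illustrative sketch only (field values are the hypothetical API
 * parameters): a direct vkCmdDrawIndexed call would be described roughly as
 *
 *     struct radv_draw_info info = {
 *             .indexed = true,
 *             .count = indexCount,
 *             .instance_count = instanceCount,
 *             .first_index = firstIndex,
 *             .vertex_offset = vertexOffset,
 *             .first_instance = firstInstance,
 *     };
 *
 * while indirect variants instead point 'indirect' (and optionally
 * 'count_buffer') at the parameter buffers and set 'stride'.
 */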
2143
2144 static void
2145 radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer,
2146 const struct radv_draw_info *draw_info)
2147 {
2148 struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
2149 struct radv_cmd_state *state = &cmd_buffer->state;
2150 struct radeon_cmdbuf *cs = cmd_buffer->cs;
2151 uint32_t ia_multi_vgt_param;
2152 int32_t primitive_reset_en;
2153
2154 /* Draw state. */
2155 ia_multi_vgt_param =
2156 si_get_ia_multi_vgt_param(cmd_buffer, draw_info->instance_count > 1,
2157 draw_info->indirect,
2158 draw_info->indirect ? 0 : draw_info->count);
2159
2160 if (state->last_ia_multi_vgt_param != ia_multi_vgt_param) {
2161 if (info->chip_class >= GFX9) {
2162 radeon_set_uconfig_reg_idx(cs,
2163 R_030960_IA_MULTI_VGT_PARAM,
2164 4, ia_multi_vgt_param);
2165 } else if (info->chip_class >= CIK) {
2166 radeon_set_context_reg_idx(cs,
2167 R_028AA8_IA_MULTI_VGT_PARAM,
2168 1, ia_multi_vgt_param);
2169 } else {
2170 radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM,
2171 ia_multi_vgt_param);
2172 }
2173 state->last_ia_multi_vgt_param = ia_multi_vgt_param;
2174 }
2175
2176 /* Primitive restart. */
2177 primitive_reset_en =
2178 draw_info->indexed && state->pipeline->graphics.prim_restart_enable;
2179
2180 if (primitive_reset_en != state->last_primitive_reset_en) {
2181 state->last_primitive_reset_en = primitive_reset_en;
2182 if (info->chip_class >= GFX9) {
2183 radeon_set_uconfig_reg(cs,
2184 R_03092C_VGT_MULTI_PRIM_IB_RESET_EN,
2185 primitive_reset_en);
2186 } else {
2187 radeon_set_context_reg(cs,
2188 R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
2189 primitive_reset_en);
2190 }
2191 }
2192
2193 if (primitive_reset_en) {
2194 uint32_t primitive_reset_index =
2195 state->index_type ? 0xffffffffu : 0xffffu;
2196
2197 if (primitive_reset_index != state->last_primitive_reset_index) {
2198 radeon_set_context_reg(cs,
2199 R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
2200 primitive_reset_index);
2201 state->last_primitive_reset_index = primitive_reset_index;
2202 }
2203 }
2204
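/* Draws sourced from a transform feedback counter
 * (vkCmdDrawIndirectByteCountEXT) are handled below: the counter buffer
 * value is copied into the VGT "draw opaque" registers so the hardware
 * derives the vertex count from the written byte count and the vertex
 * stride.
 */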
2205 if (draw_info->strmout_buffer) {
2206 uint64_t va = radv_buffer_get_va(draw_info->strmout_buffer->bo);
2207
2208 va += draw_info->strmout_buffer->offset +
2209 draw_info->strmout_buffer_offset;
2210
2211 radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
2212 draw_info->stride);
2213
2214 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
2215 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
2216 COPY_DATA_DST_SEL(COPY_DATA_REG) |
2217 COPY_DATA_WR_CONFIRM);
2218 radeon_emit(cs, va);
2219 radeon_emit(cs, va >> 32);
2220 radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
2221 radeon_emit(cs, 0); /* unused */
2222
2223 radv_cs_add_buffer(cmd_buffer->device->ws, cs, draw_info->strmout_buffer->bo);
2224 }
2225 }
2226
2227 static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer,
2228 VkPipelineStageFlags src_stage_mask)
2229 {
2230 if (src_stage_mask & (VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
2231 VK_PIPELINE_STAGE_TRANSFER_BIT |
2232 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
2233 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
2234 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
2235 }
2236
2237 if (src_stage_mask & (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
2238 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
2239 VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
2240 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
2241 VK_PIPELINE_STAGE_TRANSFER_BIT |
2242 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
2243 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT |
2244 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
2245 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
2246 } else if (src_stage_mask & (VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
2247 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
2248 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
2249 VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
2250 VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
2251 VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
2252 VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT)) {
2253 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
2254 }
2255 }
2256
2257 static enum radv_cmd_flush_bits
2258 radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer,
2259 VkAccessFlags src_flags,
2260 struct radv_image *image)
2261 {
2262 bool flush_CB_meta = true, flush_DB_meta = true;
2263 enum radv_cmd_flush_bits flush_bits = 0;
2264 uint32_t b;
2265
2266 if (image) {
2267 if (!radv_image_has_CB_metadata(image))
2268 flush_CB_meta = false;
2269 if (!radv_image_has_htile(image))
2270 flush_DB_meta = false;
2271 }
2272
2273 for_each_bit(b, src_flags) {
2274 switch ((VkAccessFlagBits)(1 << b)) {
2275 case VK_ACCESS_SHADER_WRITE_BIT:
2276 case VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT:
2277 case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT:
2278 flush_bits |= RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
2279 break;
2280 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
2281 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
2282 if (flush_CB_meta)
2283 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
2284 break;
2285 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
2286 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
2287 if (flush_DB_meta)
2288 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
2289 break;
2290 case VK_ACCESS_TRANSFER_WRITE_BIT:
2291 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
2292 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
2293 RADV_CMD_FLAG_INV_GLOBAL_L2;
2294
2295 if (flush_CB_meta)
2296 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
2297 if (flush_DB_meta)
2298 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
2299 break;
2300 default:
2301 break;
2302 }
2303 }
2304 return flush_bits;
2305 }
2306
2307 static enum radv_cmd_flush_bits
2308 radv_dst_access_flush(struct radv_cmd_buffer *cmd_buffer,
2309 VkAccessFlags dst_flags,
2310 struct radv_image *image)
2311 {
2312 bool flush_CB_meta = true, flush_DB_meta = true;
2313 enum radv_cmd_flush_bits flush_bits = 0;
2314 bool flush_CB = true, flush_DB = true;
2315 bool image_is_coherent = false;
2316 uint32_t b;
2317
2318 if (image) {
2319 if (!(image->usage & VK_IMAGE_USAGE_STORAGE_BIT)) {
2320 flush_CB = false;
2321 flush_DB = false;
2322 }
2323
2324 if (!radv_image_has_CB_metadata(image))
2325 flush_CB_meta = false;
2326 if (!radv_image_has_htile(image))
2327 flush_DB_meta = false;
2328
2329 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
2330 if (image->info.samples == 1 &&
2331 (image->usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
2332 VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) &&
2333 !vk_format_is_stencil(image->vk_format)) {
2334 /* Single-sample color and single-sample depth
2335 * (not stencil) are coherent with shaders on
2336 * GFX9.
2337 */
2338 image_is_coherent = true;
2339 }
2340 }
2341 }
2342
2343 for_each_bit(b, dst_flags) {
2344 switch ((VkAccessFlagBits)(1 << b)) {
2345 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
2346 case VK_ACCESS_INDEX_READ_BIT:
2347 case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT:
2348 break;
2349 case VK_ACCESS_UNIFORM_READ_BIT:
2350 flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 | RADV_CMD_FLAG_INV_SMEM_L1;
2351 break;
2352 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
2353 case VK_ACCESS_TRANSFER_READ_BIT:
2354 case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
2355 flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 |
2356 RADV_CMD_FLAG_INV_GLOBAL_L2;
2357 break;
2358 case VK_ACCESS_SHADER_READ_BIT:
2359 flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1;
2360
2361 if (!image_is_coherent)
2362 flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2;
2363 break;
2364 case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
2365 if (flush_CB)
2366 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
2367 if (flush_CB_meta)
2368 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
2369 break;
2370 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT:
2371 if (flush_DB)
2372 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
2373 if (flush_DB_meta)
2374 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
2375 break;
2376 default:
2377 break;
2378 }
2379 }
2380 return flush_bits;
2381 }
2382
2383 void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer,
2384 const struct radv_subpass_barrier *barrier)
2385 {
2386 cmd_buffer->state.flush_bits |= radv_src_access_flush(cmd_buffer, barrier->src_access_mask,
2387 NULL);
2388 radv_stage_flush(cmd_buffer, barrier->src_stage_mask);
2389 cmd_buffer->state.flush_bits |= radv_dst_access_flush(cmd_buffer, barrier->dst_access_mask,
2390 NULL);
2391 }
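/* Worked example derived from the two helpers above (hypothetical masks):
 * a dependency with srcAccessMask = COLOR_ATTACHMENT_WRITE and
 * dstAccessMask = SHADER_READ requests FLUSH_AND_INV_CB (plus _CB_META
 * when the image has CB metadata) for the source half, and INV_VMEM_L1
 * (plus INV_GLOBAL_L2 for non-coherent images) for the destination half.
 */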
2392
2393 static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer,
2394 struct radv_subpass_attachment att)
2395 {
2396 unsigned idx = att.attachment;
2397 struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment;
2398 VkImageSubresourceRange range;
2399 range.aspectMask = 0;
2400 range.baseMipLevel = view->base_mip;
2401 range.levelCount = 1;
2402 range.baseArrayLayer = view->base_layer;
2403 range.layerCount = cmd_buffer->state.framebuffer->layers;
2404
2405 if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
2406 /* If the current subpass uses multiview, the driver might have
2407 * performed a fast color/depth clear to the whole image
2408 * (including all layers). To make sure the driver will
2409 * decompress the image correctly (if needed), we have to
2410 * account for the "real" number of layers. If the view mask is
2411 * sparse, this will decompress more layers than needed.
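* For example, a view mask of 0b1011 gives layerCount = 4 even though
* only three views are active.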
2412 */
2413 range.layerCount = util_last_bit(cmd_buffer->state.subpass->view_mask);
2414 }
2415
2416 radv_handle_image_transition(cmd_buffer,
2417 view->image,
2418 cmd_buffer->state.attachments[idx].current_layout,
2419 att.layout, 0, 0, &range);
2420
2421 cmd_buffer->state.attachments[idx].current_layout = att.layout;
2422
2423
2424 }
2425
2426 void
2427 radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer,
2428 const struct radv_subpass *subpass, bool transitions)
2429 {
2430 if (transitions) {
2431 radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);
2432
2433 for (unsigned i = 0; i < subpass->color_count; ++i) {
2434 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
2435 radv_handle_subpass_image_transition(cmd_buffer,
2436 subpass->color_attachments[i]);
2437 }
2438
2439 for (unsigned i = 0; i < subpass->input_count; ++i) {
2440 radv_handle_subpass_image_transition(cmd_buffer,
2441 subpass->input_attachments[i]);
2442 }
2443
2444 if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
2445 radv_handle_subpass_image_transition(cmd_buffer,
2446 subpass->depth_stencil_attachment);
2447 }
2448 }
2449
2450 cmd_buffer->state.subpass = subpass;
2451
2452 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER;
2453 }
2454
2455 static VkResult
2456 radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer,
2457 struct radv_render_pass *pass,
2458 const VkRenderPassBeginInfo *info)
2459 {
2460 struct radv_cmd_state *state = &cmd_buffer->state;
2461
2462 if (pass->attachment_count == 0) {
2463 state->attachments = NULL;
2464 return VK_SUCCESS;
2465 }
2466
2467 state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
2468 pass->attachment_count *
2469 sizeof(state->attachments[0]),
2470 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2471 if (state->attachments == NULL) {
2472 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
2473 return cmd_buffer->record_result;
2474 }
2475
2476 for (uint32_t i = 0; i < pass->attachment_count; ++i) {
2477 struct radv_render_pass_attachment *att = &pass->attachments[i];
2478 VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
2479 VkImageAspectFlags clear_aspects = 0;
2480
2481 if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
2482 /* color attachment */
2483 if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2484 clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
2485 }
2486 } else {
2487 /* depthstencil attachment */
2488 if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
2489 att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2490 clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
2491 if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
2492 att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE)
2493 clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
2494 }
2495 if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
2496 att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2497 clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
2498 }
2499 }
2500
2501 state->attachments[i].pending_clear_aspects = clear_aspects;
2502 state->attachments[i].cleared_views = 0;
2503 if (clear_aspects && info) {
2504 assert(info->clearValueCount > i);
2505 state->attachments[i].clear_value = info->pClearValues[i];
2506 }
2507
2508 state->attachments[i].current_layout = att->initial_layout;
2509 }
2510
2511 return VK_SUCCESS;
2512 }
2513
2514 VkResult radv_AllocateCommandBuffers(
2515 VkDevice _device,
2516 const VkCommandBufferAllocateInfo *pAllocateInfo,
2517 VkCommandBuffer *pCommandBuffers)
2518 {
2519 RADV_FROM_HANDLE(radv_device, device, _device);
2520 RADV_FROM_HANDLE(radv_cmd_pool, pool, pAllocateInfo->commandPool);
2521
2522 VkResult result = VK_SUCCESS;
2523 uint32_t i;
2524
2525 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
2526
2527 if (!list_empty(&pool->free_cmd_buffers)) {
2528 struct radv_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct radv_cmd_buffer, pool_link);
2529
2530 list_del(&cmd_buffer->pool_link);
2531 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
2532
2533 result = radv_reset_cmd_buffer(cmd_buffer);
2534 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
2535 cmd_buffer->level = pAllocateInfo->level;
2536
2537 pCommandBuffers[i] = radv_cmd_buffer_to_handle(cmd_buffer);
2538 } else {
2539 result = radv_create_cmd_buffer(device, pool, pAllocateInfo->level,
2540 &pCommandBuffers[i]);
2541 }
2542 if (result != VK_SUCCESS)
2543 break;
2544 }
2545
2546 if (result != VK_SUCCESS) {
2547 radv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
2548 i, pCommandBuffers);
2549
2550 /* From the Vulkan 1.0.66 spec:
2551 *
2552 * "vkAllocateCommandBuffers can be used to create multiple
2553 * command buffers. If the creation of any of those command
2554 * buffers fails, the implementation must destroy all
2555 * successfully created command buffer objects from this
2556 * command, set all entries of the pCommandBuffers array to
2557 * NULL and return the error."
2558 */
2559 memset(pCommandBuffers, 0,
2560 sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
2561 }
2562
2563 return result;
2564 }
2565
2566 void radv_FreeCommandBuffers(
2567 VkDevice device,
2568 VkCommandPool commandPool,
2569 uint32_t commandBufferCount,
2570 const VkCommandBuffer *pCommandBuffers)
2571 {
2572 for (uint32_t i = 0; i < commandBufferCount; i++) {
2573 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
2574
2575 if (cmd_buffer) {
2576 if (cmd_buffer->pool) {
2577 list_del(&cmd_buffer->pool_link);
2578 list_addtail(&cmd_buffer->pool_link, &cmd_buffer->pool->free_cmd_buffers);
2579 } else
2580 radv_cmd_buffer_destroy(cmd_buffer);
2581
2582 }
2583 }
2584 }
2585
2586 VkResult radv_ResetCommandBuffer(
2587 VkCommandBuffer commandBuffer,
2588 VkCommandBufferResetFlags flags)
2589 {
2590 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2591 return radv_reset_cmd_buffer(cmd_buffer);
2592 }
2593
2594 VkResult radv_BeginCommandBuffer(
2595 VkCommandBuffer commandBuffer,
2596 const VkCommandBufferBeginInfo *pBeginInfo)
2597 {
2598 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2599 VkResult result = VK_SUCCESS;
2600
2601 if (cmd_buffer->status != RADV_CMD_BUFFER_STATUS_INITIAL) {
2602 /* If the command buffer has already been reset with
2603 * vkResetCommandBuffer, there is no need to do it again.
2604 */
2605 result = radv_reset_cmd_buffer(cmd_buffer);
2606 if (result != VK_SUCCESS)
2607 return result;
2608 }
2609
2610 memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
2611 cmd_buffer->state.last_primitive_reset_en = -1;
2612 cmd_buffer->state.last_index_type = -1;
2613 cmd_buffer->state.last_num_instances = -1;
2614 cmd_buffer->state.last_vertex_offset = -1;
2615 cmd_buffer->state.last_first_instance = -1;
2616 cmd_buffer->state.predication_type = -1;
2617 cmd_buffer->usage_flags = pBeginInfo->flags;
2618
2619 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
2620 (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
2621 assert(pBeginInfo->pInheritanceInfo);
2622 cmd_buffer->state.framebuffer = radv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
2623 cmd_buffer->state.pass = radv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
2624
2625 struct radv_subpass *subpass =
2626 &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
2627
2628 result = radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
2629 if (result != VK_SUCCESS)
2630 return result;
2631
2632 radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
2633 }
2634
2635 if (unlikely(cmd_buffer->device->trace_bo)) {
2636 struct radv_device *device = cmd_buffer->device;
2637
2638 radv_cs_add_buffer(device->ws, cmd_buffer->cs,
2639 device->trace_bo);
2640
2641 radv_cmd_buffer_trace_emit(cmd_buffer);
2642 }
2643
2644 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_RECORDING;
2645
2646 return result;
2647 }
2648
2649 void radv_CmdBindVertexBuffers(
2650 VkCommandBuffer commandBuffer,
2651 uint32_t firstBinding,
2652 uint32_t bindingCount,
2653 const VkBuffer* pBuffers,
2654 const VkDeviceSize* pOffsets)
2655 {
2656 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2657 struct radv_vertex_binding *vb = cmd_buffer->vertex_bindings;
2658 bool changed = false;
2659
2660 /* We have to defer setting up vertex buffers since we need the buffer
2661 * stride from the pipeline. */
2662
2663 assert(firstBinding + bindingCount <= MAX_VBS);
2664 for (uint32_t i = 0; i < bindingCount; i++) {
2665 uint32_t idx = firstBinding + i;
2666
2667 if (!changed &&
2668 (vb[idx].buffer != radv_buffer_from_handle(pBuffers[i]) ||
2669 vb[idx].offset != pOffsets[i])) {
2670 changed = true;
2671 }
2672
2673 vb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
2674 vb[idx].offset = pOffsets[i];
2675
2676 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
2677 vb[idx].buffer->bo);
2678 }
2679
2680 if (!changed) {
2681 /* No state changes. */
2682 return;
2683 }
2684
2685 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_VERTEX_BUFFER;
2686 }
2687
2688 void radv_CmdBindIndexBuffer(
2689 VkCommandBuffer commandBuffer,
2690 VkBuffer buffer,
2691 VkDeviceSize offset,
2692 VkIndexType indexType)
2693 {
2694 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2695 RADV_FROM_HANDLE(radv_buffer, index_buffer, buffer);
2696
2697 if (cmd_buffer->state.index_buffer == index_buffer &&
2698 cmd_buffer->state.index_offset == offset &&
2699 cmd_buffer->state.index_type == indexType) {
2700 /* No state changes. */
2701 return;
2702 }
2703
2704 cmd_buffer->state.index_buffer = index_buffer;
2705 cmd_buffer->state.index_offset = offset;
2706 cmd_buffer->state.index_type = indexType; /* vk matches hw */
2707 cmd_buffer->state.index_va = radv_buffer_get_va(index_buffer->bo);
2708 cmd_buffer->state.index_va += index_buffer->offset + offset;
2709
2710 int index_size_shift = cmd_buffer->state.index_type ? 2 : 1;
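/* VK_INDEX_TYPE_UINT16 (0) selects 2-byte indices (shift of 1) and
 * VK_INDEX_TYPE_UINT32 (1) selects 4-byte ones (shift of 2), which is
 * also why index_type can be written to VGT_INDEX_TYPE directly.
 */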
2711 cmd_buffer->state.max_index_count = (index_buffer->size - offset) >> index_size_shift;
2712 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
2713 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo);
2714 }
2715
2716
2717 static void
2718 radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
2719 VkPipelineBindPoint bind_point,
2720 struct radv_descriptor_set *set, unsigned idx)
2721 {
2722 struct radeon_winsys *ws = cmd_buffer->device->ws;
2723
2724 radv_set_descriptor_set(cmd_buffer, bind_point, set, idx);
2725
2726 assert(set);
2727 assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
2728
2729 if (!cmd_buffer->device->use_global_bo_list) {
2730 for (unsigned j = 0; j < set->layout->buffer_count; ++j)
2731 if (set->descriptors[j])
2732 radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j]);
2733 }
2734
2735 if (set->bo)
2736 radv_cs_add_buffer(ws, cmd_buffer->cs, set->bo);
2737 }
2738
2739 void radv_CmdBindDescriptorSets(
2740 VkCommandBuffer commandBuffer,
2741 VkPipelineBindPoint pipelineBindPoint,
2742 VkPipelineLayout _layout,
2743 uint32_t firstSet,
2744 uint32_t descriptorSetCount,
2745 const VkDescriptorSet* pDescriptorSets,
2746 uint32_t dynamicOffsetCount,
2747 const uint32_t* pDynamicOffsets)
2748 {
2749 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2750 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2751 unsigned dyn_idx = 0;
2752
2753 const bool no_dynamic_bounds = cmd_buffer->device->instance->debug_flags & RADV_DEBUG_NO_DYNAMIC_BOUNDS;
2754 struct radv_descriptor_state *descriptors_state =
2755 radv_get_descriptors_state(cmd_buffer, pipelineBindPoint);
2756
2757 for (unsigned i = 0; i < descriptorSetCount; ++i) {
2758 unsigned idx = i + firstSet;
2759 RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);
2760 radv_bind_descriptor_set(cmd_buffer, pipelineBindPoint, set, idx);
2761
2762 for (unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
2763 unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
2764 uint32_t *dst = descriptors_state->dynamic_buffers + idx * 4;
2765 assert(dyn_idx < dynamicOffsetCount);
2766
2767 struct radv_descriptor_range *range = set->dynamic_descriptors + j;
2768 uint64_t va = range->va + pDynamicOffsets[dyn_idx];
2769 dst[0] = va;
2770 dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
2771 dst[2] = no_dynamic_bounds ? 0xffffffffu : range->size;
2772 dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
2773 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
2774 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
2775 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
2776 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
2777 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
2778 cmd_buffer->push_constant_stages |=
2779 set->layout->dynamic_shader_stages;
2780 }
2781 }
2782 }
2783
2784 static bool radv_init_push_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
2785 struct radv_descriptor_set *set,
2786 struct radv_descriptor_set_layout *layout,
2787 VkPipelineBindPoint bind_point)
2788 {
2789 struct radv_descriptor_state *descriptors_state =
2790 radv_get_descriptors_state(cmd_buffer, bind_point);
2791 set->size = layout->size;
2792 set->layout = layout;
2793
2794 if (descriptors_state->push_set.capacity < set->size) {
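/* Grow the host-side copy geometrically: at least 1 KiB, at least double
 * the current capacity, and never more than 96 * MAX_PUSH_DESCRIPTORS
 * (assumed here to be the worst-case size of a fully populated push
 * descriptor set).
 */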
2795 size_t new_size = MAX2(set->size, 1024);
2796 new_size = MAX2(new_size, 2 * descriptors_state->push_set.capacity);
2797 new_size = MIN2(new_size, 96 * MAX_PUSH_DESCRIPTORS);
2798
2799 free(set->mapped_ptr);
2800 set->mapped_ptr = malloc(new_size);
2801
2802 if (!set->mapped_ptr) {
2803 descriptors_state->push_set.capacity = 0;
2804 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
2805 return false;
2806 }
2807
2808 descriptors_state->push_set.capacity = new_size;
2809 }
2810
2811 return true;
2812 }
2813
2814 void radv_meta_push_descriptor_set(
2815 struct radv_cmd_buffer* cmd_buffer,
2816 VkPipelineBindPoint pipelineBindPoint,
2817 VkPipelineLayout _layout,
2818 uint32_t set,
2819 uint32_t descriptorWriteCount,
2820 const VkWriteDescriptorSet* pDescriptorWrites)
2821 {
2822 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2823 struct radv_descriptor_set *push_set = &cmd_buffer->meta_push_descriptors;
2824 unsigned bo_offset;
2825
2826 assert(set == 0);
2827 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
2828
2829 push_set->size = layout->set[set].layout->size;
2830 push_set->layout = layout->set[set].layout;
2831
2832 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, push_set->size, 32,
2833 &bo_offset,
2834 (void**) &push_set->mapped_ptr))
2835 return;
2836
2837 push_set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2838 push_set->va += bo_offset;
2839
2840 radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
2841 radv_descriptor_set_to_handle(push_set),
2842 descriptorWriteCount, pDescriptorWrites, 0, NULL);
2843
2844 radv_set_descriptor_set(cmd_buffer, pipelineBindPoint, push_set, set);
2845 }
2846
2847 void radv_CmdPushDescriptorSetKHR(
2848 VkCommandBuffer commandBuffer,
2849 VkPipelineBindPoint pipelineBindPoint,
2850 VkPipelineLayout _layout,
2851 uint32_t set,
2852 uint32_t descriptorWriteCount,
2853 const VkWriteDescriptorSet* pDescriptorWrites)
2854 {
2855 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2856 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2857 struct radv_descriptor_state *descriptors_state =
2858 radv_get_descriptors_state(cmd_buffer, pipelineBindPoint);
2859 struct radv_descriptor_set *push_set = &descriptors_state->push_set.set;
2860
2861 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
2862
2863 if (!radv_init_push_descriptor_set(cmd_buffer, push_set,
2864 layout->set[set].layout,
2865 pipelineBindPoint))
2866 return;
2867
2868 radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
2869 radv_descriptor_set_to_handle(push_set),
2870 descriptorWriteCount, pDescriptorWrites, 0, NULL);
2871
2872 radv_set_descriptor_set(cmd_buffer, pipelineBindPoint, push_set, set);
2873 descriptors_state->push_dirty = true;
2874 }
2875
2876 void radv_CmdPushDescriptorSetWithTemplateKHR(
2877 VkCommandBuffer commandBuffer,
2878 VkDescriptorUpdateTemplate descriptorUpdateTemplate,
2879 VkPipelineLayout _layout,
2880 uint32_t set,
2881 const void* pData)
2882 {
2883 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2884 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2885 RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
2886 struct radv_descriptor_state *descriptors_state =
2887 radv_get_descriptors_state(cmd_buffer, templ->bind_point);
2888 struct radv_descriptor_set *push_set = &descriptors_state->push_set.set;
2889
2890 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
2891
2892 if (!radv_init_push_descriptor_set(cmd_buffer, push_set,
2893 layout->set[set].layout,
2894 templ->bind_point))
2895 return;
2896
2897 radv_update_descriptor_set_with_template(cmd_buffer->device, cmd_buffer, push_set,
2898 descriptorUpdateTemplate, pData);
2899
2900 radv_set_descriptor_set(cmd_buffer, templ->bind_point, push_set, set);
2901 descriptors_state->push_dirty = true;
2902 }
2903
2904 void radv_CmdPushConstants(VkCommandBuffer commandBuffer,
2905 VkPipelineLayout layout,
2906 VkShaderStageFlags stageFlags,
2907 uint32_t offset,
2908 uint32_t size,
2909 const void* pValues)
2910 {
2911 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
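/* Only stage the data here; radv_flush_constants() uploads it to the
 * per-command-buffer upload BO and emits the user SGPR pointer for the
 * affected stages at draw/dispatch time.
 */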
2912 memcpy(cmd_buffer->push_constants + offset, pValues, size);
2913 cmd_buffer->push_constant_stages |= stageFlags;
2914 }
2915
2916 VkResult radv_EndCommandBuffer(
2917 VkCommandBuffer commandBuffer)
2918 {
2919 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2920
2921 if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) {
2922 if (cmd_buffer->device->physical_device->rad_info.chip_class == SI)
2923 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
2924 si_emit_cache_flush(cmd_buffer);
2925 }
2926
2927 /* Make sure CP DMA is idle at the end of IBs because the kernel
2928 * doesn't wait for it.
2929 */
2930 si_cp_dma_wait_for_idle(cmd_buffer);
2931
2932 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
2933
2934 if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
2935 return vk_error(cmd_buffer->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2936
2937 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_EXECUTABLE;
2938
2939 return cmd_buffer->record_result;
2940 }
2941
2942 static void
2943 radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer)
2944 {
2945 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
2946
2947 if (!pipeline || pipeline == cmd_buffer->state.emitted_compute_pipeline)
2948 return;
2949
2950 assert(!pipeline->ctx_cs.cdw);
2951
2952 cmd_buffer->state.emitted_compute_pipeline = pipeline;
2953
2954 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, pipeline->cs.cdw);
2955 radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);
2956
2957 cmd_buffer->compute_scratch_size_needed =
2958 MAX2(cmd_buffer->compute_scratch_size_needed,
2959 pipeline->max_waves * pipeline->scratch_bytes_per_wave);
2960
2961 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
2962 pipeline->shaders[MESA_SHADER_COMPUTE]->bo);
2963
2964 if (unlikely(cmd_buffer->device->trace_bo))
2965 radv_save_pipeline(cmd_buffer, pipeline, RING_COMPUTE);
2966 }
2967
2968 static void radv_mark_descriptor_sets_dirty(struct radv_cmd_buffer *cmd_buffer,
2969 VkPipelineBindPoint bind_point)
2970 {
2971 struct radv_descriptor_state *descriptors_state =
2972 radv_get_descriptors_state(cmd_buffer, bind_point);
2973
2974 descriptors_state->dirty |= descriptors_state->valid;
2975 }
2976
2977 void radv_CmdBindPipeline(
2978 VkCommandBuffer commandBuffer,
2979 VkPipelineBindPoint pipelineBindPoint,
2980 VkPipeline _pipeline)
2981 {
2982 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2983 RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
2984
2985 switch (pipelineBindPoint) {
2986 case VK_PIPELINE_BIND_POINT_COMPUTE:
2987 if (cmd_buffer->state.compute_pipeline == pipeline)
2988 return;
2989 radv_mark_descriptor_sets_dirty(cmd_buffer, pipelineBindPoint);
2990
2991 cmd_buffer->state.compute_pipeline = pipeline;
2992 cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
2993 break;
2994 case VK_PIPELINE_BIND_POINT_GRAPHICS:
2995 if (cmd_buffer->state.pipeline == pipeline)
2996 return;
2997 radv_mark_descriptor_sets_dirty(cmd_buffer, pipelineBindPoint);
2998
2999 cmd_buffer->state.pipeline = pipeline;
3000 if (!pipeline)
3001 break;
3002
3003 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
3004 cmd_buffer->push_constant_stages |= pipeline->active_stages;
3005
3006 /* the new vertex shader might not have the same user regs */
3007 cmd_buffer->state.last_first_instance = -1;
3008 cmd_buffer->state.last_vertex_offset = -1;
3009
3010 /* Prefetch all pipeline shaders at first draw time. */
3011 cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_SHADERS;
3012
3013 radv_bind_dynamic_state(cmd_buffer, &pipeline->dynamic_state);
3014 radv_bind_streamout_state(cmd_buffer, pipeline);
3015
3016 if (pipeline->graphics.esgs_ring_size > cmd_buffer->esgs_ring_size_needed)
3017 cmd_buffer->esgs_ring_size_needed = pipeline->graphics.esgs_ring_size;
3018 if (pipeline->graphics.gsvs_ring_size > cmd_buffer->gsvs_ring_size_needed)
3019 cmd_buffer->gsvs_ring_size_needed = pipeline->graphics.gsvs_ring_size;
3020
3021 if (radv_pipeline_has_tess(pipeline))
3022 cmd_buffer->tess_rings_needed = true;
3023 break;
3024 default:
3025 assert(!"invalid bind point");
3026 break;
3027 }
3028 }
3029
3030 void radv_CmdSetViewport(
3031 VkCommandBuffer commandBuffer,
3032 uint32_t firstViewport,
3033 uint32_t viewportCount,
3034 const VkViewport* pViewports)
3035 {
3036 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3037 struct radv_cmd_state *state = &cmd_buffer->state;
3038 MAYBE_UNUSED const uint32_t total_count = firstViewport + viewportCount;
3039
3040 assert(firstViewport < MAX_VIEWPORTS);
3041 assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);
3042
3043 if (!memcmp(state->dynamic.viewport.viewports + firstViewport,
3044 pViewports, viewportCount * sizeof(*pViewports))) {
3045 return;
3046 }
3047
3048 memcpy(state->dynamic.viewport.viewports + firstViewport, pViewports,
3049 viewportCount * sizeof(*pViewports));
3050
3051 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_VIEWPORT;
3052 }
3053
3054 void radv_CmdSetScissor(
3055 VkCommandBuffer commandBuffer,
3056 uint32_t firstScissor,
3057 uint32_t scissorCount,
3058 const VkRect2D* pScissors)
3059 {
3060 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3061 struct radv_cmd_state *state = &cmd_buffer->state;
3062 MAYBE_UNUSED const uint32_t total_count = firstScissor + scissorCount;
3063
3064 assert(firstScissor < MAX_SCISSORS);
3065 assert(total_count >= 1 && total_count <= MAX_SCISSORS);
3066
3067 if (!memcmp(state->dynamic.scissor.scissors + firstScissor, pScissors,
3068 scissorCount * sizeof(*pScissors))) {
3069 return;
3070 }
3071
3072 memcpy(state->dynamic.scissor.scissors + firstScissor, pScissors,
3073 scissorCount * sizeof(*pScissors));
3074
3075 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
3076 }
3077
3078 void radv_CmdSetLineWidth(
3079 VkCommandBuffer commandBuffer,
3080 float lineWidth)
3081 {
3082 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3083
3084 if (cmd_buffer->state.dynamic.line_width == lineWidth)
3085 return;
3086
3087 cmd_buffer->state.dynamic.line_width = lineWidth;
3088 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
3089 }
3090
3091 void radv_CmdSetDepthBias(
3092 VkCommandBuffer commandBuffer,
3093 float depthBiasConstantFactor,
3094 float depthBiasClamp,
3095 float depthBiasSlopeFactor)
3096 {
3097 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3098 struct radv_cmd_state *state = &cmd_buffer->state;
3099
3100 if (state->dynamic.depth_bias.bias == depthBiasConstantFactor &&
3101 state->dynamic.depth_bias.clamp == depthBiasClamp &&
3102 state->dynamic.depth_bias.slope == depthBiasSlopeFactor) {
3103 return;
3104 }
3105
3106 state->dynamic.depth_bias.bias = depthBiasConstantFactor;
3107 state->dynamic.depth_bias.clamp = depthBiasClamp;
3108 state->dynamic.depth_bias.slope = depthBiasSlopeFactor;
3109
3110 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
3111 }
3112
3113 void radv_CmdSetBlendConstants(
3114 VkCommandBuffer commandBuffer,
3115 const float blendConstants[4])
3116 {
3117 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3118 struct radv_cmd_state *state = &cmd_buffer->state;
3119
3120 if (!memcmp(state->dynamic.blend_constants, blendConstants, sizeof(float) * 4))
3121 return;
3122
3123 memcpy(state->dynamic.blend_constants, blendConstants, sizeof(float) * 4);
3124
3125 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
3126 }
3127
3128 void radv_CmdSetDepthBounds(
3129 VkCommandBuffer commandBuffer,
3130 float minDepthBounds,
3131 float maxDepthBounds)
3132 {
3133 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3134 struct radv_cmd_state *state = &cmd_buffer->state;
3135
3136 if (state->dynamic.depth_bounds.min == minDepthBounds &&
3137 state->dynamic.depth_bounds.max == maxDepthBounds) {
3138 return;
3139 }
3140
3141 state->dynamic.depth_bounds.min = minDepthBounds;
3142 state->dynamic.depth_bounds.max = maxDepthBounds;
3143
3144 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
3145 }
3146
3147 void radv_CmdSetStencilCompareMask(
3148 VkCommandBuffer commandBuffer,
3149 VkStencilFaceFlags faceMask,
3150 uint32_t compareMask)
3151 {
3152 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3153 struct radv_cmd_state *state = &cmd_buffer->state;
3154 bool front_same = state->dynamic.stencil_compare_mask.front == compareMask;
3155 bool back_same = state->dynamic.stencil_compare_mask.back == compareMask;
3156
3157 if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) &&
3158 (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || back_same)) {
3159 return;
3160 }
3161
3162 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
3163 state->dynamic.stencil_compare_mask.front = compareMask;
3164 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
3165 state->dynamic.stencil_compare_mask.back = compareMask;
3166
3167 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
3168 }
3169
3170 void radv_CmdSetStencilWriteMask(
3171 VkCommandBuffer commandBuffer,
3172 VkStencilFaceFlags faceMask,
3173 uint32_t writeMask)
3174 {
3175 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3176 struct radv_cmd_state *state = &cmd_buffer->state;
3177 bool front_same = state->dynamic.stencil_write_mask.front == writeMask;
3178 bool back_same = state->dynamic.stencil_write_mask.back == writeMask;
3179
3180 if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) &&
3181 (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || back_same)) {
3182 return;
3183 }
3184
3185 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
3186 state->dynamic.stencil_write_mask.front = writeMask;
3187 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
3188 state->dynamic.stencil_write_mask.back = writeMask;
3189
3190 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
3191 }
3192
3193 void radv_CmdSetStencilReference(
3194 VkCommandBuffer commandBuffer,
3195 VkStencilFaceFlags faceMask,
3196 uint32_t reference)
3197 {
3198 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3199 struct radv_cmd_state *state = &cmd_buffer->state;
3200 bool front_same = state->dynamic.stencil_reference.front == reference;
3201 bool back_same = state->dynamic.stencil_reference.back == reference;
3202
3203 if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) &&
3204 (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || back_same)) {
3205 return;
3206 }
3207
3208 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
3209 cmd_buffer->state.dynamic.stencil_reference.front = reference;
3210 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
3211 cmd_buffer->state.dynamic.stencil_reference.back = reference;
3212
3213 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
3214 }
3215
3216 void radv_CmdSetDiscardRectangleEXT(
3217 VkCommandBuffer commandBuffer,
3218 uint32_t firstDiscardRectangle,
3219 uint32_t discardRectangleCount,
3220 const VkRect2D* pDiscardRectangles)
3221 {
3222 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3223 struct radv_cmd_state *state = &cmd_buffer->state;
3224 MAYBE_UNUSED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount;
3225
3226 assert(firstDiscardRectangle < MAX_DISCARD_RECTANGLES);
3227 assert(total_count >= 1 && total_count <= MAX_DISCARD_RECTANGLES);
3228
3229 if (!memcmp(state->dynamic.discard_rectangle.rectangles + firstDiscardRectangle,
3230 pDiscardRectangles, discardRectangleCount * sizeof(*pDiscardRectangles))) {
3231 return;
3232 }
3233
3234 typed_memcpy(&state->dynamic.discard_rectangle.rectangles[firstDiscardRectangle],
3235 pDiscardRectangles, discardRectangleCount);
3236
3237 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE;
3238 }
3239
3240 void radv_CmdExecuteCommands(
3241 VkCommandBuffer commandBuffer,
3242 uint32_t commandBufferCount,
3243 const VkCommandBuffer* pCmdBuffers)
3244 {
3245 RADV_FROM_HANDLE(radv_cmd_buffer, primary, commandBuffer);
3246
3247 assert(commandBufferCount > 0);
3248
3249 /* Emit pending flushes on primary prior to executing secondary */
3250 si_emit_cache_flush(primary);
3251
3252 for (uint32_t i = 0; i < commandBufferCount; i++) {
3253 RADV_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]);
3254
3255 primary->scratch_size_needed = MAX2(primary->scratch_size_needed,
3256 secondary->scratch_size_needed);
3257 primary->compute_scratch_size_needed = MAX2(primary->compute_scratch_size_needed,
3258 secondary->compute_scratch_size_needed);
3259
3260 if (secondary->esgs_ring_size_needed > primary->esgs_ring_size_needed)
3261 primary->esgs_ring_size_needed = secondary->esgs_ring_size_needed;
3262 if (secondary->gsvs_ring_size_needed > primary->gsvs_ring_size_needed)
3263 primary->gsvs_ring_size_needed = secondary->gsvs_ring_size_needed;
3264 if (secondary->tess_rings_needed)
3265 primary->tess_rings_needed = true;
3266 if (secondary->sample_positions_needed)
3267 primary->sample_positions_needed = true;
3268
3269 primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs);
3270
3271
3272 /* When the secondary command buffer is compute only we don't
3273 * need to re-emit the current graphics pipeline.
3274 */
3275 if (secondary->state.emitted_pipeline) {
3276 primary->state.emitted_pipeline =
3277 secondary->state.emitted_pipeline;
3278 }
3279
3280 /* When the secondary command buffer is graphics only we don't
3281 * need to re-emit the current compute pipeline.
3282 */
3283 if (secondary->state.emitted_compute_pipeline) {
3284 primary->state.emitted_compute_pipeline =
3285 secondary->state.emitted_compute_pipeline;
3286 }
3287
3288 /* Only re-emit the draw packets when needed. */
3289 if (secondary->state.last_primitive_reset_en != -1) {
3290 primary->state.last_primitive_reset_en =
3291 secondary->state.last_primitive_reset_en;
3292 }
3293
3294 if (secondary->state.last_primitive_reset_index) {
3295 primary->state.last_primitive_reset_index =
3296 secondary->state.last_primitive_reset_index;
3297 }
3298
3299 if (secondary->state.last_ia_multi_vgt_param) {
3300 primary->state.last_ia_multi_vgt_param =
3301 secondary->state.last_ia_multi_vgt_param;
3302 }
3303
3304 primary->state.last_first_instance = secondary->state.last_first_instance;
3305 primary->state.last_num_instances = secondary->state.last_num_instances;
3306 primary->state.last_vertex_offset = secondary->state.last_vertex_offset;
3307
3308 if (secondary->state.last_index_type != -1) {
3309 primary->state.last_index_type =
3310 secondary->state.last_index_type;
3311 }
3312 }
3313
3314 /* After executing commands from secondary buffers, we have to mark
3315 * some state as dirty so that it is re-emitted on the primary.
3316 */
3317 primary->state.dirty |= RADV_CMD_DIRTY_PIPELINE |
3318 RADV_CMD_DIRTY_INDEX_BUFFER |
3319 RADV_CMD_DIRTY_DYNAMIC_ALL;
3320 radv_mark_descriptor_sets_dirty(primary, VK_PIPELINE_BIND_POINT_GRAPHICS);
3321 radv_mark_descriptor_sets_dirty(primary, VK_PIPELINE_BIND_POINT_COMPUTE);
3322 }
3323
3324 VkResult radv_CreateCommandPool(
3325 VkDevice _device,
3326 const VkCommandPoolCreateInfo* pCreateInfo,
3327 const VkAllocationCallbacks* pAllocator,
3328 VkCommandPool* pCmdPool)
3329 {
3330 RADV_FROM_HANDLE(radv_device, device, _device);
3331 struct radv_cmd_pool *pool;
3332
3333 pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
3334 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3335 if (pool == NULL)
3336 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
3337
3338 if (pAllocator)
3339 pool->alloc = *pAllocator;
3340 else
3341 pool->alloc = device->alloc;
3342
3343 list_inithead(&pool->cmd_buffers);
3344 list_inithead(&pool->free_cmd_buffers);
3345
3346 pool->queue_family_index = pCreateInfo->queueFamilyIndex;
3347
3348 *pCmdPool = radv_cmd_pool_to_handle(pool);
3349
3350 return VK_SUCCESS;
3352 }
3353
3354 void radv_DestroyCommandPool(
3355 VkDevice _device,
3356 VkCommandPool commandPool,
3357 const VkAllocationCallbacks* pAllocator)
3358 {
3359 RADV_FROM_HANDLE(radv_device, device, _device);
3360 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
3361
3362 if (!pool)
3363 return;
3364
3365 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
3366 &pool->cmd_buffers, pool_link) {
3367 radv_cmd_buffer_destroy(cmd_buffer);
3368 }
3369
3370 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
3371 &pool->free_cmd_buffers, pool_link) {
3372 radv_cmd_buffer_destroy(cmd_buffer);
3373 }
3374
3375 vk_free2(&device->alloc, pAllocator, pool);
3376 }
3377
3378 VkResult radv_ResetCommandPool(
3379 VkDevice device,
3380 VkCommandPool commandPool,
3381 VkCommandPoolResetFlags flags)
3382 {
3383 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
3384 VkResult result;
3385
3386 list_for_each_entry(struct radv_cmd_buffer, cmd_buffer,
3387 &pool->cmd_buffers, pool_link) {
3388 result = radv_reset_cmd_buffer(cmd_buffer);
3389 if (result != VK_SUCCESS)
3390 return result;
3391 }
3392
3393 return VK_SUCCESS;
3394 }
3395
3396 void radv_TrimCommandPool(
3397 VkDevice device,
3398 VkCommandPool commandPool,
3399 VkCommandPoolTrimFlags flags)
3400 {
3401 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
3402
3403 if (!pool)
3404 return;
3405
3406 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
3407 &pool->free_cmd_buffers, pool_link) {
3408 radv_cmd_buffer_destroy(cmd_buffer);
3409 }
3410 }
3411
3412 void radv_CmdBeginRenderPass(
3413 VkCommandBuffer commandBuffer,
3414 const VkRenderPassBeginInfo* pRenderPassBegin,
3415 VkSubpassContents contents)
3416 {
3417 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3418 RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass);
3419 RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
3420
3421 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
3422 cmd_buffer->cs, 2048);
3423 MAYBE_UNUSED VkResult result;
3424
3425 cmd_buffer->state.framebuffer = framebuffer;
3426 cmd_buffer->state.pass = pass;
3427 cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
3428
3429 result = radv_cmd_state_setup_attachments(cmd_buffer, pass, pRenderPassBegin);
3430 if (result != VK_SUCCESS)
3431 return;
3432
3433 radv_cmd_buffer_set_subpass(cmd_buffer, pass->subpasses, true);
3434 assert(cmd_buffer->cs->cdw <= cdw_max);
3435
3436 radv_cmd_buffer_clear_subpass(cmd_buffer);
3437 }
3438
3439 void radv_CmdBeginRenderPass2KHR(
3440 VkCommandBuffer commandBuffer,
3441 const VkRenderPassBeginInfo* pRenderPassBeginInfo,
3442 const VkSubpassBeginInfoKHR* pSubpassBeginInfo)
3443 {
3444 radv_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
3445 pSubpassBeginInfo->contents);
3446 }
3447
3448 void radv_CmdNextSubpass(
3449 VkCommandBuffer commandBuffer,
3450 VkSubpassContents contents)
3451 {
3452 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3453
3454 radv_cmd_buffer_resolve_subpass(cmd_buffer);
3455
3456 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs,
3457 2048);
3458
3459 radv_cmd_buffer_set_subpass(cmd_buffer, cmd_buffer->state.subpass + 1, true);
3460 radv_cmd_buffer_clear_subpass(cmd_buffer);
3461 }
3462
3463 void radv_CmdNextSubpass2KHR(
3464 VkCommandBuffer commandBuffer,
3465 const VkSubpassBeginInfoKHR* pSubpassBeginInfo,
3466 const VkSubpassEndInfoKHR* pSubpassEndInfo)
3467 {
3468 radv_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
3469 }
3470
3471 static void radv_emit_view_index(struct radv_cmd_buffer *cmd_buffer, unsigned index)
3472 {
3473 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
3474 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
3475 if (!radv_get_shader(pipeline, stage))
3476 continue;
3477
3478 struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, AC_UD_VIEW_INDEX);
3479 if (loc->sgpr_idx == -1)
3480 continue;
3481 uint32_t base_reg = pipeline->user_data_0[stage];
3482 radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
3483
3484 }
3485 if (pipeline->gs_copy_shader) {
3486 struct radv_userdata_info *loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_VIEW_INDEX];
3487 if (loc->sgpr_idx != -1) {
3488 uint32_t base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
3489 radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
3490 }
3491 }
3492 }
3493
3494 static void
3495 radv_cs_emit_draw_packet(struct radv_cmd_buffer *cmd_buffer,
3496 uint32_t vertex_count,
3497 bool use_opaque)
3498 {
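/* DRAW_INDEX_AUTO draws vertex_count auto-generated indices. When
 * use_opaque is set, the vertex count is instead taken from the streamout
 * buffer-filled-size (the VK_EXT_transform_feedback indirect byte count
 * path).
 */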
3499 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, cmd_buffer->state.predicating));
3500 radeon_emit(cmd_buffer->cs, vertex_count);
3501 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
3502 S_0287F0_USE_OPAQUE(use_opaque));
3503 }
3504
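/* DRAW_INDEX_2 payload: max index count for bounds checking, the 64-bit
 * index buffer address (lo then hi), the number of indices to draw, and a
 * DMA index source select.
 */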
3505 static void
3506 radv_cs_emit_draw_indexed_packet(struct radv_cmd_buffer *cmd_buffer,
3507 uint64_t index_va,
3508 uint32_t index_count)
3509 {
3510 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, cmd_buffer->state.predicating));
3511 radeon_emit(cmd_buffer->cs, cmd_buffer->state.max_index_count);
3512 radeon_emit(cmd_buffer->cs, index_va);
3513 radeon_emit(cmd_buffer->cs, index_va >> 32);
3514 radeon_emit(cmd_buffer->cs, index_count);
3515 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_DMA);
3516 }
3517
3518 static void
3519 radv_cs_emit_indirect_draw_packet(struct radv_cmd_buffer *cmd_buffer,
3520 bool indexed,
3521 uint32_t draw_count,
3522 uint64_t count_va,
3523 uint32_t stride)
3524 {
3525 struct radeon_cmdbuf *cs = cmd_buffer->cs;
3526 unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
3527 : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
3528 bool draw_id_enable = radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.needs_draw_id;
3529 uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr;
3530 bool predicating = cmd_buffer->state.predicating;
3531 assert(base_reg);
3532
3533 /* Reset the cached vertex draw state; indirect draws update these SGPRs on the GPU, so the cached values are stale. */
3534 cmd_buffer->state.last_first_instance = -1;
3535 cmd_buffer->state.last_num_instances = -1;
3536 cmd_buffer->state.last_vertex_offset = -1;
3537
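/* A single draw with a CPU-known count and no draw id can use the short
 * DRAW_INDIRECT / DRAW_INDEX_INDIRECT packet; everything else needs the
 * MULTI variant, which also carries the optional count buffer address.
 */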
3538 if (draw_count == 1 && !count_va && !draw_id_enable) {
3539 radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT :
3540 PKT3_DRAW_INDIRECT, 3, predicating));
3541 radeon_emit(cs, 0);
3542 radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
3543 radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
3544 radeon_emit(cs, di_src_sel);
3545 } else {
3546 radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
3547 PKT3_DRAW_INDIRECT_MULTI,
3548 8, predicating));
3549 radeon_emit(cs, 0);
3550 radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
3551 radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
3552 radeon_emit(cs, (((base_reg + 8) - SI_SH_REG_OFFSET) >> 2) |
3553 S_2C3_DRAW_INDEX_ENABLE(draw_id_enable) |
3554 S_2C3_COUNT_INDIRECT_ENABLE(!!count_va));
3555 radeon_emit(cs, draw_count); /* count */
3556 radeon_emit(cs, count_va); /* count_addr */
3557 radeon_emit(cs, count_va >> 32);
3558 radeon_emit(cs, stride); /* stride */
3559 radeon_emit(cs, di_src_sel);
3560 }
3561 }
3562
3563 static void
3564 radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer,
3565 const struct radv_draw_info *info)
3566 {
3567 struct radv_cmd_state *state = &cmd_buffer->state;
3568 struct radeon_winsys *ws = cmd_buffer->device->ws;
3569 struct radeon_cmdbuf *cs = cmd_buffer->cs;
3570
3571 if (info->indirect) {
3572 uint64_t va = radv_buffer_get_va(info->indirect->bo);
3573 uint64_t count_va = 0;
3574
3575 va += info->indirect->offset + info->indirect_offset;
3576
3577 radv_cs_add_buffer(ws, cs, info->indirect->bo);
3578
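/* SET_BASE with base index 1 programs the indirect data base address that
 * the following DRAW_*_INDIRECT packets offset from.
 */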
3579 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
3580 radeon_emit(cs, 1);
3581 radeon_emit(cs, va);
3582 radeon_emit(cs, va >> 32);
3583
3584 if (info->count_buffer) {
3585 count_va = radv_buffer_get_va(info->count_buffer->bo);
3586 count_va += info->count_buffer->offset +
3587 info->count_buffer_offset;
3588
3589 radv_cs_add_buffer(ws, cs, info->count_buffer->bo);
3590 }
3591
3592 if (!state->subpass->view_mask) {
3593 radv_cs_emit_indirect_draw_packet(cmd_buffer,
3594 info->indexed,
3595 info->count,
3596 count_va,
3597 info->stride);
3598 } else {
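/* With multiview, replay the draw once per view in the subpass view
 * mask, updating the view index user SGPR before each one.
 */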
3599 unsigned i;
3600 for_each_bit(i, state->subpass->view_mask) {
3601 radv_emit_view_index(cmd_buffer, i);
3602
3603 radv_cs_emit_indirect_draw_packet(cmd_buffer,
3604 info->indexed,
3605 info->count,
3606 count_va,
3607 info->stride);
3608 }
3609 }
3610 } else {
3611 assert(state->pipeline->graphics.vtx_base_sgpr);
3612
3613 if (info->vertex_offset != state->last_vertex_offset ||
3614 info->first_instance != state->last_first_instance) {
3615 radeon_set_sh_reg_seq(cs, state->pipeline->graphics.vtx_base_sgpr,
3616 state->pipeline->graphics.vtx_emit_num);
3617
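/* Emit the base vertex and start instance SGPRs; when the vertex shader
 * also needs a draw id, a third SGPR is written, which is always 0 for
 * direct draws.
 */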
3618 radeon_emit(cs, info->vertex_offset);
3619 radeon_emit(cs, info->first_instance);
3620 if (state->pipeline->graphics.vtx_emit_num == 3)
3621 radeon_emit(cs, 0);
3622 state->last_first_instance = info->first_instance;
3623 state->last_vertex_offset = info->vertex_offset;
3624 }
3625
3626 if (state->last_num_instances != info->instance_count) {
3627 radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, false));
3628 radeon_emit(cs, info->instance_count);
3629 state->last_num_instances = info->instance_count;
3630 }
3631
3632 if (info->indexed) {
3633 int index_size = state->index_type ? 4 : 2;
3634 uint64_t index_va;
3635
3636 index_va = state->index_va;
3637 index_va += info->first_index * index_size;
3638
3639 if (!state->subpass->view_mask) {
3640 radv_cs_emit_draw_indexed_packet(cmd_buffer,
3641 index_va,
3642 info->count);
3643 } else {
3644 unsigned i;
3645 for_each_bit(i, state->subpass->view_mask) {
3646 radv_emit_view_index(cmd_buffer, i);
3647
3648 radv_cs_emit_draw_indexed_packet(cmd_buffer,
3649 index_va,
3650 info->count);
3651 }
3652 }
3653 } else {
3654 if (!state->subpass->view_mask) {
3655 radv_cs_emit_draw_packet(cmd_buffer,
3656 info->count,
3657 !!info->strmout_buffer);
3658 } else {
3659 unsigned i;
3660 for_each_bit(i, state->subpass->view_mask) {
3661 radv_emit_view_index(cmd_buffer, i);
3662
3663 radv_cs_emit_draw_packet(cmd_buffer,
3664 info->count,
3665 !!info->strmout_buffer);
3666 }
3667 }
3668 }
3669 }
3670 }
3671
3672 /*
3673 * Vega and Raven have a bug which triggers if there are multiple context
3674 * register contexts active at the same time with different scissor values.
3675 *
3676 * There are two possible workarounds:
3677 * 1) Wait for PS_PARTIAL_FLUSH every time the scissor is changed. That way
3678 * there is only ever 1 active set of scissor values at the same time.
3679 *
3680 * 2) Whenever the hardware switches contexts we have to set the scissor
3681 * registers again even if it is a noop. That way the new context gets
3682 * the correct scissor values.
3683 *
3684 * This implements option 2. radv_need_late_scissor_emission needs to
3685 * return true on affected HW if radv_emit_all_graphics_states sets
3686 * any context registers.
3687 */
3688 static bool radv_need_late_scissor_emission(struct radv_cmd_buffer *cmd_buffer,
3689 const struct radv_draw_info *info)
3690 {
3691 struct radv_cmd_state *state = &cmd_buffer->state;
3692
3693 if (!cmd_buffer->device->physical_device->has_scissor_bug)
3694 return false;
3695
3696 if (cmd_buffer->state.context_roll_without_scissor_emitted || info->strmout_buffer)
3697 return true;
3698
3699 uint32_t used_states = cmd_buffer->state.pipeline->graphics.needed_dynamic_state | ~RADV_CMD_DIRTY_DYNAMIC_ALL;
3700
3701 /* Index, vertex and streamout buffers don't change context regs, and
3702 * pipeline is already handled.
3703 */
3704 used_states &= ~(RADV_CMD_DIRTY_INDEX_BUFFER |
3705 RADV_CMD_DIRTY_VERTEX_BUFFER |
3706 RADV_CMD_DIRTY_STREAMOUT_BUFFER |
3707 RADV_CMD_DIRTY_PIPELINE);
3708
3709 if (cmd_buffer->state.dirty & used_states)
3710 return true;
3711
3712 if (info->indexed && state->pipeline->graphics.prim_restart_enable &&
3713 (state->index_type ? 0xffffffffu : 0xffffu) != state->last_primitive_reset_index)
3714 return true;
3715
3716 return false;
3717 }
3718
3719 static void
3720 radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer,
3721 const struct radv_draw_info *info)
3722 {
3723 bool late_scissor_emission;
3724
3725 if ((cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER) ||
3726 cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline)
3727 radv_emit_rbplus_state(cmd_buffer);
3728
3729 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE)
3730 radv_emit_graphics_pipeline(cmd_buffer);
3731
3732 /* This should be before the cmd_buffer->state.dirty is cleared
3733 * (excluding RADV_CMD_DIRTY_PIPELINE) and after
3734 * cmd_buffer->state.context_roll_without_scissor_emitted is set. */
3735 late_scissor_emission =
3736 radv_need_late_scissor_emission(cmd_buffer, info);
3737
3738 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER)
3739 radv_emit_framebuffer_state(cmd_buffer);
3740
3741 if (info->indexed) {
3742 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_INDEX_BUFFER)
3743 radv_emit_index_buffer(cmd_buffer);
3744 } else {
3745 /* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
3746 * so the state must be re-emitted before the next indexed
3747 * draw.
3748 */
3749 if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
3750 cmd_buffer->state.last_index_type = -1;
3751 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
3752 }
3753 }
3754
3755 radv_cmd_buffer_flush_dynamic_state(cmd_buffer);
3756
3757 radv_emit_draw_registers(cmd_buffer, info);
3758
3759 if (late_scissor_emission)
3760 radv_emit_scissor(cmd_buffer);
3761 }
3762
3763 static void
3764 radv_draw(struct radv_cmd_buffer *cmd_buffer,
3765 const struct radv_draw_info *info)
3766 {
3767 struct radeon_info *rad_info =
3768 &cmd_buffer->device->physical_device->rad_info;
3769 bool has_prefetch =
3770 cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
3771 bool pipeline_is_dirty =
3772 (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
3773 cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline;
3774
3775 MAYBE_UNUSED unsigned cdw_max =
3776 radeon_check_space(cmd_buffer->device->ws,
3777 cmd_buffer->cs, 4096);
3778
3779 if (likely(!info->indirect)) {
3780 /* SI-CI treat instance_count==0 as instance_count==1. There is
3781 * no workaround for indirect draws, but we can at least skip
3782 * direct draws.
3783 */
3784 if (unlikely(!info->instance_count))
3785 return;
3786
3787 /* Handle count == 0. */
3788 if (unlikely(!info->count && !info->strmout_buffer))
3789 return;
3790 }
3791
3792 /* Use optimal packet order based on whether we need to sync the
3793 * pipeline.
3794 */
3795 if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
3796 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
3797 RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
3798 RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) {
3799 /* If we have to wait for idle, set all states first, so that
3800 * all SET packets are processed in parallel with previous draw
3801 * calls. Then upload descriptors, set shader pointers, and
3802 * draw, and prefetch at the end. This ensures that the time
3803 * the CUs are idle is very short. (there are only SET_SH
3804 * packets between the wait and the draw)
3805 */
3806 radv_emit_all_graphics_states(cmd_buffer, info);
3807 si_emit_cache_flush(cmd_buffer);
3808 /* <-- CUs are idle here --> */
3809
3810 radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty);
3811
3812 radv_emit_draw_packets(cmd_buffer, info);
3813 /* <-- CUs are busy here --> */
3814
3815 /* Start prefetches after the draw has been started. Both will
3816 * run in parallel, but starting the draw first is more
3817 * important.
3818 */
3819 if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
3820 radv_emit_prefetch_L2(cmd_buffer,
3821 cmd_buffer->state.pipeline, false);
3822 }
3823 } else {
3824 /* If we don't wait for idle, start prefetches first, then set
3825 * states, and draw at the end.
3826 */
3827 si_emit_cache_flush(cmd_buffer);
3828
3829 if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
3830 /* Only prefetch the vertex shader and VBO descriptors
3831 * in order to start the draw as soon as possible.
3832 */
3833 radv_emit_prefetch_L2(cmd_buffer,
3834 cmd_buffer->state.pipeline, true);
3835 }
3836
3837 radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty);
3838
3839 radv_emit_all_graphics_states(cmd_buffer, info);
3840 radv_emit_draw_packets(cmd_buffer, info);
3841
3842 /* Prefetch the remaining shaders after the draw has been
3843 * started.
3844 */
3845 if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
3846 radv_emit_prefetch_L2(cmd_buffer,
3847 cmd_buffer->state.pipeline, false);
3848 }
3849 }
3850
3851 /* Workaround for a VGT hang when streamout is enabled.
3852 * It must be done after drawing.
3853 */
3854 if (cmd_buffer->state.streamout.streamout_enabled &&
3855 (rad_info->family == CHIP_HAWAII ||
3856 rad_info->family == CHIP_TONGA ||
3857 rad_info->family == CHIP_FIJI)) {
3858 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_STREAMOUT_SYNC;
3859 }
3860
3861 assert(cmd_buffer->cs->cdw <= cdw_max);
3862 radv_cmd_buffer_after_draw(cmd_buffer, RADV_CMD_FLAG_PS_PARTIAL_FLUSH);
3863 }
3864
3865 void radv_CmdDraw(
3866 VkCommandBuffer commandBuffer,
3867 uint32_t vertexCount,
3868 uint32_t instanceCount,
3869 uint32_t firstVertex,
3870 uint32_t firstInstance)
3871 {
3872 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3873 struct radv_draw_info info = {};
3874
3875 info.count = vertexCount;
3876 info.instance_count = instanceCount;
3877 info.first_instance = firstInstance;
3878 info.vertex_offset = firstVertex;
3879
3880 radv_draw(cmd_buffer, &info);
3881 }
3882
3883 void radv_CmdDrawIndexed(
3884 VkCommandBuffer commandBuffer,
3885 uint32_t indexCount,
3886 uint32_t instanceCount,
3887 uint32_t firstIndex,
3888 int32_t vertexOffset,
3889 uint32_t firstInstance)
3890 {
3891 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3892 struct radv_draw_info info = {};
3893
3894 info.indexed = true;
3895 info.count = indexCount;
3896 info.instance_count = instanceCount;
3897 info.first_index = firstIndex;
3898 info.vertex_offset = vertexOffset;
3899 info.first_instance = firstInstance;
3900
3901 radv_draw(cmd_buffer, &info);
3902 }
3903
3904 void radv_CmdDrawIndirect(
3905 VkCommandBuffer commandBuffer,
3906 VkBuffer _buffer,
3907 VkDeviceSize offset,
3908 uint32_t drawCount,
3909 uint32_t stride)
3910 {
3911 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3912 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3913 struct radv_draw_info info = {};
3914
3915 info.count = drawCount;
3916 info.indirect = buffer;
3917 info.indirect_offset = offset;
3918 info.stride = stride;
3919
3920 radv_draw(cmd_buffer, &info);
3921 }
3922
3923 void radv_CmdDrawIndexedIndirect(
3924 VkCommandBuffer commandBuffer,
3925 VkBuffer _buffer,
3926 VkDeviceSize offset,
3927 uint32_t drawCount,
3928 uint32_t stride)
3929 {
3930 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3931 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3932 struct radv_draw_info info = {};
3933
3934 info.indexed = true;
3935 info.count = drawCount;
3936 info.indirect = buffer;
3937 info.indirect_offset = offset;
3938 info.stride = stride;
3939
3940 radv_draw(cmd_buffer, &info);
3941 }
3942
3943 void radv_CmdDrawIndirectCountAMD(
3944 VkCommandBuffer commandBuffer,
3945 VkBuffer _buffer,
3946 VkDeviceSize offset,
3947 VkBuffer _countBuffer,
3948 VkDeviceSize countBufferOffset,
3949 uint32_t maxDrawCount,
3950 uint32_t stride)
3951 {
3952 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3953 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3954 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
3955 struct radv_draw_info info = {};
3956
3957 info.count = maxDrawCount;
3958 info.indirect = buffer;
3959 info.indirect_offset = offset;
3960 info.count_buffer = count_buffer;
3961 info.count_buffer_offset = countBufferOffset;
3962 info.stride = stride;
3963
3964 radv_draw(cmd_buffer, &info);
3965 }
3966
3967 void radv_CmdDrawIndexedIndirectCountAMD(
3968 VkCommandBuffer commandBuffer,
3969 VkBuffer _buffer,
3970 VkDeviceSize offset,
3971 VkBuffer _countBuffer,
3972 VkDeviceSize countBufferOffset,
3973 uint32_t maxDrawCount,
3974 uint32_t stride)
3975 {
3976 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3977 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3978 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
3979 struct radv_draw_info info = {};
3980
3981 info.indexed = true;
3982 info.count = maxDrawCount;
3983 info.indirect = buffer;
3984 info.indirect_offset = offset;
3985 info.count_buffer = count_buffer;
3986 info.count_buffer_offset = countBufferOffset;
3987 info.stride = stride;
3988
3989 radv_draw(cmd_buffer, &info);
3990 }
3991
3992 void radv_CmdDrawIndirectCountKHR(
3993 VkCommandBuffer commandBuffer,
3994 VkBuffer _buffer,
3995 VkDeviceSize offset,
3996 VkBuffer _countBuffer,
3997 VkDeviceSize countBufferOffset,
3998 uint32_t maxDrawCount,
3999 uint32_t stride)
4000 {
4001 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4002 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
4003 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
4004 struct radv_draw_info info = {};
4005
4006 info.count = maxDrawCount;
4007 info.indirect = buffer;
4008 info.indirect_offset = offset;
4009 info.count_buffer = count_buffer;
4010 info.count_buffer_offset = countBufferOffset;
4011 info.stride = stride;
4012
4013 radv_draw(cmd_buffer, &info);
4014 }
4015
4016 void radv_CmdDrawIndexedIndirectCountKHR(
4017 VkCommandBuffer commandBuffer,
4018 VkBuffer _buffer,
4019 VkDeviceSize offset,
4020 VkBuffer _countBuffer,
4021 VkDeviceSize countBufferOffset,
4022 uint32_t maxDrawCount,
4023 uint32_t stride)
4024 {
4025 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4026 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
4027 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
4028 struct radv_draw_info info = {};
4029
4030 info.indexed = true;
4031 info.count = maxDrawCount;
4032 info.indirect = buffer;
4033 info.indirect_offset = offset;
4034 info.count_buffer = count_buffer;
4035 info.count_buffer_offset = countBufferOffset;
4036 info.stride = stride;
4037
4038 radv_draw(cmd_buffer, &info);
4039 }
4040
4041 struct radv_dispatch_info {
4042 /**
4043 * The layout of the grid (in block units) to dispatch.
4044 */
4045 uint32_t blocks[3];
4046
4047 /**
4048 * A starting offset for the grid. If unaligned is set, the offset
4049 * must still be aligned.
4050 */
4051 uint32_t offsets[3];
4052 /**
4053 * Whether it's an unaligned compute dispatch.
4054 */
4055 bool unaligned;
4056
4057 /**
4058 * Indirect compute parameters resource.
4059 */
4060 struct radv_buffer *indirect;
4061 uint64_t indirect_offset;
4062 };
4063
4064 static void
4065 radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer,
4066 const struct radv_dispatch_info *info)
4067 {
4068 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
4069 struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
4070 unsigned dispatch_initiator = cmd_buffer->device->dispatch_initiator;
4071 struct radeon_winsys *ws = cmd_buffer->device->ws;
4072 bool predicating = cmd_buffer->state.predicating;
4073 struct radeon_cmdbuf *cs = cmd_buffer->cs;
4074 struct radv_userdata_info *loc;
4075
4076 loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE,
4077 AC_UD_CS_GRID_SIZE);
4078
4079 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(ws, cs, 25);
4080
4081 if (info->indirect) {
4082 uint64_t va = radv_buffer_get_va(info->indirect->bo);
4083
4084 va += info->indirect->offset + info->indirect_offset;
4085
4086 radv_cs_add_buffer(ws, cs, info->indirect->bo);
4087
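/* If the shader reads the grid size (e.g. gl_NumWorkGroups), copy the
 * three dispatch dimensions from the indirect buffer into the grid-size
 * user SGPRs with COPY_DATA.
 */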
4088 if (loc->sgpr_idx != -1) {
4089 for (unsigned i = 0; i < 3; ++i) {
4090 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
4091 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
4092 COPY_DATA_DST_SEL(COPY_DATA_REG));
4093 radeon_emit(cs, (va + 4 * i));
4094 radeon_emit(cs, (va + 4 * i) >> 32);
4095 radeon_emit(cs, ((R_00B900_COMPUTE_USER_DATA_0
4096 + loc->sgpr_idx * 4) >> 2) + i);
4097 radeon_emit(cs, 0);
4098 }
4099 }
4100
4101 if (radv_cmd_buffer_uses_mec(cmd_buffer)) {
4102 radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 2, predicating) |
4103 PKT3_SHADER_TYPE_S(1));
4104 radeon_emit(cs, va);
4105 radeon_emit(cs, va >> 32);
4106 radeon_emit(cs, dispatch_initiator);
4107 } else {
4108 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
4109 PKT3_SHADER_TYPE_S(1));
4110 radeon_emit(cs, 1);
4111 radeon_emit(cs, va);
4112 radeon_emit(cs, va >> 32);
4113
4114 radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, predicating) |
4115 PKT3_SHADER_TYPE_S(1));
4116 radeon_emit(cs, 0);
4117 radeon_emit(cs, dispatch_initiator);
4118 }
4119 } else {
4120 unsigned blocks[3] = { info->blocks[0], info->blocks[1], info->blocks[2] };
4121 unsigned offsets[3] = { info->offsets[0], info->offsets[1], info->offsets[2] };
4122
4123 if (info->unaligned) {
4124 unsigned *cs_block_size = compute_shader->info.cs.block_size;
4125 unsigned remainder[3];
4126
4127 /* If aligned, these should be an entire block size,
4128 * not 0.
4129 */
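/* E.g. 10 threads with a block size of 4 round up to 12, so the
 * remainder is 10 + 4 - 12 = 2 threads in the last, partial group, and
 * the dispatch below is rounded up to 3 groups.
 */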
4130 remainder[0] = blocks[0] + cs_block_size[0] -
4131 align_u32_npot(blocks[0], cs_block_size[0]);
4132 remainder[1] = blocks[1] + cs_block_size[1] -
4133 align_u32_npot(blocks[1], cs_block_size[1]);
4134 remainder[2] = blocks[2] + cs_block_size[2] -
4135 align_u32_npot(blocks[2], cs_block_size[2]);
4136
4137 blocks[0] = round_up_u32(blocks[0], cs_block_size[0]);
4138 blocks[1] = round_up_u32(blocks[1], cs_block_size[1]);
4139 blocks[2] = round_up_u32(blocks[2], cs_block_size[2]);
4140
4141 for(unsigned i = 0; i < 3; ++i) {
4142 assert(offsets[i] % cs_block_size[i] == 0);
4143 offsets[i] /= cs_block_size[i];
4144 }
4145
4146 radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
4147 radeon_emit(cs,
4148 S_00B81C_NUM_THREAD_FULL(cs_block_size[0]) |
4149 S_00B81C_NUM_THREAD_PARTIAL(remainder[0]));
4150 radeon_emit(cs,
4151 S_00B81C_NUM_THREAD_FULL(cs_block_size[1]) |
4152 S_00B81C_NUM_THREAD_PARTIAL(remainder[1]));
4153 radeon_emit(cs,
4154 S_00B81C_NUM_THREAD_FULL(cs_block_size[2]) |
4155 S_00B81C_NUM_THREAD_PARTIAL(remainder[2]));
4156
4157 dispatch_initiator |= S_00B800_PARTIAL_TG_EN(1);
4158 }
4159
4160 if (loc->sgpr_idx != -1) {
4161 assert(loc->num_sgprs == 3);
4162
4163 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
4164 loc->sgpr_idx * 4, 3);
4165 radeon_emit(cs, blocks[0]);
4166 radeon_emit(cs, blocks[1]);
4167 radeon_emit(cs, blocks[2]);
4168 }
4169
4170 if (offsets[0] || offsets[1] || offsets[2]) {
4171 radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
4172 radeon_emit(cs, offsets[0]);
4173 radeon_emit(cs, offsets[1]);
4174 radeon_emit(cs, offsets[2]);
4175
4176 /* The blocks in the packet are not counts but end values. */
4177 for (unsigned i = 0; i < 3; ++i)
4178 blocks[i] += offsets[i];
4179 } else {
4180 dispatch_initiator |= S_00B800_FORCE_START_AT_000(1);
4181 }
4182
4183 radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, predicating) |
4184 PKT3_SHADER_TYPE_S(1));
4185 radeon_emit(cs, blocks[0]);
4186 radeon_emit(cs, blocks[1]);
4187 radeon_emit(cs, blocks[2]);
4188 radeon_emit(cs, dispatch_initiator);
4189 }
4190
4191 assert(cmd_buffer->cs->cdw <= cdw_max);
4192 }
4193
4194 static void
4195 radv_upload_compute_shader_descriptors(struct radv_cmd_buffer *cmd_buffer)
4196 {
4197 radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
4198 radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
4199 }
4200
4201 static void
4202 radv_dispatch(struct radv_cmd_buffer *cmd_buffer,
4203 const struct radv_dispatch_info *info)
4204 {
4205 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
4206 bool has_prefetch =
4207 cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
4208 bool pipeline_is_dirty = pipeline &&
4209 pipeline != cmd_buffer->state.emitted_compute_pipeline;
4210
4211 if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
4212 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
4213 RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
4214 RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) {
4215 /* If we have to wait for idle, set all states first, so that
4216 * all SET packets are processed in parallel with previous draw
4217 * calls. Then upload descriptors, set shader pointers, and
4218 * dispatch, and prefetch at the end. This ensures that the
4219 * time the CUs are idle is very short. (there are only SET_SH
4220 * packets between the wait and the draw)
4221 */
4222 radv_emit_compute_pipeline(cmd_buffer);
4223 si_emit_cache_flush(cmd_buffer);
4224 /* <-- CUs are idle here --> */
4225
4226 radv_upload_compute_shader_descriptors(cmd_buffer);
4227
4228 radv_emit_dispatch_packets(cmd_buffer, info);
4229 /* <-- CUs are busy here --> */
4230
4231 /* Start prefetches after the dispatch has been started. Both
4232 * will run in parallel, but starting the dispatch first is
4233 * more important.
4234 */
4235 if (has_prefetch && pipeline_is_dirty) {
4236 radv_emit_shader_prefetch(cmd_buffer,
4237 pipeline->shaders[MESA_SHADER_COMPUTE]);
4238 }
4239 } else {
4240 /* If we don't wait for idle, start prefetches first, then set
4241 * states, and dispatch at the end.
4242 */
4243 si_emit_cache_flush(cmd_buffer);
4244
4245 if (has_prefetch && pipeline_is_dirty) {
4246 radv_emit_shader_prefetch(cmd_buffer,
4247 pipeline->shaders[MESA_SHADER_COMPUTE]);
4248 }
4249
4250 radv_upload_compute_shader_descriptors(cmd_buffer);
4251
4252 radv_emit_compute_pipeline(cmd_buffer);
4253 radv_emit_dispatch_packets(cmd_buffer, info);
4254 }
4255
4256 radv_cmd_buffer_after_draw(cmd_buffer, RADV_CMD_FLAG_CS_PARTIAL_FLUSH);
4257 }
4258
4259 void radv_CmdDispatchBase(
4260 VkCommandBuffer commandBuffer,
4261 uint32_t base_x,
4262 uint32_t base_y,
4263 uint32_t base_z,
4264 uint32_t x,
4265 uint32_t y,
4266 uint32_t z)
4267 {
4268 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4269 struct radv_dispatch_info info = {};
4270
4271 info.blocks[0] = x;
4272 info.blocks[1] = y;
4273 info.blocks[2] = z;
4274
4275 info.offsets[0] = base_x;
4276 info.offsets[1] = base_y;
4277 info.offsets[2] = base_z;
4278 radv_dispatch(cmd_buffer, &info);
4279 }
4280
4281 void radv_CmdDispatch(
4282 VkCommandBuffer commandBuffer,
4283 uint32_t x,
4284 uint32_t y,
4285 uint32_t z)
4286 {
4287 radv_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
4288 }
4289
4290 void radv_CmdDispatchIndirect(
4291 VkCommandBuffer commandBuffer,
4292 VkBuffer _buffer,
4293 VkDeviceSize offset)
4294 {
4295 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4296 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
4297 struct radv_dispatch_info info = {};
4298
4299 info.indirect = buffer;
4300 info.indirect_offset = offset;
4301
4302 radv_dispatch(cmd_buffer, &info);
4303 }
4304
4305 void radv_unaligned_dispatch(
4306 struct radv_cmd_buffer *cmd_buffer,
4307 uint32_t x,
4308 uint32_t y,
4309 uint32_t z)
4310 {
4311 struct radv_dispatch_info info = {};
4312
4313 info.blocks[0] = x;
4314 info.blocks[1] = y;
4315 info.blocks[2] = z;
4316 info.unaligned = 1;
4317
4318 radv_dispatch(cmd_buffer, &info);
4319 }
4320
4321 void radv_CmdEndRenderPass(
4322 VkCommandBuffer commandBuffer)
4323 {
4324 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4325
4326 radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier);
4327
4328 radv_cmd_buffer_resolve_subpass(cmd_buffer);
4329
4330 for (unsigned i = 0; i < cmd_buffer->state.framebuffer->attachment_count; ++i) {
4331 VkImageLayout layout = cmd_buffer->state.pass->attachments[i].final_layout;
4332 radv_handle_subpass_image_transition(cmd_buffer,
4333 (struct radv_subpass_attachment){i, layout});
4334 }
4335
4336 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
4337
4338 cmd_buffer->state.pass = NULL;
4339 cmd_buffer->state.subpass = NULL;
4340 cmd_buffer->state.attachments = NULL;
4341 cmd_buffer->state.framebuffer = NULL;
4342 }
4343
4344 void radv_CmdEndRenderPass2KHR(
4345 VkCommandBuffer commandBuffer,
4346 const VkSubpassEndInfoKHR* pSubpassEndInfo)
4347 {
4348 radv_CmdEndRenderPass(commandBuffer);
4349 }
4350
4351 /*
4352 * For HTILE we have the following interesting clear words:
4353 * 0xfffff30f: Uncompressed, full depth range, for depth+stencil HTILE
4354 * 0xfffc000f: Uncompressed, full depth range, for depth only HTILE.
4355 * 0xfffffff0: Clear depth to 1.0
4356 * 0x00000000: Clear depth to 0.0
4357 */
4358 static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
4359 struct radv_image *image,
4360 const VkImageSubresourceRange *range,
4361 uint32_t clear_word)
4362 {
4363 assert(range->baseMipLevel == 0);
4364 assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_MIP_LEVELS);
4365 unsigned layer_count = radv_get_layerCount(image, range);
4366 uint64_t size = image->surface.htile_slice_size * layer_count;
4367 VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT;
4368 uint64_t offset = image->offset + image->htile_offset +
4369 image->surface.htile_slice_size * range->baseArrayLayer;
4370 struct radv_cmd_state *state = &cmd_buffer->state;
4371 VkClearDepthStencilValue value = {};
4372
4373 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
4374 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
4375
4376 state->flush_bits |= radv_fill_buffer(cmd_buffer, image->bo, offset,
4377 size, clear_word);
4378
4379 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
4380
4381 if (vk_format_is_stencil(image->vk_format))
4382 aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
4383
4384 radv_set_ds_clear_metadata(cmd_buffer, image, value, aspects);
4385
4386 if (radv_image_is_tc_compat_htile(image)) {
4387 /* Initialize the TC-compat metadata value to 0 because by
4388 * default DB_Z_INFO.RANGE_PRECISION is set to 1, and we only
4389 * have to conditionally update its value when performing
4390 * a fast depth clear.
4391 */
4392 radv_set_tc_compat_zrange_metadata(cmd_buffer, image, 0);
4393 }
4394 }
4395
4396 static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
4397 struct radv_image *image,
4398 VkImageLayout src_layout,
4399 VkImageLayout dst_layout,
4400 unsigned src_queue_mask,
4401 unsigned dst_queue_mask,
4402 const VkImageSubresourceRange *range)
4403 {
4404 if (!radv_image_has_htile(image))
4405 return;
4406
4407 if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
4408 radv_layout_has_htile(image, dst_layout, dst_queue_mask)) {
4409 /* TODO: merge with the clear if applicable */
4410 radv_initialize_htile(cmd_buffer, image, range, 0);
4411 } else if (!radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
4412 radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
4413 uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
4414 radv_initialize_htile(cmd_buffer, image, range, clear_value);
4415 } else if (radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
4416 !radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
4417 VkImageSubresourceRange local_range = *range;
4418 local_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
4419 local_range.baseMipLevel = 0;
4420 local_range.levelCount = 1;
4421
4422 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
4423 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
4424
4425 radv_decompress_depth_image_inplace(cmd_buffer, image, &local_range);
4426
4427 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
4428 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
4429 }
4430 }
4431
4432 static void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
4433 struct radv_image *image, uint32_t value)
4434 {
4435 struct radv_cmd_state *state = &cmd_buffer->state;
4436
4437 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
4438 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
4439
4440 state->flush_bits |= radv_clear_cmask(cmd_buffer, image, value);
4441
4442 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
4443 }
4444
4445 void radv_initialize_fmask(struct radv_cmd_buffer *cmd_buffer,
4446 struct radv_image *image)
4447 {
4448 struct radv_cmd_state *state = &cmd_buffer->state;
4449 static const uint32_t fmask_clear_values[4] = {
4450 0x00000000,
4451 0x02020202,
4452 0xE4E4E4E4,
4453 0x76543210
4454 };
4455 uint32_t log2_samples = util_logbase2(image->info.samples);
4456 uint32_t value = fmask_clear_values[log2_samples];
4457
4458 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
4459 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
4460
4461 state->flush_bits |= radv_clear_fmask(cmd_buffer, image, value);
4462
4463 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
4464 }
4465
4466 void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
4467 struct radv_image *image, uint32_t value)
4468 {
4469 struct radv_cmd_state *state = &cmd_buffer->state;
4470
4471 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
4472 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
4473
4474 state->flush_bits |= radv_clear_dcc(cmd_buffer, image, value);
4475
4476 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
4477 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
4478 }
4479
4480 /**
4481 * Initialize DCC/FMASK/CMASK metadata for a color image.
4482 */
4483 static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer,
4484 struct radv_image *image,
4485 VkImageLayout src_layout,
4486 VkImageLayout dst_layout,
4487 unsigned src_queue_mask,
4488 unsigned dst_queue_mask)
4489 {
4490 if (radv_image_has_cmask(image)) {
4491 uint32_t value = 0xffffffffu; /* Fully expanded mode. */
4492
4493 /* TODO: clarify this. */
4494 if (radv_image_has_fmask(image)) {
4495 value = 0xccccccccu;
4496 }
4497
4498 radv_initialise_cmask(cmd_buffer, image, value);
4499 }
4500
4501 if (radv_image_has_fmask(image)) {
4502 radv_initialize_fmask(cmd_buffer, image);
4503 }
4504
4505 if (radv_image_has_dcc(image)) {
4506 uint32_t value = 0xffffffffu; /* Fully expanded mode. */
4507 bool need_decompress_pass = false;
4508
4509 if (radv_layout_dcc_compressed(image, dst_layout,
4510 dst_queue_mask)) {
4511 value = 0x20202020u;
4512 need_decompress_pass = true;
4513 }
4514
4515 radv_initialize_dcc(cmd_buffer, image, value);
4516
4517 radv_update_fce_metadata(cmd_buffer, image,
4518 need_decompress_pass);
4519 }
4520
4521 if (radv_image_has_cmask(image) || radv_image_has_dcc(image)) {
4522 uint32_t color_values[2] = {};
4523 radv_set_color_clear_metadata(cmd_buffer, image, color_values);
4524 }
4525 }
4526
4527 /**
4528 * Handle color image transitions for DCC/FMASK/CMASK.
4529 */
4530 static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffer,
4531 struct radv_image *image,
4532 VkImageLayout src_layout,
4533 VkImageLayout dst_layout,
4534 unsigned src_queue_mask,
4535 unsigned dst_queue_mask,
4536 const VkImageSubresourceRange *range)
4537 {
4538 if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
4539 radv_init_color_image_metadata(cmd_buffer, image,
4540 src_layout, dst_layout,
4541 src_queue_mask, dst_queue_mask);
4542 return;
4543 }
4544
4545 if (radv_image_has_dcc(image)) {
4546 if (src_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
4547 radv_initialize_dcc(cmd_buffer, image, 0xffffffffu);
4548 } else if (radv_layout_dcc_compressed(image, src_layout, src_queue_mask) &&
4549 !radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask)) {
4550 radv_decompress_dcc(cmd_buffer, image, range);
4551 } else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
4552 !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
4553 radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
4554 }
4555 } else if (radv_image_has_cmask(image) || radv_image_has_fmask(image)) {
4556 if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
4557 !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
4558 radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
4559 }
4560
4561 if (radv_image_has_fmask(image)) {
4562 if (src_layout != VK_IMAGE_LAYOUT_GENERAL &&
4563 dst_layout == VK_IMAGE_LAYOUT_GENERAL) {
4564 radv_expand_fmask_image_inplace(cmd_buffer, image, range);
4565 }
4566 }
4567 }
4568 }
4569
4570 static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
4571 struct radv_image *image,
4572 VkImageLayout src_layout,
4573 VkImageLayout dst_layout,
4574 uint32_t src_family,
4575 uint32_t dst_family,
4576 const VkImageSubresourceRange *range)
4577 {
4578 if (image->exclusive && src_family != dst_family) {
4579 /* This is an acquire or a release operation and there will be
4580 * a corresponding release/acquire. Do the transition in the
4581 * most flexible queue. */
4582
4583 assert(src_family == cmd_buffer->queue_family_index ||
4584 dst_family == cmd_buffer->queue_family_index);
4585
4586 if (cmd_buffer->queue_family_index == RADV_QUEUE_TRANSFER)
4587 return;
4588
4589 if (cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
4590 (src_family == RADV_QUEUE_GENERAL ||
4591 dst_family == RADV_QUEUE_GENERAL))
4592 return;
4593 }
4594
4595 unsigned src_queue_mask =
4596 radv_image_queue_family_mask(image, src_family,
4597 cmd_buffer->queue_family_index);
4598 unsigned dst_queue_mask =
4599 radv_image_queue_family_mask(image, dst_family,
4600 cmd_buffer->queue_family_index);
4601
4602 if (vk_format_is_depth(image->vk_format)) {
4603 radv_handle_depth_image_transition(cmd_buffer, image,
4604 src_layout, dst_layout,
4605 src_queue_mask, dst_queue_mask,
4606 range);
4607 } else {
4608 radv_handle_color_image_transition(cmd_buffer, image,
4609 src_layout, dst_layout,
4610 src_queue_mask, dst_queue_mask,
4611 range);
4612 }
4613 }
4614
4615 struct radv_barrier_info {
4616 uint32_t eventCount;
4617 const VkEvent *pEvents;
4618 VkPipelineStageFlags srcStageMask;
4619 };
4620
4621 static void
4622 radv_barrier(struct radv_cmd_buffer *cmd_buffer,
4623 uint32_t memoryBarrierCount,
4624 const VkMemoryBarrier *pMemoryBarriers,
4625 uint32_t bufferMemoryBarrierCount,
4626 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
4627 uint32_t imageMemoryBarrierCount,
4628 const VkImageMemoryBarrier *pImageMemoryBarriers,
4629 const struct radv_barrier_info *info)
4630 {
4631 struct radeon_cmdbuf *cs = cmd_buffer->cs;
4632 enum radv_cmd_flush_bits src_flush_bits = 0;
4633 enum radv_cmd_flush_bits dst_flush_bits = 0;
4634
4635 for (unsigned i = 0; i < info->eventCount; ++i) {
4636 RADV_FROM_HANDLE(radv_event, event, info->pEvents[i]);
4637 uint64_t va = radv_buffer_get_va(event->bo);
4638
4639 radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
4640
4641 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
4642
4643 radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, va, 1, 0xffffffff);
4644 assert(cmd_buffer->cs->cdw <= cdw_max);
4645 }
4646
4647 for (uint32_t i = 0; i < memoryBarrierCount; i++) {
4648 src_flush_bits |= radv_src_access_flush(cmd_buffer, pMemoryBarriers[i].srcAccessMask,
4649 NULL);
4650 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pMemoryBarriers[i].dstAccessMask,
4651 NULL);
4652 }
4653
4654 for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
4655 src_flush_bits |= radv_src_access_flush(cmd_buffer, pBufferMemoryBarriers[i].srcAccessMask,
4656 NULL);
4657 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pBufferMemoryBarriers[i].dstAccessMask,
4658 NULL);
4659 }
4660
4661 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
4662 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
4663
4664 src_flush_bits |= radv_src_access_flush(cmd_buffer, pImageMemoryBarriers[i].srcAccessMask,
4665 image);
4666 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pImageMemoryBarriers[i].dstAccessMask,
4667 image);
4668 }
4669
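/* Wait for the source stages and flush the source caches before the
 * layout transitions below; the destination cache invalidations are
 * accumulated afterwards.
 */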
4670 radv_stage_flush(cmd_buffer, info->srcStageMask);
4671 cmd_buffer->state.flush_bits |= src_flush_bits;
4672
4673 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
4674 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
4675 radv_handle_image_transition(cmd_buffer, image,
4676 pImageMemoryBarriers[i].oldLayout,
4677 pImageMemoryBarriers[i].newLayout,
4678 pImageMemoryBarriers[i].srcQueueFamilyIndex,
4679 pImageMemoryBarriers[i].dstQueueFamilyIndex,
4680 &pImageMemoryBarriers[i].subresourceRange);
4681 }
4682
4683 /* Make sure CP DMA is idle because the driver might have performed a
4684 * DMA operation for copying or filling buffers/images.
4685 */
4686 if (info->srcStageMask & (VK_PIPELINE_STAGE_TRANSFER_BIT |
4687 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT))
4688 si_cp_dma_wait_for_idle(cmd_buffer);
4689
4690 cmd_buffer->state.flush_bits |= dst_flush_bits;
4691 }
4692
4693 void radv_CmdPipelineBarrier(
4694 VkCommandBuffer commandBuffer,
4695 VkPipelineStageFlags srcStageMask,
4696 VkPipelineStageFlags destStageMask,
4697 VkBool32 byRegion,
4698 uint32_t memoryBarrierCount,
4699 const VkMemoryBarrier* pMemoryBarriers,
4700 uint32_t bufferMemoryBarrierCount,
4701 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
4702 uint32_t imageMemoryBarrierCount,
4703 const VkImageMemoryBarrier* pImageMemoryBarriers)
4704 {
4705 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4706 struct radv_barrier_info info;
4707
4708 info.eventCount = 0;
4709 info.pEvents = NULL;
4710 info.srcStageMask = srcStageMask;
4711
4712 radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
4713 bufferMemoryBarrierCount, pBufferMemoryBarriers,
4714 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
4715 }
4716
4717
4718 static void write_event(struct radv_cmd_buffer *cmd_buffer,
4719 struct radv_event *event,
4720 VkPipelineStageFlags stageMask,
4721 unsigned value)
4722 {
4723 struct radeon_cmdbuf *cs = cmd_buffer->cs;
4724 uint64_t va = radv_buffer_get_va(event->bo);
4725
4726 si_emit_cache_flush(cmd_buffer);
4727
4728 radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
4729
4730 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 18);
4731
4732 /* Flags that only require a top-of-pipe event. */
4733 VkPipelineStageFlags top_of_pipe_flags =
4734 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
4735
4736 /* Flags that only require a post-index-fetch event. */
4737 VkPipelineStageFlags post_index_fetch_flags =
4738 top_of_pipe_flags |
4739 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
4740 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
4741
4742 /* Make sure CP DMA is idle because the driver might have performed a
4743 * DMA operation for copying or filling buffers/images.
4744 */
4745 if (stageMask & (VK_PIPELINE_STAGE_TRANSFER_BIT |
4746 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT))
4747 si_cp_dma_wait_for_idle(cmd_buffer);
4748
4749 /* TODO: Emit EOS events for syncing PS/CS stages. */
4750
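/* Pick the cheapest way to write the event value: a PFP write when only
 * top-of-pipe work has to finish, an ME write when index/indirect fetches
 * also have to finish, and otherwise a full bottom-of-pipe EOP event.
 */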
4751 if (!(stageMask & ~top_of_pipe_flags)) {
4752 /* Just need to sync the PFP engine. */
4753 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
4754 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
4755 S_370_WR_CONFIRM(1) |
4756 S_370_ENGINE_SEL(V_370_PFP));
4757 radeon_emit(cs, va);
4758 radeon_emit(cs, va >> 32);
4759 radeon_emit(cs, value);
4760 } else if (!(stageMask & ~post_index_fetch_flags)) {
4761 /* Sync ME because PFP reads index and indirect buffers. */
4762 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
4763 radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
4764 S_370_WR_CONFIRM(1) |
4765 S_370_ENGINE_SEL(V_370_ME));
4766 radeon_emit(cs, va);
4767 radeon_emit(cs, va >> 32);
4768 radeon_emit(cs, value);
4769 } else {
4770 /* Otherwise, sync all prior GPU work using an EOP event. */
4771 si_cs_emit_write_event_eop(cs,
4772 cmd_buffer->device->physical_device->rad_info.chip_class,
4773 radv_cmd_buffer_uses_mec(cmd_buffer),
4774 V_028A90_BOTTOM_OF_PIPE_TS, 0,
4775 EOP_DATA_SEL_VALUE_32BIT, va, value,
4776 cmd_buffer->gfx9_eop_bug_va);
4777 }
4778
4779 assert(cmd_buffer->cs->cdw <= cdw_max);
4780 }
4781
4782 void radv_CmdSetEvent(VkCommandBuffer commandBuffer,
4783 VkEvent _event,
4784 VkPipelineStageFlags stageMask)
4785 {
4786 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4787 RADV_FROM_HANDLE(radv_event, event, _event);
4788
4789 write_event(cmd_buffer, event, stageMask, 1);
4790 }
4791
4792 void radv_CmdResetEvent(VkCommandBuffer commandBuffer,
4793 VkEvent _event,
4794 VkPipelineStageFlags stageMask)
4795 {
4796 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4797 RADV_FROM_HANDLE(radv_event, event, _event);
4798
4799 write_event(cmd_buffer, event, stageMask, 0);
4800 }
4801
4802 void radv_CmdWaitEvents(VkCommandBuffer commandBuffer,
4803 uint32_t eventCount,
4804 const VkEvent* pEvents,
4805 VkPipelineStageFlags srcStageMask,
4806 VkPipelineStageFlags dstStageMask,
4807 uint32_t memoryBarrierCount,
4808 const VkMemoryBarrier* pMemoryBarriers,
4809 uint32_t bufferMemoryBarrierCount,
4810 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
4811 uint32_t imageMemoryBarrierCount,
4812 const VkImageMemoryBarrier* pImageMemoryBarriers)
4813 {
4814 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4815 struct radv_barrier_info info;
4816
4817 info.eventCount = eventCount;
4818 info.pEvents = pEvents;
4819 info.srcStageMask = 0;
4820
4821 radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
4822 bufferMemoryBarrierCount, pBufferMemoryBarriers,
4823 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
4824 }
4825
4826
4827 void radv_CmdSetDeviceMask(VkCommandBuffer commandBuffer,
4828 uint32_t deviceMask)
4829 {
4830 /* No-op */
4831 }
4832
4833 /* VK_EXT_conditional_rendering */
4834 void radv_CmdBeginConditionalRenderingEXT(
4835 VkCommandBuffer commandBuffer,
4836 const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin)
4837 {
4838 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4839 RADV_FROM_HANDLE(radv_buffer, buffer, pConditionalRenderingBegin->buffer);
4840 bool draw_visible = true;
4841 uint64_t va;
4842
4843 va = radv_buffer_get_va(buffer->bo) + pConditionalRenderingBegin->offset;
4844
4845 /* By default, if the 32-bit value at offset in buffer memory is zero,
4846 * then the rendering commands are discarded, otherwise they are
4847 * executed as normal. If the inverted flag is set, all commands are
4848 * discarded if the value is non-zero.
4849 */
4850 if (pConditionalRenderingBegin->flags &
4851 VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT) {
4852 draw_visible = false;
4853 }
4854
4855 si_emit_cache_flush(cmd_buffer);
4856
4857 /* Enable predication for this command buffer. */
4858 si_emit_set_predication_state(cmd_buffer, draw_visible, va);
4859 cmd_buffer->state.predicating = true;
4860
4861 /* Store conditional rendering user info. */
4862 cmd_buffer->state.predication_type = draw_visible;
4863 cmd_buffer->state.predication_va = va;
4864 }
4865
4866 void radv_CmdEndConditionalRenderingEXT(
4867 VkCommandBuffer commandBuffer)
4868 {
4869 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4870
4871 /* Disable predication for this command buffer. */
4872 si_emit_set_predication_state(cmd_buffer, false, 0);
4873 cmd_buffer->state.predicating = false;
4874
4875 /* Reset conditional rendering user info. */
4876 cmd_buffer->state.predication_type = -1;
4877 cmd_buffer->state.predication_va = 0;
4878 }
4879
4880 /* VK_EXT_transform_feedback */
4881 void radv_CmdBindTransformFeedbackBuffersEXT(
4882 VkCommandBuffer commandBuffer,
4883 uint32_t firstBinding,
4884 uint32_t bindingCount,
4885 const VkBuffer* pBuffers,
4886 const VkDeviceSize* pOffsets,
4887 const VkDeviceSize* pSizes)
4888 {
4889 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4890 struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
4891 uint8_t enabled_mask = 0;
4892
4893 assert(firstBinding + bindingCount <= MAX_SO_BUFFERS);
4894 for (uint32_t i = 0; i < bindingCount; i++) {
4895 uint32_t idx = firstBinding + i;
4896
4897 sb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
4898 sb[idx].offset = pOffsets[i];
4899 sb[idx].size = pSizes[i];
4900
4901 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
4902 sb[idx].buffer->bo);
4903
4904 enabled_mask |= 1 << idx;
4905 }
4906
4907 cmd_buffer->state.streamout.enabled_mask = enabled_mask;
4908
4909 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_STREAMOUT_BUFFER;
4910 }
4911
4912 static void
4913 radv_emit_streamout_enable(struct radv_cmd_buffer *cmd_buffer)
4914 {
4915 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
4916 struct radeon_cmdbuf *cs = cmd_buffer->cs;
4917
4918 radeon_set_context_reg_seq(cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
4919 radeon_emit(cs,
4920 S_028B94_STREAMOUT_0_EN(so->streamout_enabled) |
4921 S_028B94_RAST_STREAM(0) |
4922 S_028B94_STREAMOUT_1_EN(so->streamout_enabled) |
4923 S_028B94_STREAMOUT_2_EN(so->streamout_enabled) |
4924 S_028B94_STREAMOUT_3_EN(so->streamout_enabled));
4925 radeon_emit(cs, so->hw_enabled_mask &
4926 so->enabled_stream_buffers_mask);
4927
4928 cmd_buffer->state.context_roll_without_scissor_emitted = true;
4929 }
4930
4931 static void
4932 radv_set_streamout_enable(struct radv_cmd_buffer *cmd_buffer, bool enable)
4933 {
4934 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
4935 bool old_streamout_enabled = so->streamout_enabled;
4936 uint32_t old_hw_enabled_mask = so->hw_enabled_mask;
4937
4938 so->streamout_enabled = enable;
4939
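/* Replicate the per-buffer enable mask into one 4-bit field per stream;
 * this is the value that ends up in VGT_STRMOUT_BUFFER_CONFIG.
 */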
4940 so->hw_enabled_mask = so->enabled_mask |
4941 (so->enabled_mask << 4) |
4942 (so->enabled_mask << 8) |
4943 (so->enabled_mask << 12);
4944
4945 if ((old_streamout_enabled != so->streamout_enabled) ||
4946 (old_hw_enabled_mask != so->hw_enabled_mask))
4947 radv_emit_streamout_enable(cmd_buffer);
4948 }
4949
4950 static void radv_flush_vgt_streamout(struct radv_cmd_buffer *cmd_buffer)
4951 {
4952 struct radeon_cmdbuf *cs = cmd_buffer->cs;
4953 unsigned reg_strmout_cntl;
4954
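/* Ask the CP to update the streamout offsets, then wait until
 * OFFSET_UPDATE_DONE is set before touching the streamout registers.
 */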
4955 /* The register is at different places on different ASICs. */
4956 if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
4957 reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
4958 radeon_set_uconfig_reg(cs, reg_strmout_cntl, 0);
4959 } else {
4960 reg_strmout_cntl = R_0084FC_CP_STRMOUT_CNTL;
4961 radeon_set_config_reg(cs, reg_strmout_cntl, 0);
4962 }
4963
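	/* Writing 0 to CP_STRMOUT_CNTL clears OFFSET_UPDATE_DONE; the
	 * VGTSTREAMOUT_FLUSH event makes the CP set it again once the
	 * streamout offsets are idle, so the WAIT_REG_MEM below blocks
	 * until the flush has completed.
	 */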
4964 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
4965 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0));
4966
4967 radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
4968 radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
4969 radeon_emit(cs, reg_strmout_cntl >> 2); /* register */
4970 radeon_emit(cs, 0);
4971 radeon_emit(cs, S_0084FC_OFFSET_UPDATE_DONE(1)); /* reference value */
4972 radeon_emit(cs, S_0084FC_OFFSET_UPDATE_DONE(1)); /* mask */
4973 radeon_emit(cs, 4); /* poll interval */
4974 }
4975
4976 void radv_CmdBeginTransformFeedbackEXT(
4977 VkCommandBuffer commandBuffer,
4978 uint32_t firstCounterBuffer,
4979 uint32_t counterBufferCount,
4980 const VkBuffer* pCounterBuffers,
4981 const VkDeviceSize* pCounterBufferOffsets)
4982 {
4983 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
4984 struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
4985 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
4986 struct radeon_cmdbuf *cs = cmd_buffer->cs;
4987 uint32_t i;
4988
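	/* Make sure streamout is idle before the buffer sizes, strides and
	 * offsets are (re)programmed below.
	 */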
4989 radv_flush_vgt_streamout(cmd_buffer);
4990
4991 assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
4992 for_each_bit(i, so->enabled_mask) {
4993 int32_t counter_buffer_idx = i - firstCounterBuffer;
4994 if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
4995 counter_buffer_idx = -1;
4996
4997 /* SI binds streamout buffers as shader resources.
4998 * VGT only counts primitives and tells the shader through
4999 * SGPRs what to do.
5000 */
5001 radeon_set_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 2);
5002 radeon_emit(cs, sb[i].size >> 2); /* BUFFER_SIZE (in DW) */
5003 radeon_emit(cs, so->stride_in_dw[i]); /* VTX_STRIDE (in DW) */
5004
5005 cmd_buffer->state.context_roll_without_scissor_emitted = true;
5006
5007 if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) {
5008 /* The array of counter buffers is optional. */
5009 RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
5010 uint64_t va = radv_buffer_get_va(buffer->bo);
5011
5012 			va += buffer->offset;
			/* pCounterBufferOffsets is optional; NULL means all offsets are zero. */
			if (pCounterBufferOffsets)
				va += pCounterBufferOffsets[counter_buffer_idx];
5013
5014 /* Append */
5015 radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
5016 radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
5017 STRMOUT_DATA_TYPE(1) | /* offset in bytes */
5018 STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM)); /* control */
5019 radeon_emit(cs, 0); /* unused */
5020 radeon_emit(cs, 0); /* unused */
5021 radeon_emit(cs, va); /* src address lo */
5022 radeon_emit(cs, va >> 32); /* src address hi */
5023
5024 radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
5025 } else {
5026 /* Start from the beginning. */
5027 radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
5028 radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
5029 STRMOUT_DATA_TYPE(1) | /* offset in bytes */
5030 STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET)); /* control */
5031 radeon_emit(cs, 0); /* unused */
5032 radeon_emit(cs, 0); /* unused */
5033 radeon_emit(cs, 0); /* unused */
5034 radeon_emit(cs, 0); /* unused */
5035 }
5036 }
5037
5038 radv_set_streamout_enable(cmd_buffer, true);
5039 }
5040
5041 void radv_CmdEndTransformFeedbackEXT(
5042 VkCommandBuffer commandBuffer,
5043 uint32_t firstCounterBuffer,
5044 uint32_t counterBufferCount,
5045 const VkBuffer* pCounterBuffers,
5046 const VkDeviceSize* pCounterBufferOffsets)
5047 {
5048 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5049 struct radv_streamout_state *so = &cmd_buffer->state.streamout;
5050 struct radeon_cmdbuf *cs = cmd_buffer->cs;
5051 uint32_t i;
5052
5053 radv_flush_vgt_streamout(cmd_buffer);
5054
5055 assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
5056 for_each_bit(i, so->enabled_mask) {
5057 int32_t counter_buffer_idx = i - firstCounterBuffer;
5058 if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
5059 counter_buffer_idx = -1;
5060
5061 if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) {
5062 			/* The array of counter buffers is optional. */
5063 RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
5064 uint64_t va = radv_buffer_get_va(buffer->bo);
5065
5066 			va += buffer->offset;
			/* pCounterBufferOffsets is optional; NULL means all offsets are zero. */
			if (pCounterBufferOffsets)
				va += pCounterBufferOffsets[counter_buffer_idx];
5067
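			/* Store the current BufferFilledSize into the counter
			 * buffer so a later vkCmdBeginTransformFeedbackEXT or
			 * vkCmdDrawIndirectByteCountEXT can resume from it.
			 */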
5068 radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
5069 radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
5070 STRMOUT_DATA_TYPE(1) | /* offset in bytes */
5071 STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
5072 STRMOUT_STORE_BUFFER_FILLED_SIZE); /* control */
5073 radeon_emit(cs, va); /* dst address lo */
5074 radeon_emit(cs, va >> 32); /* dst address hi */
5075 radeon_emit(cs, 0); /* unused */
5076 radeon_emit(cs, 0); /* unused */
5077
5078 radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
5079 }
5080
5081 /* Deactivate transform feedback by zeroing the buffer size.
5082 * The counters (primitives generated, primitives emitted) may
5083 * be enabled even if there is no buffer bound, so this ensures
5084 * that the primitives-emitted query won't increment.
5085 */
5086 radeon_set_context_reg(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 0);
5087
5088 cmd_buffer->state.context_roll_without_scissor_emitted = true;
5089 }
5090
5091 radv_set_streamout_enable(cmd_buffer, false);
5092 }
5093
5094 void radv_CmdDrawIndirectByteCountEXT(
5095 VkCommandBuffer commandBuffer,
5096 uint32_t instanceCount,
5097 uint32_t firstInstance,
5098 VkBuffer _counterBuffer,
5099 VkDeviceSize counterBufferOffset,
5100 uint32_t counterOffset,
5101 uint32_t vertexStride)
5102 {
5103 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
5104 RADV_FROM_HANDLE(radv_buffer, counterBuffer, _counterBuffer);
5105 struct radv_draw_info info = {};
5106
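	/* The vertex count is not known on the CPU: the GPU derives it from
	 * the byte count stored in the counter buffer and the vertex stride
	 * (draw-opaque path).
	 */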
5107 info.instance_count = instanceCount;
5108 info.first_instance = firstInstance;
5109 info.strmout_buffer = counterBuffer;
5110 info.strmout_buffer_offset = counterBufferOffset;
5111 info.stride = vertexStride;
5112
5113 radv_draw(cmd_buffer, &info);
5114 }