/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "radv_private.h"
#include "radv_radeon_winsys.h"
#include "radv_shader.h"
#include "radv_cs.h"
#include "sid.h"
#include "gfx9d.h"
#include "vk_format.h"
#include "radv_debug.h"
#include "radv_meta.h"

#include "ac_debug.h"

static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
					 struct radv_image *image,
					 VkImageLayout src_layout,
					 VkImageLayout dst_layout,
					 uint32_t src_family,
					 uint32_t dst_family,
					 const VkImageSubresourceRange *range,
					 VkImageAspectFlags pending_clears);

const struct radv_dynamic_state default_dynamic_state = {
	.viewport = {
		.count = 0,
	},
	.scissor = {
		.count = 0,
	},
	.line_width = 1.0f,
	.depth_bias = {
		.bias = 0.0f,
		.clamp = 0.0f,
		.slope = 0.0f,
	},
	.blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
	.depth_bounds = {
		.min = 0.0f,
		.max = 1.0f,
	},
	.stencil_compare_mask = {
		.front = ~0u,
		.back = ~0u,
	},
	.stencil_write_mask = {
		.front = ~0u,
		.back = ~0u,
	},
	.stencil_reference = {
		.front = 0u,
		.back = 0u,
	},
};

static void
radv_bind_dynamic_state(struct radv_cmd_buffer *cmd_buffer,
			const struct radv_dynamic_state *src)
{
	struct radv_dynamic_state *dest = &cmd_buffer->state.dynamic;
	uint32_t copy_mask = src->mask;
	uint32_t dest_mask = 0;

	/* Make sure to copy the number of viewports/scissors because they can
	 * only be specified at pipeline creation time.
	 */
	dest->viewport.count = src->viewport.count;
	dest->scissor.count = src->scissor.count;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
		if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
			   src->viewport.count * sizeof(VkViewport))) {
			typed_memcpy(dest->viewport.viewports,
				     src->viewport.viewports,
				     src->viewport.count);
			dest_mask |= 1 << VK_DYNAMIC_STATE_VIEWPORT;
		}
	}

	if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
		if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
			   src->scissor.count * sizeof(VkRect2D))) {
			typed_memcpy(dest->scissor.scissors,
				     src->scissor.scissors, src->scissor.count);
			dest_mask |= 1 << VK_DYNAMIC_STATE_SCISSOR;
		}
	}

	if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
		if (dest->line_width != src->line_width) {
			dest->line_width = src->line_width;
			dest_mask |= 1 << VK_DYNAMIC_STATE_LINE_WIDTH;
		}
	}

	if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
		if (memcmp(&dest->depth_bias, &src->depth_bias,
			   sizeof(src->depth_bias))) {
			dest->depth_bias = src->depth_bias;
			dest_mask |= 1 << VK_DYNAMIC_STATE_DEPTH_BIAS;
		}
	}

	if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS)) {
		if (memcmp(&dest->blend_constants, &src->blend_constants,
			   sizeof(src->blend_constants))) {
			typed_memcpy(dest->blend_constants,
				     src->blend_constants, 4);
			dest_mask |= 1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS;
		}
	}

	if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
		if (memcmp(&dest->depth_bounds, &src->depth_bounds,
			   sizeof(src->depth_bounds))) {
			dest->depth_bounds = src->depth_bounds;
			dest_mask |= 1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS;
		}
	}

	if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
		if (memcmp(&dest->stencil_compare_mask,
			   &src->stencil_compare_mask,
			   sizeof(src->stencil_compare_mask))) {
			dest->stencil_compare_mask = src->stencil_compare_mask;
			dest_mask |= 1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK;
		}
	}

	if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
		if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
			   sizeof(src->stencil_write_mask))) {
			dest->stencil_write_mask = src->stencil_write_mask;
			dest_mask |= 1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK;
		}
	}

	if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
		if (memcmp(&dest->stencil_reference, &src->stencil_reference,
			   sizeof(src->stencil_reference))) {
			dest->stencil_reference = src->stencil_reference;
			dest_mask |= 1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE;
		}
	}

	cmd_buffer->state.dirty |= dest_mask;
}

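/* On CIK+, the compute queue is executed by the MEC (compute micro engine),
 * which accepts a slightly different packet encoding than the GFX engine in
 * a few places; several emit helpers check this to pick the right form. */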
bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer)
{
	return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
	       cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
}

enum ring_type radv_queue_family_to_ring(int f) {
	switch (f) {
	case RADV_QUEUE_GENERAL:
		return RING_GFX;
	case RADV_QUEUE_COMPUTE:
		return RING_COMPUTE;
	case RADV_QUEUE_TRANSFER:
		return RING_DMA;
	default:
		unreachable("Unknown queue family");
	}
}

static VkResult radv_create_cmd_buffer(
	struct radv_device *                         device,
	struct radv_cmd_pool *                       pool,
	VkCommandBufferLevel                         level,
	VkCommandBuffer*                             pCommandBuffer)
{
	struct radv_cmd_buffer *cmd_buffer;
	unsigned ring;
	cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
			       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (cmd_buffer == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

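	/* Dispatchable Vulkan objects must start with the loader's magic
	 * value so the loader can set up its dispatch table. */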
	cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
	cmd_buffer->device = device;
	cmd_buffer->pool = pool;
	cmd_buffer->level = level;

	if (pool) {
		list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
		cmd_buffer->queue_family_index = pool->queue_family_index;
	} else {
		/* Init the pool_link so we can safely call list_del when we
		 * destroy the command buffer.
		 */
		list_inithead(&cmd_buffer->pool_link);
		cmd_buffer->queue_family_index = RADV_QUEUE_GENERAL;
	}

	ring = radv_queue_family_to_ring(cmd_buffer->queue_family_index);

	cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
	if (!cmd_buffer->cs) {
		vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
	}

	*pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);

	list_inithead(&cmd_buffer->upload.list);

	return VK_SUCCESS;
}

static void
radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer)
{
	list_del(&cmd_buffer->pool_link);

	list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
				 &cmd_buffer->upload.list, list) {
		cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
		list_del(&up->list);
		free(up);
	}

	if (cmd_buffer->upload.upload_bo)
		cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
	cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
	free(cmd_buffer->push_descriptors.set.mapped_ptr);
	vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

static VkResult
radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
{
	cmd_buffer->device->ws->cs_reset(cmd_buffer->cs);

	list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
				 &cmd_buffer->upload.list, list) {
		cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
		list_del(&up->list);
		free(up);
	}

	cmd_buffer->push_constant_stages = 0;
	cmd_buffer->scratch_size_needed = 0;
	cmd_buffer->compute_scratch_size_needed = 0;
	cmd_buffer->esgs_ring_size_needed = 0;
	cmd_buffer->gsvs_ring_size_needed = 0;
	cmd_buffer->tess_rings_needed = false;
	cmd_buffer->sample_positions_needed = false;

	if (cmd_buffer->upload.upload_bo)
		radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
				   cmd_buffer->upload.upload_bo, 8);
	cmd_buffer->upload.offset = 0;

	cmd_buffer->record_result = VK_SUCCESS;

	cmd_buffer->ring_offsets_idx = -1;

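	/* The GFX9 cache-flush path signals completion through an EOP fence
	 * written to memory, so reserve a small fence slot in the upload BO
	 * (an assumption based on how gfx9_fence_offset is consumed). */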
	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		void *fence_ptr;
		radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 0,
					     &cmd_buffer->gfx9_fence_offset,
					     &fence_ptr);
		cmd_buffer->gfx9_fence_bo = cmd_buffer->upload.upload_bo;
	}

	return cmd_buffer->record_result;
}

static bool
radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer,
				  uint64_t min_needed)
{
	uint64_t new_size;
	struct radeon_winsys_bo *bo;
	struct radv_cmd_buffer_upload *upload;
	struct radv_device *device = cmd_buffer->device;

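	/* Grow geometrically: at least 16 KiB and at least double the old
	 * size, so a stream of small uploads amortizes to O(1) allocations. */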
	new_size = MAX2(min_needed, 16 * 1024);
	new_size = MAX2(new_size, 2 * cmd_buffer->upload.size);

	bo = device->ws->buffer_create(device->ws,
				       new_size, 4096,
				       RADEON_DOMAIN_GTT,
				       RADEON_FLAG_CPU_ACCESS |
				       RADEON_FLAG_NO_INTERPROCESS_SHARING);

	if (!bo) {
		cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
		return false;
	}

	radv_cs_add_buffer(device->ws, cmd_buffer->cs, bo, 8);
	if (cmd_buffer->upload.upload_bo) {
		upload = malloc(sizeof(*upload));

		if (!upload) {
			cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
			device->ws->buffer_destroy(bo);
			return false;
		}

		memcpy(upload, &cmd_buffer->upload, sizeof(*upload));
		list_add(&upload->list, &cmd_buffer->upload.list);
	}

	cmd_buffer->upload.upload_bo = bo;
	cmd_buffer->upload.size = new_size;
	cmd_buffer->upload.offset = 0;
	cmd_buffer->upload.map = device->ws->buffer_map(cmd_buffer->upload.upload_bo);

	if (!cmd_buffer->upload.map) {
		cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
		return false;
	}

	return true;
}

bool
radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer,
			     unsigned size,
			     unsigned alignment,
			     unsigned *out_offset,
			     void **ptr)
{
	uint64_t offset = align(cmd_buffer->upload.offset, alignment);
	if (offset + size > cmd_buffer->upload.size) {
		if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size))
			return false;
		offset = 0;
	}

	*out_offset = offset;
	*ptr = cmd_buffer->upload.map + offset;

	cmd_buffer->upload.offset = offset + size;
	return true;
}

bool
radv_cmd_buffer_upload_data(struct radv_cmd_buffer *cmd_buffer,
			    unsigned size, unsigned alignment,
			    const void *data, unsigned *out_offset)
{
	uint8_t *ptr;

	if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size, alignment,
					  out_offset, (void **)&ptr))
		return false;

	if (ptr)
		memcpy(ptr, data, size);

	return true;
}

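/* Ask the ME to write 'count' dwords of 'data' to memory at 'va'.
 * WR_CONFIRM makes the packet complete only once the write has actually
 * landed, so later packets can rely on the data being visible. */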
static void
radv_emit_write_data_packet(struct radeon_winsys_cs *cs, uint64_t va,
			    unsigned count, const uint32_t *data)
{
	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
	radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
			S_370_WR_CONFIRM(1) |
			S_370_ENGINE_SEL(V_370_ME));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit_array(cs, data, count);
}

void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_device *device = cmd_buffer->device;
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va;

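	/* Trace BO layout, as used by this file: dword 0 holds the primary
	 * trace id and dword 1 the secondary trace id; the last bound GFX and
	 * compute pipeline pointers are saved at +8 and +16, and the
	 * descriptor set pointers start at +24. */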
	va = radv_buffer_get_va(device->trace_bo);
	if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
		va += 4;

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 7);

	++cmd_buffer->state.trace_id;
	radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
	radv_emit_write_data_packet(cs, va, 1, &cmd_buffer->state.trace_id);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, AC_ENCODE_TRACE_POINT(cmd_buffer->state.trace_id));
}

static void
radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer)
{
	if (cmd_buffer->device->instance->debug_flags & RADV_DEBUG_SYNC_SHADERS) {
		enum radv_cmd_flush_bits flags;

		/* Force wait for graphics/compute engines to be idle. */
		flags = RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
			RADV_CMD_FLAG_CS_PARTIAL_FLUSH;

		si_cs_emit_cache_flush(cmd_buffer->cs, false,
				       cmd_buffer->device->physical_device->rad_info.chip_class,
				       NULL, 0,
				       radv_cmd_buffer_uses_mec(cmd_buffer),
				       flags);
	}

	if (unlikely(cmd_buffer->device->trace_bo))
		radv_cmd_buffer_trace_emit(cmd_buffer);
}

static void
radv_save_pipeline(struct radv_cmd_buffer *cmd_buffer,
		   struct radv_pipeline *pipeline, enum ring_type ring)
{
	struct radv_device *device = cmd_buffer->device;
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint32_t data[2];
	uint64_t va;

	va = radv_buffer_get_va(device->trace_bo);

	switch (ring) {
	case RING_GFX:
		va += 8;
		break;
	case RING_COMPUTE:
		va += 16;
		break;
	default:
		assert(!"invalid ring type");
	}

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(device->ws,
							   cmd_buffer->cs, 6);

	data[0] = (uintptr_t)pipeline;
	data[1] = (uintptr_t)pipeline >> 32;

	radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
	radv_emit_write_data_packet(cs, va, 2, data);
}

void radv_set_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
			     struct radv_descriptor_set *set,
			     unsigned idx)
{
	cmd_buffer->descriptors[idx] = set;
	if (set)
		cmd_buffer->state.valid_descriptors |= (1u << idx);
	else
		cmd_buffer->state.valid_descriptors &= ~(1u << idx);
	cmd_buffer->state.descriptors_dirty |= (1u << idx);
}

static void
radv_save_descriptors(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_device *device = cmd_buffer->device;
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint32_t data[MAX_SETS * 2] = {};
	uint64_t va;
	unsigned i;
	va = radv_buffer_get_va(device->trace_bo) + 24;

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(device->ws,
							   cmd_buffer->cs, 4 + MAX_SETS * 2);

	for_each_bit(i, cmd_buffer->state.valid_descriptors) {
		struct radv_descriptor_set *set = cmd_buffer->descriptors[i];
		data[i * 2] = (uintptr_t)set;
		data[i * 2 + 1] = (uintptr_t)set >> 32;
	}

	radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
	radv_emit_write_data_packet(cs, va, MAX_SETS * 2, data);
}

static void
radv_emit_graphics_blend_state(struct radv_cmd_buffer *cmd_buffer,
			       struct radv_pipeline *pipeline)
{
	radeon_set_context_reg_seq(cmd_buffer->cs, R_028780_CB_BLEND0_CONTROL, 8);
	radeon_emit_array(cmd_buffer->cs, pipeline->graphics.blend.cb_blend_control,
			  8);
	radeon_set_context_reg(cmd_buffer->cs, R_028808_CB_COLOR_CONTROL, pipeline->graphics.blend.cb_color_control);
	radeon_set_context_reg(cmd_buffer->cs, R_028B70_DB_ALPHA_TO_MASK, pipeline->graphics.blend.db_alpha_to_mask);

	if (cmd_buffer->device->physical_device->has_rbplus) {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028760_SX_MRT0_BLEND_OPT, 8);
		radeon_emit_array(cmd_buffer->cs, pipeline->graphics.blend.sx_mrt_blend_opt, 8);

		radeon_set_context_reg_seq(cmd_buffer->cs, R_028754_SX_PS_DOWNCONVERT, 3);
		radeon_emit(cmd_buffer->cs, 0); /* R_028754_SX_PS_DOWNCONVERT */
		radeon_emit(cmd_buffer->cs, 0); /* R_028758_SX_BLEND_OPT_EPSILON */
		radeon_emit(cmd_buffer->cs, 0); /* R_02875C_SX_BLEND_OPT_CONTROL */
	}
}

static void
radv_emit_graphics_depth_stencil_state(struct radv_cmd_buffer *cmd_buffer,
				       struct radv_pipeline *pipeline)
{
	struct radv_depth_stencil_state *ds = &pipeline->graphics.ds;
	radeon_set_context_reg(cmd_buffer->cs, R_028800_DB_DEPTH_CONTROL, ds->db_depth_control);
	radeon_set_context_reg(cmd_buffer->cs, R_02842C_DB_STENCIL_CONTROL, ds->db_stencil_control);

	radeon_set_context_reg(cmd_buffer->cs, R_028000_DB_RENDER_CONTROL, ds->db_render_control);
	radeon_set_context_reg(cmd_buffer->cs, R_028010_DB_RENDER_OVERRIDE2, ds->db_render_override2);
}

struct ac_userdata_info *
radv_lookup_user_sgpr(struct radv_pipeline *pipeline,
		      gl_shader_stage stage,
		      int idx)
{
	if (stage == MESA_SHADER_VERTEX) {
		if (pipeline->shaders[MESA_SHADER_VERTEX])
			return &pipeline->shaders[MESA_SHADER_VERTEX]->info.user_sgprs_locs.shader_data[idx];
		if (pipeline->shaders[MESA_SHADER_TESS_CTRL])
			return &pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.user_sgprs_locs.shader_data[idx];
		if (pipeline->shaders[MESA_SHADER_GEOMETRY])
			return &pipeline->shaders[MESA_SHADER_GEOMETRY]->info.user_sgprs_locs.shader_data[idx];
	} else if (stage == MESA_SHADER_TESS_EVAL) {
		if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
			return &pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.user_sgprs_locs.shader_data[idx];
		if (pipeline->shaders[MESA_SHADER_GEOMETRY])
			return &pipeline->shaders[MESA_SHADER_GEOMETRY]->info.user_sgprs_locs.shader_data[idx];
	}
	return &pipeline->shaders[stage]->info.user_sgprs_locs.shader_data[idx];
}

static void
radv_emit_userdata_address(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_pipeline *pipeline,
			   gl_shader_stage stage,
			   int idx, uint64_t va)
{
	struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
	uint32_t base_reg = pipeline->user_data_0[stage];
	if (loc->sgpr_idx == -1)
		return;
	assert(loc->num_sgprs == 2);
	assert(!loc->indirect);
	radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, 2);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
}

static void
radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer,
			      struct radv_pipeline *pipeline)
{
	int num_samples = pipeline->graphics.ms.num_samples;
	struct radv_multisample_state *ms = &pipeline->graphics.ms;
	struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_mask[0]);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_mask[1]);

	radeon_set_context_reg(cmd_buffer->cs, R_028804_DB_EQAA, ms->db_eqaa);
	radeon_set_context_reg(cmd_buffer->cs, R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);

	if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples)
		return;

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028BDC_PA_SC_LINE_CNTL, 2);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_line_cntl);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_config);

	radv_cayman_emit_msaa_sample_locs(cmd_buffer->cs, num_samples);

	/* GFX9: Flush DFSM when the AA mode changes. */
	if (cmd_buffer->device->dfsm_allowed) {
		radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
	}
	if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.needs_sample_positions) {
		uint32_t offset;
		struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_FRAGMENT, AC_UD_PS_SAMPLE_POS_OFFSET);
		uint32_t base_reg = pipeline->user_data_0[MESA_SHADER_FRAGMENT];
		if (loc->sgpr_idx == -1)
			return;
		assert(loc->num_sgprs == 1);
		assert(!loc->indirect);
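		/* The user SGPR holds the offset of this sample count's
		 * position block; block N starts at slot N - 1 (1x at 0, 2x
		 * at 1, 4x at 3, 8x at 7, 16x at 15), i.e. right after the
		 * blocks of all smaller sample counts. */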
		switch (num_samples) {
		default:
			offset = 0;
			break;
		case 2:
			offset = 1;
			break;
		case 4:
			offset = 3;
			break;
		case 8:
			offset = 7;
			break;
		case 16:
			offset = 15;
			break;
		}

		radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, offset);
		cmd_buffer->sample_positions_needed = true;
	}
}

static void
radv_emit_graphics_raster_state(struct radv_cmd_buffer *cmd_buffer,
				struct radv_pipeline *pipeline)
{
	struct radv_raster_state *raster = &pipeline->graphics.raster;

	radeon_set_context_reg(cmd_buffer->cs, R_028810_PA_CL_CLIP_CNTL,
			       raster->pa_cl_clip_cntl);
	radeon_set_context_reg(cmd_buffer->cs, R_0286D4_SPI_INTERP_CONTROL_0,
			       raster->spi_interp_control);
	radeon_set_context_reg(cmd_buffer->cs, R_028BE4_PA_SU_VTX_CNTL,
			       raster->pa_su_vtx_cntl);
	radeon_set_context_reg(cmd_buffer->cs, R_028814_PA_SU_SC_MODE_CNTL,
			       raster->pa_su_sc_mode_cntl);
}

static inline void
radv_emit_prefetch_TC_L2_async(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
			       unsigned size)
{
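	/* CP DMA data prefetching into L2 only exists on CIK+; on older
	 * chips this hint is simply skipped. */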
	if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK)
		si_cp_dma_prefetch(cmd_buffer, va, size);
}

static void
radv_emit_VBO_descriptors_prefetch(struct radv_cmd_buffer *cmd_buffer)
{
	if (cmd_buffer->state.vb_prefetch_dirty) {
		radv_emit_prefetch_TC_L2_async(cmd_buffer,
					       cmd_buffer->state.vb_va,
					       cmd_buffer->state.vb_size);
		cmd_buffer->state.vb_prefetch_dirty = false;
	}
}

static void
radv_emit_shader_prefetch(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_shader_variant *shader)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va;

	if (!shader)
		return;

	va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

	radv_cs_add_buffer(ws, cs, shader->bo, 8);
	radv_emit_prefetch_TC_L2_async(cmd_buffer, va, shader->code_size);
}

static void
radv_emit_prefetch(struct radv_cmd_buffer *cmd_buffer,
		   struct radv_pipeline *pipeline)
{
	radv_emit_shader_prefetch(cmd_buffer,
				  pipeline->shaders[MESA_SHADER_VERTEX]);
	radv_emit_VBO_descriptors_prefetch(cmd_buffer);
	radv_emit_shader_prefetch(cmd_buffer,
				  pipeline->shaders[MESA_SHADER_TESS_CTRL]);
	radv_emit_shader_prefetch(cmd_buffer,
				  pipeline->shaders[MESA_SHADER_TESS_EVAL]);
	radv_emit_shader_prefetch(cmd_buffer,
				  pipeline->shaders[MESA_SHADER_GEOMETRY]);
	radv_emit_shader_prefetch(cmd_buffer, pipeline->gs_copy_shader);
	radv_emit_shader_prefetch(cmd_buffer,
				  pipeline->shaders[MESA_SHADER_FRAGMENT]);
}

static void
radv_emit_hw_vs(struct radv_cmd_buffer *cmd_buffer,
		struct radv_pipeline *pipeline,
		struct radv_shader_variant *shader)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

	radeon_set_context_reg(cmd_buffer->cs, R_0286C4_SPI_VS_OUT_CONFIG,
			       pipeline->graphics.vs.spi_vs_out_config);

	radeon_set_context_reg(cmd_buffer->cs, R_02870C_SPI_SHADER_POS_FORMAT,
			       pipeline->graphics.vs.spi_shader_pos_format);

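	/* SPI_SHADER_PGM_LO/HI take the 256-byte-aligned shader VA, low bits
	 * [39:8] in LO and the remaining high bits in HI, hence the >> 8 and
	 * >> 40 below. */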
	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B120_SPI_SHADER_PGM_LO_VS, 4);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);
	radeon_emit(cmd_buffer->cs, shader->rsrc1);
	radeon_emit(cmd_buffer->cs, shader->rsrc2);

	radeon_set_context_reg(cmd_buffer->cs, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));

	radeon_set_context_reg(cmd_buffer->cs, R_02881C_PA_CL_VS_OUT_CNTL,
			       pipeline->graphics.vs.pa_cl_vs_out_cntl);

	if (cmd_buffer->device->physical_device->rad_info.chip_class <= VI)
		radeon_set_context_reg(cmd_buffer->cs, R_028AB4_VGT_REUSE_OFF,
				       pipeline->graphics.vs.vgt_reuse_off);
}

static void
radv_emit_hw_es(struct radv_cmd_buffer *cmd_buffer,
		struct radv_pipeline *pipeline,
		struct radv_shader_variant *shader)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B320_SPI_SHADER_PGM_LO_ES, 4);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);
	radeon_emit(cmd_buffer->cs, shader->rsrc1);
	radeon_emit(cmd_buffer->cs, shader->rsrc2);
}

static void
radv_emit_hw_ls(struct radv_cmd_buffer *cmd_buffer,
		struct radv_shader_variant *shader)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
	uint32_t rsrc2 = shader->rsrc2;

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B520_SPI_SHADER_PGM_LO_LS, 2);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);

	rsrc2 |= S_00B52C_LDS_SIZE(cmd_buffer->state.pipeline->graphics.tess.lds_size);
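	/* On CIK (Hawaii excepted), RSRC2_LS apparently needs to be written
	 * twice: once on its own and once as part of the RSRC1/RSRC2 pair
	 * below, mirroring the equivalent workaround in radeonsi. */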
	if (cmd_buffer->device->physical_device->rad_info.chip_class == CIK &&
	    cmd_buffer->device->physical_device->rad_info.family != CHIP_HAWAII)
		radeon_set_sh_reg(cmd_buffer->cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, rsrc2);

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
	radeon_emit(cmd_buffer->cs, shader->rsrc1);
	radeon_emit(cmd_buffer->cs, rsrc2);
}

static void
radv_emit_hw_hs(struct radv_cmd_buffer *cmd_buffer,
		struct radv_shader_variant *shader)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B410_SPI_SHADER_PGM_LO_LS, 2);
		radeon_emit(cmd_buffer->cs, va >> 8);
		radeon_emit(cmd_buffer->cs, va >> 40);

		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B428_SPI_SHADER_PGM_RSRC1_HS, 2);
		radeon_emit(cmd_buffer->cs, shader->rsrc1);
		radeon_emit(cmd_buffer->cs, shader->rsrc2 |
			    S_00B42C_LDS_SIZE(cmd_buffer->state.pipeline->graphics.tess.lds_size));
	} else {
		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B420_SPI_SHADER_PGM_LO_HS, 4);
		radeon_emit(cmd_buffer->cs, va >> 8);
		radeon_emit(cmd_buffer->cs, va >> 40);
		radeon_emit(cmd_buffer->cs, shader->rsrc1);
		radeon_emit(cmd_buffer->cs, shader->rsrc2);
	}
}

static void
radv_emit_vertex_shader(struct radv_cmd_buffer *cmd_buffer,
			struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *vs;

	radeon_set_context_reg(cmd_buffer->cs, R_028A84_VGT_PRIMITIVEID_EN, pipeline->graphics.vgt_primitiveid_en);

	/* Skip shaders merged into HS/GS */
	vs = pipeline->shaders[MESA_SHADER_VERTEX];
	if (!vs)
		return;

	if (vs->info.vs.as_ls)
		radv_emit_hw_ls(cmd_buffer, vs);
	else if (vs->info.vs.as_es)
		radv_emit_hw_es(cmd_buffer, pipeline, vs);
	else
		radv_emit_hw_vs(cmd_buffer, pipeline, vs);
}

static void
radv_emit_tess_shaders(struct radv_cmd_buffer *cmd_buffer,
		       struct radv_pipeline *pipeline)
{
	if (!radv_pipeline_has_tess(pipeline))
		return;

	struct radv_shader_variant *tes, *tcs;

	tcs = pipeline->shaders[MESA_SHADER_TESS_CTRL];
	tes = pipeline->shaders[MESA_SHADER_TESS_EVAL];

	if (tes) {
		if (tes->info.tes.as_es)
			radv_emit_hw_es(cmd_buffer, pipeline, tes);
		else
			radv_emit_hw_vs(cmd_buffer, pipeline, tes);
	}

	radv_emit_hw_hs(cmd_buffer, tcs);

	radeon_set_context_reg(cmd_buffer->cs, R_028B6C_VGT_TF_PARAM,
			       pipeline->graphics.tess.tf_param);

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK)
		radeon_set_context_reg_idx(cmd_buffer->cs, R_028B58_VGT_LS_HS_CONFIG, 2,
					   pipeline->graphics.tess.ls_hs_config);
	else
		radeon_set_context_reg(cmd_buffer->cs, R_028B58_VGT_LS_HS_CONFIG,
				       pipeline->graphics.tess.ls_hs_config);

	struct ac_userdata_info *loc;

	loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_TESS_CTRL, AC_UD_TCS_OFFCHIP_LAYOUT);
	if (loc->sgpr_idx != -1) {
		uint32_t base_reg = pipeline->user_data_0[MESA_SHADER_TESS_CTRL];
		assert(loc->num_sgprs == 4);
		assert(!loc->indirect);
		radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, 4);
		radeon_emit(cmd_buffer->cs, pipeline->graphics.tess.offchip_layout);
		radeon_emit(cmd_buffer->cs, pipeline->graphics.tess.tcs_out_offsets);
		radeon_emit(cmd_buffer->cs, pipeline->graphics.tess.tcs_out_layout |
			    pipeline->graphics.tess.num_tcs_input_cp << 26);
		radeon_emit(cmd_buffer->cs, pipeline->graphics.tess.tcs_in_layout);
	}

	loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_TESS_EVAL, AC_UD_TES_OFFCHIP_LAYOUT);
	if (loc->sgpr_idx != -1) {
		uint32_t base_reg = pipeline->user_data_0[MESA_SHADER_TESS_EVAL];
		assert(loc->num_sgprs == 1);
		assert(!loc->indirect);

		radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4,
				  pipeline->graphics.tess.offchip_layout);
	}

	loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_VERTEX, AC_UD_VS_LS_TCS_IN_LAYOUT);
	if (loc->sgpr_idx != -1) {
		uint32_t base_reg = pipeline->user_data_0[MESA_SHADER_VERTEX];
		assert(loc->num_sgprs == 1);
		assert(!loc->indirect);

		radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4,
				  pipeline->graphics.tess.tcs_in_layout);
	}
}

static void
radv_emit_geometry_shader(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *gs;
	uint64_t va;

	radeon_set_context_reg(cmd_buffer->cs, R_028A40_VGT_GS_MODE, pipeline->graphics.vgt_gs_mode);

	gs = pipeline->shaders[MESA_SHADER_GEOMETRY];
	if (!gs)
		return;

	uint32_t gsvs_itemsize = gs->info.gs.max_gsvs_emit_size >> 2;

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028A60_VGT_GSVS_RING_OFFSET_1, 3);
	radeon_emit(cmd_buffer->cs, gsvs_itemsize);
	radeon_emit(cmd_buffer->cs, gsvs_itemsize);
	radeon_emit(cmd_buffer->cs, gsvs_itemsize);

	radeon_set_context_reg(cmd_buffer->cs, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gsvs_itemsize);

	radeon_set_context_reg(cmd_buffer->cs, R_028B38_VGT_GS_MAX_VERT_OUT, gs->info.gs.vertices_out);

	uint32_t gs_vert_itemsize = gs->info.gs.gsvs_vertex_size;
	radeon_set_context_reg_seq(cmd_buffer->cs, R_028B5C_VGT_GS_VERT_ITEMSIZE, 4);
	radeon_emit(cmd_buffer->cs, gs_vert_itemsize >> 2);
	radeon_emit(cmd_buffer->cs, 0);
	radeon_emit(cmd_buffer->cs, 0);
	radeon_emit(cmd_buffer->cs, 0);

	uint32_t gs_num_invocations = gs->info.gs.invocations;
	radeon_set_context_reg(cmd_buffer->cs, R_028B90_VGT_GS_INSTANCE_CNT,
			       S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
			       S_028B90_ENABLE(gs_num_invocations > 0));

	radeon_set_context_reg(cmd_buffer->cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
			       pipeline->graphics.gs.vgt_esgs_ring_itemsize);

	va = radv_buffer_get_va(gs->bo) + gs->bo_offset;

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B210_SPI_SHADER_PGM_LO_ES, 2);
		radeon_emit(cmd_buffer->cs, va >> 8);
		radeon_emit(cmd_buffer->cs, va >> 40);

		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
		radeon_emit(cmd_buffer->cs, gs->rsrc1);
		radeon_emit(cmd_buffer->cs, gs->rsrc2 |
			    S_00B22C_LDS_SIZE(pipeline->graphics.gs.lds_size));

		radeon_set_context_reg(cmd_buffer->cs, R_028A44_VGT_GS_ONCHIP_CNTL, pipeline->graphics.gs.vgt_gs_onchip_cntl);
		radeon_set_context_reg(cmd_buffer->cs, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP, pipeline->graphics.gs.vgt_gs_max_prims_per_subgroup);
	} else {
		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B220_SPI_SHADER_PGM_LO_GS, 4);
		radeon_emit(cmd_buffer->cs, va >> 8);
		radeon_emit(cmd_buffer->cs, va >> 40);
		radeon_emit(cmd_buffer->cs, gs->rsrc1);
		radeon_emit(cmd_buffer->cs, gs->rsrc2);
	}

	radv_emit_hw_vs(cmd_buffer, pipeline, pipeline->gs_copy_shader);

	struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
							     AC_UD_GS_VS_RING_STRIDE_ENTRIES);
	if (loc->sgpr_idx != -1) {
		uint32_t stride = gs->info.gs.max_gsvs_emit_size;
		uint32_t num_entries = 64;
		bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI;

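		/* On VI+ the entry count is scaled by the stride, i.e. the
		 * shader is handed a ring size in bytes rather than in
		 * entries; this reading of the GFX8 descriptor change is an
		 * assumption. */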
		if (is_vi)
			num_entries *= stride;

		stride = S_008F04_STRIDE(stride);
		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B230_SPI_SHADER_USER_DATA_GS_0 + loc->sgpr_idx * 4, 2);
		radeon_emit(cmd_buffer->cs, stride);
		radeon_emit(cmd_buffer->cs, num_entries);
	}
}

static void
radv_emit_fragment_shader(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *ps;
	uint64_t va;
	unsigned spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
	struct radv_blend_state *blend = &pipeline->graphics.blend;
	assert(pipeline->shaders[MESA_SHADER_FRAGMENT]);

	ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
	va = radv_buffer_get_va(ps->bo) + ps->bo_offset;

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B020_SPI_SHADER_PGM_LO_PS, 4);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);
	radeon_emit(cmd_buffer->cs, ps->rsrc1);
	radeon_emit(cmd_buffer->cs, ps->rsrc2);

	radeon_set_context_reg(cmd_buffer->cs, R_02880C_DB_SHADER_CONTROL,
			       pipeline->graphics.db_shader_control);

	radeon_set_context_reg(cmd_buffer->cs, R_0286CC_SPI_PS_INPUT_ENA,
			       ps->config.spi_ps_input_ena);

	radeon_set_context_reg(cmd_buffer->cs, R_0286D0_SPI_PS_INPUT_ADDR,
			       ps->config.spi_ps_input_addr);

	if (ps->info.info.ps.force_persample)
		spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);

	radeon_set_context_reg(cmd_buffer->cs, R_0286D8_SPI_PS_IN_CONTROL,
			       S_0286D8_NUM_INTERP(ps->info.fs.num_interp));

	radeon_set_context_reg(cmd_buffer->cs, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);

	radeon_set_context_reg(cmd_buffer->cs, R_028710_SPI_SHADER_Z_FORMAT,
			       pipeline->graphics.shader_z_format);

	radeon_set_context_reg(cmd_buffer->cs, R_028714_SPI_SHADER_COL_FORMAT, blend->spi_shader_col_format);

	radeon_set_context_reg(cmd_buffer->cs, R_028238_CB_TARGET_MASK, blend->cb_target_mask);
	radeon_set_context_reg(cmd_buffer->cs, R_02823C_CB_SHADER_MASK, blend->cb_shader_mask);

	if (cmd_buffer->device->dfsm_allowed) {
		/* optimise this? */
		radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
	}

	if (pipeline->graphics.ps_input_cntl_num) {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028644_SPI_PS_INPUT_CNTL_0, pipeline->graphics.ps_input_cntl_num);
		for (unsigned i = 0; i < pipeline->graphics.ps_input_cntl_num; i++) {
			radeon_emit(cmd_buffer->cs, pipeline->graphics.ps_input_cntl[i]);
		}
	}
}

static void
radv_emit_vgt_vertex_reuse(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_pipeline *pipeline)
{
	struct radeon_winsys_cs *cs = cmd_buffer->cs;

	if (cmd_buffer->device->physical_device->rad_info.family < CHIP_POLARIS10)
		return;

	radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
			       pipeline->graphics.vtx_reuse_depth);
}

static void
radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;

	if (!pipeline || cmd_buffer->state.emitted_pipeline == pipeline)
		return;

	radv_emit_graphics_depth_stencil_state(cmd_buffer, pipeline);
	radv_emit_graphics_blend_state(cmd_buffer, pipeline);
	radv_emit_graphics_raster_state(cmd_buffer, pipeline);
	radv_update_multisample_state(cmd_buffer, pipeline);
	radv_emit_vertex_shader(cmd_buffer, pipeline);
	radv_emit_tess_shaders(cmd_buffer, pipeline);
	radv_emit_geometry_shader(cmd_buffer, pipeline);
	radv_emit_fragment_shader(cmd_buffer, pipeline);
	radv_emit_vgt_vertex_reuse(cmd_buffer, pipeline);

	cmd_buffer->scratch_size_needed =
		MAX2(cmd_buffer->scratch_size_needed,
		     pipeline->max_waves * pipeline->scratch_bytes_per_wave);

	radeon_set_context_reg(cmd_buffer->cs, R_0286E8_SPI_TMPRING_SIZE,
			       S_0286E8_WAVES(pipeline->max_waves) |
			       S_0286E8_WAVESIZE(pipeline->scratch_bytes_per_wave >> 10));

	if (!cmd_buffer->state.emitted_pipeline ||
	    cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband !=
	    pipeline->graphics.can_use_guardband)
		cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;

	radeon_set_context_reg(cmd_buffer->cs, R_028B54_VGT_SHADER_STAGES_EN, pipeline->graphics.vgt_shader_stages_en);

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
		radeon_set_uconfig_reg_idx(cmd_buffer->cs, R_030908_VGT_PRIMITIVE_TYPE, 1, pipeline->graphics.prim);
	} else {
		radeon_set_config_reg(cmd_buffer->cs, R_008958_VGT_PRIMITIVE_TYPE, pipeline->graphics.prim);
	}
	radeon_set_context_reg(cmd_buffer->cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, pipeline->graphics.gs_out);

	if (unlikely(cmd_buffer->device->trace_bo))
		radv_save_pipeline(cmd_buffer, pipeline, RING_GFX);

	cmd_buffer->state.emitted_pipeline = pipeline;

	cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_PIPELINE;
}

static void
radv_emit_viewport(struct radv_cmd_buffer *cmd_buffer)
{
	si_write_viewport(cmd_buffer->cs, 0, cmd_buffer->state.dynamic.viewport.count,
			  cmd_buffer->state.dynamic.viewport.viewports);
}

static void
radv_emit_scissor(struct radv_cmd_buffer *cmd_buffer)
{
	uint32_t count = cmd_buffer->state.dynamic.scissor.count;

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
		si_emit_cache_flush(cmd_buffer);
	}
	si_write_scissors(cmd_buffer->cs, 0, count,
			  cmd_buffer->state.dynamic.scissor.scissors,
			  cmd_buffer->state.dynamic.viewport.viewports,
			  cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband);
	radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0,
			       cmd_buffer->state.pipeline->graphics.ms.pa_sc_mode_cntl_0 | S_028A48_VPORT_SCISSOR_ENABLE(count ? 1 : 0));
}

static void
radv_emit_line_width(struct radv_cmd_buffer *cmd_buffer)
{
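	/* PA_SU_LINE_CNTL.WIDTH appears to hold the half line width in 12.4
	 * fixed point, which works out to the full width in pixels times 8. */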
	unsigned width = cmd_buffer->state.dynamic.line_width * 8;

	radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL,
			       S_028A08_WIDTH(CLAMP(width, 0, 0xFFF)));
}

static void
radv_emit_blend_constants(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028414_CB_BLEND_RED, 4);
	radeon_emit_array(cmd_buffer->cs, (uint32_t *)d->blend_constants, 4);
}

static void
radv_emit_stencil(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;

	radeon_set_context_reg_seq(cmd_buffer->cs,
				   R_028430_DB_STENCILREFMASK, 2);
	radeon_emit(cmd_buffer->cs,
		    S_028430_STENCILTESTVAL(d->stencil_reference.front) |
		    S_028430_STENCILMASK(d->stencil_compare_mask.front) |
		    S_028430_STENCILWRITEMASK(d->stencil_write_mask.front) |
		    S_028430_STENCILOPVAL(1));
	radeon_emit(cmd_buffer->cs,
		    S_028434_STENCILTESTVAL_BF(d->stencil_reference.back) |
		    S_028434_STENCILMASK_BF(d->stencil_compare_mask.back) |
		    S_028434_STENCILWRITEMASK_BF(d->stencil_write_mask.back) |
		    S_028434_STENCILOPVAL_BF(1));
}

static void
radv_emit_depth_bounds(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;

	radeon_set_context_reg(cmd_buffer->cs, R_028020_DB_DEPTH_BOUNDS_MIN,
			       fui(d->depth_bounds.min));
	radeon_set_context_reg(cmd_buffer->cs, R_028024_DB_DEPTH_BOUNDS_MAX,
			       fui(d->depth_bounds.max));
}

static void
radv_emit_depth_bias(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_raster_state *raster = &cmd_buffer->state.pipeline->graphics.raster;
	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
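	/* The hardware polygon offset scale seems to be expressed in units of
	 * 1/16th, hence the slope factor times 16. */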
	unsigned slope = fui(d->depth_bias.slope * 16.0f);
	unsigned bias = fui(d->depth_bias.bias * cmd_buffer->state.offset_scale);

	if (G_028814_POLY_OFFSET_FRONT_ENABLE(raster->pa_su_sc_mode_cntl)) {
		radeon_set_context_reg_seq(cmd_buffer->cs,
					   R_028B7C_PA_SU_POLY_OFFSET_CLAMP, 5);
		radeon_emit(cmd_buffer->cs, fui(d->depth_bias.clamp)); /* CLAMP */
		radeon_emit(cmd_buffer->cs, slope); /* FRONT SCALE */
		radeon_emit(cmd_buffer->cs, bias); /* FRONT OFFSET */
		radeon_emit(cmd_buffer->cs, slope); /* BACK SCALE */
		radeon_emit(cmd_buffer->cs, bias); /* BACK OFFSET */
	}
}

static void
radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
			 int index,
			 struct radv_color_buffer_info *cb)
{
	bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI;

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
		radeon_emit(cmd_buffer->cs, cb->cb_color_base);
		radeon_emit(cmd_buffer->cs, cb->cb_color_base >> 32);
		radeon_emit(cmd_buffer->cs, cb->cb_color_attrib2);
		radeon_emit(cmd_buffer->cs, cb->cb_color_view);
		radeon_emit(cmd_buffer->cs, cb->cb_color_info);
		radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
		radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
		radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
		radeon_emit(cmd_buffer->cs, cb->cb_color_cmask >> 32);
		radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
		radeon_emit(cmd_buffer->cs, cb->cb_color_fmask >> 32);

		radeon_set_context_reg_seq(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, 2);
		radeon_emit(cmd_buffer->cs, cb->cb_dcc_base);
		radeon_emit(cmd_buffer->cs, cb->cb_dcc_base >> 32);

		radeon_set_context_reg(cmd_buffer->cs, R_0287A0_CB_MRT0_EPITCH + index * 4,
				       cb->gfx9_epitch);
	} else {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
		radeon_emit(cmd_buffer->cs, cb->cb_color_base);
		radeon_emit(cmd_buffer->cs, cb->cb_color_pitch);
		radeon_emit(cmd_buffer->cs, cb->cb_color_slice);
		radeon_emit(cmd_buffer->cs, cb->cb_color_view);
		radeon_emit(cmd_buffer->cs, cb->cb_color_info);
		radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
		radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
		radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
		radeon_emit(cmd_buffer->cs, cb->cb_color_cmask_slice);
		radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
		radeon_emit(cmd_buffer->cs, cb->cb_color_fmask_slice);

		if (is_vi) { /* DCC BASE */
			radeon_set_context_reg(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, cb->cb_dcc_base);
		}
	}
}

static void
radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer,
		      struct radv_ds_buffer_info *ds,
		      struct radv_image *image,
		      VkImageLayout layout)
{
	uint32_t db_z_info = ds->db_z_info;
	uint32_t db_stencil_info = ds->db_stencil_info;

	if (!radv_layout_has_htile(image, layout,
				   radv_image_queue_family_mask(image,
								cmd_buffer->queue_family_index,
								cmd_buffer->queue_family_index))) {
		db_z_info &= C_028040_TILE_SURFACE_ENABLE;
		db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
	}

	radeon_set_context_reg(cmd_buffer->cs, R_028008_DB_DEPTH_VIEW, ds->db_depth_view);
	radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface);

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, 3);
		radeon_emit(cmd_buffer->cs, ds->db_htile_data_base);
		radeon_emit(cmd_buffer->cs, ds->db_htile_data_base >> 32);
		radeon_emit(cmd_buffer->cs, ds->db_depth_size);

		radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 10);
		radeon_emit(cmd_buffer->cs, db_z_info); /* DB_Z_INFO */
		radeon_emit(cmd_buffer->cs, db_stencil_info); /* DB_STENCIL_INFO */
		radeon_emit(cmd_buffer->cs, ds->db_z_read_base); /* DB_Z_READ_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_z_read_base >> 32); /* DB_Z_READ_BASE_HI */
		radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base); /* DB_STENCIL_READ_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base >> 32); /* DB_STENCIL_READ_BASE_HI */
		radeon_emit(cmd_buffer->cs, ds->db_z_write_base); /* DB_Z_WRITE_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_z_write_base >> 32); /* DB_Z_WRITE_BASE_HI */
		radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base); /* DB_STENCIL_WRITE_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base >> 32); /* DB_STENCIL_WRITE_BASE_HI */

		radeon_set_context_reg_seq(cmd_buffer->cs, R_028068_DB_Z_INFO2, 2);
		radeon_emit(cmd_buffer->cs, ds->db_z_info2);
		radeon_emit(cmd_buffer->cs, ds->db_stencil_info2);
	} else {
		radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base);

		radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 9);
		radeon_emit(cmd_buffer->cs, ds->db_depth_info); /* R_02803C_DB_DEPTH_INFO */
		radeon_emit(cmd_buffer->cs, db_z_info); /* R_028040_DB_Z_INFO */
		radeon_emit(cmd_buffer->cs, db_stencil_info); /* R_028044_DB_STENCIL_INFO */
		radeon_emit(cmd_buffer->cs, ds->db_z_read_base); /* R_028048_DB_Z_READ_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base); /* R_02804C_DB_STENCIL_READ_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_z_write_base); /* R_028050_DB_Z_WRITE_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base); /* R_028054_DB_STENCIL_WRITE_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_depth_size); /* R_028058_DB_DEPTH_SIZE */
		radeon_emit(cmd_buffer->cs, ds->db_depth_slice); /* R_02805C_DB_DEPTH_SLICE */
	}

	radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
			       ds->pa_su_poly_offset_db_fmt_cntl);
}

void
radv_set_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_image *image,
			  VkClearDepthStencilValue ds_clear_value,
			  VkImageAspectFlags aspects)
{
	uint64_t va = radv_buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;
	unsigned reg_offset = 0, reg_count = 0;

	if (!image->surface.htile_size || !aspects)
		return;

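	/* The clear-value slot in the image mirrors the adjacent
	 * DB_STENCIL_CLEAR/DB_DEPTH_CLEAR register pair: the stencil word is
	 * at +0 and the depth word at +4, so a depth-only clear skips the
	 * first word. */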
	if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
		++reg_count;
	} else {
		++reg_offset;
		va += 4;
	}
	if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
		++reg_count;

	radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, image->bo, 8);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
	radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
				    S_370_WR_CONFIRM(1) |
				    S_370_ENGINE_SEL(V_370_PFP));
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
		radeon_emit(cmd_buffer->cs, ds_clear_value.stencil);
	if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
		radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth));

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR + 4 * reg_offset, reg_count);
	if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
		radeon_emit(cmd_buffer->cs, ds_clear_value.stencil); /* R_028028_DB_STENCIL_CLEAR */
	if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
		radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth)); /* R_02802C_DB_DEPTH_CLEAR */
}

static void
radv_load_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_image *image)
{
	uint64_t va = radv_buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;

	if (!image->surface.htile_size)
		return;

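	/* Copy the saved clear values from memory into the DB clear
	 * registers, then emit PFP_SYNC_ME so the PFP waits for the ME to
	 * finish the copy before fetching any further packets. */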
	radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
	radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
				    COPY_DATA_DST_SEL(COPY_DATA_REG) |
				    COPY_DATA_COUNT_SEL);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR >> 2);
	radeon_emit(cmd_buffer->cs, 0);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
	radeon_emit(cmd_buffer->cs, 0);
}

/*
 * With DCC, some clear colors don't require a CMASK elimination pass before
 * the image is used as a texture. This writes a predicate value that tells
 * the GPU whether the CMASK eliminate is required.
 */
void
radv_set_dcc_need_cmask_elim_pred(struct radv_cmd_buffer *cmd_buffer,
				  struct radv_image *image,
				  bool value)
{
	uint64_t pred_val = value;
	uint64_t va = radv_buffer_get_va(image->bo);
	va += image->offset + image->dcc_pred_offset;

	if (!image->surface.dcc_size)
		return;

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
	radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
				    S_370_WR_CONFIRM(1) |
				    S_370_ENGINE_SEL(V_370_PFP));
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, pred_val);
	radeon_emit(cmd_buffer->cs, pred_val >> 32);
}

void
radv_set_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_image *image,
			  int idx,
			  uint32_t color_values[2])
{
	uint64_t va = radv_buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;

	if (!image->cmask.size && !image->surface.dcc_size)
		return;

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
	radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
				    S_370_WR_CONFIRM(1) |
				    S_370_ENGINE_SEL(V_370_PFP));
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, color_values[0]);
	radeon_emit(cmd_buffer->cs, color_values[1]);

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c, 2);
	radeon_emit(cmd_buffer->cs, color_values[0]);
	radeon_emit(cmd_buffer->cs, color_values[1]);
}

static void
radv_load_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_image *image,
			   int idx)
{
	uint64_t va = radv_buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;

	if (!image->cmask.size && !image->surface.dcc_size)
		return;

	uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c;

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
	radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
				    COPY_DATA_DST_SEL(COPY_DATA_REG) |
				    COPY_DATA_COUNT_SEL);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, reg >> 2);
	radeon_emit(cmd_buffer->cs, 0);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
	radeon_emit(cmd_buffer->cs, 0);
}

static void
radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
{
	int i;
	struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
	const struct radv_subpass *subpass = cmd_buffer->state.subpass;

	/* This may happen for inherited secondary command buffer recording. */
1443 if (!framebuffer)
1444 return;
1445
1446 for (i = 0; i < 8; ++i) {
1447 if (i >= subpass->color_count || subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
1448 radeon_set_context_reg(cmd_buffer->cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
1449 S_028C70_FORMAT(V_028C70_COLOR_INVALID));
1450 continue;
1451 }
1452
1453 int idx = subpass->color_attachments[i].attachment;
1454 struct radv_attachment_info *att = &framebuffer->attachments[idx];
1455
1456 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo, 8);
1457
1458 assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
1459 radv_emit_fb_color_state(cmd_buffer, i, &att->cb);
1460
1461 radv_load_color_clear_regs(cmd_buffer, att->attachment->image, i);
1462 }
1463
1464 if(subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
1465 int idx = subpass->depth_stencil_attachment.attachment;
1466 VkImageLayout layout = subpass->depth_stencil_attachment.layout;
1467 struct radv_attachment_info *att = &framebuffer->attachments[idx];
1468 struct radv_image *image = att->attachment->image;
1469 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo, 8);
1470 MAYBE_UNUSED uint32_t queue_mask = radv_image_queue_family_mask(image,
1471 cmd_buffer->queue_family_index,
1472 cmd_buffer->queue_family_index);
1473 /* We currently don't support writing decompressed HTILE */
1474 assert(radv_layout_has_htile(image, layout, queue_mask) ==
1475 radv_layout_is_htile_compressed(image, layout, queue_mask));
1476
1477 radv_emit_fb_ds_state(cmd_buffer, &att->ds, image, layout);
1478
1479 if (att->ds.offset_scale != cmd_buffer->state.offset_scale) {
1480 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
1481 cmd_buffer->state.offset_scale = att->ds.offset_scale;
1482 }
1483 radv_load_depth_clear_regs(cmd_buffer, image);
1484 } else {
1485 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
1486 radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 2);
1487 else
1488 radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2);
1489
1490 radeon_emit(cmd_buffer->cs, S_028040_FORMAT(V_028040_Z_INVALID)); /* DB_Z_INFO */
1491 radeon_emit(cmd_buffer->cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* DB_STENCIL_INFO */
1492 }
1493 radeon_set_context_reg(cmd_buffer->cs, R_028208_PA_SC_WINDOW_SCISSOR_BR,
1494 S_028208_BR_X(framebuffer->width) |
1495 S_028208_BR_Y(framebuffer->height));
1496
1497 if (cmd_buffer->device->dfsm_allowed) {
1498 radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1499 radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
1500 }
1501
1502 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_FRAMEBUFFER;
1503 }
1504
1505 static void
1506 radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer)
1507 {
1508 struct radeon_winsys_cs *cs = cmd_buffer->cs;
1509
1510 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
1511 radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE,
1512 2, cmd_buffer->state.index_type);
1513 } else {
1514 radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
1515 radeon_emit(cs, cmd_buffer->state.index_type);
1516 }
1517
1518 radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
1519 radeon_emit(cs, cmd_buffer->state.index_va);
1520 radeon_emit(cs, cmd_buffer->state.index_va >> 32);
1521
1522 radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
1523 radeon_emit(cs, cmd_buffer->state.max_index_count);
1524
1525 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_INDEX_BUFFER;
1526 }
1527
1528 void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
1529 {
1530 uint32_t db_count_control;
1531
1532 if(!cmd_buffer->state.active_occlusion_queries) {
1533 if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
1534 db_count_control = 0;
1535 } else {
1536 db_count_control = S_028004_ZPASS_INCREMENT_DISABLE(1);
1537 }
1538 } else {
1539 if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
1540 db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
1541 S_028004_SAMPLE_RATE(0) | /* TODO: set this to the number of samples of the current framebuffer */
1542 S_028004_ZPASS_ENABLE(1) |
1543 S_028004_SLICE_EVEN_ENABLE(1) |
1544 S_028004_SLICE_ODD_ENABLE(1);
1545 } else {
1546 db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
1547 S_028004_SAMPLE_RATE(0); /* TODO: set this to the number of samples of the current framebuffer */
1548 }
1549 }
1550
1551 radeon_set_context_reg(cmd_buffer->cs, R_028004_DB_COUNT_CONTROL, db_count_control);
1552 }
1553
1554 static void
1555 radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer)
1556 {
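/* If rasterization is killed for this pipeline, none of the dynamic
 * rasterizer state can affect the outcome, so skip emitting it.
 */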
1557 if (G_028810_DX_RASTERIZATION_KILL(cmd_buffer->state.pipeline->graphics.raster.pa_cl_clip_cntl))
1558 return;
1559
1560 if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
1561 radv_emit_viewport(cmd_buffer);
1562
1563 if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR | RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
1564 radv_emit_scissor(cmd_buffer);
1565
1566 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)
1567 radv_emit_line_width(cmd_buffer);
1568
1569 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS)
1570 radv_emit_blend_constants(cmd_buffer);
1571
1572 if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE |
1573 RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
1574 RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK))
1575 radv_emit_stencil(cmd_buffer);
1576
1577 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS)
1578 radv_emit_depth_bounds(cmd_buffer);
1579
1580 if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_PIPELINE |
1581 RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS))
1582 radv_emit_depth_biais(cmd_buffer);
1583
1584 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_DYNAMIC_ALL;
1585 }
1586
1587 static void
1588 emit_stage_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer,
1589 struct radv_pipeline *pipeline,
1590 int idx,
1591 uint64_t va,
1592 gl_shader_stage stage)
1593 {
1594 struct ac_userdata_info *desc_set_loc = &pipeline->shaders[stage]->info.user_sgprs_locs.descriptor_sets[idx];
1595 uint32_t base_reg = pipeline->user_data_0[stage];
1596
1597 if (desc_set_loc->sgpr_idx == -1 || desc_set_loc->indirect)
1598 return;
1599
1600 assert(!desc_set_loc->indirect);
1601 assert(desc_set_loc->num_sgprs == 2);
1602 radeon_set_sh_reg_seq(cmd_buffer->cs,
1603 base_reg + desc_set_loc->sgpr_idx * 4, 2);
1604 radeon_emit(cmd_buffer->cs, va);
1605 radeon_emit(cmd_buffer->cs, va >> 32);
1606 }
1607
1608 static void
1609 radv_emit_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer,
1610 VkShaderStageFlags stages,
1611 struct radv_descriptor_set *set,
1612 unsigned idx)
1613 {
1614 if (cmd_buffer->state.pipeline) {
1615 radv_foreach_stage(stage, stages) {
1616 if (cmd_buffer->state.pipeline->shaders[stage])
1617 emit_stage_descriptor_set_userdata(cmd_buffer, cmd_buffer->state.pipeline,
1618 idx, set->va,
1619 stage);
1620 }
1621 }
1622
1623 if (cmd_buffer->state.compute_pipeline && (stages & VK_SHADER_STAGE_COMPUTE_BIT))
1624 emit_stage_descriptor_set_userdata(cmd_buffer, cmd_buffer->state.compute_pipeline,
1625 idx, set->va,
1626 MESA_SHADER_COMPUTE);
1627 }
1628
1629 static void
1630 radv_flush_push_descriptors(struct radv_cmd_buffer *cmd_buffer)
1631 {
1632 struct radv_descriptor_set *set = &cmd_buffer->push_descriptors.set;
1633 unsigned bo_offset;
1634
1635 if (!radv_cmd_buffer_upload_data(cmd_buffer, set->size, 32,
1636 set->mapped_ptr,
1637 &bo_offset))
1638 return;
1639
1640 set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
1641 set->va += bo_offset;
1642 }
1643
1644 static void
1645 radv_flush_indirect_descriptor_sets(struct radv_cmd_buffer *cmd_buffer)
1646 {
1647 uint32_t size = MAX_SETS * 2 * 4;
1648 uint32_t offset;
1649 void *ptr;
1650
1651 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size,
1652 256, &offset, &ptr))
1653 return;
1654
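/* Build a table of MAX_SETS entries, each holding the 64-bit GPU
 * address of the corresponding descriptor set (or 0 if unbound),
 * split into two dwords.
 */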
1655 for (unsigned i = 0; i < MAX_SETS; i++) {
1656 uint32_t *uptr = ((uint32_t *)ptr) + i * 2;
1657 uint64_t set_va = 0;
1658 struct radv_descriptor_set *set = cmd_buffer->descriptors[i];
1659 if (cmd_buffer->state.valid_descriptors & (1u << i))
1660 set_va = set->va;
1661 uptr[0] = set_va & 0xffffffff;
1662 uptr[1] = set_va >> 32;
1663 }
1664
1665 uint64_t va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
1666 va += offset;
1667
1668 if (cmd_buffer->state.pipeline) {
1669 if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX])
1670 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
1671 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1672
1673 if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_FRAGMENT])
1674 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_FRAGMENT,
1675 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1676
1677 if (radv_pipeline_has_gs(cmd_buffer->state.pipeline))
1678 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
1679 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1680
1681 if (radv_pipeline_has_tess(cmd_buffer->state.pipeline))
1682 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_TESS_CTRL,
1683 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1684
1685 if (radv_pipeline_has_tess(cmd_buffer->state.pipeline))
1686 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_TESS_EVAL,
1687 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1688 }
1689
1690 if (cmd_buffer->state.compute_pipeline)
1691 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.compute_pipeline, MESA_SHADER_COMPUTE,
1692 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1693 }
1694
1695 static void
1696 radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer,
1697 VkShaderStageFlags stages)
1698 {
1699 unsigned i;
1700
1701 if (!cmd_buffer->state.descriptors_dirty)
1702 return;
1703
1704 if (cmd_buffer->state.push_descriptors_dirty)
1705 radv_flush_push_descriptors(cmd_buffer);
1706
1707 if ((cmd_buffer->state.pipeline && cmd_buffer->state.pipeline->need_indirect_descriptor_sets) ||
1708 (cmd_buffer->state.compute_pipeline && cmd_buffer->state.compute_pipeline->need_indirect_descriptor_sets)) {
1709 radv_flush_indirect_descriptor_sets(cmd_buffer);
1710 }
1711
1712 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
1713 cmd_buffer->cs,
1714 MAX_SETS * MESA_SHADER_STAGES * 4);
1715
1716 for_each_bit(i, cmd_buffer->state.descriptors_dirty) {
1717 struct radv_descriptor_set *set = cmd_buffer->descriptors[i];
1718 if (!(cmd_buffer->state.valid_descriptors & (1u << i)))
1719 continue;
1720
1721 radv_emit_descriptor_set_userdata(cmd_buffer, stages, set, i);
1722 }
1723 cmd_buffer->state.descriptors_dirty = 0;
1724 cmd_buffer->state.push_descriptors_dirty = false;
1725
1726 if (unlikely(cmd_buffer->device->trace_bo))
1727 radv_save_descriptors(cmd_buffer);
1728
1729 assert(cmd_buffer->cs->cdw <= cdw_max);
1730 }
1731
1732 static void
1733 radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
1734 struct radv_pipeline *pipeline,
1735 VkShaderStageFlags stages)
1736 {
1737 struct radv_pipeline_layout *layout = pipeline->layout;
1738 unsigned offset;
1739 void *ptr;
1740 uint64_t va;
1741
1742 stages &= cmd_buffer->push_constant_stages;
1743 if (!stages || !layout || (!layout->push_constant_size && !layout->dynamic_offset_count))
1744 return;
1745
1746 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
1747 16 * layout->dynamic_offset_count,
1748 256, &offset, &ptr))
1749 return;
1750
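/* The upload holds the raw push constants followed by one 16-byte
 * buffer descriptor per dynamic offset.
 */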
1751 memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
1752 memcpy((char*)ptr + layout->push_constant_size, cmd_buffer->dynamic_buffers,
1753 16 * layout->dynamic_offset_count);
1754
1755 va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
1756 va += offset;
1757
1758 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
1759 cmd_buffer->cs, MESA_SHADER_STAGES * 4);
1760
1761 radv_foreach_stage(stage, stages) {
1762 if (pipeline->shaders[stage]) {
1763 radv_emit_userdata_address(cmd_buffer, pipeline, stage,
1764 AC_UD_PUSH_CONSTANTS, va);
1765 }
1766 }
1767
1768 cmd_buffer->push_constant_stages &= ~stages;
1769 assert(cmd_buffer->cs->cdw <= cdw_max);
1770 }
1771
1772 static bool
1773 radv_cmd_buffer_update_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty)
1774 {
1775 struct radv_device *device = cmd_buffer->device;
1776
1777 if ((pipeline_is_dirty || cmd_buffer->state.vb_dirty) &&
1778 cmd_buffer->state.pipeline->vertex_elements.count &&
1779 radv_get_vertex_shader(cmd_buffer->state.pipeline)->info.info.vs.has_vertex_buffers) {
1780 struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements;
1781 unsigned vb_offset;
1782 void *vb_ptr;
1783 uint32_t i = 0;
1784 uint32_t count = velems->count;
1785 uint64_t va;
1786
1787 /* Allocate upload space for one 16-byte vertex buffer descriptor per vertex element. */
1788 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, count * 16, 256,
1789 &vb_offset, &vb_ptr))
1790 return false;
1791
1792 for (i = 0; i < count; i++) {
1793 uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
1794 uint32_t offset;
1795 int vb = velems->binding[i];
1796 struct radv_buffer *buffer = cmd_buffer->vertex_bindings[vb].buffer;
1797 uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb];
1798
1799 radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer->bo, 8);
1800 va = radv_buffer_get_va(buffer->bo);
1801
1802 offset = cmd_buffer->vertex_bindings[vb].offset + velems->offset[i];
1803 va += offset + buffer->offset;
1804 desc[0] = va;
1805 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
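/* On SI/CIK, NUM_RECORDS is counted in units of the stride when the
 * stride is non-zero, so store the number of whole vertices that can
 * be fetched; otherwise the remaining size in bytes is used.
 */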
1806 if (cmd_buffer->device->physical_device->rad_info.chip_class <= CIK && stride)
1807 desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1;
1808 else
1809 desc[2] = buffer->size - offset;
1810 desc[3] = velems->rsrc_word3[i];
1811 }
1812
1813 va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
1814 va += vb_offset;
1815
1816 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
1817 AC_UD_VS_VERTEX_BUFFERS, va);
1818
1819 cmd_buffer->state.vb_va = va;
1820 cmd_buffer->state.vb_size = count * 16;
1821 cmd_buffer->state.vb_prefetch_dirty = true;
1822 }
1823 cmd_buffer->state.vb_dirty = false;
1824
1825 return true;
1826 }
1827
1828 static bool
1829 radv_upload_graphics_shader_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty)
1830 {
1831 if (!radv_cmd_buffer_update_vertex_descriptors(cmd_buffer, pipeline_is_dirty))
1832 return false;
1833
1834 radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
1835 radv_flush_constants(cmd_buffer, cmd_buffer->state.pipeline,
1836 VK_SHADER_STAGE_ALL_GRAPHICS);
1837
1838 return true;
1839 }
1840
1841 static void
1842 radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, bool indexed_draw,
1843 bool instanced_draw, bool indirect_draw,
1844 uint32_t draw_vertex_count)
1845 {
1846 struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
1847 struct radv_cmd_state *state = &cmd_buffer->state;
1848 struct radeon_winsys_cs *cs = cmd_buffer->cs;
1849 uint32_t ia_multi_vgt_param;
1850 int32_t primitive_reset_en;
1851
1852 /* Draw state. */
1853 ia_multi_vgt_param =
1854 si_get_ia_multi_vgt_param(cmd_buffer, instanced_draw,
1855 indirect_draw, draw_vertex_count);
1856
1857 if (state->last_ia_multi_vgt_param != ia_multi_vgt_param) {
1858 if (info->chip_class >= GFX9) {
1859 radeon_set_uconfig_reg_idx(cs,
1860 R_030960_IA_MULTI_VGT_PARAM,
1861 4, ia_multi_vgt_param);
1862 } else if (info->chip_class >= CIK) {
1863 radeon_set_context_reg_idx(cs,
1864 R_028AA8_IA_MULTI_VGT_PARAM,
1865 1, ia_multi_vgt_param);
1866 } else {
1867 radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM,
1868 ia_multi_vgt_param);
1869 }
1870 state->last_ia_multi_vgt_param = ia_multi_vgt_param;
1871 }
1872
1873 /* Primitive restart. */
1874 primitive_reset_en =
1875 indexed_draw && state->pipeline->graphics.prim_restart_enable;
1876
1877 if (primitive_reset_en != state->last_primitive_reset_en) {
1878 state->last_primitive_reset_en = primitive_reset_en;
1879 if (info->chip_class >= GFX9) {
1880 radeon_set_uconfig_reg(cs,
1881 R_03092C_VGT_MULTI_PRIM_IB_RESET_EN,
1882 primitive_reset_en);
1883 } else {
1884 radeon_set_context_reg(cs,
1885 R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
1886 primitive_reset_en);
1887 }
1888 }
1889
1890 if (primitive_reset_en) {
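/* The restart index must be all ones for the current index size:
 * 0xffffffff for 32-bit indices, 0xffff for 16-bit ones.
 */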
1891 uint32_t primitive_reset_index =
1892 state->index_type ? 0xffffffffu : 0xffffu;
1893
1894 if (primitive_reset_index != state->last_primitive_reset_index) {
1895 radeon_set_context_reg(cs,
1896 R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
1897 primitive_reset_index);
1898 state->last_primitive_reset_index = primitive_reset_index;
1899 }
1900 }
1901 }
1902
1903 static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer,
1904 VkPipelineStageFlags src_stage_mask)
1905 {
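/* Translate the source stage mask into the coarse waits the hardware
 * supports: a CS partial flush for anything that may include compute
 * work, and a PS or VS partial flush for graphics work depending on
 * how deep into the pipeline it reaches.
 */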
1906 if (src_stage_mask & (VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
1907 VK_PIPELINE_STAGE_TRANSFER_BIT |
1908 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
1909 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
1910 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
1911 }
1912
1913 if (src_stage_mask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
1914 VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
1915 VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
1916 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
1917 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
1918 VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
1919 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
1920 VK_PIPELINE_STAGE_TRANSFER_BIT |
1921 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
1922 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT |
1923 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
1924 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
1925 } else if (src_stage_mask & (VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
1926 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
1927 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT)) {
1928 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
1929 }
1930 }
1931
1932 static enum radv_cmd_flush_bits
1933 radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer,
1934 VkAccessFlags src_flags)
1935 {
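/* Writes must flush the caches they may have dirtied: shader writes
 * go through the global L2, color/depth attachment writes through the
 * CB/DB caches, and transfers may use any of them.
 */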
1936 enum radv_cmd_flush_bits flush_bits = 0;
1937 uint32_t b;
1938 for_each_bit(b, src_flags) {
1939 switch ((VkAccessFlagBits)(1 << b)) {
1940 case VK_ACCESS_SHADER_WRITE_BIT:
1941 flush_bits |= RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
1942 break;
1943 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
1944 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
1945 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
1946 break;
1947 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
1948 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
1949 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
1950 break;
1951 case VK_ACCESS_TRANSFER_WRITE_BIT:
1952 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
1953 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
1954 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
1955 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
1956 RADV_CMD_FLAG_INV_GLOBAL_L2;
1957 break;
1958 default:
1959 break;
1960 }
1961 }
1962 return flush_bits;
1963 }
1964
1965 static enum radv_cmd_flush_bits
1966 radv_dst_access_flush(struct radv_cmd_buffer *cmd_buffer,
1967 VkAccessFlags dst_flags,
1968 struct radv_image *image)
1969 {
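/* Reads must invalidate any cache that could hold stale data;
 * indirect command, index and vertex fetches need no invalidation
 * here.
 */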
1970 enum radv_cmd_flush_bits flush_bits = 0;
1971 uint32_t b;
1972 for_each_bit(b, dst_flags) {
1973 switch ((VkAccessFlagBits)(1 << b)) {
1974 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
1975 case VK_ACCESS_INDEX_READ_BIT:
1976 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
1977 break;
1978 case VK_ACCESS_UNIFORM_READ_BIT:
1979 flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 | RADV_CMD_FLAG_INV_SMEM_L1;
1980 break;
1981 case VK_ACCESS_SHADER_READ_BIT:
1982 case VK_ACCESS_TRANSFER_READ_BIT:
1983 case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
1984 flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 |
1985 RADV_CMD_FLAG_INV_GLOBAL_L2;
1986 break;
1987 case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
1988 /* TODO: change to image && when the image gets passed
1989 * through from the subpass. */
1990 if (!image || (image->usage & VK_IMAGE_USAGE_STORAGE_BIT))
1991 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
1992 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
1993 break;
1994 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT:
1995 if (!image || (image->usage & VK_IMAGE_USAGE_STORAGE_BIT))
1996 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
1997 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
1998 break;
1999 default:
2000 break;
2001 }
2002 }
2003 return flush_bits;
2004 }
2005
2006 static void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer, const struct radv_subpass_barrier *barrier)
2007 {
2008 cmd_buffer->state.flush_bits |= radv_src_access_flush(cmd_buffer, barrier->src_access_mask);
2009 radv_stage_flush(cmd_buffer, barrier->src_stage_mask);
2010 cmd_buffer->state.flush_bits |= radv_dst_access_flush(cmd_buffer, barrier->dst_access_mask,
2011 NULL);
2012 }
2013
2014 static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer,
2015 VkAttachmentReference att)
2016 {
2017 unsigned idx = att.attachment;
2018 struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment;
2019 VkImageSubresourceRange range;
2020 range.aspectMask = 0;
2021 range.baseMipLevel = view->base_mip;
2022 range.levelCount = 1;
2023 range.baseArrayLayer = view->base_layer;
2024 range.layerCount = cmd_buffer->state.framebuffer->layers;
2025
2026 radv_handle_image_transition(cmd_buffer,
2027 view->image,
2028 cmd_buffer->state.attachments[idx].current_layout,
2029 att.layout, 0, 0, &range,
2030 cmd_buffer->state.attachments[idx].pending_clear_aspects);
2031
2032 cmd_buffer->state.attachments[idx].current_layout = att.layout;
2033
2035 }
2036
2037 void
2038 radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer,
2039 const struct radv_subpass *subpass, bool transitions)
2040 {
2041 if (transitions) {
2042 radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);
2043
2044 for (unsigned i = 0; i < subpass->color_count; ++i) {
2045 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
2046 radv_handle_subpass_image_transition(cmd_buffer,
2047 subpass->color_attachments[i]);
2048 }
2049
2050 for (unsigned i = 0; i < subpass->input_count; ++i) {
2051 radv_handle_subpass_image_transition(cmd_buffer,
2052 subpass->input_attachments[i]);
2053 }
2054
2055 if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
2056 radv_handle_subpass_image_transition(cmd_buffer,
2057 subpass->depth_stencil_attachment);
2058 }
2059 }
2060
2061 cmd_buffer->state.subpass = subpass;
2062
2063 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER;
2064 }
2065
2066 static VkResult
2067 radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer,
2068 struct radv_render_pass *pass,
2069 const VkRenderPassBeginInfo *info)
2070 {
2071 struct radv_cmd_state *state = &cmd_buffer->state;
2072
2073 if (pass->attachment_count == 0) {
2074 state->attachments = NULL;
2075 return VK_SUCCESS;
2076 }
2077
2078 state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
2079 pass->attachment_count *
2080 sizeof(state->attachments[0]),
2081 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2082 if (state->attachments == NULL) {
2083 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
2084 return cmd_buffer->record_result;
2085 }
2086
2087 for (uint32_t i = 0; i < pass->attachment_count; ++i) {
2088 struct radv_render_pass_attachment *att = &pass->attachments[i];
2089 VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
2090 VkImageAspectFlags clear_aspects = 0;
2091
2092 if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
2093 /* color attachment */
2094 if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2095 clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
2096 }
2097 } else {
2098 /* depthstencil attachment */
2099 if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
2100 att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2101 clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
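/* If stencil is DONT_CARE while depth is being cleared, clearing
 * stencil alongside it is permitted by the spec and lets the clear
 * path handle both aspects at once.
 */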
2102 if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
2103 att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE)
2104 clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
2105 }
2106 if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
2107 att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2108 clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
2109 }
2110 }
2111
2112 state->attachments[i].pending_clear_aspects = clear_aspects;
2113 state->attachments[i].cleared_views = 0;
2114 if (clear_aspects && info) {
2115 assert(info->clearValueCount > i);
2116 state->attachments[i].clear_value = info->pClearValues[i];
2117 }
2118
2119 state->attachments[i].current_layout = att->initial_layout;
2120 }
2121
2122 return VK_SUCCESS;
2123 }
2124
2125 VkResult radv_AllocateCommandBuffers(
2126 VkDevice _device,
2127 const VkCommandBufferAllocateInfo *pAllocateInfo,
2128 VkCommandBuffer *pCommandBuffers)
2129 {
2130 RADV_FROM_HANDLE(radv_device, device, _device);
2131 RADV_FROM_HANDLE(radv_cmd_pool, pool, pAllocateInfo->commandPool);
2132
2133 VkResult result = VK_SUCCESS;
2134 uint32_t i;
2135
2136 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
2137
2138 if (!list_empty(&pool->free_cmd_buffers)) {
2139 struct radv_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct radv_cmd_buffer, pool_link);
2140
2141 list_del(&cmd_buffer->pool_link);
2142 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
2143
2144 result = radv_reset_cmd_buffer(cmd_buffer);
2145 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
2146 cmd_buffer->level = pAllocateInfo->level;
2147
2148 pCommandBuffers[i] = radv_cmd_buffer_to_handle(cmd_buffer);
2149 } else {
2150 result = radv_create_cmd_buffer(device, pool, pAllocateInfo->level,
2151 &pCommandBuffers[i]);
2152 }
2153 if (result != VK_SUCCESS)
2154 break;
2155 }
2156
2157 if (result != VK_SUCCESS)
2158 radv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
2159 i, pCommandBuffers);
2160
2161 return result;
2162 }
2163
2164 void radv_FreeCommandBuffers(
2165 VkDevice device,
2166 VkCommandPool commandPool,
2167 uint32_t commandBufferCount,
2168 const VkCommandBuffer *pCommandBuffers)
2169 {
2170 for (uint32_t i = 0; i < commandBufferCount; i++) {
2171 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
2172
2173 if (cmd_buffer) {
2174 if (cmd_buffer->pool) {
2175 list_del(&cmd_buffer->pool_link);
2176 list_addtail(&cmd_buffer->pool_link, &cmd_buffer->pool->free_cmd_buffers);
2177 } else
2178 radv_cmd_buffer_destroy(cmd_buffer);
2179
2180 }
2181 }
2182 }
2183
2184 VkResult radv_ResetCommandBuffer(
2185 VkCommandBuffer commandBuffer,
2186 VkCommandBufferResetFlags flags)
2187 {
2188 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2189 return radv_reset_cmd_buffer(cmd_buffer);
2190 }
2191
2192 static void emit_gfx_buffer_state(struct radv_cmd_buffer *cmd_buffer)
2193 {
2194 struct radv_device *device = cmd_buffer->device;
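/* If the device has a pre-built initial GFX state buffer, chain to it
 * as an indirect buffer instead of re-emitting the full config here.
 */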
2195 if (device->gfx_init) {
2196 uint64_t va = radv_buffer_get_va(device->gfx_init);
2197 radv_cs_add_buffer(device->ws, cmd_buffer->cs, device->gfx_init, 8);
2198 radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
2199 radeon_emit(cmd_buffer->cs, va);
2200 radeon_emit(cmd_buffer->cs, va >> 32);
2201 radeon_emit(cmd_buffer->cs, device->gfx_init_size_dw & 0xffff);
2202 } else
2203 si_init_config(cmd_buffer);
2204 }
2205
2206 VkResult radv_BeginCommandBuffer(
2207 VkCommandBuffer commandBuffer,
2208 const VkCommandBufferBeginInfo *pBeginInfo)
2209 {
2210 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2211 VkResult result;
2212
2213 result = radv_reset_cmd_buffer(cmd_buffer);
2214 if (result != VK_SUCCESS)
2215 return result;
2216
2217 memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
2218 cmd_buffer->state.last_primitive_reset_en = -1;
2219 cmd_buffer->usage_flags = pBeginInfo->flags;
2220
2221 /* Set up the initial hardware state for the command buffer. */
2222 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
2223 switch (cmd_buffer->queue_family_index) {
2224 case RADV_QUEUE_GENERAL:
2225 emit_gfx_buffer_state(cmd_buffer);
2226 break;
2227 case RADV_QUEUE_COMPUTE:
2228 si_init_compute(cmd_buffer);
2229 break;
2230 case RADV_QUEUE_TRANSFER:
2231 default:
2232 break;
2233 }
2234 }
2235
2236 if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
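/* A secondary command buffer that continues a render pass inherits
 * the framebuffer, render pass and subpass from pInheritanceInfo.
 */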
2237 assert(pBeginInfo->pInheritanceInfo);
2238 cmd_buffer->state.framebuffer = radv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
2239 cmd_buffer->state.pass = radv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
2240
2241 struct radv_subpass *subpass =
2242 &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
2243
2244 result = radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
2245 if (result != VK_SUCCESS)
2246 return result;
2247
2248 radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
2249 }
2250
2251 if (unlikely(cmd_buffer->device->trace_bo))
2252 radv_cmd_buffer_trace_emit(cmd_buffer);
2253
2254 return result;
2255 }
2256
2257 void radv_CmdBindVertexBuffers(
2258 VkCommandBuffer commandBuffer,
2259 uint32_t firstBinding,
2260 uint32_t bindingCount,
2261 const VkBuffer* pBuffers,
2262 const VkDeviceSize* pOffsets)
2263 {
2264 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2265 struct radv_vertex_binding *vb = cmd_buffer->vertex_bindings;
2266 bool changed = false;
2267
2268 /* We have to defer setting up the vertex buffer descriptors since we
2269 * need the buffer stride from the bound pipeline. */
2270
2271 assert(firstBinding + bindingCount <= MAX_VBS);
2272 for (uint32_t i = 0; i < bindingCount; i++) {
2273 uint32_t idx = firstBinding + i;
2274
2275 if (!changed &&
2276 (vb[idx].buffer != radv_buffer_from_handle(pBuffers[i]) ||
2277 vb[idx].offset != pOffsets[i])) {
2278 changed = true;
2279 }
2280
2281 vb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
2282 vb[idx].offset = pOffsets[i];
2283 }
2284
2285 if (!changed) {
2286 /* No state changes. */
2287 return;
2288 }
2289
2290 cmd_buffer->state.vb_dirty = true;
2291 }
2292
2293 void radv_CmdBindIndexBuffer(
2294 VkCommandBuffer commandBuffer,
2295 VkBuffer buffer,
2296 VkDeviceSize offset,
2297 VkIndexType indexType)
2298 {
2299 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2300 RADV_FROM_HANDLE(radv_buffer, index_buffer, buffer);
2301
2302 if (cmd_buffer->state.index_buffer == index_buffer &&
2303 cmd_buffer->state.index_offset == offset &&
2304 cmd_buffer->state.index_type == indexType) {
2305 /* No state changes. */
2306 return;
2307 }
2308
2309 cmd_buffer->state.index_buffer = index_buffer;
2310 cmd_buffer->state.index_offset = offset;
2311 cmd_buffer->state.index_type = indexType; /* vk matches hw */
2312 cmd_buffer->state.index_va = radv_buffer_get_va(index_buffer->bo);
2313 cmd_buffer->state.index_va += index_buffer->offset + offset;
2314
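/* VK_INDEX_TYPE_UINT32 is 1 and UINT16 is 0, so this shift converts
 * the remaining byte size into an index count.
 */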
2315 int index_size_shift = cmd_buffer->state.index_type ? 2 : 1;
2316 cmd_buffer->state.max_index_count = (index_buffer->size - offset) >> index_size_shift;
2317 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
2318 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo, 8);
2319 }
2320
2321
2322 static void
2323 radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
2324 struct radv_descriptor_set *set, unsigned idx)
2325 {
2326 struct radeon_winsys *ws = cmd_buffer->device->ws;
2327
2328 radv_set_descriptor_set(cmd_buffer, set, idx);
2329 if (!set)
2330 return;
2331
2332 assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
2333
2334 for (unsigned j = 0; j < set->layout->buffer_count; ++j)
2335 if (set->descriptors[j])
2336 radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j], 7);
2337
2338 if (set->bo)
2339 radv_cs_add_buffer(ws, cmd_buffer->cs, set->bo, 8);
2340 }
2341
2342 void radv_CmdBindDescriptorSets(
2343 VkCommandBuffer commandBuffer,
2344 VkPipelineBindPoint pipelineBindPoint,
2345 VkPipelineLayout _layout,
2346 uint32_t firstSet,
2347 uint32_t descriptorSetCount,
2348 const VkDescriptorSet* pDescriptorSets,
2349 uint32_t dynamicOffsetCount,
2350 const uint32_t* pDynamicOffsets)
2351 {
2352 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2353 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2354 unsigned dyn_idx = 0;
2355
2356 for (unsigned i = 0; i < descriptorSetCount; ++i) {
2357 unsigned idx = i + firstSet;
2358 RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);
2359 radv_bind_descriptor_set(cmd_buffer, set, idx);
2360
2361 for (unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
2362 unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
2363 uint32_t *dst = cmd_buffer->dynamic_buffers + idx * 4;
2364 assert(dyn_idx < dynamicOffsetCount);
2365
2366 struct radv_descriptor_range *range = set->dynamic_descriptors + j;
2367 uint64_t va = range->va + pDynamicOffsets[dyn_idx];
2368 dst[0] = va;
2369 dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
2370 dst[2] = range->size;
2371 dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
2372 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
2373 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
2374 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
2375 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
2376 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
2377 cmd_buffer->push_constant_stages |=
2378 set->layout->dynamic_shader_stages;
2379 }
2380 }
2381 }
2382
2383 static bool radv_init_push_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
2384 struct radv_descriptor_set *set,
2385 struct radv_descriptor_set_layout *layout)
2386 {
2387 set->size = layout->size;
2388 set->layout = layout;
2389
2390 if (cmd_buffer->push_descriptors.capacity < set->size) {
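/* Grow the backing storage geometrically (at least doubling, starting
 * at 1024 bytes), capped at 96 bytes per descriptor for
 * MAX_PUSH_DESCRIPTORS descriptors.
 */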
2391 size_t new_size = MAX2(set->size, 1024);
2392 new_size = MAX2(new_size, 2 * cmd_buffer->push_descriptors.capacity);
2393 new_size = MIN2(new_size, 96 * MAX_PUSH_DESCRIPTORS);
2394
2395 free(set->mapped_ptr);
2396 set->mapped_ptr = malloc(new_size);
2397
2398 if (!set->mapped_ptr) {
2399 cmd_buffer->push_descriptors.capacity = 0;
2400 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
2401 return false;
2402 }
2403
2404 cmd_buffer->push_descriptors.capacity = new_size;
2405 }
2406
2407 return true;
2408 }
2409
2410 void radv_meta_push_descriptor_set(
2411 struct radv_cmd_buffer* cmd_buffer,
2412 VkPipelineBindPoint pipelineBindPoint,
2413 VkPipelineLayout _layout,
2414 uint32_t set,
2415 uint32_t descriptorWriteCount,
2416 const VkWriteDescriptorSet* pDescriptorWrites)
2417 {
2418 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2419 struct radv_descriptor_set *push_set = &cmd_buffer->meta_push_descriptors;
2420 unsigned bo_offset;
2421
2422 assert(set == 0);
2423 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
2424
2425 push_set->size = layout->set[set].layout->size;
2426 push_set->layout = layout->set[set].layout;
2427
2428 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, push_set->size, 32,
2429 &bo_offset,
2430 (void**) &push_set->mapped_ptr))
2431 return;
2432
2433 push_set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2434 push_set->va += bo_offset;
2435
2436 radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
2437 radv_descriptor_set_to_handle(push_set),
2438 descriptorWriteCount, pDescriptorWrites, 0, NULL);
2439
2440 radv_set_descriptor_set(cmd_buffer, push_set, set);
2441 }
2442
2443 void radv_CmdPushDescriptorSetKHR(
2444 VkCommandBuffer commandBuffer,
2445 VkPipelineBindPoint pipelineBindPoint,
2446 VkPipelineLayout _layout,
2447 uint32_t set,
2448 uint32_t descriptorWriteCount,
2449 const VkWriteDescriptorSet* pDescriptorWrites)
2450 {
2451 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2452 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2453 struct radv_descriptor_set *push_set = &cmd_buffer->push_descriptors.set;
2454
2455 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
2456
2457 if (!radv_init_push_descriptor_set(cmd_buffer, push_set, layout->set[set].layout))
2458 return;
2459
2460 radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
2461 radv_descriptor_set_to_handle(push_set),
2462 descriptorWriteCount, pDescriptorWrites, 0, NULL);
2463
2464 radv_set_descriptor_set(cmd_buffer, push_set, set);
2465 cmd_buffer->state.push_descriptors_dirty = true;
2466 }
2467
2468 void radv_CmdPushDescriptorSetWithTemplateKHR(
2469 VkCommandBuffer commandBuffer,
2470 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
2471 VkPipelineLayout _layout,
2472 uint32_t set,
2473 const void* pData)
2474 {
2475 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2476 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2477 struct radv_descriptor_set *push_set = &cmd_buffer->push_descriptors.set;
2478
2479 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
2480
2481 if (!radv_init_push_descriptor_set(cmd_buffer, push_set, layout->set[set].layout))
2482 return;
2483
2484 radv_update_descriptor_set_with_template(cmd_buffer->device, cmd_buffer, push_set,
2485 descriptorUpdateTemplate, pData);
2486
2487 radv_set_descriptor_set(cmd_buffer, push_set, set);
2488 cmd_buffer->state.push_descriptors_dirty = true;
2489 }
2490
2491 void radv_CmdPushConstants(VkCommandBuffer commandBuffer,
2492 VkPipelineLayout layout,
2493 VkShaderStageFlags stageFlags,
2494 uint32_t offset,
2495 uint32_t size,
2496 const void* pValues)
2497 {
2498 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2499 memcpy(cmd_buffer->push_constants + offset, pValues, size);
2500 cmd_buffer->push_constant_stages |= stageFlags;
2501 }
2502
2503 VkResult radv_EndCommandBuffer(
2504 VkCommandBuffer commandBuffer)
2505 {
2506 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2507
2508 if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) {
2509 if (cmd_buffer->device->physical_device->rad_info.chip_class == SI)
2510 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
2511 si_emit_cache_flush(cmd_buffer);
2512 }
2513
2514 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
2515
2516 if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
2517 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
2518
2519 return cmd_buffer->record_result;
2520 }
2521
2522 static void
2523 radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer)
2524 {
2525 struct radv_shader_variant *compute_shader;
2526 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
2527 uint64_t va;
2528
2529 if (!pipeline || pipeline == cmd_buffer->state.emitted_compute_pipeline)
2530 return;
2531
2532 cmd_buffer->state.emitted_compute_pipeline = pipeline;
2533
2534 compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
2535 va = radv_buffer_get_va(compute_shader->bo) + compute_shader->bo_offset;
2536
2537 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
2538 cmd_buffer->cs, 16);
2539
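/* The shader VA is 256-byte aligned: PGM_LO takes bits [39:8] and
 * PGM_HI the bits above.
 */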
2540 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B830_COMPUTE_PGM_LO, 2);
2541 radeon_emit(cmd_buffer->cs, va >> 8);
2542 radeon_emit(cmd_buffer->cs, va >> 40);
2543
2544 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
2545 radeon_emit(cmd_buffer->cs, compute_shader->rsrc1);
2546 radeon_emit(cmd_buffer->cs, compute_shader->rsrc2);
2547
2549 cmd_buffer->compute_scratch_size_needed =
2550 MAX2(cmd_buffer->compute_scratch_size_needed,
2551 pipeline->max_waves * pipeline->scratch_bytes_per_wave);
2552
2553 /* change these once we have scratch support */
2554 radeon_set_sh_reg(cmd_buffer->cs, R_00B860_COMPUTE_TMPRING_SIZE,
2555 S_00B860_WAVES(pipeline->max_waves) |
2556 S_00B860_WAVESIZE(pipeline->scratch_bytes_per_wave >> 10));
2557
2558 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
2559 radeon_emit(cmd_buffer->cs,
2560 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]));
2561 radeon_emit(cmd_buffer->cs,
2562 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[1]));
2563 radeon_emit(cmd_buffer->cs,
2564 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]));
2565
2566 assert(cmd_buffer->cs->cdw <= cdw_max);
2567
2568 if (unlikely(cmd_buffer->device->trace_bo))
2569 radv_save_pipeline(cmd_buffer, pipeline, RING_COMPUTE);
2570 }
2571
2572 static void radv_mark_descriptor_sets_dirty(struct radv_cmd_buffer *cmd_buffer)
2573 {
2574 cmd_buffer->state.descriptors_dirty |= cmd_buffer->state.valid_descriptors;
2575 }
2576
2577 void radv_CmdBindPipeline(
2578 VkCommandBuffer commandBuffer,
2579 VkPipelineBindPoint pipelineBindPoint,
2580 VkPipeline _pipeline)
2581 {
2582 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2583 RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
2584
2585 switch (pipelineBindPoint) {
2586 case VK_PIPELINE_BIND_POINT_COMPUTE:
2587 if (cmd_buffer->state.compute_pipeline == pipeline)
2588 return;
2589 radv_mark_descriptor_sets_dirty(cmd_buffer);
2590
2591 cmd_buffer->state.compute_pipeline = pipeline;
2592 cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
2593 break;
2594 case VK_PIPELINE_BIND_POINT_GRAPHICS:
2595 if (cmd_buffer->state.pipeline == pipeline)
2596 return;
2597 radv_mark_descriptor_sets_dirty(cmd_buffer);
2598
2599 cmd_buffer->state.pipeline = pipeline;
2600 if (!pipeline)
2601 break;
2602
2603 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
2604 cmd_buffer->push_constant_stages |= pipeline->active_stages;
2605
2606 radv_bind_dynamic_state(cmd_buffer, &pipeline->dynamic_state);
2607
2608 if (pipeline->graphics.esgs_ring_size > cmd_buffer->esgs_ring_size_needed)
2609 cmd_buffer->esgs_ring_size_needed = pipeline->graphics.esgs_ring_size;
2610 if (pipeline->graphics.gsvs_ring_size > cmd_buffer->gsvs_ring_size_needed)
2611 cmd_buffer->gsvs_ring_size_needed = pipeline->graphics.gsvs_ring_size;
2612
2613 if (radv_pipeline_has_tess(pipeline))
2614 cmd_buffer->tess_rings_needed = true;
2615
2616 if (radv_pipeline_has_gs(pipeline)) {
2617 struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
2618 AC_UD_SCRATCH_RING_OFFSETS);
2619 if (cmd_buffer->ring_offsets_idx == -1)
2620 cmd_buffer->ring_offsets_idx = loc->sgpr_idx;
2621 else if (loc->sgpr_idx != -1)
2622 assert(loc->sgpr_idx == cmd_buffer->ring_offsets_idx);
2623 }
2624 break;
2625 default:
2626 assert(!"invalid bind point");
2627 break;
2628 }
2629 }
2630
2631 void radv_CmdSetViewport(
2632 VkCommandBuffer commandBuffer,
2633 uint32_t firstViewport,
2634 uint32_t viewportCount,
2635 const VkViewport* pViewports)
2636 {
2637 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2638 MAYBE_UNUSED const uint32_t total_count = firstViewport + viewportCount;
2639
2640 assert(firstViewport < MAX_VIEWPORTS);
2641 assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);
2642
2643 memcpy(cmd_buffer->state.dynamic.viewport.viewports + firstViewport,
2644 pViewports, viewportCount * sizeof(*pViewports));
2645
2646 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_VIEWPORT;
2647 }
2648
2649 void radv_CmdSetScissor(
2650 VkCommandBuffer commandBuffer,
2651 uint32_t firstScissor,
2652 uint32_t scissorCount,
2653 const VkRect2D* pScissors)
2654 {
2655 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2656 MAYBE_UNUSED const uint32_t total_count = firstScissor + scissorCount;
2657
2658 assert(firstScissor < MAX_SCISSORS);
2659 assert(total_count >= 1 && total_count <= MAX_SCISSORS);
2660
2661 memcpy(cmd_buffer->state.dynamic.scissor.scissors + firstScissor,
2662 pScissors, scissorCount * sizeof(*pScissors));
2663 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
2664 }
2665
2666 void radv_CmdSetLineWidth(
2667 VkCommandBuffer commandBuffer,
2668 float lineWidth)
2669 {
2670 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2671 cmd_buffer->state.dynamic.line_width = lineWidth;
2672 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
2673 }
2674
2675 void radv_CmdSetDepthBias(
2676 VkCommandBuffer commandBuffer,
2677 float depthBiasConstantFactor,
2678 float depthBiasClamp,
2679 float depthBiasSlopeFactor)
2680 {
2681 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2682
2683 cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
2684 cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
2685 cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;
2686
2687 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
2688 }
2689
2690 void radv_CmdSetBlendConstants(
2691 VkCommandBuffer commandBuffer,
2692 const float blendConstants[4])
2693 {
2694 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2695
2696 memcpy(cmd_buffer->state.dynamic.blend_constants,
2697 blendConstants, sizeof(float) * 4);
2698
2699 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
2700 }
2701
2702 void radv_CmdSetDepthBounds(
2703 VkCommandBuffer commandBuffer,
2704 float minDepthBounds,
2705 float maxDepthBounds)
2706 {
2707 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2708
2709 cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
2710 cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;
2711
2712 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
2713 }
2714
2715 void radv_CmdSetStencilCompareMask(
2716 VkCommandBuffer commandBuffer,
2717 VkStencilFaceFlags faceMask,
2718 uint32_t compareMask)
2719 {
2720 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2721
2722 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2723 cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
2724 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2725 cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;
2726
2727 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
2728 }
2729
2730 void radv_CmdSetStencilWriteMask(
2731 VkCommandBuffer commandBuffer,
2732 VkStencilFaceFlags faceMask,
2733 uint32_t writeMask)
2734 {
2735 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2736
2737 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2738 cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
2739 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2740 cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;
2741
2742 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
2743 }
2744
2745 void radv_CmdSetStencilReference(
2746 VkCommandBuffer commandBuffer,
2747 VkStencilFaceFlags faceMask,
2748 uint32_t reference)
2749 {
2750 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2751
2752 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2753 cmd_buffer->state.dynamic.stencil_reference.front = reference;
2754 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2755 cmd_buffer->state.dynamic.stencil_reference.back = reference;
2756
2757 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
2758 }
2759
2760 void radv_CmdExecuteCommands(
2761 VkCommandBuffer commandBuffer,
2762 uint32_t commandBufferCount,
2763 const VkCommandBuffer* pCmdBuffers)
2764 {
2765 RADV_FROM_HANDLE(radv_cmd_buffer, primary, commandBuffer);
2766
2767 assert(commandBufferCount > 0);
2768
2769 /* Emit pending flushes on primary prior to executing secondary */
2770 si_emit_cache_flush(primary);
2771
2772 for (uint32_t i = 0; i < commandBufferCount; i++) {
2773 RADV_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]);
2774
2775 primary->scratch_size_needed = MAX2(primary->scratch_size_needed,
2776 secondary->scratch_size_needed);
2777 primary->compute_scratch_size_needed = MAX2(primary->compute_scratch_size_needed,
2778 secondary->compute_scratch_size_needed);
2779
2780 if (secondary->esgs_ring_size_needed > primary->esgs_ring_size_needed)
2781 primary->esgs_ring_size_needed = secondary->esgs_ring_size_needed;
2782 if (secondary->gsvs_ring_size_needed > primary->gsvs_ring_size_needed)
2783 primary->gsvs_ring_size_needed = secondary->gsvs_ring_size_needed;
2784 if (secondary->tess_rings_needed)
2785 primary->tess_rings_needed = true;
2786 if (secondary->sample_positions_needed)
2787 primary->sample_positions_needed = true;
2788
2789 if (secondary->ring_offsets_idx != -1) {
2790 if (primary->ring_offsets_idx == -1)
2791 primary->ring_offsets_idx = secondary->ring_offsets_idx;
2792 else
2793 assert(secondary->ring_offsets_idx == primary->ring_offsets_idx);
2794 }
2795 primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs);
2796
2798 /* When the secondary command buffer is compute only we don't
2799 * need to re-emit the current graphics pipeline.
2800 */
2801 if (secondary->state.emitted_pipeline) {
2802 primary->state.emitted_pipeline =
2803 secondary->state.emitted_pipeline;
2804 }
2805
2806 /* When the secondary command buffer is graphics only we don't
2807 * need to re-emit the current compute pipeline.
2808 */
2809 if (secondary->state.emitted_compute_pipeline) {
2810 primary->state.emitted_compute_pipeline =
2811 secondary->state.emitted_compute_pipeline;
2812 }
2813
2814 /* Copy back the draw state tracked by the secondary so these registers are only re-emitted when they actually change. */
2815 if (secondary->state.last_primitive_reset_en != -1) {
2816 primary->state.last_primitive_reset_en =
2817 secondary->state.last_primitive_reset_en;
2818 }
2819
2820 if (secondary->state.last_primitive_reset_index) {
2821 primary->state.last_primitive_reset_index =
2822 secondary->state.last_primitive_reset_index;
2823 }
2824
2825 if (secondary->state.last_ia_multi_vgt_param) {
2826 primary->state.last_ia_multi_vgt_param =
2827 secondary->state.last_ia_multi_vgt_param;
2828 }
2829 }
2830
2831 /* After executing commands from secondary buffers we have to mark some
2832 * of the primary's states dirty, since the secondaries may have changed
2833 * them. */
2834 primary->state.dirty |= RADV_CMD_DIRTY_PIPELINE |
2835 RADV_CMD_DIRTY_INDEX_BUFFER |
2836 RADV_CMD_DIRTY_DYNAMIC_ALL;
2837 radv_mark_descriptor_sets_dirty(primary);
2838 }
2839
2840 VkResult radv_CreateCommandPool(
2841 VkDevice _device,
2842 const VkCommandPoolCreateInfo* pCreateInfo,
2843 const VkAllocationCallbacks* pAllocator,
2844 VkCommandPool* pCmdPool)
2845 {
2846 RADV_FROM_HANDLE(radv_device, device, _device);
2847 struct radv_cmd_pool *pool;
2848
2849 pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
2850 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2851 if (pool == NULL)
2852 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2853
2854 if (pAllocator)
2855 pool->alloc = *pAllocator;
2856 else
2857 pool->alloc = device->alloc;
2858
2859 list_inithead(&pool->cmd_buffers);
2860 list_inithead(&pool->free_cmd_buffers);
2861
2862 pool->queue_family_index = pCreateInfo->queueFamilyIndex;
2863
2864 *pCmdPool = radv_cmd_pool_to_handle(pool);
2865
2866 return VK_SUCCESS;
2868 }
2869
2870 void radv_DestroyCommandPool(
2871 VkDevice _device,
2872 VkCommandPool commandPool,
2873 const VkAllocationCallbacks* pAllocator)
2874 {
2875 RADV_FROM_HANDLE(radv_device, device, _device);
2876 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
2877
2878 if (!pool)
2879 return;
2880
2881 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
2882 &pool->cmd_buffers, pool_link) {
2883 radv_cmd_buffer_destroy(cmd_buffer);
2884 }
2885
2886 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
2887 &pool->free_cmd_buffers, pool_link) {
2888 radv_cmd_buffer_destroy(cmd_buffer);
2889 }
2890
2891 vk_free2(&device->alloc, pAllocator, pool);
2892 }
2893
2894 VkResult radv_ResetCommandPool(
2895 VkDevice device,
2896 VkCommandPool commandPool,
2897 VkCommandPoolResetFlags flags)
2898 {
2899 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
2900 VkResult result;
2901
2902 list_for_each_entry(struct radv_cmd_buffer, cmd_buffer,
2903 &pool->cmd_buffers, pool_link) {
2904 result = radv_reset_cmd_buffer(cmd_buffer);
2905 if (result != VK_SUCCESS)
2906 return result;
2907 }
2908
2909 return VK_SUCCESS;
2910 }
2911
2912 void radv_TrimCommandPoolKHR(
2913 VkDevice device,
2914 VkCommandPool commandPool,
2915 VkCommandPoolTrimFlagsKHR flags)
2916 {
2917 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
2918
2919 if (!pool)
2920 return;
2921
2922 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
2923 &pool->free_cmd_buffers, pool_link) {
2924 radv_cmd_buffer_destroy(cmd_buffer);
2925 }
2926 }
2927
2928 void radv_CmdBeginRenderPass(
2929 VkCommandBuffer commandBuffer,
2930 const VkRenderPassBeginInfo* pRenderPassBegin,
2931 VkSubpassContents contents)
2932 {
2933 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2934 RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass);
2935 RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
2936
2937 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
2938 cmd_buffer->cs, 2048);
2939 MAYBE_UNUSED VkResult result;
2940
2941 cmd_buffer->state.framebuffer = framebuffer;
2942 cmd_buffer->state.pass = pass;
2943 cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
2944
2945 result = radv_cmd_state_setup_attachments(cmd_buffer, pass, pRenderPassBegin);
2946 if (result != VK_SUCCESS)
2947 return;
2948
2949 radv_cmd_buffer_set_subpass(cmd_buffer, pass->subpasses, true);
2950 assert(cmd_buffer->cs->cdw <= cdw_max);
2951
2952 radv_cmd_buffer_clear_subpass(cmd_buffer);
2953 }
2954
2955 void radv_CmdNextSubpass(
2956 VkCommandBuffer commandBuffer,
2957 VkSubpassContents contents)
2958 {
2959 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2960
2961 radv_cmd_buffer_resolve_subpass(cmd_buffer);
2962
2963 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs,
2964 2048);
2965
2966 radv_cmd_buffer_set_subpass(cmd_buffer, cmd_buffer->state.subpass + 1, true);
2967 radv_cmd_buffer_clear_subpass(cmd_buffer);
2968 }
2969
2970 static void radv_emit_view_index(struct radv_cmd_buffer *cmd_buffer, unsigned index)
2971 {
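/* Write the view index into the dedicated user SGPR of every active
 * stage (including the GS copy shader) before the draw is replayed
 * for this view.
 */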
2972 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
2973 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
2974 if (!pipeline->shaders[stage])
2975 continue;
2976 struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, AC_UD_VIEW_INDEX);
2977 if (loc->sgpr_idx == -1)
2978 continue;
2979 uint32_t base_reg = pipeline->user_data_0[stage];
2980 radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
2981
2982 }
2983 if (pipeline->gs_copy_shader) {
2984 struct ac_userdata_info *loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_VIEW_INDEX];
2985 if (loc->sgpr_idx != -1) {
2986 uint32_t base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
2987 radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
2988 }
2989 }
2990 }
2991
2992 static void
2993 radv_cs_emit_draw_packet(struct radv_cmd_buffer *cmd_buffer,
2994 uint32_t vertex_count)
2995 {
2996 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, cmd_buffer->state.predicating));
2997 radeon_emit(cmd_buffer->cs, vertex_count);
2998 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
2999 S_0287F0_USE_OPAQUE(0));
3000 }
3001
3002 static void
3003 radv_cs_emit_draw_indexed_packet(struct radv_cmd_buffer *cmd_buffer,
3004 uint64_t index_va,
3005 uint32_t index_count)
3006 {
3007 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, false));
3008 radeon_emit(cmd_buffer->cs, cmd_buffer->state.max_index_count);
3009 radeon_emit(cmd_buffer->cs, index_va);
3010 radeon_emit(cmd_buffer->cs, index_va >> 32);
3011 radeon_emit(cmd_buffer->cs, index_count);
3012 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_DMA);
3013 }
3014
3015 static void
3016 radv_cs_emit_indirect_draw_packet(struct radv_cmd_buffer *cmd_buffer,
3017 bool indexed,
3018 uint32_t draw_count,
3019 uint64_t count_va,
3020 uint32_t stride)
3021 {
3022 struct radeon_winsys_cs *cs = cmd_buffer->cs;
3023 unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
3024 : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
3025 bool draw_id_enable = radv_get_vertex_shader(cmd_buffer->state.pipeline)->info.info.vs.needs_draw_id;
3026 uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr;
3027 assert(base_reg);
3028
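/* A single draw that needs neither a draw ID nor a count buffer can
 * use the short DRAW_INDIRECT packet; anything else takes the MULTI
 * variant, which can write the draw ID register and fetch the draw
 * count from memory.
 */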
3029 if (draw_count == 1 && !count_va && !draw_id_enable) {
3030 radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT :
3031 PKT3_DRAW_INDIRECT, 3, false));
3032 radeon_emit(cs, 0);
3033 radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
3034 radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
3035 radeon_emit(cs, di_src_sel);
3036 } else {
3037 radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
3038 PKT3_DRAW_INDIRECT_MULTI,
3039 8, false));
3040 radeon_emit(cs, 0);
3041 radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
3042 radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
3043 radeon_emit(cs, (((base_reg + 8) - SI_SH_REG_OFFSET) >> 2) |
3044 S_2C3_DRAW_INDEX_ENABLE(draw_id_enable) |
3045 S_2C3_COUNT_INDIRECT_ENABLE(!!count_va));
3046 radeon_emit(cs, draw_count); /* count */
3047 radeon_emit(cs, count_va); /* count_addr */
3048 radeon_emit(cs, count_va >> 32);
3049 radeon_emit(cs, stride); /* stride */
3050 radeon_emit(cs, di_src_sel);
3051 }
3052 }
3053
3054 struct radv_draw_info {
3055 /**
3056 * Number of vertices.
3057 */
3058 uint32_t count;
3059
3060 /**
3061 * Index of the first vertex.
3062 */
3063 int32_t vertex_offset;
3064
3065 /**
3066 * First instance id.
3067 */
3068 uint32_t first_instance;
3069
3070 /**
3071 * Number of instances.
3072 */
3073 uint32_t instance_count;
3074
3075 /**
3076 * First index (indexed draws only).
3077 */
3078 uint32_t first_index;
3079
3080 /**
3081 * Whether it's an indexed draw.
3082 */
3083 bool indexed;
3084
3085 /**
3086 * Indirect draw parameters resource.
3087 */
3088 struct radv_buffer *indirect;
3089 uint64_t indirect_offset;
3090 uint32_t stride;
3091
3092 /**
3093 * Draw count parameters resource.
3094 */
3095 struct radv_buffer *count_buffer;
3096 uint64_t count_buffer_offset;
3097 };
3098
3099 static void
3100 radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer,
3101 const struct radv_draw_info *info)
3102 {
3103 struct radv_cmd_state *state = &cmd_buffer->state;
3104 struct radeon_winsys *ws = cmd_buffer->device->ws;
3105 struct radeon_winsys_cs *cs = cmd_buffer->cs;
3106
3107 if (info->indirect) {
3108 uint64_t va = radv_buffer_get_va(info->indirect->bo);
3109 uint64_t count_va = 0;
3110
3111 va += info->indirect->offset + info->indirect_offset;
3112
3113 radv_cs_add_buffer(ws, cs, info->indirect->bo, 8);
3114
3115 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
3116 radeon_emit(cs, 1);
3117 radeon_emit(cs, va);
3118 radeon_emit(cs, va >> 32);
3119
3120 if (info->count_buffer) {
3121 count_va = radv_buffer_get_va(info->count_buffer->bo);
3122 count_va += info->count_buffer->offset +
3123 info->count_buffer_offset;
3124
3125 radv_cs_add_buffer(ws, cs, info->count_buffer->bo, 8);
3126 }
3127
3128 if (!state->subpass->view_mask) {
3129 radv_cs_emit_indirect_draw_packet(cmd_buffer,
3130 info->indexed,
3131 info->count,
3132 count_va,
3133 info->stride);
3134 } else {
3135 unsigned i;
3136 for_each_bit(i, state->subpass->view_mask) {
3137 radv_emit_view_index(cmd_buffer, i);
3138
3139 radv_cs_emit_indirect_draw_packet(cmd_buffer,
3140 info->indexed,
3141 info->count,
3142 count_va,
3143 info->stride);
3144 }
3145 }
3146 } else {
3147 assert(state->pipeline->graphics.vtx_base_sgpr);
3148 radeon_set_sh_reg_seq(cs, state->pipeline->graphics.vtx_base_sgpr,
3149 state->pipeline->graphics.vtx_emit_num);
3150 radeon_emit(cs, info->vertex_offset);
3151 radeon_emit(cs, info->first_instance);
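/* When the pipeline needs a draw ID, a third user SGPR is emitted;
 * it is always 0 for direct draws.
 */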
3152 if (state->pipeline->graphics.vtx_emit_num == 3)
3153 radeon_emit(cs, 0);
3154
3155 radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, state->predicating));
3156 radeon_emit(cs, info->instance_count);
3157
3158 if (info->indexed) {
3159 int index_size = state->index_type ? 4 : 2;
3160 uint64_t index_va;
3161
3162 index_va = state->index_va;
3163 index_va += info->first_index * index_size;
3164
3165 if (!state->subpass->view_mask) {
3166 radv_cs_emit_draw_indexed_packet(cmd_buffer,
3167 index_va,
3168 info->count);
3169 } else {
3170 unsigned i;
3171 for_each_bit(i, state->subpass->view_mask) {
3172 radv_emit_view_index(cmd_buffer, i);
3173
3174 radv_cs_emit_draw_indexed_packet(cmd_buffer,
3175 index_va,
3176 info->count);
3177 }
3178 }
3179 } else {
3180 if (!state->subpass->view_mask) {
3181 radv_cs_emit_draw_packet(cmd_buffer, info->count);
3182 } else {
3183 unsigned i;
3184 for_each_bit(i, state->subpass->view_mask) {
3185 radv_emit_view_index(cmd_buffer, i);
3186
3187 radv_cs_emit_draw_packet(cmd_buffer,
3188 info->count);
3189 }
3190 }
3191 }
3192 }
3193 }
3194
3195 static void
3196 radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer,
3197 const struct radv_draw_info *info)
3198 {
3199 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE)
3200 radv_emit_graphics_pipeline(cmd_buffer);
3201
3202 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER)
3203 radv_emit_framebuffer_state(cmd_buffer);
3204
3205 if (info->indexed) {
3206 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_INDEX_BUFFER)
3207 radv_emit_index_buffer(cmd_buffer);
3208 } else {
3209 /* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
3210 * so the state must be re-emitted before the next indexed
3211 * draw.
3212 */
3213 if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK)
3214 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
3215 }
3216
3217 radv_cmd_buffer_flush_dynamic_state(cmd_buffer);
3218
3219 radv_emit_draw_registers(cmd_buffer, info->indexed,
3220 info->instance_count > 1, info->indirect,
3221 info->indirect ? 0 : info->count);
3222 }
3223
3224 static void
3225 radv_draw(struct radv_cmd_buffer *cmd_buffer,
3226 const struct radv_draw_info *info)
3227 {
3228 bool pipeline_is_dirty =
3229 (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
3230 cmd_buffer->state.pipeline &&
3231 cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline;
3232
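	/* Reserve a conservative upper bound of CS space for this draw; the
	 * assert at the end of this function checks we stayed within it.
	 */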
3233 MAYBE_UNUSED unsigned cdw_max =
3234 radeon_check_space(cmd_buffer->device->ws,
3235 cmd_buffer->cs, 4096);
3236
3237 /* Use optimal packet order based on whether we need to sync the
3238 * pipeline.
3239 */
3240 if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
3241 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
3242 RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
3243 RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) {
		/* If we have to wait for idle, set all states first so that
		 * all SET packets are processed in parallel with previous
		 * draw calls. Then upload descriptors, set shader pointers,
		 * draw, and prefetch at the end. This keeps the time the CUs
		 * are idle very short (there are only SET_SH packets between
		 * the wait and the draw).
		 */
3251 radv_emit_all_graphics_states(cmd_buffer, info);
3252 si_emit_cache_flush(cmd_buffer);
3253 /* <-- CUs are idle here --> */
3254
3255 if (!radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty))
3256 return;
3257
3258 radv_emit_draw_packets(cmd_buffer, info);
3259 /* <-- CUs are busy here --> */
3260
3261 /* Start prefetches after the draw has been started. Both will
3262 * run in parallel, but starting the draw first is more
3263 * important.
3264 */
3265 if (pipeline_is_dirty) {
3266 radv_emit_prefetch(cmd_buffer,
3267 cmd_buffer->state.pipeline);
3268 }
3269 } else {
3270 /* If we don't wait for idle, start prefetches first, then set
3271 * states, and draw at the end.
3272 */
3273 si_emit_cache_flush(cmd_buffer);
3274
3275 if (pipeline_is_dirty) {
3276 radv_emit_prefetch(cmd_buffer,
3277 cmd_buffer->state.pipeline);
3278 }
3279
3280 if (!radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty))
3281 return;
3282
3283 radv_emit_all_graphics_states(cmd_buffer, info);
3284 radv_emit_draw_packets(cmd_buffer, info);
3285 }
3286
3287 assert(cmd_buffer->cs->cdw <= cdw_max);
3288 radv_cmd_buffer_after_draw(cmd_buffer);
3289 }
3290
3291 void radv_CmdDraw(
3292 VkCommandBuffer commandBuffer,
3293 uint32_t vertexCount,
3294 uint32_t instanceCount,
3295 uint32_t firstVertex,
3296 uint32_t firstInstance)
3297 {
3298 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3299 struct radv_draw_info info = {};
3300
3301 info.count = vertexCount;
3302 info.instance_count = instanceCount;
3303 info.first_instance = firstInstance;
3304 info.vertex_offset = firstVertex;
3305
3306 radv_draw(cmd_buffer, &info);
3307 }
3308
3309 void radv_CmdDrawIndexed(
3310 VkCommandBuffer commandBuffer,
3311 uint32_t indexCount,
3312 uint32_t instanceCount,
3313 uint32_t firstIndex,
3314 int32_t vertexOffset,
3315 uint32_t firstInstance)
3316 {
3317 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3318 struct radv_draw_info info = {};
3319
3320 info.indexed = true;
3321 info.count = indexCount;
3322 info.instance_count = instanceCount;
3323 info.first_index = firstIndex;
3324 info.vertex_offset = vertexOffset;
3325 info.first_instance = firstInstance;
3326
3327 radv_draw(cmd_buffer, &info);
3328 }
3329
3330 void radv_CmdDrawIndirect(
3331 VkCommandBuffer commandBuffer,
3332 VkBuffer _buffer,
3333 VkDeviceSize offset,
3334 uint32_t drawCount,
3335 uint32_t stride)
3336 {
3337 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3338 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3339 struct radv_draw_info info = {};
3340
3341 info.count = drawCount;
3342 info.indirect = buffer;
3343 info.indirect_offset = offset;
3344 info.stride = stride;
3345
3346 radv_draw(cmd_buffer, &info);
3347 }
3348
3349 void radv_CmdDrawIndexedIndirect(
3350 VkCommandBuffer commandBuffer,
3351 VkBuffer _buffer,
3352 VkDeviceSize offset,
3353 uint32_t drawCount,
3354 uint32_t stride)
3355 {
3356 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3357 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3358 struct radv_draw_info info = {};
3359
3360 info.indexed = true;
3361 info.count = drawCount;
3362 info.indirect = buffer;
3363 info.indirect_offset = offset;
3364 info.stride = stride;
3365
3366 radv_draw(cmd_buffer, &info);
3367 }
3368
3369 void radv_CmdDrawIndirectCountAMD(
3370 VkCommandBuffer commandBuffer,
3371 VkBuffer _buffer,
3372 VkDeviceSize offset,
3373 VkBuffer _countBuffer,
3374 VkDeviceSize countBufferOffset,
3375 uint32_t maxDrawCount,
3376 uint32_t stride)
3377 {
3378 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3379 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3380 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
3381 struct radv_draw_info info = {};
3382
3383 info.count = maxDrawCount;
3384 info.indirect = buffer;
3385 info.indirect_offset = offset;
3386 info.count_buffer = count_buffer;
3387 info.count_buffer_offset = countBufferOffset;
3388 info.stride = stride;
3389
3390 radv_draw(cmd_buffer, &info);
3391 }
3392
3393 void radv_CmdDrawIndexedIndirectCountAMD(
3394 VkCommandBuffer commandBuffer,
3395 VkBuffer _buffer,
3396 VkDeviceSize offset,
3397 VkBuffer _countBuffer,
3398 VkDeviceSize countBufferOffset,
3399 uint32_t maxDrawCount,
3400 uint32_t stride)
3401 {
3402 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3403 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3404 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
3405 struct radv_draw_info info = {};
3406
3407 info.indexed = true;
3408 info.count = maxDrawCount;
3409 info.indirect = buffer;
3410 info.indirect_offset = offset;
3411 info.count_buffer = count_buffer;
3412 info.count_buffer_offset = countBufferOffset;
3413 info.stride = stride;
3414
3415 radv_draw(cmd_buffer, &info);
3416 }
3417
3418 struct radv_dispatch_info {
3419 /**
 * Dimensions of the grid to launch, in block units.
3421 */
3422 uint32_t blocks[3];
3423
3424 /**
3425 * Whether it's an unaligned compute dispatch.
3426 */
3427 bool unaligned;
3428
3429 /**
3430 * Indirect compute parameters resource.
3431 */
3432 struct radv_buffer *indirect;
3433 uint64_t indirect_offset;
3434 };
3435
3436 static void
3437 radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer,
3438 const struct radv_dispatch_info *info)
3439 {
3440 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
3441 struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
3442 struct radeon_winsys *ws = cmd_buffer->device->ws;
3443 struct radeon_winsys_cs *cs = cmd_buffer->cs;
3444 struct ac_userdata_info *loc;
3445 unsigned dispatch_initiator;
3446 uint8_t grid_used;
3447
3448 grid_used = compute_shader->info.info.cs.grid_components_used;
3449
3450 loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE,
3451 AC_UD_CS_GRID_SIZE);
3452
3453 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(ws, cs, 25);
3454
3455 dispatch_initiator = S_00B800_COMPUTE_SHADER_EN(1) |
3456 S_00B800_FORCE_START_AT_000(1);
3457
3458 if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
		/* Allow launching waves out-of-order if the KMD allows it
		 * (there is a KMD-controlled hardware register for this).
		 */
3462 dispatch_initiator |= S_00B800_ORDER_MODE(1);
3463 }
3464
3465 if (info->indirect) {
3466 uint64_t va = radv_buffer_get_va(info->indirect->bo);
3467
3468 va += info->indirect->offset + info->indirect_offset;
3469
3470 radv_cs_add_buffer(ws, cs, info->indirect->bo, 8);
3471
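		/* If the shader reads the grid size, copy each used dword of
		 * it from the indirect buffer into the grid-size user SGPRs
		 * with COPY_DATA.
		 */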
3472 if (loc->sgpr_idx != -1) {
3473 for (unsigned i = 0; i < grid_used; ++i) {
3474 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
3475 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
3476 COPY_DATA_DST_SEL(COPY_DATA_REG));
3477 radeon_emit(cs, (va + 4 * i));
3478 radeon_emit(cs, (va + 4 * i) >> 32);
3479 radeon_emit(cs, ((R_00B900_COMPUTE_USER_DATA_0
3480 + loc->sgpr_idx * 4) >> 2) + i);
3481 radeon_emit(cs, 0);
3482 }
3483 }
3484
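		/* The compute rings (MEC) take the 64-bit address directly
		 * in DISPATCH_INDIRECT, while the gfx ring takes an offset
		 * relative to a base set with SET_BASE.
		 */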
3485 if (radv_cmd_buffer_uses_mec(cmd_buffer)) {
3486 radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 2, 0) |
3487 PKT3_SHADER_TYPE_S(1));
3488 radeon_emit(cs, va);
3489 radeon_emit(cs, va >> 32);
3490 radeon_emit(cs, dispatch_initiator);
3491 } else {
3492 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
3493 PKT3_SHADER_TYPE_S(1));
3494 radeon_emit(cs, 1);
3495 radeon_emit(cs, va);
3496 radeon_emit(cs, va >> 32);
3497
3498 radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, 0) |
3499 PKT3_SHADER_TYPE_S(1));
3500 radeon_emit(cs, 0);
3501 radeon_emit(cs, dispatch_initiator);
3502 }
3503 } else {
3504 unsigned blocks[3] = { info->blocks[0], info->blocks[1], info->blocks[2] };
3505
3506 if (info->unaligned) {
3507 unsigned *cs_block_size = compute_shader->info.cs.block_size;
3508 unsigned remainder[3];
3509
			/* When a dimension is already aligned, this
			 * intentionally yields a full block size rather
			 * than 0.
			 */
3513 remainder[0] = blocks[0] + cs_block_size[0] -
3514 align_u32_npot(blocks[0], cs_block_size[0]);
3515 remainder[1] = blocks[1] + cs_block_size[1] -
3516 align_u32_npot(blocks[1], cs_block_size[1]);
3517 remainder[2] = blocks[2] + cs_block_size[2] -
3518 align_u32_npot(blocks[2], cs_block_size[2]);
3519
3520 blocks[0] = round_up_u32(blocks[0], cs_block_size[0]);
3521 blocks[1] = round_up_u32(blocks[1], cs_block_size[1]);
3522 blocks[2] = round_up_u32(blocks[2], cs_block_size[2]);
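
			/* For example, 13 threads in a dimension with a
			 * block size of 8 give remainder = 13 + 8 - 16 = 5
			 * and blocks = 2, i.e. one full group plus one
			 * partial group running 5 of its 8 threads.
			 */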
3523
3524 radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
3525 radeon_emit(cs,
3526 S_00B81C_NUM_THREAD_FULL(cs_block_size[0]) |
3527 S_00B81C_NUM_THREAD_PARTIAL(remainder[0]));
3528 radeon_emit(cs,
3529 S_00B81C_NUM_THREAD_FULL(cs_block_size[1]) |
3530 S_00B81C_NUM_THREAD_PARTIAL(remainder[1]));
3531 radeon_emit(cs,
3532 S_00B81C_NUM_THREAD_FULL(cs_block_size[2]) |
3533 S_00B81C_NUM_THREAD_PARTIAL(remainder[2]));
3534
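			/* Tell the hardware to launch a partial thread group
			 * for the trailing remainder in each dimension.
			 */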
3535 dispatch_initiator |= S_00B800_PARTIAL_TG_EN(1);
3536 }
3537
3538 if (loc->sgpr_idx != -1) {
3539 assert(!loc->indirect);
3540 assert(loc->num_sgprs == grid_used);
3541
3542 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
3543 loc->sgpr_idx * 4, grid_used);
3544 radeon_emit(cs, blocks[0]);
3545 if (grid_used > 1)
3546 radeon_emit(cs, blocks[1]);
3547 if (grid_used > 2)
3548 radeon_emit(cs, blocks[2]);
3549 }
3550
3551 radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
3552 PKT3_SHADER_TYPE_S(1));
3553 radeon_emit(cs, blocks[0]);
3554 radeon_emit(cs, blocks[1]);
3555 radeon_emit(cs, blocks[2]);
3556 radeon_emit(cs, dispatch_initiator);
3557 }
3558
3559 assert(cmd_buffer->cs->cdw <= cdw_max);
3560 }
3561
3562 static void
3563 radv_upload_compute_shader_descriptors(struct radv_cmd_buffer *cmd_buffer)
3564 {
3565 radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
3566 radv_flush_constants(cmd_buffer, cmd_buffer->state.compute_pipeline,
3567 VK_SHADER_STAGE_COMPUTE_BIT);
3568 }
3569
3570 static void
3571 radv_dispatch(struct radv_cmd_buffer *cmd_buffer,
3572 const struct radv_dispatch_info *info)
3573 {
3574 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
3575 bool pipeline_is_dirty = pipeline &&
3576 pipeline != cmd_buffer->state.emitted_compute_pipeline;
3577
3578 if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
3579 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
3580 RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
3581 RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) {
		/* If we have to wait for idle, set all states first so that
		 * all SET packets are processed in parallel with previous
		 * draw calls. Then upload descriptors, set shader pointers,
		 * dispatch, and prefetch at the end. This keeps the time the
		 * CUs are idle very short (there are only SET_SH packets
		 * between the wait and the dispatch).
		 */
3589 radv_emit_compute_pipeline(cmd_buffer);
3590 si_emit_cache_flush(cmd_buffer);
3591 /* <-- CUs are idle here --> */
3592
3593 radv_upload_compute_shader_descriptors(cmd_buffer);
3594
3595 radv_emit_dispatch_packets(cmd_buffer, info);
3596 /* <-- CUs are busy here --> */
3597
3598 /* Start prefetches after the dispatch has been started. Both
3599 * will run in parallel, but starting the dispatch first is
3600 * more important.
3601 */
3602 if (pipeline_is_dirty) {
3603 radv_emit_shader_prefetch(cmd_buffer,
3604 pipeline->shaders[MESA_SHADER_COMPUTE]);
3605 }
3606 } else {
3607 /* If we don't wait for idle, start prefetches first, then set
3608 * states, and dispatch at the end.
3609 */
3610 si_emit_cache_flush(cmd_buffer);
3611
3612 if (pipeline_is_dirty) {
3613 radv_emit_shader_prefetch(cmd_buffer,
3614 pipeline->shaders[MESA_SHADER_COMPUTE]);
3615 }
3616
3617 radv_upload_compute_shader_descriptors(cmd_buffer);
3618
3619 radv_emit_compute_pipeline(cmd_buffer);
3620 radv_emit_dispatch_packets(cmd_buffer, info);
3621 }
3622
3623 radv_cmd_buffer_after_draw(cmd_buffer);
3624 }
3625
3626 void radv_CmdDispatch(
3627 VkCommandBuffer commandBuffer,
3628 uint32_t x,
3629 uint32_t y,
3630 uint32_t z)
3631 {
3632 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3633 struct radv_dispatch_info info = {};
3634
3635 info.blocks[0] = x;
3636 info.blocks[1] = y;
3637 info.blocks[2] = z;
3638
3639 radv_dispatch(cmd_buffer, &info);
3640 }
3641
3642 void radv_CmdDispatchIndirect(
3643 VkCommandBuffer commandBuffer,
3644 VkBuffer _buffer,
3645 VkDeviceSize offset)
3646 {
3647 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3648 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3649 struct radv_dispatch_info info = {};
3650
3651 info.indirect = buffer;
3652 info.indirect_offset = offset;
3653
3654 radv_dispatch(cmd_buffer, &info);
3655 }
3656
3657 void radv_unaligned_dispatch(
3658 struct radv_cmd_buffer *cmd_buffer,
3659 uint32_t x,
3660 uint32_t y,
3661 uint32_t z)
3662 {
3663 struct radv_dispatch_info info = {};
3664
3665 info.blocks[0] = x;
3666 info.blocks[1] = y;
3667 info.blocks[2] = z;
	info.unaligned = true;
3669
3670 radv_dispatch(cmd_buffer, &info);
3671 }
3672
3673 void radv_CmdEndRenderPass(
3674 VkCommandBuffer commandBuffer)
3675 {
3676 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3677
3678 radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier);
3679
3680 radv_cmd_buffer_resolve_subpass(cmd_buffer);
3681
3682 for (unsigned i = 0; i < cmd_buffer->state.framebuffer->attachment_count; ++i) {
3683 VkImageLayout layout = cmd_buffer->state.pass->attachments[i].final_layout;
3684 radv_handle_subpass_image_transition(cmd_buffer,
3685 (VkAttachmentReference){i, layout});
3686 }
3687
3688 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
3689
3690 cmd_buffer->state.pass = NULL;
3691 cmd_buffer->state.subpass = NULL;
3692 cmd_buffer->state.attachments = NULL;
3693 cmd_buffer->state.framebuffer = NULL;
3694 }
3695
/*
 * For HTILE we have the following interesting clear words:
 *   0x0000030f: Uncompressed.
 *   0xfffffff0: Clear depth to 1.0.
 *   0x00000000: Clear depth to 0.0.
 */
3702 static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
3703 struct radv_image *image,
3704 const VkImageSubresourceRange *range,
3705 uint32_t clear_word)
3706 {
3707 assert(range->baseMipLevel == 0);
	assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_MIP_LEVELS);
3709 unsigned layer_count = radv_get_layerCount(image, range);
3710 uint64_t size = image->surface.htile_slice_size * layer_count;
3711 uint64_t offset = image->offset + image->htile_offset +
3712 image->surface.htile_slice_size * range->baseArrayLayer;
3713 struct radv_cmd_state *state = &cmd_buffer->state;
3714
3715 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
3716 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
3717
3718 state->flush_bits |= radv_fill_buffer(cmd_buffer, image->bo, offset,
3719 size, clear_word);
3720
3721 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
3722 }
3723
3724 static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
3725 struct radv_image *image,
3726 VkImageLayout src_layout,
3727 VkImageLayout dst_layout,
3728 unsigned src_queue_mask,
3729 unsigned dst_queue_mask,
3730 const VkImageSubresourceRange *range,
3731 VkImageAspectFlags pending_clears)
3732 {
3733 if (dst_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL &&
3734 (pending_clears & vk_format_aspects(image->vk_format)) == vk_format_aspects(image->vk_format) &&
3735 cmd_buffer->state.render_area.offset.x == 0 && cmd_buffer->state.render_area.offset.y == 0 &&
3736 cmd_buffer->state.render_area.extent.width == image->info.width &&
3737 cmd_buffer->state.render_area.extent.height == image->info.height) {
3738 /* The clear will initialize htile. */
3739 return;
3740 } else if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
3741 radv_layout_has_htile(image, dst_layout, dst_queue_mask)) {
3742 /* TODO: merge with the clear if applicable */
3743 radv_initialize_htile(cmd_buffer, image, range, 0);
3744 } else if (!radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
3745 radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
3746 radv_initialize_htile(cmd_buffer, image, range, 0xffffffff);
3747 } else if (radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
3748 !radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
3749 VkImageSubresourceRange local_range = *range;
3750 local_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
3751 local_range.baseMipLevel = 0;
3752 local_range.levelCount = 1;
3753
3754 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
3755 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
3756
3757 radv_decompress_depth_image_inplace(cmd_buffer, image, &local_range);
3758
3759 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
3760 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
3761 }
3762 }
3763
3764 void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
3765 struct radv_image *image, uint32_t value)
3766 {
3767 struct radv_cmd_state *state = &cmd_buffer->state;
3768
3769 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
3770 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
3771
3772 state->flush_bits |= radv_fill_buffer(cmd_buffer, image->bo,
3773 image->offset + image->cmask.offset,
3774 image->cmask.size, value);
3775
3776 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
3777 }
3778
3779 static void radv_handle_cmask_image_transition(struct radv_cmd_buffer *cmd_buffer,
3780 struct radv_image *image,
3781 VkImageLayout src_layout,
3782 VkImageLayout dst_layout,
3783 unsigned src_queue_mask,
3784 unsigned dst_queue_mask,
3785 const VkImageSubresourceRange *range)
3786 {
3787 if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
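		/* On first use, put CMASK into a default (not fast-cleared)
		 * state; the initial value differs depending on whether an
		 * FMASK surface is present.
		 */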
3788 if (image->fmask.size)
3789 radv_initialise_cmask(cmd_buffer, image, 0xccccccccu);
3790 else
3791 radv_initialise_cmask(cmd_buffer, image, 0xffffffffu);
3792 } else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
3793 !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
3794 radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
3795 }
3796 }
3797
3798 void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
3799 struct radv_image *image, uint32_t value)
3800 {
3801 struct radv_cmd_state *state = &cmd_buffer->state;
3802
3803 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
3804 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
3805
3806 state->flush_bits |= radv_fill_buffer(cmd_buffer, image->bo,
3807 image->offset + image->dcc_offset,
3808 image->surface.dcc_size, value);
3809
3810 state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
3811 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
3812 }
3813
3814 static void radv_handle_dcc_image_transition(struct radv_cmd_buffer *cmd_buffer,
3815 struct radv_image *image,
3816 VkImageLayout src_layout,
3817 VkImageLayout dst_layout,
3818 unsigned src_queue_mask,
3819 unsigned dst_queue_mask,
3820 const VkImageSubresourceRange *range)
3821 {
3822 if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
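		/* 0x20202020 is the value radv uses for a freshly
		 * initialized DCC surface, presumably the "uncompressed"
		 * encoding.
		 */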
3823 radv_initialize_dcc(cmd_buffer, image, 0x20202020u);
3824 } else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
3825 !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
3826 radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
3827 }
3828 }
3829
3830 static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
3831 struct radv_image *image,
3832 VkImageLayout src_layout,
3833 VkImageLayout dst_layout,
3834 uint32_t src_family,
3835 uint32_t dst_family,
3836 const VkImageSubresourceRange *range,
3837 VkImageAspectFlags pending_clears)
3838 {
3839 if (image->exclusive && src_family != dst_family) {
3840 /* This is an acquire or a release operation and there will be
3841 * a corresponding release/acquire. Do the transition in the
3842 * most flexible queue. */
3843
3844 assert(src_family == cmd_buffer->queue_family_index ||
3845 dst_family == cmd_buffer->queue_family_index);
3846
3847 if (cmd_buffer->queue_family_index == RADV_QUEUE_TRANSFER)
3848 return;
3849
3850 if (cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
3851 (src_family == RADV_QUEUE_GENERAL ||
3852 dst_family == RADV_QUEUE_GENERAL))
3853 return;
3854 }
3855
3856 unsigned src_queue_mask = radv_image_queue_family_mask(image, src_family, cmd_buffer->queue_family_index);
3857 unsigned dst_queue_mask = radv_image_queue_family_mask(image, dst_family, cmd_buffer->queue_family_index);
3858
3859 if (image->surface.htile_size)
3860 radv_handle_depth_image_transition(cmd_buffer, image, src_layout,
3861 dst_layout, src_queue_mask,
3862 dst_queue_mask, range,
3863 pending_clears);
3864
3865 if (image->cmask.size || image->fmask.size)
3866 radv_handle_cmask_image_transition(cmd_buffer, image, src_layout,
3867 dst_layout, src_queue_mask,
3868 dst_queue_mask, range);
3869
3870 if (image->surface.dcc_size)
3871 radv_handle_dcc_image_transition(cmd_buffer, image, src_layout,
3872 dst_layout, src_queue_mask,
3873 dst_queue_mask, range);
3874 }
3875
3876 void radv_CmdPipelineBarrier(
3877 VkCommandBuffer commandBuffer,
3878 VkPipelineStageFlags srcStageMask,
3879 VkPipelineStageFlags destStageMask,
3880 VkBool32 byRegion,
3881 uint32_t memoryBarrierCount,
3882 const VkMemoryBarrier* pMemoryBarriers,
3883 uint32_t bufferMemoryBarrierCount,
3884 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
3885 uint32_t imageMemoryBarrierCount,
3886 const VkImageMemoryBarrier* pImageMemoryBarriers)
3887 {
3888 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3889 enum radv_cmd_flush_bits src_flush_bits = 0;
3890 enum radv_cmd_flush_bits dst_flush_bits = 0;
3891
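	/* Accumulate the flushes implied by the source access masks first;
	 * the destination flushes are applied only after the image layout
	 * transitions below.
	 */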
3892 for (uint32_t i = 0; i < memoryBarrierCount; i++) {
3893 src_flush_bits |= radv_src_access_flush(cmd_buffer, pMemoryBarriers[i].srcAccessMask);
3894 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pMemoryBarriers[i].dstAccessMask,
3895 NULL);
3896 }
3897
3898 for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
3899 src_flush_bits |= radv_src_access_flush(cmd_buffer, pBufferMemoryBarriers[i].srcAccessMask);
3900 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pBufferMemoryBarriers[i].dstAccessMask,
3901 NULL);
3902 }
3903
3904 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
3905 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
3906 src_flush_bits |= radv_src_access_flush(cmd_buffer, pImageMemoryBarriers[i].srcAccessMask);
3907 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pImageMemoryBarriers[i].dstAccessMask,
3908 image);
3909 }
3910
3911 radv_stage_flush(cmd_buffer, srcStageMask);
3912 cmd_buffer->state.flush_bits |= src_flush_bits;
3913
3914 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
3915 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
3916 radv_handle_image_transition(cmd_buffer, image,
3917 pImageMemoryBarriers[i].oldLayout,
3918 pImageMemoryBarriers[i].newLayout,
3919 pImageMemoryBarriers[i].srcQueueFamilyIndex,
3920 pImageMemoryBarriers[i].dstQueueFamilyIndex,
3921 &pImageMemoryBarriers[i].subresourceRange,
3922 0);
3923 }
3924
3925 cmd_buffer->state.flush_bits |= dst_flush_bits;
3926 }
3927
3928
3929 static void write_event(struct radv_cmd_buffer *cmd_buffer,
3930 struct radv_event *event,
3931 VkPipelineStageFlags stageMask,
3932 unsigned value)
3933 {
3934 struct radeon_winsys_cs *cs = cmd_buffer->cs;
3935 uint64_t va = radv_buffer_get_va(event->bo);
3936
3937 radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo, 8);
3938
3939 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 18);
3940
	/* TODO: Emitting a bottom-of-pipe event is overkill; we could
	 * probably pick a lighter event based on the stage mask. */
3943
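	/* Write the requested value to the event BO with a bottom-of-pipe
	 * timestamp event, so the write only lands once all prior work on
	 * this queue has completed.
	 */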
3944 si_cs_emit_write_event_eop(cs,
3945 cmd_buffer->state.predicating,
3946 cmd_buffer->device->physical_device->rad_info.chip_class,
3947 false,
3948 V_028A90_BOTTOM_OF_PIPE_TS, 0,
3949 1, va, 2, value);
3950
3951 assert(cmd_buffer->cs->cdw <= cdw_max);
3952 }
3953
3954 void radv_CmdSetEvent(VkCommandBuffer commandBuffer,
3955 VkEvent _event,
3956 VkPipelineStageFlags stageMask)
3957 {
3958 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3959 RADV_FROM_HANDLE(radv_event, event, _event);
3960
3961 write_event(cmd_buffer, event, stageMask, 1);
3962 }
3963
3964 void radv_CmdResetEvent(VkCommandBuffer commandBuffer,
3965 VkEvent _event,
3966 VkPipelineStageFlags stageMask)
3967 {
3968 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3969 RADV_FROM_HANDLE(radv_event, event, _event);
3970
3971 write_event(cmd_buffer, event, stageMask, 0);
3972 }
3973
3974 void radv_CmdWaitEvents(VkCommandBuffer commandBuffer,
3975 uint32_t eventCount,
3976 const VkEvent* pEvents,
3977 VkPipelineStageFlags srcStageMask,
3978 VkPipelineStageFlags dstStageMask,
3979 uint32_t memoryBarrierCount,
3980 const VkMemoryBarrier* pMemoryBarriers,
3981 uint32_t bufferMemoryBarrierCount,
3982 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
3983 uint32_t imageMemoryBarrierCount,
3984 const VkImageMemoryBarrier* pImageMemoryBarriers)
3985 {
3986 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3987 struct radeon_winsys_cs *cs = cmd_buffer->cs;
3988
3989 for (unsigned i = 0; i < eventCount; ++i) {
3990 RADV_FROM_HANDLE(radv_event, event, pEvents[i]);
3991 uint64_t va = radv_buffer_get_va(event->bo);
3992
3993 radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo, 8);
3994
3995 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
3996
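		/* Wait until the event BO contains 1, i.e. the value written
		 * by vkCmdSetEvent, by polling the memory location with
		 * WAIT_REG_MEM.
		 */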
3997 si_emit_wait_fence(cs, false, va, 1, 0xffffffff);
3998 assert(cmd_buffer->cs->cdw <= cdw_max);
3999 }
4000
4001
4002 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
4003 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
4004
4005 radv_handle_image_transition(cmd_buffer, image,
4006 pImageMemoryBarriers[i].oldLayout,
4007 pImageMemoryBarriers[i].newLayout,
4008 pImageMemoryBarriers[i].srcQueueFamilyIndex,
4009 pImageMemoryBarriers[i].dstQueueFamilyIndex,
4010 &pImageMemoryBarriers[i].subresourceRange,
4011 0);
4012 }
4013
4014 /* TODO: figure out how to do memory barriers without waiting */
4015 cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER |
4016 RADV_CMD_FLAG_INV_GLOBAL_L2 |
4017 RADV_CMD_FLAG_INV_VMEM_L1 |
4018 RADV_CMD_FLAG_INV_SMEM_L1;
4019 }