/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "radv_private.h"
#include "radv_radeon_winsys.h"
#include "radv_shader.h"
#include "radv_cs.h"
#include "sid.h"
#include "gfx9d.h"
#include "vk_format.h"
#include "radv_debug.h"
#include "radv_meta.h"

#include "ac_debug.h"

static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
					 struct radv_image *image,
					 VkImageLayout src_layout,
					 VkImageLayout dst_layout,
					 uint32_t src_family,
					 uint32_t dst_family,
					 const VkImageSubresourceRange *range,
					 VkImageAspectFlags pending_clears);

const struct radv_dynamic_state default_dynamic_state = {
	.viewport = {
		.count = 0,
	},
	.scissor = {
		.count = 0,
	},
	.line_width = 1.0f,
	.depth_bias = {
		.bias = 0.0f,
		.clamp = 0.0f,
		.slope = 0.0f,
	},
	.blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
	.depth_bounds = {
		.min = 0.0f,
		.max = 1.0f,
	},
	.stencil_compare_mask = {
		.front = ~0u,
		.back = ~0u,
	},
	.stencil_write_mask = {
		.front = ~0u,
		.back = ~0u,
	},
	.stencil_reference = {
		.front = 0u,
		.back = 0u,
	},
};

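/* Copy the subset of dynamic state selected by copy_mask, a bitmask of
 * (1 << VK_DYNAMIC_STATE_*) bits, from src to dest.
 */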
static void
radv_dynamic_state_copy(struct radv_dynamic_state *dest,
			const struct radv_dynamic_state *src,
			uint32_t copy_mask)
{
	/* Make sure to copy the number of viewports/scissors because they can
	 * only be specified at pipeline creation time.
	 */
	dest->viewport.count = src->viewport.count;
	dest->scissor.count = src->scissor.count;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
		typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
			     src->viewport.count);
	}

	if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
		typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
			     src->scissor.count);
	}

	if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
		dest->line_width = src->line_width;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
		dest->depth_bias = src->depth_bias;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
		typed_memcpy(dest->blend_constants, src->blend_constants, 4);

	if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
		dest->depth_bounds = src->depth_bounds;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
		dest->stencil_compare_mask = src->stencil_compare_mask;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
		dest->stencil_write_mask = src->stencil_write_mask;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
		dest->stencil_reference = src->stencil_reference;
}

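/* Compute queues on CIK and later are driven by the MEC (compute micro
 * engine), which accepts a different packet subset than the graphics
 * ME; several emit paths below branch on this.
 */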
bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer)
{
	return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
	       cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
}

enum ring_type radv_queue_family_to_ring(int f) {
	switch (f) {
	case RADV_QUEUE_GENERAL:
		return RING_GFX;
	case RADV_QUEUE_COMPUTE:
		return RING_COMPUTE;
	case RADV_QUEUE_TRANSFER:
		return RING_DMA;
	default:
		unreachable("Unknown queue family");
	}
}

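/* Allocate a command buffer and its backing winsys command stream for
 * the ring matching the pool's queue family. A NULL pool is tolerated
 * and defaults to the general queue.
 */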
static VkResult radv_create_cmd_buffer(
	struct radv_device *         device,
	struct radv_cmd_pool *       pool,
	VkCommandBufferLevel         level,
	VkCommandBuffer*             pCommandBuffer)
{
	struct radv_cmd_buffer *cmd_buffer;
	unsigned ring;
	cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
			      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (cmd_buffer == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(cmd_buffer, 0, sizeof(*cmd_buffer));
	cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
	cmd_buffer->device = device;
	cmd_buffer->pool = pool;
	cmd_buffer->level = level;

	if (pool) {
		list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
		cmd_buffer->queue_family_index = pool->queue_family_index;

	} else {
		/* Init the pool_link so we can safely call list_del when we destroy
		 * the command buffer
		 */
		list_inithead(&cmd_buffer->pool_link);
		cmd_buffer->queue_family_index = RADV_QUEUE_GENERAL;
	}

	ring = radv_queue_family_to_ring(cmd_buffer->queue_family_index);

	cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
	if (!cmd_buffer->cs) {
		vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
	}

	*pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);

	cmd_buffer->upload.offset = 0;
	cmd_buffer->upload.size = 0;
	list_inithead(&cmd_buffer->upload.list);

	return VK_SUCCESS;
}

static void
radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer)
{
	list_del(&cmd_buffer->pool_link);

	list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
				 &cmd_buffer->upload.list, list) {
		cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
		list_del(&up->list);
		free(up);
	}

	if (cmd_buffer->upload.upload_bo)
		cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
	cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
	free(cmd_buffer->push_descriptors.set.mapped_ptr);
	vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

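/* Return the command buffer to the initial state: reset the CS, free
 * all retired upload BOs (the current one is kept for reuse) and clear
 * the recorded ring/scratch requirements. On GFX9, a dword is also
 * reserved in the upload BO for the fence value tracked by
 * gfx9_fence_offset/gfx9_fence_bo.
 */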
static VkResult
radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
{
	cmd_buffer->device->ws->cs_reset(cmd_buffer->cs);

	list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
				 &cmd_buffer->upload.list, list) {
		cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
		list_del(&up->list);
		free(up);
	}

	cmd_buffer->push_constant_stages = 0;
	cmd_buffer->scratch_size_needed = 0;
	cmd_buffer->compute_scratch_size_needed = 0;
	cmd_buffer->esgs_ring_size_needed = 0;
	cmd_buffer->gsvs_ring_size_needed = 0;
	cmd_buffer->tess_rings_needed = false;
	cmd_buffer->sample_positions_needed = false;

	if (cmd_buffer->upload.upload_bo)
		cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs,
						      cmd_buffer->upload.upload_bo, 8);
	cmd_buffer->upload.offset = 0;

	cmd_buffer->record_result = VK_SUCCESS;

	cmd_buffer->ring_offsets_idx = -1;

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		void *fence_ptr;
		radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 0,
					     &cmd_buffer->gfx9_fence_offset,
					     &fence_ptr);
		cmd_buffer->gfx9_fence_bo = cmd_buffer->upload.upload_bo;
	}

	return cmd_buffer->record_result;
}

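/* Replace the upload BO with a new CPU-visible GTT buffer of at least
 * min_needed bytes, growing geometrically to amortize reallocations.
 * The old BO is parked on the upload list until reset/destroy because
 * previously emitted packets may still reference it.
 */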
static bool
radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer,
				  uint64_t min_needed)
{
	uint64_t new_size;
	struct radeon_winsys_bo *bo;
	struct radv_cmd_buffer_upload *upload;
	struct radv_device *device = cmd_buffer->device;

	new_size = MAX2(min_needed, 16 * 1024);
	new_size = MAX2(new_size, 2 * cmd_buffer->upload.size);

	bo = device->ws->buffer_create(device->ws,
				       new_size, 4096,
				       RADEON_DOMAIN_GTT,
				       RADEON_FLAG_CPU_ACCESS);

	if (!bo) {
		cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
		return false;
	}

	device->ws->cs_add_buffer(cmd_buffer->cs, bo, 8);
	if (cmd_buffer->upload.upload_bo) {
		upload = malloc(sizeof(*upload));

		if (!upload) {
			cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
			device->ws->buffer_destroy(bo);
			return false;
		}

		memcpy(upload, &cmd_buffer->upload, sizeof(*upload));
		list_add(&upload->list, &cmd_buffer->upload.list);
	}

	cmd_buffer->upload.upload_bo = bo;
	cmd_buffer->upload.size = new_size;
	cmd_buffer->upload.offset = 0;
	cmd_buffer->upload.map = device->ws->buffer_map(cmd_buffer->upload.upload_bo);

	if (!cmd_buffer->upload.map) {
		cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
		return false;
	}

	return true;
}

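/* Sub-allocate size bytes (at the requested alignment) from the upload
 * BO, returning both the BO offset and a CPU pointer into the mapping.
 * Grows the BO if the allocation does not fit.
 */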
bool
radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer,
			     unsigned size,
			     unsigned alignment,
			     unsigned *out_offset,
			     void **ptr)
{
	uint64_t offset = align(cmd_buffer->upload.offset, alignment);
	if (offset + size > cmd_buffer->upload.size) {
		if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size))
			return false;
		offset = 0;
	}

	*out_offset = offset;
	*ptr = cmd_buffer->upload.map + offset;

	cmd_buffer->upload.offset = offset + size;
	return true;
}

bool
radv_cmd_buffer_upload_data(struct radv_cmd_buffer *cmd_buffer,
			    unsigned size, unsigned alignment,
			    const void *data, unsigned *out_offset)
{
	uint8_t *ptr;

	if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size, alignment,
					  out_offset, (void **)&ptr))
		return false;

	if (ptr)
		memcpy(ptr, data, size);

	return true;
}

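/* Emit a WRITE_DATA packet that makes the ME write count dwords to
 * memory at va, with write confirmation so later packets can rely on
 * the data being visible.
 */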
static void
radv_emit_write_data_packet(struct radeon_winsys_cs *cs, uint64_t va,
			    unsigned count, const uint32_t *data)
{
	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
	radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_ME));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit_array(cs, data, count);
}

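/* Write an incrementing trace id to the trace BO and tag the CS with a
 * matching NOP marker. When a hang is investigated, comparing the value
 * in memory against the markers shows how far the GPU got. Only active
 * when the device allocated a trace BO for debugging.
 */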
void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_device *device = cmd_buffer->device;
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va;

	if (!device->trace_bo)
		return;

	va = radv_buffer_get_va(device->trace_bo);
	if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
		va += 4;

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 7);

	++cmd_buffer->state.trace_id;
	device->ws->cs_add_buffer(cs, device->trace_bo, 8);
	radv_emit_write_data_packet(cs, va, 1, &cmd_buffer->state.trace_id);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, AC_ENCODE_TRACE_POINT(cmd_buffer->state.trace_id));
}

static void
radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer)
{
	if (cmd_buffer->device->instance->debug_flags & RADV_DEBUG_SYNC_SHADERS) {
		enum radv_cmd_flush_bits flags;

		/* Force wait for graphics/compute engines to be idle. */
		flags = RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
			RADV_CMD_FLAG_CS_PARTIAL_FLUSH;

		si_cs_emit_cache_flush(cmd_buffer->cs, false,
				       cmd_buffer->device->physical_device->rad_info.chip_class,
				       NULL, 0,
				       radv_cmd_buffer_uses_mec(cmd_buffer),
				       flags);
	}

	radv_cmd_buffer_trace_emit(cmd_buffer);
}

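/* Record the CPU pointer of the bound pipeline in the trace BO (GFX and
 * compute use separate slots) so a post-mortem dump can identify which
 * pipeline was active when the GPU hung.
 */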
static void
radv_save_pipeline(struct radv_cmd_buffer *cmd_buffer,
		   struct radv_pipeline *pipeline, enum ring_type ring)
{
	struct radv_device *device = cmd_buffer->device;
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint32_t data[2];
	uint64_t va;

	if (!device->trace_bo)
		return;

	va = radv_buffer_get_va(device->trace_bo);

	switch (ring) {
	case RING_GFX:
		va += 8;
		break;
	case RING_COMPUTE:
		va += 16;
		break;
	default:
		assert(!"invalid ring type");
	}

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(device->ws,
							   cmd_buffer->cs, 6);

	data[0] = (uintptr_t)pipeline;
	data[1] = (uintptr_t)pipeline >> 32;

	device->ws->cs_add_buffer(cs, device->trace_bo, 8);
	radv_emit_write_data_packet(cs, va, 2, data);
}

static void
radv_save_descriptors(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_device *device = cmd_buffer->device;
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint32_t data[MAX_SETS * 2] = {};
	uint64_t va;

	if (!device->trace_bo)
		return;

	va = radv_buffer_get_va(device->trace_bo) + 24;

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(device->ws,
							   cmd_buffer->cs, 4 + MAX_SETS * 2);

	for (int i = 0; i < MAX_SETS; i++) {
		struct radv_descriptor_set *set = cmd_buffer->state.descriptors[i];
		if (!set)
			continue;

		data[i * 2] = (uintptr_t)set;
		data[i * 2 + 1] = (uintptr_t)set >> 32;
	}

	device->ws->cs_add_buffer(cs, device->trace_bo, 8);
	radv_emit_write_data_packet(cs, va, MAX_SETS * 2, data);
}

static void
radv_emit_graphics_blend_state(struct radv_cmd_buffer *cmd_buffer,
			       struct radv_pipeline *pipeline)
{
	radeon_set_context_reg_seq(cmd_buffer->cs, R_028780_CB_BLEND0_CONTROL, 8);
	radeon_emit_array(cmd_buffer->cs, pipeline->graphics.blend.cb_blend_control,
			  8);
	radeon_set_context_reg(cmd_buffer->cs, R_028808_CB_COLOR_CONTROL, pipeline->graphics.blend.cb_color_control);
	radeon_set_context_reg(cmd_buffer->cs, R_028B70_DB_ALPHA_TO_MASK, pipeline->graphics.blend.db_alpha_to_mask);

	if (cmd_buffer->device->physical_device->has_rbplus) {

		radeon_set_context_reg_seq(cmd_buffer->cs, R_028760_SX_MRT0_BLEND_OPT, 8);
		radeon_emit_array(cmd_buffer->cs, pipeline->graphics.blend.sx_mrt_blend_opt, 8);

		radeon_set_context_reg_seq(cmd_buffer->cs, R_028754_SX_PS_DOWNCONVERT, 3);
		radeon_emit(cmd_buffer->cs, 0); /* R_028754_SX_PS_DOWNCONVERT */
		radeon_emit(cmd_buffer->cs, 0); /* R_028758_SX_BLEND_OPT_EPSILON */
		radeon_emit(cmd_buffer->cs, 0); /* R_02875C_SX_BLEND_OPT_CONTROL */
	}
}

static void
radv_emit_graphics_depth_stencil_state(struct radv_cmd_buffer *cmd_buffer,
				       struct radv_pipeline *pipeline)
{
	struct radv_depth_stencil_state *ds = &pipeline->graphics.ds;
	radeon_set_context_reg(cmd_buffer->cs, R_028800_DB_DEPTH_CONTROL, ds->db_depth_control);
	radeon_set_context_reg(cmd_buffer->cs, R_02842C_DB_STENCIL_CONTROL, ds->db_stencil_control);

	radeon_set_context_reg(cmd_buffer->cs, R_028000_DB_RENDER_CONTROL, ds->db_render_control);
	radeon_set_context_reg(cmd_buffer->cs, R_028010_DB_RENDER_OVERRIDE2, ds->db_render_override2);
}

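/* Return the user SGPR location for (stage, idx). When stages are
 * merged (vertex can run as part of HS or GS, tess eval as part of GS),
 * the lookup falls through to the shader that actually contains the
 * requested stage.
 */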
struct ac_userdata_info *
radv_lookup_user_sgpr(struct radv_pipeline *pipeline,
		      gl_shader_stage stage,
		      int idx)
{
	if (stage == MESA_SHADER_VERTEX) {
		if (pipeline->shaders[MESA_SHADER_VERTEX])
			return &pipeline->shaders[MESA_SHADER_VERTEX]->info.user_sgprs_locs.shader_data[idx];
		if (pipeline->shaders[MESA_SHADER_TESS_CTRL])
			return &pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.user_sgprs_locs.shader_data[idx];
		if (pipeline->shaders[MESA_SHADER_GEOMETRY])
			return &pipeline->shaders[MESA_SHADER_GEOMETRY]->info.user_sgprs_locs.shader_data[idx];
	} else if (stage == MESA_SHADER_TESS_EVAL) {
		if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
			return &pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.user_sgprs_locs.shader_data[idx];
		if (pipeline->shaders[MESA_SHADER_GEOMETRY])
			return &pipeline->shaders[MESA_SHADER_GEOMETRY]->info.user_sgprs_locs.shader_data[idx];
	}
	return &pipeline->shaders[stage]->info.user_sgprs_locs.shader_data[idx];
}

static void
radv_emit_userdata_address(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_pipeline *pipeline,
			   gl_shader_stage stage,
			   int idx, uint64_t va)
{
	struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
	uint32_t base_reg = radv_shader_stage_to_user_data_0(stage, cmd_buffer->device->physical_device->rad_info.chip_class, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
	if (loc->sgpr_idx == -1)
		return;
	assert(loc->num_sgprs == 2);
	assert(!loc->indirect);
	radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, 2);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
}

static void
radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer,
			      struct radv_pipeline *pipeline)
{
	int num_samples = pipeline->graphics.ms.num_samples;
	struct radv_multisample_state *ms = &pipeline->graphics.ms;
	struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_mask[0]);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_mask[1]);

	radeon_set_context_reg(cmd_buffer->cs, R_028804_DB_EQAA, ms->db_eqaa);
	radeon_set_context_reg(cmd_buffer->cs, R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);

	if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples)
		return;

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028BDC_PA_SC_LINE_CNTL, 2);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_line_cntl);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_config);

	radv_cayman_emit_msaa_sample_locs(cmd_buffer->cs, num_samples);

	/* GFX9: Flush DFSM when the AA mode changes. */
	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
	}
	if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.needs_sample_positions) {
		uint32_t offset;
		struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_FRAGMENT, AC_UD_PS_SAMPLE_POS_OFFSET);
		uint32_t base_reg = radv_shader_stage_to_user_data_0(MESA_SHADER_FRAGMENT, cmd_buffer->device->physical_device->rad_info.chip_class, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
		if (loc->sgpr_idx == -1)
			return;
		assert(loc->num_sgprs == 1);
		assert(!loc->indirect);
		switch (num_samples) {
		default:
			offset = 0;
			break;
		case 2:
			offset = 1;
			break;
		case 4:
			offset = 3;
			break;
		case 8:
			offset = 7;
			break;
		case 16:
			offset = 15;
			break;
		}

		radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, offset);
		cmd_buffer->sample_positions_needed = true;
	}
}

static void
radv_emit_graphics_raster_state(struct radv_cmd_buffer *cmd_buffer,
				struct radv_pipeline *pipeline)
{
	struct radv_raster_state *raster = &pipeline->graphics.raster;

	radeon_set_context_reg(cmd_buffer->cs, R_028810_PA_CL_CLIP_CNTL,
			       raster->pa_cl_clip_cntl);
	radeon_set_context_reg(cmd_buffer->cs, R_0286D4_SPI_INTERP_CONTROL_0,
			       raster->spi_interp_control);
	radeon_set_context_reg(cmd_buffer->cs, R_028BE4_PA_SU_VTX_CNTL,
			       raster->pa_su_vtx_cntl);
	radeon_set_context_reg(cmd_buffer->cs, R_028814_PA_SU_SC_MODE_CNTL,
			       raster->pa_su_sc_mode_cntl);
}

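/* Prefetch a VA range (typically shader code) via CP DMA so it is warm
 * in L2 before the first wave launches. Only emitted on CIK+, which has
 * the CP DMA prefetch support this relies on.
 */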
static inline void
radv_emit_prefetch(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
		   unsigned size)
{
	if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK)
		si_cp_dma_prefetch(cmd_buffer, va, size);
}

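/* Program the hardware VS stage for whichever API shader ends up
 * running as HW VS (vertex, tess eval, or the GS copy shader): shader
 * address/resource words, export counts and formats, and the
 * viewport-transform controls.
 */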
static void
radv_emit_hw_vs(struct radv_cmd_buffer *cmd_buffer,
		struct radv_pipeline *pipeline,
		struct radv_shader_variant *shader,
		struct ac_vs_output_info *outinfo)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
	unsigned export_count;

	ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
	radv_emit_prefetch(cmd_buffer, va, shader->code_size);

	export_count = MAX2(1, outinfo->param_exports);
	radeon_set_context_reg(cmd_buffer->cs, R_0286C4_SPI_VS_OUT_CONFIG,
			       S_0286C4_VS_EXPORT_COUNT(export_count - 1));

	radeon_set_context_reg(cmd_buffer->cs, R_02870C_SPI_SHADER_POS_FORMAT,
			       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
			       S_02870C_POS1_EXPORT_FORMAT(outinfo->pos_exports > 1 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS2_EXPORT_FORMAT(outinfo->pos_exports > 2 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS3_EXPORT_FORMAT(outinfo->pos_exports > 3 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE));

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B120_SPI_SHADER_PGM_LO_VS, 4);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);
	radeon_emit(cmd_buffer->cs, shader->rsrc1);
	radeon_emit(cmd_buffer->cs, shader->rsrc2);

	radeon_set_context_reg(cmd_buffer->cs, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));

	radeon_set_context_reg(cmd_buffer->cs, R_02881C_PA_CL_VS_OUT_CNTL,
			       pipeline->graphics.pa_cl_vs_out_cntl);

	if (cmd_buffer->device->physical_device->rad_info.chip_class <= VI)
		radeon_set_context_reg(cmd_buffer->cs, R_028AB4_VGT_REUSE_OFF,
				       S_028AB4_REUSE_OFF(outinfo->writes_viewport_index));
}

static void
radv_emit_hw_es(struct radv_cmd_buffer *cmd_buffer,
		struct radv_shader_variant *shader,
		struct ac_es_output_info *outinfo)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

	ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
	radv_emit_prefetch(cmd_buffer, va, shader->code_size);

	radeon_set_context_reg(cmd_buffer->cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
			       outinfo->esgs_itemsize / 4);
	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B320_SPI_SHADER_PGM_LO_ES, 4);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);
	radeon_emit(cmd_buffer->cs, shader->rsrc1);
	radeon_emit(cmd_buffer->cs, shader->rsrc2);
}

static void
radv_emit_hw_ls(struct radv_cmd_buffer *cmd_buffer,
		struct radv_shader_variant *shader)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
	uint32_t rsrc2 = shader->rsrc2;

	ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
	radv_emit_prefetch(cmd_buffer, va, shader->code_size);

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B520_SPI_SHADER_PGM_LO_LS, 2);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);

	rsrc2 |= S_00B52C_LDS_SIZE(cmd_buffer->state.pipeline->graphics.tess.lds_size);
	if (cmd_buffer->device->physical_device->rad_info.chip_class == CIK &&
	    cmd_buffer->device->physical_device->rad_info.family != CHIP_HAWAII)
		radeon_set_sh_reg(cmd_buffer->cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, rsrc2);

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
	radeon_emit(cmd_buffer->cs, shader->rsrc1);
	radeon_emit(cmd_buffer->cs, rsrc2);
}

static void
radv_emit_hw_hs(struct radv_cmd_buffer *cmd_buffer,
		struct radv_shader_variant *shader)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

	ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
	radv_emit_prefetch(cmd_buffer, va, shader->code_size);

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B410_SPI_SHADER_PGM_LO_LS, 2);
		radeon_emit(cmd_buffer->cs, va >> 8);
		radeon_emit(cmd_buffer->cs, va >> 40);

		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B428_SPI_SHADER_PGM_RSRC1_HS, 2);
		radeon_emit(cmd_buffer->cs, shader->rsrc1);
		radeon_emit(cmd_buffer->cs, shader->rsrc2 |
			    S_00B42C_LDS_SIZE(cmd_buffer->state.pipeline->graphics.tess.lds_size));
	} else {
		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B420_SPI_SHADER_PGM_LO_HS, 4);
		radeon_emit(cmd_buffer->cs, va >> 8);
		radeon_emit(cmd_buffer->cs, va >> 40);
		radeon_emit(cmd_buffer->cs, shader->rsrc1);
		radeon_emit(cmd_buffer->cs, shader->rsrc2);
	}
}

static void
radv_emit_vertex_shader(struct radv_cmd_buffer *cmd_buffer,
			struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *vs;

	radeon_set_context_reg(cmd_buffer->cs, R_028A84_VGT_PRIMITIVEID_EN, pipeline->graphics.vgt_primitiveid_en);

	/* Skip shaders merged into HS/GS */
	vs = pipeline->shaders[MESA_SHADER_VERTEX];
	if (!vs)
		return;

	if (vs->info.vs.as_ls)
		radv_emit_hw_ls(cmd_buffer, vs);
	else if (vs->info.vs.as_es)
		radv_emit_hw_es(cmd_buffer, vs, &vs->info.vs.es_info);
	else
		radv_emit_hw_vs(cmd_buffer, pipeline, vs, &vs->info.vs.outinfo);
}

static void
radv_emit_tess_shaders(struct radv_cmd_buffer *cmd_buffer,
		       struct radv_pipeline *pipeline)
{
	if (!radv_pipeline_has_tess(pipeline))
		return;

	struct radv_shader_variant *tes, *tcs;

	tcs = pipeline->shaders[MESA_SHADER_TESS_CTRL];
	tes = pipeline->shaders[MESA_SHADER_TESS_EVAL];

	if (tes) {
		if (tes->info.tes.as_es)
			radv_emit_hw_es(cmd_buffer, tes, &tes->info.tes.es_info);
		else
			radv_emit_hw_vs(cmd_buffer, pipeline, tes, &tes->info.tes.outinfo);
	}

	radv_emit_hw_hs(cmd_buffer, tcs);

	radeon_set_context_reg(cmd_buffer->cs, R_028B6C_VGT_TF_PARAM,
			       pipeline->graphics.tess.tf_param);

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK)
		radeon_set_context_reg_idx(cmd_buffer->cs, R_028B58_VGT_LS_HS_CONFIG, 2,
					   pipeline->graphics.tess.ls_hs_config);
	else
		radeon_set_context_reg(cmd_buffer->cs, R_028B58_VGT_LS_HS_CONFIG,
				       pipeline->graphics.tess.ls_hs_config);

	struct ac_userdata_info *loc;

	loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_TESS_CTRL, AC_UD_TCS_OFFCHIP_LAYOUT);
	if (loc->sgpr_idx != -1) {
		uint32_t base_reg = radv_shader_stage_to_user_data_0(MESA_SHADER_TESS_CTRL, cmd_buffer->device->physical_device->rad_info.chip_class, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
		assert(loc->num_sgprs == 4);
		assert(!loc->indirect);
		radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, 4);
		radeon_emit(cmd_buffer->cs, pipeline->graphics.tess.offchip_layout);
		radeon_emit(cmd_buffer->cs, pipeline->graphics.tess.tcs_out_offsets);
		radeon_emit(cmd_buffer->cs, pipeline->graphics.tess.tcs_out_layout |
			    pipeline->graphics.tess.num_tcs_input_cp << 26);
		radeon_emit(cmd_buffer->cs, pipeline->graphics.tess.tcs_in_layout);
	}

	loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_TESS_EVAL, AC_UD_TES_OFFCHIP_LAYOUT);
	if (loc->sgpr_idx != -1) {
		uint32_t base_reg = radv_shader_stage_to_user_data_0(MESA_SHADER_TESS_EVAL, cmd_buffer->device->physical_device->rad_info.chip_class, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
		assert(loc->num_sgprs == 1);
		assert(!loc->indirect);

		radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4,
				  pipeline->graphics.tess.offchip_layout);
	}

	loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_VERTEX, AC_UD_VS_LS_TCS_IN_LAYOUT);
	if (loc->sgpr_idx != -1) {
		uint32_t base_reg = radv_shader_stage_to_user_data_0(MESA_SHADER_VERTEX, cmd_buffer->device->physical_device->rad_info.chip_class, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
		assert(loc->num_sgprs == 1);
		assert(!loc->indirect);

		radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4,
				  pipeline->graphics.tess.tcs_in_layout);
	}
}

static void
radv_emit_geometry_shader(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_pipeline *pipeline)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	struct radv_shader_variant *gs;
	uint64_t va;

	radeon_set_context_reg(cmd_buffer->cs, R_028A40_VGT_GS_MODE, pipeline->graphics.vgt_gs_mode);

	gs = pipeline->shaders[MESA_SHADER_GEOMETRY];
	if (!gs)
		return;

	uint32_t gsvs_itemsize = gs->info.gs.max_gsvs_emit_size >> 2;

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028A60_VGT_GSVS_RING_OFFSET_1, 3);
	radeon_emit(cmd_buffer->cs, gsvs_itemsize);
	radeon_emit(cmd_buffer->cs, gsvs_itemsize);
	radeon_emit(cmd_buffer->cs, gsvs_itemsize);

	radeon_set_context_reg(cmd_buffer->cs, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gsvs_itemsize);

	radeon_set_context_reg(cmd_buffer->cs, R_028B38_VGT_GS_MAX_VERT_OUT, gs->info.gs.vertices_out);

	uint32_t gs_vert_itemsize = gs->info.gs.gsvs_vertex_size;
	radeon_set_context_reg_seq(cmd_buffer->cs, R_028B5C_VGT_GS_VERT_ITEMSIZE, 4);
	radeon_emit(cmd_buffer->cs, gs_vert_itemsize >> 2);
	radeon_emit(cmd_buffer->cs, 0);
	radeon_emit(cmd_buffer->cs, 0);
	radeon_emit(cmd_buffer->cs, 0);

	uint32_t gs_num_invocations = gs->info.gs.invocations;
	radeon_set_context_reg(cmd_buffer->cs, R_028B90_VGT_GS_INSTANCE_CNT,
			       S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
			       S_028B90_ENABLE(gs_num_invocations > 0));

	va = radv_buffer_get_va(gs->bo) + gs->bo_offset;
	ws->cs_add_buffer(cmd_buffer->cs, gs->bo, 8);
	radv_emit_prefetch(cmd_buffer, va, gs->code_size);

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B210_SPI_SHADER_PGM_LO_ES, 2);
		radeon_emit(cmd_buffer->cs, va >> 8);
		radeon_emit(cmd_buffer->cs, va >> 40);

		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
		radeon_emit(cmd_buffer->cs, gs->rsrc1);
		radeon_emit(cmd_buffer->cs, gs->rsrc2 |
			    S_00B22C_LDS_SIZE(pipeline->graphics.gs.lds_size));

		radeon_set_context_reg(cmd_buffer->cs, R_028A44_VGT_GS_ONCHIP_CNTL, pipeline->graphics.gs.vgt_gs_onchip_cntl);
		radeon_set_context_reg(cmd_buffer->cs, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP, pipeline->graphics.gs.vgt_gs_max_prims_per_subgroup);
		radeon_set_context_reg(cmd_buffer->cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE, pipeline->graphics.gs.vgt_esgs_ring_itemsize);
	} else {
		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B220_SPI_SHADER_PGM_LO_GS, 4);
		radeon_emit(cmd_buffer->cs, va >> 8);
		radeon_emit(cmd_buffer->cs, va >> 40);
		radeon_emit(cmd_buffer->cs, gs->rsrc1);
		radeon_emit(cmd_buffer->cs, gs->rsrc2);
	}

	radv_emit_hw_vs(cmd_buffer, pipeline, pipeline->gs_copy_shader, &pipeline->gs_copy_shader->info.vs.outinfo);

	struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
							     AC_UD_GS_VS_RING_STRIDE_ENTRIES);
	if (loc->sgpr_idx != -1) {
		uint32_t stride = gs->info.gs.max_gsvs_emit_size;
		uint32_t num_entries = 64;
		bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI;

		if (is_vi)
			num_entries *= stride;

		stride = S_008F04_STRIDE(stride);
		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B230_SPI_SHADER_USER_DATA_GS_0 + loc->sgpr_idx * 4, 2);
		radeon_emit(cmd_buffer->cs, stride);
		radeon_emit(cmd_buffer->cs, num_entries);
	}
}

static void
radv_emit_fragment_shader(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_pipeline *pipeline)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	struct radv_shader_variant *ps;
	uint64_t va;
	unsigned spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
	struct radv_blend_state *blend = &pipeline->graphics.blend;
	assert(pipeline->shaders[MESA_SHADER_FRAGMENT]);

	ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
	va = radv_buffer_get_va(ps->bo) + ps->bo_offset;
	ws->cs_add_buffer(cmd_buffer->cs, ps->bo, 8);
	radv_emit_prefetch(cmd_buffer, va, ps->code_size);

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B020_SPI_SHADER_PGM_LO_PS, 4);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);
	radeon_emit(cmd_buffer->cs, ps->rsrc1);
	radeon_emit(cmd_buffer->cs, ps->rsrc2);

	radeon_set_context_reg(cmd_buffer->cs, R_02880C_DB_SHADER_CONTROL,
			       pipeline->graphics.db_shader_control);

	radeon_set_context_reg(cmd_buffer->cs, R_0286CC_SPI_PS_INPUT_ENA,
			       ps->config.spi_ps_input_ena);

	radeon_set_context_reg(cmd_buffer->cs, R_0286D0_SPI_PS_INPUT_ADDR,
			       ps->config.spi_ps_input_addr);

	if (ps->info.info.ps.force_persample)
		spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);

	radeon_set_context_reg(cmd_buffer->cs, R_0286D8_SPI_PS_IN_CONTROL,
			       S_0286D8_NUM_INTERP(ps->info.fs.num_interp));

	radeon_set_context_reg(cmd_buffer->cs, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);

	radeon_set_context_reg(cmd_buffer->cs, R_028710_SPI_SHADER_Z_FORMAT,
			       pipeline->graphics.shader_z_format);

	radeon_set_context_reg(cmd_buffer->cs, R_028714_SPI_SHADER_COL_FORMAT, blend->spi_shader_col_format);

	radeon_set_context_reg(cmd_buffer->cs, R_028238_CB_TARGET_MASK, blend->cb_target_mask);
	radeon_set_context_reg(cmd_buffer->cs, R_02823C_CB_SHADER_MASK, blend->cb_shader_mask);

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		/* optimise this? */
		radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
	}

	if (pipeline->graphics.ps_input_cntl_num) {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028644_SPI_PS_INPUT_CNTL_0, pipeline->graphics.ps_input_cntl_num);
		for (unsigned i = 0; i < pipeline->graphics.ps_input_cntl_num; i++) {
			radeon_emit(cmd_buffer->cs, pipeline->graphics.ps_input_cntl[i]);
		}
	}
}

static void
radv_emit_vgt_vertex_reuse(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_pipeline *pipeline)
{
	struct radeon_winsys_cs *cs = cmd_buffer->cs;

	if (cmd_buffer->device->physical_device->rad_info.family < CHIP_POLARIS10)
		return;

	radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
			       pipeline->graphics.vtx_reuse_depth);
}

static void
radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;

	if (!pipeline || cmd_buffer->state.emitted_pipeline == pipeline)
		return;

	radv_emit_graphics_depth_stencil_state(cmd_buffer, pipeline);
	radv_emit_graphics_blend_state(cmd_buffer, pipeline);
	radv_emit_graphics_raster_state(cmd_buffer, pipeline);
	radv_update_multisample_state(cmd_buffer, pipeline);
	radv_emit_vertex_shader(cmd_buffer, pipeline);
	radv_emit_tess_shaders(cmd_buffer, pipeline);
	radv_emit_geometry_shader(cmd_buffer, pipeline);
	radv_emit_fragment_shader(cmd_buffer, pipeline);
	radv_emit_vgt_vertex_reuse(cmd_buffer, pipeline);

	cmd_buffer->scratch_size_needed =
		MAX2(cmd_buffer->scratch_size_needed,
		     pipeline->max_waves * pipeline->scratch_bytes_per_wave);

	radeon_set_context_reg(cmd_buffer->cs, R_0286E8_SPI_TMPRING_SIZE,
			       S_0286E8_WAVES(pipeline->max_waves) |
			       S_0286E8_WAVESIZE(pipeline->scratch_bytes_per_wave >> 10));

	if (!cmd_buffer->state.emitted_pipeline ||
	    cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband !=
	    pipeline->graphics.can_use_guardband)
		cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;

	radeon_set_context_reg(cmd_buffer->cs, R_028B54_VGT_SHADER_STAGES_EN, pipeline->graphics.vgt_shader_stages_en);

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
		radeon_set_uconfig_reg_idx(cmd_buffer->cs, R_030908_VGT_PRIMITIVE_TYPE, 1, pipeline->graphics.prim);
	} else {
		radeon_set_config_reg(cmd_buffer->cs, R_008958_VGT_PRIMITIVE_TYPE, pipeline->graphics.prim);
	}
	radeon_set_context_reg(cmd_buffer->cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, pipeline->graphics.gs_out);

	radv_save_pipeline(cmd_buffer, pipeline, RING_GFX);

	cmd_buffer->state.emitted_pipeline = pipeline;

	cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_PIPELINE;
}

static void
radv_emit_viewport(struct radv_cmd_buffer *cmd_buffer)
{
	si_write_viewport(cmd_buffer->cs, 0, cmd_buffer->state.dynamic.viewport.count,
			  cmd_buffer->state.dynamic.viewport.viewports);
}

static void
radv_emit_scissor(struct radv_cmd_buffer *cmd_buffer)
{
	uint32_t count = cmd_buffer->state.dynamic.scissor.count;

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
		si_emit_cache_flush(cmd_buffer);
	}
	si_write_scissors(cmd_buffer->cs, 0, count,
			  cmd_buffer->state.dynamic.scissor.scissors,
			  cmd_buffer->state.dynamic.viewport.viewports,
			  cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband);
	radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0,
			       cmd_buffer->state.pipeline->graphics.ms.pa_sc_mode_cntl_0 | S_028A48_VPORT_SCISSOR_ENABLE(count ? 1 : 0));
}

static void
radv_emit_line_width(struct radv_cmd_buffer *cmd_buffer)
{
	unsigned width = cmd_buffer->state.dynamic.line_width * 8;

	radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL,
			       S_028A08_WIDTH(CLAMP(width, 0, 0xFFF)));
}

static void
radv_emit_blend_constants(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028414_CB_BLEND_RED, 4);
	radeon_emit_array(cmd_buffer->cs, (uint32_t *)d->blend_constants, 4);
}

static void
radv_emit_stencil(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;

	radeon_set_context_reg_seq(cmd_buffer->cs,
				   R_028430_DB_STENCILREFMASK, 2);
	radeon_emit(cmd_buffer->cs,
		    S_028430_STENCILTESTVAL(d->stencil_reference.front) |
		    S_028430_STENCILMASK(d->stencil_compare_mask.front) |
		    S_028430_STENCILWRITEMASK(d->stencil_write_mask.front) |
		    S_028430_STENCILOPVAL(1));
	radeon_emit(cmd_buffer->cs,
		    S_028434_STENCILTESTVAL_BF(d->stencil_reference.back) |
		    S_028434_STENCILMASK_BF(d->stencil_compare_mask.back) |
		    S_028434_STENCILWRITEMASK_BF(d->stencil_write_mask.back) |
		    S_028434_STENCILOPVAL_BF(1));
}

static void
radv_emit_depth_bounds(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;

	radeon_set_context_reg(cmd_buffer->cs, R_028020_DB_DEPTH_BOUNDS_MIN,
			       fui(d->depth_bounds.min));
	radeon_set_context_reg(cmd_buffer->cs, R_028024_DB_DEPTH_BOUNDS_MAX,
			       fui(d->depth_bounds.max));
}

static void
radv_emit_depth_bias(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_raster_state *raster = &cmd_buffer->state.pipeline->graphics.raster;
	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
	unsigned slope = fui(d->depth_bias.slope * 16.0f);
	unsigned bias = fui(d->depth_bias.bias * cmd_buffer->state.offset_scale);

	if (G_028814_POLY_OFFSET_FRONT_ENABLE(raster->pa_su_sc_mode_cntl)) {
		radeon_set_context_reg_seq(cmd_buffer->cs,
					   R_028B7C_PA_SU_POLY_OFFSET_CLAMP, 5);
		radeon_emit(cmd_buffer->cs, fui(d->depth_bias.clamp)); /* CLAMP */
		radeon_emit(cmd_buffer->cs, slope); /* FRONT SCALE */
		radeon_emit(cmd_buffer->cs, bias); /* FRONT OFFSET */
		radeon_emit(cmd_buffer->cs, slope); /* BACK SCALE */
		radeon_emit(cmd_buffer->cs, bias); /* BACK OFFSET */
	}
}

static void
radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
			 int index,
			 struct radv_color_buffer_info *cb)
{
	bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI;

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
		radeon_emit(cmd_buffer->cs, cb->cb_color_base);
		radeon_emit(cmd_buffer->cs, cb->cb_color_base >> 32);
		radeon_emit(cmd_buffer->cs, cb->cb_color_attrib2);
		radeon_emit(cmd_buffer->cs, cb->cb_color_view);
		radeon_emit(cmd_buffer->cs, cb->cb_color_info);
		radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
		radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
		radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
		radeon_emit(cmd_buffer->cs, cb->cb_color_cmask >> 32);
		radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
		radeon_emit(cmd_buffer->cs, cb->cb_color_fmask >> 32);

		radeon_set_context_reg_seq(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, 2);
		radeon_emit(cmd_buffer->cs, cb->cb_dcc_base);
		radeon_emit(cmd_buffer->cs, cb->cb_dcc_base >> 32);

		radeon_set_context_reg(cmd_buffer->cs, R_0287A0_CB_MRT0_EPITCH + index * 4,
				       cb->gfx9_epitch);
	} else {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
		radeon_emit(cmd_buffer->cs, cb->cb_color_base);
		radeon_emit(cmd_buffer->cs, cb->cb_color_pitch);
		radeon_emit(cmd_buffer->cs, cb->cb_color_slice);
		radeon_emit(cmd_buffer->cs, cb->cb_color_view);
		radeon_emit(cmd_buffer->cs, cb->cb_color_info);
		radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
		radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
		radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
		radeon_emit(cmd_buffer->cs, cb->cb_color_cmask_slice);
		radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
		radeon_emit(cmd_buffer->cs, cb->cb_color_fmask_slice);

		if (is_vi) { /* DCC BASE */
			radeon_set_context_reg(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, cb->cb_dcc_base);
		}
	}
}

static void
radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer,
		      struct radv_ds_buffer_info *ds,
		      struct radv_image *image,
		      VkImageLayout layout)
{
	uint32_t db_z_info = ds->db_z_info;
	uint32_t db_stencil_info = ds->db_stencil_info;

	if (!radv_layout_has_htile(image, layout,
				   radv_image_queue_family_mask(image,
								cmd_buffer->queue_family_index,
								cmd_buffer->queue_family_index))) {
		db_z_info &= C_028040_TILE_SURFACE_ENABLE;
		db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
	}

	radeon_set_context_reg(cmd_buffer->cs, R_028008_DB_DEPTH_VIEW, ds->db_depth_view);
	radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface);

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, 3);
		radeon_emit(cmd_buffer->cs, ds->db_htile_data_base);
		radeon_emit(cmd_buffer->cs, ds->db_htile_data_base >> 32);
		radeon_emit(cmd_buffer->cs, ds->db_depth_size);

		radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 10);
		radeon_emit(cmd_buffer->cs, db_z_info);				/* DB_Z_INFO */
		radeon_emit(cmd_buffer->cs, db_stencil_info);			/* DB_STENCIL_INFO */
		radeon_emit(cmd_buffer->cs, ds->db_z_read_base);		/* DB_Z_READ_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_z_read_base >> 32);		/* DB_Z_READ_BASE_HI */
		radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base);		/* DB_STENCIL_READ_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base >> 32);	/* DB_STENCIL_READ_BASE_HI */
		radeon_emit(cmd_buffer->cs, ds->db_z_write_base);		/* DB_Z_WRITE_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_z_write_base >> 32);	/* DB_Z_WRITE_BASE_HI */
		radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base);	/* DB_STENCIL_WRITE_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base >> 32);	/* DB_STENCIL_WRITE_BASE_HI */

		radeon_set_context_reg_seq(cmd_buffer->cs, R_028068_DB_Z_INFO2, 2);
		radeon_emit(cmd_buffer->cs, ds->db_z_info2);
		radeon_emit(cmd_buffer->cs, ds->db_stencil_info2);
	} else {
		radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base);

		radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 9);
		radeon_emit(cmd_buffer->cs, ds->db_depth_info);			/* R_02803C_DB_DEPTH_INFO */
		radeon_emit(cmd_buffer->cs, db_z_info);				/* R_028040_DB_Z_INFO */
		radeon_emit(cmd_buffer->cs, db_stencil_info);			/* R_028044_DB_STENCIL_INFO */
		radeon_emit(cmd_buffer->cs, ds->db_z_read_base);		/* R_028048_DB_Z_READ_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base);		/* R_02804C_DB_STENCIL_READ_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_z_write_base);		/* R_028050_DB_Z_WRITE_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base);	/* R_028054_DB_STENCIL_WRITE_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_depth_size);			/* R_028058_DB_DEPTH_SIZE */
		radeon_emit(cmd_buffer->cs, ds->db_depth_slice);		/* R_02805C_DB_DEPTH_SLICE */
	}

	radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
			       ds->pa_su_poly_offset_db_fmt_cntl);
}

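/* Update the depth/stencil clear values both in the image's in-memory
 * clear metadata (via WRITE_DATA) and in the DB_STENCIL_CLEAR /
 * DB_DEPTH_CLEAR context registers, so fast-cleared HTILE data is
 * interpreted with the right values. No-op for images without HTILE.
 */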
void
radv_set_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_image *image,
			  VkClearDepthStencilValue ds_clear_value,
			  VkImageAspectFlags aspects)
{
	uint64_t va = radv_buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;
	unsigned reg_offset = 0, reg_count = 0;

	if (!image->surface.htile_size || !aspects)
		return;

	if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
		++reg_count;
	} else {
		++reg_offset;
		va += 4;
	}
	if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
		++reg_count;

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
	radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_PFP));
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
		radeon_emit(cmd_buffer->cs, ds_clear_value.stencil);
	if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
		radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth));

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR + 4 * reg_offset, reg_count);
	if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
		radeon_emit(cmd_buffer->cs, ds_clear_value.stencil); /* R_028028_DB_STENCIL_CLEAR */
	if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
		radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth)); /* R_02802C_DB_DEPTH_CLEAR */
}

static void
radv_load_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_image *image)
{
	uint64_t va = radv_buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;

	if (!image->surface.htile_size)
		return;

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
	radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
		    COPY_DATA_DST_SEL(COPY_DATA_REG) |
		    COPY_DATA_COUNT_SEL);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR >> 2);
	radeon_emit(cmd_buffer->cs, 0);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
	radeon_emit(cmd_buffer->cs, 0);
}

/*
 * With DCC some colors don't require CMASK elimination before being
 * used as a texture. This sets a predicate value to determine if the
 * cmask eliminate is required.
 */
void
radv_set_dcc_need_cmask_elim_pred(struct radv_cmd_buffer *cmd_buffer,
				  struct radv_image *image,
				  bool value)
{
	uint64_t pred_val = value;
	uint64_t va = radv_buffer_get_va(image->bo);
	va += image->offset + image->dcc_pred_offset;

	if (!image->surface.dcc_size)
		return;

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
	radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_PFP));
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, pred_val);
	radeon_emit(cmd_buffer->cs, pred_val >> 32);
}

void
radv_set_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_image *image,
			  int idx,
			  uint32_t color_values[2])
{
	uint64_t va = radv_buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;

	if (!image->cmask.size && !image->surface.dcc_size)
		return;

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
	radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_PFP));
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, color_values[0]);
	radeon_emit(cmd_buffer->cs, color_values[1]);

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c, 2);
	radeon_emit(cmd_buffer->cs, color_values[0]);
	radeon_emit(cmd_buffer->cs, color_values[1]);
}

static void
radv_load_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_image *image,
			   int idx)
{
	uint64_t va = radv_buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;

	if (!image->cmask.size && !image->surface.dcc_size)
		return;

	uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c;
	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
	radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
		    COPY_DATA_DST_SEL(COPY_DATA_REG) |
		    COPY_DATA_COUNT_SEL);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, reg >> 2);
	radeon_emit(cmd_buffer->cs, 0);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
	radeon_emit(cmd_buffer->cs, 0);
}

void
radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
{
	int i;
	struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
	const struct radv_subpass *subpass = cmd_buffer->state.subpass;

	/* this may happen for inherited secondary recording */
	if (!framebuffer)
		return;

	for (i = 0; i < 8; ++i) {
		if (i >= subpass->color_count || subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
			radeon_set_context_reg(cmd_buffer->cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
					       S_028C70_FORMAT(V_028C70_COLOR_INVALID));
			continue;
		}

		int idx = subpass->color_attachments[i].attachment;
		struct radv_attachment_info *att = &framebuffer->attachments[idx];

		cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, att->attachment->bo, 8);

		assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
		radv_emit_fb_color_state(cmd_buffer, i, &att->cb);

		radv_load_color_clear_regs(cmd_buffer, att->attachment->image, i);
	}

	if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
		int idx = subpass->depth_stencil_attachment.attachment;
		VkImageLayout layout = subpass->depth_stencil_attachment.layout;
		struct radv_attachment_info *att = &framebuffer->attachments[idx];
		struct radv_image *image = att->attachment->image;
		cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, att->attachment->bo, 8);
		MAYBE_UNUSED uint32_t queue_mask = radv_image_queue_family_mask(image,
										cmd_buffer->queue_family_index,
										cmd_buffer->queue_family_index);
		/* We currently don't support writing decompressed HTILE */
		assert(radv_layout_has_htile(image, layout, queue_mask) ==
		       radv_layout_is_htile_compressed(image, layout, queue_mask));

		radv_emit_fb_ds_state(cmd_buffer, &att->ds, image, layout);

		if (att->ds.offset_scale != cmd_buffer->state.offset_scale) {
			cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
			cmd_buffer->state.offset_scale = att->ds.offset_scale;
		}
		radv_load_depth_clear_regs(cmd_buffer, image);
	} else {
		if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
			radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 2);
		else
			radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2);

		radeon_emit(cmd_buffer->cs, S_028040_FORMAT(V_028040_Z_INVALID)); /* DB_Z_INFO */
		radeon_emit(cmd_buffer->cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* DB_STENCIL_INFO */
	}
	radeon_set_context_reg(cmd_buffer->cs, R_028208_PA_SC_WINDOW_SCISSOR_BR,
			       S_028208_BR_X(framebuffer->width) |
			       S_028208_BR_Y(framebuffer->height));

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
	}

	cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_FRAMEBUFFER;
}

static void
radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer)
{
	struct radeon_winsys_cs *cs = cmd_buffer->cs;

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE,
					   2, cmd_buffer->state.index_type);
	} else {
		radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
		radeon_emit(cs, cmd_buffer->state.index_type);
	}

	radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
	radeon_emit(cs, cmd_buffer->state.index_va);
	radeon_emit(cs, cmd_buffer->state.index_va >> 32);

	radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
	radeon_emit(cs, cmd_buffer->state.max_index_count);

	cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_INDEX_BUFFER;
}

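/* Program DB_COUNT_CONTROL for the current occlusion query state.
 * Occlusion queries need PERFECT_ZPASS_COUNTS for exact results, so it
 * is only enabled while queries are active.
 */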
void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
{
	uint32_t db_count_control;

	if (!cmd_buffer->state.active_occlusion_queries) {
		if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
			db_count_control = 0;
		} else {
			db_count_control = S_028004_ZPASS_INCREMENT_DISABLE(1);
		}
	} else {
		if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
			db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
				S_028004_SAMPLE_RATE(0) | /* TODO: set this to the number of samples of the current framebuffer */
				S_028004_ZPASS_ENABLE(1) |
				S_028004_SLICE_EVEN_ENABLE(1) |
				S_028004_SLICE_ODD_ENABLE(1);
		} else {
			db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
				S_028004_SAMPLE_RATE(0); /* TODO: set this to the number of samples of the current framebuffer */
		}
	}

	radeon_set_context_reg(cmd_buffer->cs, R_028004_DB_COUNT_CONTROL, db_count_control);
}

1498 static void
1499 radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer)
1500 {
1501 if (G_028810_DX_RASTERIZATION_KILL(cmd_buffer->state.pipeline->graphics.raster.pa_cl_clip_cntl))
1502 return;
1503
1504 if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
1505 radv_emit_viewport(cmd_buffer);
1506
1507 if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR | RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
1508 radv_emit_scissor(cmd_buffer);
1509
1510 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)
1511 radv_emit_line_width(cmd_buffer);
1512
1513 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS)
1514 radv_emit_blend_constants(cmd_buffer);
1515
1516 if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE |
1517 RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
1518 RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK))
1519 radv_emit_stencil(cmd_buffer);
1520
1521 if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_PIPELINE |
1522 RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS))
1523 radv_emit_depth_bounds(cmd_buffer);
1524
1525 if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_PIPELINE |
1526 RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS))
1527 radv_emit_depth_biais(cmd_buffer);
1528
1529 cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_DYNAMIC_ALL;
1530 }
1531
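/* Write the 64-bit GPU address of a descriptor set into the pair of user
 * SGPRs that the compiler reserved for it in the given shader stage.
 * Sets that are reached through the indirect table instead of a direct
 * SGPR pair are skipped here.
 */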
1532 static void
1533 emit_stage_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer,
1534 struct radv_pipeline *pipeline,
1535 int idx,
1536 uint64_t va,
1537 gl_shader_stage stage)
1538 {
1539 struct ac_userdata_info *desc_set_loc = &pipeline->shaders[stage]->info.user_sgprs_locs.descriptor_sets[idx];
1540 uint32_t base_reg = radv_shader_stage_to_user_data_0(stage, cmd_buffer->device->physical_device->rad_info.chip_class, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
1541
1542 if (desc_set_loc->sgpr_idx == -1 || desc_set_loc->indirect)
1543 return;
1544
1545 /* A directly addressed set always occupies two user SGPRs. */
1546 assert(desc_set_loc->num_sgprs == 2);
1547 radeon_set_sh_reg_seq(cmd_buffer->cs,
1548 base_reg + desc_set_loc->sgpr_idx * 4, 2);
1549 radeon_emit(cmd_buffer->cs, va);
1550 radeon_emit(cmd_buffer->cs, va >> 32);
1551 }
1552
1553 static void
1554 radv_emit_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer,
1555 VkShaderStageFlags stages,
1556 struct radv_descriptor_set *set,
1557 unsigned idx)
1558 {
1559 if (cmd_buffer->state.pipeline) {
1560 radv_foreach_stage(stage, stages) {
1561 if (cmd_buffer->state.pipeline->shaders[stage])
1562 emit_stage_descriptor_set_userdata(cmd_buffer, cmd_buffer->state.pipeline,
1563 idx, set->va,
1564 stage);
1565 }
1566 }
1567
1568 if (cmd_buffer->state.compute_pipeline && (stages & VK_SHADER_STAGE_COMPUTE_BIT))
1569 emit_stage_descriptor_set_userdata(cmd_buffer, cmd_buffer->state.compute_pipeline,
1570 idx, set->va,
1571 MESA_SHADER_COMPUTE);
1572 }
1573
1574 static void
1575 radv_flush_push_descriptors(struct radv_cmd_buffer *cmd_buffer)
1576 {
1577 struct radv_descriptor_set *set = &cmd_buffer->push_descriptors.set;
1578 unsigned bo_offset;
1579
1580 if (!radv_cmd_buffer_upload_data(cmd_buffer, set->size, 32,
1581 set->mapped_ptr,
1582 &bo_offset))
1583 return;
1584
1585 set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
1586 set->va += bo_offset;
1587 }
1588
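/* Upload a table with the GPU address of every bound descriptor set and
 * point each active shader stage at it. The table holds MAX_SETS
 * consecutive 64-bit entries:
 *
 *   dword 2*i + 0: VA bits [31:0] of set i (0 if the slot is unbound)
 *   dword 2*i + 1: VA bits [63:32] of set i
 */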
1589 static void
1590 radv_flush_indirect_descriptor_sets(struct radv_cmd_buffer *cmd_buffer)
1591 {
1592 uint32_t size = MAX_SETS * 2 * 4;
1593 uint32_t offset;
1594 void *ptr;
1595
1596 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size,
1597 256, &offset, &ptr))
1598 return;
1599
1600 for (unsigned i = 0; i < MAX_SETS; i++) {
1601 uint32_t *uptr = ((uint32_t *)ptr) + i * 2;
1602 uint64_t set_va = 0;
1603 struct radv_descriptor_set *set = cmd_buffer->state.descriptors[i];
1604 if (set)
1605 set_va = set->va;
1606 uptr[0] = set_va & 0xffffffff;
1607 uptr[1] = set_va >> 32;
1608 }
1609
1610 uint64_t va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
1611 va += offset;
1612
1613 if (cmd_buffer->state.pipeline) {
1614 if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX])
1615 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
1616 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1617
1618 if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_FRAGMENT])
1619 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_FRAGMENT,
1620 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1621
1622 if (radv_pipeline_has_gs(cmd_buffer->state.pipeline))
1623 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
1624 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1625
1626 if (radv_pipeline_has_tess(cmd_buffer->state.pipeline)) {
1627 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_TESS_CTRL,
1628 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1629 
1630 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_TESS_EVAL,
1631 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1632 }
1633 }
1634
1635 if (cmd_buffer->state.compute_pipeline)
1636 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.compute_pipeline, MESA_SHADER_COMPUTE,
1637 AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
1638 }
1639
1640 static void
1641 radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer,
1642 VkShaderStageFlags stages)
1643 {
1644 unsigned i;
1645
1646 if (!cmd_buffer->state.descriptors_dirty)
1647 return;
1648
1649 if (cmd_buffer->state.push_descriptors_dirty)
1650 radv_flush_push_descriptors(cmd_buffer);
1651
1652 if ((cmd_buffer->state.pipeline && cmd_buffer->state.pipeline->need_indirect_descriptor_sets) ||
1653 (cmd_buffer->state.compute_pipeline && cmd_buffer->state.compute_pipeline->need_indirect_descriptor_sets)) {
1654 radv_flush_indirect_descriptor_sets(cmd_buffer);
1655 }
1656
1657 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
1658 cmd_buffer->cs,
1659 MAX_SETS * MESA_SHADER_STAGES * 4);
1660
1661 for_each_bit(i, cmd_buffer->state.descriptors_dirty) {
1662 struct radv_descriptor_set *set = cmd_buffer->state.descriptors[i];
1663 if (!set)
1664 continue;
1665
1666 radv_emit_descriptor_set_userdata(cmd_buffer, stages, set, i);
1667 }
1668 cmd_buffer->state.descriptors_dirty = 0;
1669 cmd_buffer->state.push_descriptors_dirty = false;
1670
1671 radv_save_descriptors(cmd_buffer);
1672
1673 assert(cmd_buffer->cs->cdw <= cdw_max);
1674 }
1675
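/* Upload the push constants, followed by the dynamic buffer descriptors,
 * into upload space, then hand the resulting GPU address to every stage
 * whose push constants are still dirty.
 */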
1676 static void
1677 radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
1678 struct radv_pipeline *pipeline,
1679 VkShaderStageFlags stages)
1680 {
1681 struct radv_pipeline_layout *layout = pipeline->layout;
1682 unsigned offset;
1683 void *ptr;
1684 uint64_t va;
1685
1686 stages &= cmd_buffer->push_constant_stages;
1687 if (!stages || !layout || (!layout->push_constant_size && !layout->dynamic_offset_count))
1688 return;
1689
1690 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
1691 16 * layout->dynamic_offset_count,
1692 256, &offset, &ptr))
1693 return;
1694
1695 memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
1696 memcpy((char*)ptr + layout->push_constant_size, cmd_buffer->dynamic_buffers,
1697 16 * layout->dynamic_offset_count);
1698
1699 va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
1700 va += offset;
1701
1702 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
1703 cmd_buffer->cs, MESA_SHADER_STAGES * 4);
1704
1705 radv_foreach_stage(stage, stages) {
1706 if (pipeline->shaders[stage]) {
1707 radv_emit_userdata_address(cmd_buffer, pipeline, stage,
1708 AC_UD_PUSH_CONSTANTS, va);
1709 }
1710 }
1711
1712 cmd_buffer->push_constant_stages &= ~stages;
1713 assert(cmd_buffer->cs->cdw <= cdw_max);
1714 }
1715
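/* Primitive restart state. The enable bit moved to uconfig space on GFX9,
 * and the restart index depends on the bound index type (0xffff for
 * 16-bit indices, 0xffffffff for 32-bit). Both registers are only
 * re-emitted when their values actually change.
 */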
1716 static void radv_emit_primitive_reset_state(struct radv_cmd_buffer *cmd_buffer,
1717 bool indexed_draw)
1718 {
1719 int32_t primitive_reset_en = indexed_draw && cmd_buffer->state.pipeline->graphics.prim_restart_enable;
1720
1721 if (primitive_reset_en != cmd_buffer->state.last_primitive_reset_en) {
1722 cmd_buffer->state.last_primitive_reset_en = primitive_reset_en;
1723 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
1724 radeon_set_uconfig_reg(cmd_buffer->cs, R_03092C_VGT_MULTI_PRIM_IB_RESET_EN,
1725 primitive_reset_en);
1726 } else {
1727 radeon_set_context_reg(cmd_buffer->cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
1728 primitive_reset_en);
1729 }
1730 }
1731
1732 if (primitive_reset_en) {
1733 uint32_t primitive_reset_index = cmd_buffer->state.index_type ? 0xffffffffu : 0xffffu;
1734
1735 if (primitive_reset_index != cmd_buffer->state.last_primitive_reset_index) {
1736 cmd_buffer->state.last_primitive_reset_index = primitive_reset_index;
1737 radeon_set_context_reg(cmd_buffer->cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
1738 primitive_reset_index);
1739 }
1740 }
1741 }
1742
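/* (Re)build the vertex buffer descriptors in upload space whenever the
 * pipeline or a vertex binding changed. Each binding gets a 4-dword
 * buffer descriptor:
 *
 *   desc[0]: base VA bits [31:0]
 *   desc[1]: base VA bits [47:32] and the binding stride
 *   desc[2]: record count (on CIK and older, when a stride is set), or
 *            the size in bytes
 *   desc[3]: the data/num format word precomputed by the pipeline
 */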
1743 static bool
1744 radv_cmd_buffer_update_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer)
1745 {
1746 struct radv_device *device = cmd_buffer->device;
1747
1748 if ((cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline || cmd_buffer->state.vb_dirty) &&
1749 cmd_buffer->state.pipeline->vertex_elements.count &&
1750 radv_get_vertex_shader(cmd_buffer->state.pipeline)->info.info.vs.has_vertex_buffers) {
1751 struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements;
1752 unsigned vb_offset;
1753 void *vb_ptr;
1754 uint32_t i = 0;
1755 uint32_t count = velems->count;
1756 uint64_t va;
1757
1758 /* allocate some descriptor state for vertex buffers */
1759 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, count * 16, 256,
1760 &vb_offset, &vb_ptr))
1761 return false;
1762
1763 for (i = 0; i < count; i++) {
1764 uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
1765 uint32_t offset;
1766 int vb = velems->binding[i];
1767 struct radv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
1768 uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb];
1769
1770 device->ws->cs_add_buffer(cmd_buffer->cs, buffer->bo, 8);
1771 va = radv_buffer_get_va(buffer->bo);
1772
1773 offset = cmd_buffer->state.vertex_bindings[vb].offset + velems->offset[i];
1774 va += offset + buffer->offset;
1775 desc[0] = va;
1776 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
1777 if (cmd_buffer->device->physical_device->rad_info.chip_class <= CIK && stride)
1778 desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1;
1779 else
1780 desc[2] = buffer->size - offset;
1781 desc[3] = velems->rsrc_word3[i];
1782 }
1783
1784 va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
1785 va += vb_offset;
1786
1787 radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
1788 AC_UD_VS_VERTEX_BUFFERS, va);
1789 }
1790 cmd_buffer->state.vb_dirty = false;
1791
1792 return true;
1793 }
1794
1795 static void
1796 radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, bool indexed_draw,
1797 bool instanced_draw, bool indirect_draw,
1798 uint32_t draw_vertex_count)
1799 {
1800 struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
1801 struct radv_cmd_state *state = &cmd_buffer->state;
1802 struct radeon_winsys_cs *cs = cmd_buffer->cs;
1803 uint32_t ia_multi_vgt_param;
1804
1805 /* Draw state. */
1806 ia_multi_vgt_param =
1807 si_get_ia_multi_vgt_param(cmd_buffer, instanced_draw,
1808 indirect_draw, draw_vertex_count);
1809
1810 if (state->last_ia_multi_vgt_param != ia_multi_vgt_param) {
1811 if (info->chip_class >= GFX9) {
1812 radeon_set_uconfig_reg_idx(cs,
1813 R_030960_IA_MULTI_VGT_PARAM,
1814 4, ia_multi_vgt_param);
1815 } else if (info->chip_class >= CIK) {
1816 radeon_set_context_reg_idx(cs,
1817 R_028AA8_IA_MULTI_VGT_PARAM,
1818 1, ia_multi_vgt_param);
1819 } else {
1820 radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM,
1821 ia_multi_vgt_param);
1822 }
1823 state->last_ia_multi_vgt_param = ia_multi_vgt_param;
1824 }
1825 }
1826
1827 static void
1828 radv_cmd_buffer_flush_state(struct radv_cmd_buffer *cmd_buffer,
1829 bool indexed_draw, bool instanced_draw,
1830 bool indirect_draw,
1831 uint32_t draw_vertex_count)
1832 {
1833 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
1834 cmd_buffer->cs, 4096);
1835
1836 if (!radv_cmd_buffer_update_vertex_descriptors(cmd_buffer))
1837 return;
1838
1839 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE)
1840 radv_emit_graphics_pipeline(cmd_buffer);
1841
1842 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER)
1843 radv_emit_framebuffer_state(cmd_buffer);
1844
1845 if (indexed_draw) {
1846 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_INDEX_BUFFER)
1847 radv_emit_index_buffer(cmd_buffer);
1848 } else {
1849 /* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
1850 * so the state must be re-emitted before the next indexed
1851 * draw.
1852 */
1853 if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK)
1854 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
1855 }
1856
1857 radv_emit_draw_registers(cmd_buffer, indexed_draw, instanced_draw,
1858 indirect_draw, draw_vertex_count);
1859
1860 radv_cmd_buffer_flush_dynamic_state(cmd_buffer);
1861
1862 radv_emit_primitive_reset_state(cmd_buffer, indexed_draw);
1863
1864 radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
1865 radv_flush_constants(cmd_buffer, cmd_buffer->state.pipeline,
1866 VK_SHADER_STAGE_ALL_GRAPHICS);
1867
1868 assert(cmd_buffer->cs->cdw <= cdw_max);
1869
1870 si_emit_cache_flush(cmd_buffer);
1871 }
1872
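/* Translate a source pipeline stage mask into the partial flushes needed
 * to drain the work that may still be in flight. A PS partial flush also
 * waits for the earlier geometry stages, so the cheaper VS flush is only
 * used when nothing past the vertex-side stages can be running.
 */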
1873 static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer,
1874 VkPipelineStageFlags src_stage_mask)
1875 {
1876 if (src_stage_mask & (VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
1877 VK_PIPELINE_STAGE_TRANSFER_BIT |
1878 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
1879 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
1880 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
1881 }
1882
1883 if (src_stage_mask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
1884 VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
1885 VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
1886 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
1887 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
1888 VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
1889 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
1890 VK_PIPELINE_STAGE_TRANSFER_BIT |
1891 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
1892 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT |
1893 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
1894 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
1895 } else if (src_stage_mask & (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
1896 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
1897 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
1898 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT)) {
1899 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
1900 }
1901 }
1902
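/* Map source (write) access flags onto the flushes required to make the
 * writes visible: shader writes are written back from the global L2,
 * while CB/DB writes additionally need their compression metadata
 * flushed.
 */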
1903 static enum radv_cmd_flush_bits
1904 radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer,
1905 VkAccessFlags src_flags)
1906 {
1907 enum radv_cmd_flush_bits flush_bits = 0;
1908 uint32_t b;
1909 for_each_bit(b, src_flags) {
1910 switch ((VkAccessFlagBits)(1 << b)) {
1911 case VK_ACCESS_SHADER_WRITE_BIT:
1912 flush_bits |= RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
1913 break;
1914 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
1915 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
1916 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
1917 break;
1918 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
1919 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
1920 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
1921 break;
1922 case VK_ACCESS_TRANSFER_WRITE_BIT:
1923 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
1924 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
1925 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
1926 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
1927 RADV_CMD_FLAG_INV_GLOBAL_L2;
1928 break;
1929 default:
1930 break;
1931 }
1932 }
1933 return flush_bits;
1934 }
1935
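/* Map destination (read) access flags onto the invalidations the consumer
 * needs. Attachment reads only force a CB/DB flush when the image may
 * also have been written as a storage image (or is unknown, see the TODO
 * below).
 */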
1936 static enum radv_cmd_flush_bits
1937 radv_dst_access_flush(struct radv_cmd_buffer *cmd_buffer,
1938 VkAccessFlags dst_flags,
1939 struct radv_image *image)
1940 {
1941 enum radv_cmd_flush_bits flush_bits = 0;
1942 uint32_t b;
1943 for_each_bit(b, dst_flags) {
1944 switch ((VkAccessFlagBits)(1 << b)) {
1945 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
1946 case VK_ACCESS_INDEX_READ_BIT:
1947 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
1948 break;
1949 case VK_ACCESS_UNIFORM_READ_BIT:
1950 flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 | RADV_CMD_FLAG_INV_SMEM_L1;
1951 break;
1952 case VK_ACCESS_SHADER_READ_BIT:
1953 case VK_ACCESS_TRANSFER_READ_BIT:
1954 case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
1955 flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 |
1956 RADV_CMD_FLAG_INV_GLOBAL_L2;
1957 break;
1958 case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
1959 /* TODO: change to image && when the image gets passed
1960 * through from the subpass. */
1961 if (!image || (image->usage & VK_IMAGE_USAGE_STORAGE_BIT))
1962 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
1963 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
1964 break;
1965 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT:
1966 if (!image || (image->usage & VK_IMAGE_USAGE_STORAGE_BIT))
1967 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
1968 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
1969 break;
1970 default:
1971 break;
1972 }
1973 }
1974 return flush_bits;
1975 }
1976
1977 static void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer, const struct radv_subpass_barrier *barrier)
1978 {
1979 cmd_buffer->state.flush_bits |= radv_src_access_flush(cmd_buffer, barrier->src_access_mask);
1980 radv_stage_flush(cmd_buffer, barrier->src_stage_mask);
1981 cmd_buffer->state.flush_bits |= radv_dst_access_flush(cmd_buffer, barrier->dst_access_mask,
1982 NULL);
1983 }
1984
1985 static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer,
1986 VkAttachmentReference att)
1987 {
1988 unsigned idx = att.attachment;
1989 struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment;
1990 VkImageSubresourceRange range;
1991 range.aspectMask = 0;
1992 range.baseMipLevel = view->base_mip;
1993 range.levelCount = 1;
1994 range.baseArrayLayer = view->base_layer;
1995 range.layerCount = cmd_buffer->state.framebuffer->layers;
1996
1997 radv_handle_image_transition(cmd_buffer,
1998 view->image,
1999 cmd_buffer->state.attachments[idx].current_layout,
2000 att.layout, 0, 0, &range,
2001 cmd_buffer->state.attachments[idx].pending_clear_aspects);
2002
2003 cmd_buffer->state.attachments[idx].current_layout = att.layout;
2006 }
2007
2008 void
2009 radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer,
2010 const struct radv_subpass *subpass, bool transitions)
2011 {
2012 if (transitions) {
2013 radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);
2014
2015 for (unsigned i = 0; i < subpass->color_count; ++i) {
2016 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
2017 radv_handle_subpass_image_transition(cmd_buffer,
2018 subpass->color_attachments[i]);
2019 }
2020
2021 for (unsigned i = 0; i < subpass->input_count; ++i) {
2022 radv_handle_subpass_image_transition(cmd_buffer,
2023 subpass->input_attachments[i]);
2024 }
2025
2026 if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
2027 radv_handle_subpass_image_transition(cmd_buffer,
2028 subpass->depth_stencil_attachment);
2029 }
2030 }
2031
2032 cmd_buffer->state.subpass = subpass;
2033
2034 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER;
2035 }
2036
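/* Allocate the per-attachment command buffer state and derive the aspects
 * to clear from the render pass load ops. When depth is cleared and the
 * stencil load op is DONT_CARE, stencil is cleared too, presumably
 * because it comes for free with the depth clear.
 */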
2037 static VkResult
2038 radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer,
2039 struct radv_render_pass *pass,
2040 const VkRenderPassBeginInfo *info)
2041 {
2042 struct radv_cmd_state *state = &cmd_buffer->state;
2043
2044 if (pass->attachment_count == 0) {
2045 state->attachments = NULL;
2046 return VK_SUCCESS;
2047 }
2048
2049 state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
2050 pass->attachment_count *
2051 sizeof(state->attachments[0]),
2052 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2053 if (state->attachments == NULL) {
2054 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
2055 return cmd_buffer->record_result;
2056 }
2057
2058 for (uint32_t i = 0; i < pass->attachment_count; ++i) {
2059 struct radv_render_pass_attachment *att = &pass->attachments[i];
2060 VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
2061 VkImageAspectFlags clear_aspects = 0;
2062
2063 if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
2064 /* color attachment */
2065 if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2066 clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
2067 }
2068 } else {
2069 /* depthstencil attachment */
2070 if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
2071 att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2072 clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
2073 if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
2074 att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE)
2075 clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
2076 }
2077 if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
2078 att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2079 clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
2080 }
2081 }
2082
2083 state->attachments[i].pending_clear_aspects = clear_aspects;
2084 state->attachments[i].cleared_views = 0;
2085 if (clear_aspects && info) {
2086 assert(info->clearValueCount > i);
2087 state->attachments[i].clear_value = info->pClearValues[i];
2088 }
2089
2090 state->attachments[i].current_layout = att->initial_layout;
2091 }
2092
2093 return VK_SUCCESS;
2094 }
2095
2096 VkResult radv_AllocateCommandBuffers(
2097 VkDevice _device,
2098 const VkCommandBufferAllocateInfo *pAllocateInfo,
2099 VkCommandBuffer *pCommandBuffers)
2100 {
2101 RADV_FROM_HANDLE(radv_device, device, _device);
2102 RADV_FROM_HANDLE(radv_cmd_pool, pool, pAllocateInfo->commandPool);
2103
2104 VkResult result = VK_SUCCESS;
2105 uint32_t i;
2106
2107 memset(pCommandBuffers, 0,
2108 sizeof(*pCommandBuffers)*pAllocateInfo->commandBufferCount);
2109
2110 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
2112 if (!list_empty(&pool->free_cmd_buffers)) {
2113 struct radv_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct radv_cmd_buffer, pool_link);
2114
2115 list_del(&cmd_buffer->pool_link);
2116 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
2117
2118 result = radv_reset_cmd_buffer(cmd_buffer);
2119 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
2120 cmd_buffer->level = pAllocateInfo->level;
2121
2122 pCommandBuffers[i] = radv_cmd_buffer_to_handle(cmd_buffer);
2123 } else {
2124 result = radv_create_cmd_buffer(device, pool, pAllocateInfo->level,
2125 &pCommandBuffers[i]);
2126 }
2127 if (result != VK_SUCCESS)
2128 break;
2129 }
2130
2131 if (result != VK_SUCCESS)
2132 radv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
2133 i, pCommandBuffers);
2134
2135 return result;
2136 }
2137
2138 void radv_FreeCommandBuffers(
2139 VkDevice device,
2140 VkCommandPool commandPool,
2141 uint32_t commandBufferCount,
2142 const VkCommandBuffer *pCommandBuffers)
2143 {
2144 for (uint32_t i = 0; i < commandBufferCount; i++) {
2145 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
2146
2147 if (cmd_buffer) {
2148 if (cmd_buffer->pool) {
2149 list_del(&cmd_buffer->pool_link);
2150 list_addtail(&cmd_buffer->pool_link, &cmd_buffer->pool->free_cmd_buffers);
2151 } else
2152 radv_cmd_buffer_destroy(cmd_buffer);
2154 }
2155 }
2156 }
2157
2158 VkResult radv_ResetCommandBuffer(
2159 VkCommandBuffer commandBuffer,
2160 VkCommandBufferResetFlags flags)
2161 {
2162 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2163 return radv_reset_cmd_buffer(cmd_buffer);
2164 }
2165
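/* Initialize a graphics command buffer: chain to the device's
 * pre-recorded init IB via INDIRECT_BUFFER when one exists, otherwise
 * emit the default configuration inline.
 */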
2166 static void emit_gfx_buffer_state(struct radv_cmd_buffer *cmd_buffer)
2167 {
2168 struct radv_device *device = cmd_buffer->device;
2169 if (device->gfx_init) {
2170 uint64_t va = radv_buffer_get_va(device->gfx_init);
2171 device->ws->cs_add_buffer(cmd_buffer->cs, device->gfx_init, 8);
2172 radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
2173 radeon_emit(cmd_buffer->cs, va);
2174 radeon_emit(cmd_buffer->cs, va >> 32);
2175 radeon_emit(cmd_buffer->cs, device->gfx_init_size_dw & 0xffff);
2176 } else
2177 si_init_config(cmd_buffer);
2178 }
2179
2180 VkResult radv_BeginCommandBuffer(
2181 VkCommandBuffer commandBuffer,
2182 const VkCommandBufferBeginInfo *pBeginInfo)
2183 {
2184 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2185 VkResult result;
2186
2187 result = radv_reset_cmd_buffer(cmd_buffer);
2188 if (result != VK_SUCCESS)
2189 return result;
2190
2191 memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
2192 cmd_buffer->state.last_primitive_reset_en = -1;
2193 cmd_buffer->usage_flags = pBeginInfo->flags;
2194
2195 /* Set up the initial configuration for the command buffer. */
2196 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
2197 switch (cmd_buffer->queue_family_index) {
2198 case RADV_QUEUE_GENERAL:
2199 emit_gfx_buffer_state(cmd_buffer);
2200 break;
2201 case RADV_QUEUE_COMPUTE:
2202 si_init_compute(cmd_buffer);
2203 break;
2204 case RADV_QUEUE_TRANSFER:
2205 default:
2206 break;
2207 }
2208 }
2209
2210 if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
2211 assert(pBeginInfo->pInheritanceInfo);
2212 cmd_buffer->state.framebuffer = radv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
2213 cmd_buffer->state.pass = radv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
2214
2215 struct radv_subpass *subpass =
2216 &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
2217
2218 result = radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
2219 if (result != VK_SUCCESS)
2220 return result;
2221
2222 radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
2223 }
2224
2225 radv_cmd_buffer_trace_emit(cmd_buffer);
2226 return result;
2227 }
2228
2229 void radv_CmdBindVertexBuffers(
2230 VkCommandBuffer commandBuffer,
2231 uint32_t firstBinding,
2232 uint32_t bindingCount,
2233 const VkBuffer* pBuffers,
2234 const VkDeviceSize* pOffsets)
2235 {
2236 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2237 struct radv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;
2238
2239 /* We have to defer setting up the vertex buffer descriptors since we
2240 * need the binding stride from the pipeline. */
2241
2242 assert(firstBinding + bindingCount <= MAX_VBS);
2243 for (uint32_t i = 0; i < bindingCount; i++) {
2244 vb[firstBinding + i].buffer = radv_buffer_from_handle(pBuffers[i]);
2245 vb[firstBinding + i].offset = pOffsets[i];
2246 }
2247
2248 cmd_buffer->state.vb_dirty = true;
2249 }
2250
2251 void radv_CmdBindIndexBuffer(
2252 VkCommandBuffer commandBuffer,
2253 VkBuffer buffer,
2254 VkDeviceSize offset,
2255 VkIndexType indexType)
2256 {
2257 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2258 RADV_FROM_HANDLE(radv_buffer, index_buffer, buffer);
2259
2260 cmd_buffer->state.index_type = indexType; /* vk matches hw */
2261 cmd_buffer->state.index_va = radv_buffer_get_va(index_buffer->bo);
2262 cmd_buffer->state.index_va += index_buffer->offset + offset;
2263
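/* A non-zero index type means 32-bit indices (shift by 2), zero means
 * 16-bit indices (shift by 1). */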
2264 int index_size_shift = cmd_buffer->state.index_type ? 2 : 1;
2265 cmd_buffer->state.max_index_count = (index_buffer->size - offset) >> index_size_shift;
2266 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
2267 cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, index_buffer->bo, 8);
2268 }
2269
2271 void radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
2272 struct radv_descriptor_set *set,
2273 unsigned idx)
2274 {
2275 struct radeon_winsys *ws = cmd_buffer->device->ws;
2276
2277 cmd_buffer->state.descriptors[idx] = set;
2278 cmd_buffer->state.descriptors_dirty |= (1u << idx);
2279 if (!set)
2280 return;
2281
2282 assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
2283
2284 for (unsigned j = 0; j < set->layout->buffer_count; ++j)
2285 if (set->descriptors[j])
2286 ws->cs_add_buffer(cmd_buffer->cs, set->descriptors[j], 7);
2287
2288 if (set->bo)
2289 ws->cs_add_buffer(cmd_buffer->cs, set->bo, 8);
2290 }
2291
2292 void radv_CmdBindDescriptorSets(
2293 VkCommandBuffer commandBuffer,
2294 VkPipelineBindPoint pipelineBindPoint,
2295 VkPipelineLayout _layout,
2296 uint32_t firstSet,
2297 uint32_t descriptorSetCount,
2298 const VkDescriptorSet* pDescriptorSets,
2299 uint32_t dynamicOffsetCount,
2300 const uint32_t* pDynamicOffsets)
2301 {
2302 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2303 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2304 unsigned dyn_idx = 0;
2305
2306 for (unsigned i = 0; i < descriptorSetCount; ++i) {
2307 unsigned idx = i + firstSet;
2308 RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);
2309 radv_bind_descriptor_set(cmd_buffer, set, idx);
2310
2311 for (unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
2312 unsigned dyn_offset = j + layout->set[idx].dynamic_offset_start;
2313 uint32_t *dst = cmd_buffer->dynamic_buffers + dyn_offset * 4;
2314 assert(dyn_idx < dynamicOffsetCount);
2315
2316 struct radv_descriptor_range *range = set->dynamic_descriptors + j;
2317 uint64_t va = range->va + pDynamicOffsets[dyn_idx];
2318 dst[0] = va;
2319 dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
2320 dst[2] = range->size;
2321 dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
2322 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
2323 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
2324 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
2325 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
2326 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
2327 cmd_buffer->push_constant_stages |=
2328 set->layout->dynamic_shader_stages;
2329 }
2330 }
2331 }
2332
2333 static bool radv_init_push_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
2334 struct radv_descriptor_set *set,
2335 struct radv_descriptor_set_layout *layout)
2336 {
2337 set->size = layout->size;
2338 set->layout = layout;
2339
2340 if (cmd_buffer->push_descriptors.capacity < set->size) {
2341 size_t new_size = MAX2(set->size, 1024);
2342 new_size = MAX2(new_size, 2 * cmd_buffer->push_descriptors.capacity);
2343 new_size = MIN2(new_size, 96 * MAX_PUSH_DESCRIPTORS);
2344
2345 free(set->mapped_ptr);
2346 set->mapped_ptr = malloc(new_size);
2347
2348 if (!set->mapped_ptr) {
2349 cmd_buffer->push_descriptors.capacity = 0;
2350 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
2351 return false;
2352 }
2353
2354 cmd_buffer->push_descriptors.capacity = new_size;
2355 }
2356
2357 return true;
2358 }
2359
2360 void radv_meta_push_descriptor_set(
2361 struct radv_cmd_buffer* cmd_buffer,
2362 VkPipelineBindPoint pipelineBindPoint,
2363 VkPipelineLayout _layout,
2364 uint32_t set,
2365 uint32_t descriptorWriteCount,
2366 const VkWriteDescriptorSet* pDescriptorWrites)
2367 {
2368 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2369 struct radv_descriptor_set *push_set = &cmd_buffer->meta_push_descriptors;
2370 unsigned bo_offset;
2371
2372 assert(set == 0);
2373 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
2374
2375 push_set->size = layout->set[set].layout->size;
2376 push_set->layout = layout->set[set].layout;
2377
2378 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, push_set->size, 32,
2379 &bo_offset,
2380 (void**) &push_set->mapped_ptr))
2381 return;
2382
2383 push_set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2384 push_set->va += bo_offset;
2385
2386 radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
2387 radv_descriptor_set_to_handle(push_set),
2388 descriptorWriteCount, pDescriptorWrites, 0, NULL);
2389
2390 cmd_buffer->state.descriptors[set] = push_set;
2391 cmd_buffer->state.descriptors_dirty |= (1u << set);
2392 }
2393
2394 void radv_CmdPushDescriptorSetKHR(
2395 VkCommandBuffer commandBuffer,
2396 VkPipelineBindPoint pipelineBindPoint,
2397 VkPipelineLayout _layout,
2398 uint32_t set,
2399 uint32_t descriptorWriteCount,
2400 const VkWriteDescriptorSet* pDescriptorWrites)
2401 {
2402 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2403 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2404 struct radv_descriptor_set *push_set = &cmd_buffer->push_descriptors.set;
2405
2406 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
2407
2408 if (!radv_init_push_descriptor_set(cmd_buffer, push_set, layout->set[set].layout))
2409 return;
2410
2411 radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
2412 radv_descriptor_set_to_handle(push_set),
2413 descriptorWriteCount, pDescriptorWrites, 0, NULL);
2414
2415 cmd_buffer->state.descriptors[set] = push_set;
2416 cmd_buffer->state.descriptors_dirty |= (1u << set);
2417 cmd_buffer->state.push_descriptors_dirty = true;
2418 }
2419
2420 void radv_CmdPushDescriptorSetWithTemplateKHR(
2421 VkCommandBuffer commandBuffer,
2422 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
2423 VkPipelineLayout _layout,
2424 uint32_t set,
2425 const void* pData)
2426 {
2427 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2428 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2429 struct radv_descriptor_set *push_set = &cmd_buffer->push_descriptors.set;
2430
2431 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
2432
2433 if (!radv_init_push_descriptor_set(cmd_buffer, push_set, layout->set[set].layout))
2434 return;
2435
2436 radv_update_descriptor_set_with_template(cmd_buffer->device, cmd_buffer, push_set,
2437 descriptorUpdateTemplate, pData);
2438
2439 cmd_buffer->state.descriptors[set] = push_set;
2440 cmd_buffer->state.descriptors_dirty |= (1u << set);
2441 cmd_buffer->state.push_descriptors_dirty = true;
2442 }
2443
2444 void radv_CmdPushConstants(VkCommandBuffer commandBuffer,
2445 VkPipelineLayout layout,
2446 VkShaderStageFlags stageFlags,
2447 uint32_t offset,
2448 uint32_t size,
2449 const void* pValues)
2450 {
2451 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2452 memcpy(cmd_buffer->push_constants + offset, pValues, size);
2453 cmd_buffer->push_constant_stages |= stageFlags;
2454 }
2455
2456 VkResult radv_EndCommandBuffer(
2457 VkCommandBuffer commandBuffer)
2458 {
2459 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2460
2461 if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) {
2462 if (cmd_buffer->device->physical_device->rad_info.chip_class == SI)
2463 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
2464 si_emit_cache_flush(cmd_buffer);
2465 }
2466
2467 if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
2468 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2469
2470 return cmd_buffer->record_result;
2471 }
2472
2473 static void
2474 radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer)
2475 {
2476 struct radeon_winsys *ws = cmd_buffer->device->ws;
2477 struct radv_shader_variant *compute_shader;
2478 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
2479 uint64_t va;
2480
2481 if (!pipeline || pipeline == cmd_buffer->state.emitted_compute_pipeline)
2482 return;
2483
2484 cmd_buffer->state.emitted_compute_pipeline = pipeline;
2485
2486 compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
2487 va = radv_buffer_get_va(compute_shader->bo) + compute_shader->bo_offset;
2488
2489 ws->cs_add_buffer(cmd_buffer->cs, compute_shader->bo, 8);
2490 radv_emit_prefetch(cmd_buffer, va, compute_shader->code_size);
2491
2492 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
2493 cmd_buffer->cs, 16);
2494
2495 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B830_COMPUTE_PGM_LO, 2);
2496 radeon_emit(cmd_buffer->cs, va >> 8);
2497 radeon_emit(cmd_buffer->cs, va >> 40);
2498
2499 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
2500 radeon_emit(cmd_buffer->cs, compute_shader->rsrc1);
2501 radeon_emit(cmd_buffer->cs, compute_shader->rsrc2);
2502
2504 cmd_buffer->compute_scratch_size_needed =
2505 MAX2(cmd_buffer->compute_scratch_size_needed,
2506 pipeline->max_waves * pipeline->scratch_bytes_per_wave);
2507
2508 /* change these once we have scratch support */
2509 radeon_set_sh_reg(cmd_buffer->cs, R_00B860_COMPUTE_TMPRING_SIZE,
2510 S_00B860_WAVES(pipeline->max_waves) |
2511 S_00B860_WAVESIZE(pipeline->scratch_bytes_per_wave >> 10));
2512
2513 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
2514 radeon_emit(cmd_buffer->cs,
2515 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]));
2516 radeon_emit(cmd_buffer->cs,
2517 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[1]));
2518 radeon_emit(cmd_buffer->cs,
2519 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]));
2520
2521 assert(cmd_buffer->cs->cdw <= cdw_max);
2522 radv_save_pipeline(cmd_buffer, pipeline, RING_COMPUTE);
2523 }
2524
2525 static void radv_mark_descriptor_sets_dirty(struct radv_cmd_buffer *cmd_buffer)
2526 {
2527 for (unsigned i = 0; i < MAX_SETS; i++) {
2528 if (cmd_buffer->state.descriptors[i])
2529 cmd_buffer->state.descriptors_dirty |= (1u << i);
2530 }
2531 }
2532
2533 void radv_CmdBindPipeline(
2534 VkCommandBuffer commandBuffer,
2535 VkPipelineBindPoint pipelineBindPoint,
2536 VkPipeline _pipeline)
2537 {
2538 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2539 RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
2540
2541 switch (pipelineBindPoint) {
2542 case VK_PIPELINE_BIND_POINT_COMPUTE:
2543 if (cmd_buffer->state.compute_pipeline == pipeline)
2544 return;
2545 radv_mark_descriptor_sets_dirty(cmd_buffer);
2546
2547 cmd_buffer->state.compute_pipeline = pipeline;
2548 cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
2549 break;
2550 case VK_PIPELINE_BIND_POINT_GRAPHICS:
2551 if (cmd_buffer->state.pipeline == pipeline)
2552 return;
2553 radv_mark_descriptor_sets_dirty(cmd_buffer);
2554
2555 cmd_buffer->state.pipeline = pipeline;
2556 if (!pipeline)
2557 break;
2558
2559 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
2560 cmd_buffer->push_constant_stages |= pipeline->active_stages;
2561
2562 /* Apply the dynamic state from the pipeline */
2563 cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
2564 radv_dynamic_state_copy(&cmd_buffer->state.dynamic,
2565 &pipeline->dynamic_state,
2566 pipeline->dynamic_state_mask);
2567
2568 if (pipeline->graphics.esgs_ring_size > cmd_buffer->esgs_ring_size_needed)
2569 cmd_buffer->esgs_ring_size_needed = pipeline->graphics.esgs_ring_size;
2570 if (pipeline->graphics.gsvs_ring_size > cmd_buffer->gsvs_ring_size_needed)
2571 cmd_buffer->gsvs_ring_size_needed = pipeline->graphics.gsvs_ring_size;
2572
2573 if (radv_pipeline_has_tess(pipeline))
2574 cmd_buffer->tess_rings_needed = true;
2575
2576 if (radv_pipeline_has_gs(pipeline)) {
2577 struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
2578 AC_UD_SCRATCH_RING_OFFSETS);
2579 if (cmd_buffer->ring_offsets_idx == -1)
2580 cmd_buffer->ring_offsets_idx = loc->sgpr_idx;
2581 else if (loc->sgpr_idx != -1)
2582 assert(loc->sgpr_idx == cmd_buffer->ring_offsets_idx);
2583 }
2584 break;
2585 default:
2586 assert(!"invalid bind point");
2587 break;
2588 }
2589 }
2590
2591 void radv_CmdSetViewport(
2592 VkCommandBuffer commandBuffer,
2593 uint32_t firstViewport,
2594 uint32_t viewportCount,
2595 const VkViewport* pViewports)
2596 {
2597 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2598 const uint32_t total_count = firstViewport + viewportCount;
2599
2600 assert(firstViewport < MAX_VIEWPORTS);
2601 assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);
2602
2603 memcpy(cmd_buffer->state.dynamic.viewport.viewports + firstViewport,
2604 pViewports, viewportCount * sizeof(*pViewports));
2605
2606 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_VIEWPORT;
2607 }
2608
2609 void radv_CmdSetScissor(
2610 VkCommandBuffer commandBuffer,
2611 uint32_t firstScissor,
2612 uint32_t scissorCount,
2613 const VkRect2D* pScissors)
2614 {
2615 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2616 const uint32_t total_count = firstScissor + scissorCount;
2617
2618 assert(firstScissor < MAX_SCISSORS);
2619 assert(total_count >= 1 && total_count <= MAX_SCISSORS);
2620
2621 memcpy(cmd_buffer->state.dynamic.scissor.scissors + firstScissor,
2622 pScissors, scissorCount * sizeof(*pScissors));
2623 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
2624 }
2625
2626 void radv_CmdSetLineWidth(
2627 VkCommandBuffer commandBuffer,
2628 float lineWidth)
2629 {
2630 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2631 cmd_buffer->state.dynamic.line_width = lineWidth;
2632 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
2633 }
2634
2635 void radv_CmdSetDepthBias(
2636 VkCommandBuffer commandBuffer,
2637 float depthBiasConstantFactor,
2638 float depthBiasClamp,
2639 float depthBiasSlopeFactor)
2640 {
2641 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2642
2643 cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
2644 cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
2645 cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;
2646
2647 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
2648 }
2649
2650 void radv_CmdSetBlendConstants(
2651 VkCommandBuffer commandBuffer,
2652 const float blendConstants[4])
2653 {
2654 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2655
2656 memcpy(cmd_buffer->state.dynamic.blend_constants,
2657 blendConstants, sizeof(float) * 4);
2658
2659 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
2660 }
2661
2662 void radv_CmdSetDepthBounds(
2663 VkCommandBuffer commandBuffer,
2664 float minDepthBounds,
2665 float maxDepthBounds)
2666 {
2667 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2668
2669 cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
2670 cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;
2671
2672 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
2673 }
2674
2675 void radv_CmdSetStencilCompareMask(
2676 VkCommandBuffer commandBuffer,
2677 VkStencilFaceFlags faceMask,
2678 uint32_t compareMask)
2679 {
2680 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2681
2682 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2683 cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
2684 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2685 cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;
2686
2687 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
2688 }
2689
2690 void radv_CmdSetStencilWriteMask(
2691 VkCommandBuffer commandBuffer,
2692 VkStencilFaceFlags faceMask,
2693 uint32_t writeMask)
2694 {
2695 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2696
2697 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2698 cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
2699 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2700 cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;
2701
2702 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
2703 }
2704
2705 void radv_CmdSetStencilReference(
2706 VkCommandBuffer commandBuffer,
2707 VkStencilFaceFlags faceMask,
2708 uint32_t reference)
2709 {
2710 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2711
2712 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2713 cmd_buffer->state.dynamic.stencil_reference.front = reference;
2714 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2715 cmd_buffer->state.dynamic.stencil_reference.back = reference;
2716
2717 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
2718 }
2719
2720 void radv_CmdExecuteCommands(
2721 VkCommandBuffer commandBuffer,
2722 uint32_t commandBufferCount,
2723 const VkCommandBuffer* pCmdBuffers)
2724 {
2725 RADV_FROM_HANDLE(radv_cmd_buffer, primary, commandBuffer);
2726
2727 assert(commandBufferCount > 0);
2728
2729 /* Emit pending flushes on primary prior to executing secondary */
2730 si_emit_cache_flush(primary);
2731
2732 for (uint32_t i = 0; i < commandBufferCount; i++) {
2733 RADV_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]);
2734
2735 primary->scratch_size_needed = MAX2(primary->scratch_size_needed,
2736 secondary->scratch_size_needed);
2737 primary->compute_scratch_size_needed = MAX2(primary->compute_scratch_size_needed,
2738 secondary->compute_scratch_size_needed);
2739
2740 if (secondary->esgs_ring_size_needed > primary->esgs_ring_size_needed)
2741 primary->esgs_ring_size_needed = secondary->esgs_ring_size_needed;
2742 if (secondary->gsvs_ring_size_needed > primary->gsvs_ring_size_needed)
2743 primary->gsvs_ring_size_needed = secondary->gsvs_ring_size_needed;
2744 if (secondary->tess_rings_needed)
2745 primary->tess_rings_needed = true;
2746 if (secondary->sample_positions_needed)
2747 primary->sample_positions_needed = true;
2748
2749 if (secondary->ring_offsets_idx != -1) {
2750 if (primary->ring_offsets_idx == -1)
2751 primary->ring_offsets_idx = secondary->ring_offsets_idx;
2752 else
2753 assert(secondary->ring_offsets_idx == primary->ring_offsets_idx);
2754 }
2755 primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs);
2756
2758 /* When the secondary command buffer is compute-only, we don't
2759 * need to re-emit the current graphics pipeline.
2760 */
2761 if (secondary->state.emitted_pipeline) {
2762 primary->state.emitted_pipeline =
2763 secondary->state.emitted_pipeline;
2764 }
2765
2766 /* When the secondary command buffer is graphics-only, we don't
2767 * need to re-emit the current compute pipeline.
2768 */
2769 if (secondary->state.emitted_compute_pipeline) {
2770 primary->state.emitted_compute_pipeline =
2771 secondary->state.emitted_compute_pipeline;
2772 }
2773
2774 /* Only re-emit the draw packets when needed. */
2775 if (secondary->state.last_primitive_reset_en != -1) {
2776 primary->state.last_primitive_reset_en =
2777 secondary->state.last_primitive_reset_en;
2778 }
2779
2780 if (secondary->state.last_primitive_reset_index) {
2781 primary->state.last_primitive_reset_index =
2782 secondary->state.last_primitive_reset_index;
2783 }
2784
2785 if (secondary->state.last_ia_multi_vgt_param) {
2786 primary->state.last_ia_multi_vgt_param =
2787 secondary->state.last_ia_multi_vgt_param;
2788 }
2789 }
2790
2791 /* After executing commands from secondary buffers we have to mark
2792 * some state as dirty again.
2793 */
2794 primary->state.dirty |= RADV_CMD_DIRTY_PIPELINE |
2795 RADV_CMD_DIRTY_INDEX_BUFFER |
2796 RADV_CMD_DIRTY_DYNAMIC_ALL;
2797 radv_mark_descriptor_sets_dirty(primary);
2798 }
2799
2800 VkResult radv_CreateCommandPool(
2801 VkDevice _device,
2802 const VkCommandPoolCreateInfo* pCreateInfo,
2803 const VkAllocationCallbacks* pAllocator,
2804 VkCommandPool* pCmdPool)
2805 {
2806 RADV_FROM_HANDLE(radv_device, device, _device);
2807 struct radv_cmd_pool *pool;
2808
2809 pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
2810 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2811 if (pool == NULL)
2812 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2813
2814 if (pAllocator)
2815 pool->alloc = *pAllocator;
2816 else
2817 pool->alloc = device->alloc;
2818
2819 list_inithead(&pool->cmd_buffers);
2820 list_inithead(&pool->free_cmd_buffers);
2821
2822 pool->queue_family_index = pCreateInfo->queueFamilyIndex;
2823
2824 *pCmdPool = radv_cmd_pool_to_handle(pool);
2825
2826 return VK_SUCCESS;
2828 }
2829
2830 void radv_DestroyCommandPool(
2831 VkDevice _device,
2832 VkCommandPool commandPool,
2833 const VkAllocationCallbacks* pAllocator)
2834 {
2835 RADV_FROM_HANDLE(radv_device, device, _device);
2836 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
2837
2838 if (!pool)
2839 return;
2840
2841 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
2842 &pool->cmd_buffers, pool_link) {
2843 radv_cmd_buffer_destroy(cmd_buffer);
2844 }
2845
2846 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
2847 &pool->free_cmd_buffers, pool_link) {
2848 radv_cmd_buffer_destroy(cmd_buffer);
2849 }
2850
2851 vk_free2(&device->alloc, pAllocator, pool);
2852 }
2853
2854 VkResult radv_ResetCommandPool(
2855 VkDevice device,
2856 VkCommandPool commandPool,
2857 VkCommandPoolResetFlags flags)
2858 {
2859 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
2860 VkResult result;
2861
2862 list_for_each_entry(struct radv_cmd_buffer, cmd_buffer,
2863 &pool->cmd_buffers, pool_link) {
2864 result = radv_reset_cmd_buffer(cmd_buffer);
2865 if (result != VK_SUCCESS)
2866 return result;
2867 }
2868
2869 return VK_SUCCESS;
2870 }
2871
2872 void radv_TrimCommandPoolKHR(
2873 VkDevice device,
2874 VkCommandPool commandPool,
2875 VkCommandPoolTrimFlagsKHR flags)
2876 {
2877 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
2878
2879 if (!pool)
2880 return;
2881
2882 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
2883 &pool->free_cmd_buffers, pool_link) {
2884 radv_cmd_buffer_destroy(cmd_buffer);
2885 }
2886 }
2887
2888 void radv_CmdBeginRenderPass(
2889 VkCommandBuffer commandBuffer,
2890 const VkRenderPassBeginInfo* pRenderPassBegin,
2891 VkSubpassContents contents)
2892 {
2893 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2894 RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass);
2895 RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
2896
2897 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
2898 cmd_buffer->cs, 2048);
2899 MAYBE_UNUSED VkResult result;
2900
2901 cmd_buffer->state.framebuffer = framebuffer;
2902 cmd_buffer->state.pass = pass;
2903 cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
2904
2905 result = radv_cmd_state_setup_attachments(cmd_buffer, pass, pRenderPassBegin);
2906 if (result != VK_SUCCESS)
2907 return;
2908
2909 radv_cmd_buffer_set_subpass(cmd_buffer, pass->subpasses, true);
2910 assert(cmd_buffer->cs->cdw <= cdw_max);
2911
2912 radv_cmd_buffer_clear_subpass(cmd_buffer);
2913 }
2914
2915 void radv_CmdNextSubpass(
2916 VkCommandBuffer commandBuffer,
2917 VkSubpassContents contents)
2918 {
2919 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2920
2921 radv_cmd_buffer_resolve_subpass(cmd_buffer);
2922
2923 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs,
2924 2048);
2925
2926 radv_cmd_buffer_set_subpass(cmd_buffer, cmd_buffer->state.subpass + 1, true);
2927 radv_cmd_buffer_clear_subpass(cmd_buffer);
2928 }
2929
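/* For multiview, write the current view index into the user SGPR each
 * stage reserved for it. The GS copy shader is handled separately since
 * it runs on the hardware VS stage.
 */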
2930 static void radv_emit_view_index(struct radv_cmd_buffer *cmd_buffer, unsigned index)
2931 {
2932 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
2933 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
2934 if (!pipeline->shaders[stage])
2935 continue;
2936 struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, AC_UD_VIEW_INDEX);
2937 if (loc->sgpr_idx == -1)
2938 continue;
2939 uint32_t base_reg = radv_shader_stage_to_user_data_0(stage, cmd_buffer->device->physical_device->rad_info.chip_class, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
2940 radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
2942 }
2943 if (pipeline->gs_copy_shader) {
2944 struct ac_userdata_info *loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_VIEW_INDEX];
2945 if (loc->sgpr_idx != -1) {
2946 uint32_t base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
2947 radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
2948 }
2949 }
2950 }
2951
2952 static void
2953 radv_cs_emit_draw_packet(struct radv_cmd_buffer *cmd_buffer,
2954 uint32_t vertex_count)
2955 {
2956 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, cmd_buffer->state.predicating));
2957 radeon_emit(cmd_buffer->cs, vertex_count);
2958 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
2959 S_0287F0_USE_OPAQUE(0));
2960 }
2961
2962 static void
2963 radv_cs_emit_draw_indexed_packet(struct radv_cmd_buffer *cmd_buffer,
2964 uint64_t index_va,
2965 uint32_t index_count)
2966 {
2967 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, false));
2968 radeon_emit(cmd_buffer->cs, cmd_buffer->state.max_index_count);
2969 radeon_emit(cmd_buffer->cs, index_va);
2970 radeon_emit(cmd_buffer->cs, index_va >> 32);
2971 radeon_emit(cmd_buffer->cs, index_count);
2972 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_DMA);
2973 }
2974
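/* A single indirect draw that needs neither a count buffer nor a draw ID
 * can use the short DRAW_INDIRECT packet; everything else goes through
 * DRAW_INDIRECT_MULTI, which loops on the GPU and can fetch the real
 * draw count from memory.
 */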
2975 static void
2976 radv_cs_emit_indirect_draw_packet(struct radv_cmd_buffer *cmd_buffer,
2977 bool indexed,
2978 uint32_t draw_count,
2979 uint64_t count_va,
2980 uint32_t stride)
2981 {
2982 struct radeon_winsys_cs *cs = cmd_buffer->cs;
2983 unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
2984 : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
2985 bool draw_id_enable = cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX]->info.info.vs.needs_draw_id;
2986 uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr;
2987 assert(base_reg);
2988
2989 if (draw_count == 1 && !count_va && !draw_id_enable) {
2990 radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT :
2991 PKT3_DRAW_INDIRECT, 3, false));
2992 radeon_emit(cs, 0);
2993 radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
2994 radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
2995 radeon_emit(cs, di_src_sel);
2996 } else {
2997 radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
2998 PKT3_DRAW_INDIRECT_MULTI,
2999 8, false));
3000 radeon_emit(cs, 0);
3001 radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
3002 radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
3003 radeon_emit(cs, (((base_reg + 8) - SI_SH_REG_OFFSET) >> 2) |
3004 S_2C3_DRAW_INDEX_ENABLE(draw_id_enable) |
3005 S_2C3_COUNT_INDIRECT_ENABLE(!!count_va));
3006 radeon_emit(cs, draw_count); /* count */
3007 radeon_emit(cs, count_va); /* count_addr */
3008 radeon_emit(cs, count_va >> 32);
3009 radeon_emit(cs, stride); /* stride */
3010 radeon_emit(cs, di_src_sel);
3011 }
3012 }
3013
3014 struct radv_draw_info {
3015 /**
3016 * Number of vertices.
3017 */
3018 uint32_t count;
3019
3020 /**
3021 * Index of the first vertex.
3022 */
3023 int32_t vertex_offset;
3024
3025 /**
3026 * First instance id.
3027 */
3028 uint32_t first_instance;
3029
3030 /**
3031 * Number of instances.
3032 */
3033 uint32_t instance_count;
3034
3035 /**
3036 * First index (indexed draws only).
3037 */
3038 uint32_t first_index;
3039
3040 /**
3041 * Whether it's an indexed draw.
3042 */
3043 bool indexed;
3044
3045 /**
3046 * Indirect draw parameters resource.
3047 */
3048 struct radv_buffer *indirect;
3049 uint64_t indirect_offset;
3050 uint32_t stride;
3051
3052 /**
3053 * Draw count parameters resource.
3054 */
3055 struct radv_buffer *count_buffer;
3056 uint64_t count_buffer_offset;
3057 };
3058
3059 static void
3060 radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer,
3061 const struct radv_draw_info *info)
3062 {
3063 struct radv_cmd_state *state = &cmd_buffer->state;
3064 struct radeon_winsys *ws = cmd_buffer->device->ws;
3065 struct radv_device *device = cmd_buffer->device;
3066 struct radeon_winsys_cs *cs = cmd_buffer->cs;
3067
3068 radv_cmd_buffer_flush_state(cmd_buffer, info->indexed,
3069 info->instance_count > 1, info->indirect,
3070 info->indirect ? 0 : info->count);
3071
3072 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(device->ws, cs,
3073 31 * MAX_VIEWS);
3074
3075 if (info->indirect) {
3076 uint64_t va = radv_buffer_get_va(info->indirect->bo);
3077 uint64_t count_va = 0;
3078
3079 va += info->indirect->offset + info->indirect_offset;
3080
3081 ws->cs_add_buffer(cs, info->indirect->bo, 8);
3082
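/* Program the base address the indirect draw packets will read their
 * arguments from (base index 1 selects the draw indirect base). */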
3083 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
3084 radeon_emit(cs, 1);
3085 radeon_emit(cs, va);
3086 radeon_emit(cs, va >> 32);
3087
3088 if (info->count_buffer) {
3089 count_va = radv_buffer_get_va(info->count_buffer->bo);
3090 count_va += info->count_buffer->offset +
3091 info->count_buffer_offset;
3092
3093 ws->cs_add_buffer(cs, info->count_buffer->bo, 8);
3094 }
3095
3096 if (!state->subpass->view_mask) {
3097 radv_cs_emit_indirect_draw_packet(cmd_buffer,
3098 info->indexed,
3099 info->count,
3100 count_va,
3101 info->stride);
3102 } else {
3103 unsigned i;
3104 for_each_bit(i, state->subpass->view_mask) {
3105 radv_emit_view_index(cmd_buffer, i);
3106
3107 radv_cs_emit_indirect_draw_packet(cmd_buffer,
3108 info->indexed,
3109 info->count,
3110 count_va,
3111 info->stride);
3112 }
3113 }
3114 } else {
3115 assert(state->pipeline->graphics.vtx_base_sgpr);
3116 radeon_set_sh_reg_seq(cs, state->pipeline->graphics.vtx_base_sgpr,
3117 state->pipeline->graphics.vtx_emit_num);
3118 radeon_emit(cs, info->vertex_offset);
3119 radeon_emit(cs, info->first_instance);
3120 if (state->pipeline->graphics.vtx_emit_num == 3)
3121 radeon_emit(cs, 0);
3122
3123 radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, state->predicating));
3124 radeon_emit(cs, info->instance_count);
3125
3126 if (info->indexed) {
3127 int index_size = state->index_type ? 4 : 2;
3128 uint64_t index_va;
3129
3130 index_va = state->index_va;
3131 index_va += info->first_index * index_size;
3132
3133 if (!state->subpass->view_mask) {
3134 radv_cs_emit_draw_indexed_packet(cmd_buffer,
3135 index_va,
3136 info->count);
3137 } else {
3138 unsigned i;
3139 for_each_bit(i, state->subpass->view_mask) {
3140 radv_emit_view_index(cmd_buffer, i);
3141
3142 radv_cs_emit_draw_indexed_packet(cmd_buffer,
3143 index_va,
3144 info->count);
3145 }
3146 }
3147 } else {
3148 if (!state->subpass->view_mask) {
3149 radv_cs_emit_draw_packet(cmd_buffer, info->count);
3150 } else {
3151 unsigned i;
3152 for_each_bit(i, state->subpass->view_mask) {
3153 radv_emit_view_index(cmd_buffer, i);
3154
3155 radv_cs_emit_draw_packet(cmd_buffer,
3156 info->count);
3157 }
3158 }
3159 }
3160 }
3161
3162 assert(cs->cdw <= cdw_max);
3163 radv_cmd_buffer_after_draw(cmd_buffer);
3164 }
3165
3166 void radv_CmdDraw(
3167 VkCommandBuffer commandBuffer,
3168 uint32_t vertexCount,
3169 uint32_t instanceCount,
3170 uint32_t firstVertex,
3171 uint32_t firstInstance)
3172 {
3173 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3174 struct radv_draw_info info = {};
3175
3176 info.count = vertexCount;
3177 info.instance_count = instanceCount;
3178 info.first_instance = firstInstance;
3179 info.vertex_offset = firstVertex;
3180
3181 radv_emit_draw_packets(cmd_buffer, &info);
3182 }
3183
3184 void radv_CmdDrawIndexed(
3185 VkCommandBuffer commandBuffer,
3186 uint32_t indexCount,
3187 uint32_t instanceCount,
3188 uint32_t firstIndex,
3189 int32_t vertexOffset,
3190 uint32_t firstInstance)
3191 {
3192 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3193 struct radv_draw_info info = {};
3194
3195 info.indexed = true;
3196 info.count = indexCount;
3197 info.instance_count = instanceCount;
3198 info.first_index = firstIndex;
3199 info.vertex_offset = vertexOffset;
3200 info.first_instance = firstInstance;
3201
3202 radv_emit_draw_packets(cmd_buffer, &info);
3203 }
3204
3205 void radv_CmdDrawIndirect(
3206 VkCommandBuffer commandBuffer,
3207 VkBuffer _buffer,
3208 VkDeviceSize offset,
3209 uint32_t drawCount,
3210 uint32_t stride)
3211 {
3212 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3213 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3214 struct radv_draw_info info = {};
3215
3216 info.count = drawCount;
3217 info.indirect = buffer;
3218 info.indirect_offset = offset;
3219 info.stride = stride;
3220
3221 radv_emit_draw_packets(cmd_buffer, &info);
3222 }
3223
3224 void radv_CmdDrawIndexedIndirect(
3225 VkCommandBuffer commandBuffer,
3226 VkBuffer _buffer,
3227 VkDeviceSize offset,
3228 uint32_t drawCount,
3229 uint32_t stride)
3230 {
3231 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3232 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3233 struct radv_draw_info info = {};
3234
3235 info.indexed = true;
3236 info.count = drawCount;
3237 info.indirect = buffer;
3238 info.indirect_offset = offset;
3239 info.stride = stride;
3240
3241 radv_emit_draw_packets(cmd_buffer, &info);
3242 }
3243
3244 void radv_CmdDrawIndirectCountAMD(
3245 VkCommandBuffer commandBuffer,
3246 VkBuffer _buffer,
3247 VkDeviceSize offset,
3248 VkBuffer _countBuffer,
3249 VkDeviceSize countBufferOffset,
3250 uint32_t maxDrawCount,
3251 uint32_t stride)
3252 {
3253 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3254 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3255 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
3256 struct radv_draw_info info = {};
3257
3258 info.count = maxDrawCount;
3259 info.indirect = buffer;
3260 info.indirect_offset = offset;
3261 info.count_buffer = count_buffer;
3262 info.count_buffer_offset = countBufferOffset;
3263 info.stride = stride;
3264
3265 radv_emit_draw_packets(cmd_buffer, &info);
3266 }
3267
3268 void radv_CmdDrawIndexedIndirectCountAMD(
3269 VkCommandBuffer commandBuffer,
3270 VkBuffer _buffer,
3271 VkDeviceSize offset,
3272 VkBuffer _countBuffer,
3273 VkDeviceSize countBufferOffset,
3274 uint32_t maxDrawCount,
3275 uint32_t stride)
3276 {
3277 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3278 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3279 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
3280 struct radv_draw_info info = {};
3281
3282 info.indexed = true;
3283 info.count = maxDrawCount;
3284 info.indirect = buffer;
3285 info.indirect_offset = offset;
3286 info.count_buffer = count_buffer;
3287 info.count_buffer_offset = countBufferOffset;
3288 info.stride = stride;
3289
3290 radv_emit_draw_packets(cmd_buffer, &info);
3291 }
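
/* The two *CountAMD entry points above implement VK_AMD_draw_indirect_count.
 * Illustrative application-side usage (a sketch; buffer names are
 * placeholders):
 *
 *     vkCmdDrawIndexedIndirectCountAMD(cmd, params_buf, 0, count_buf, 0,
 *                                      max_draws,
 *                                      sizeof(VkDrawIndexedIndirectCommand));
 *
 * The CP reads the actual draw count from count_buf at execution time and
 * limits it to maxDrawCount via the COUNT_INDIRECT_ENABLE path in
 * radv_cs_emit_indirect_draw_packet().
 */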
3292
3293 struct radv_dispatch_info {
3294 /**
3295 * Grid dimensions to launch, in workgroup (block) units.
3296 */
3297 uint32_t blocks[3];
3298
3299 /**
3300 * Whether it's an unaligned compute dispatch.
3301 */
3302 bool unaligned;
3303
3304 /**
3305 * Indirect compute parameters resource.
3306 */
3307 struct radv_buffer *indirect;
3308 uint64_t indirect_offset;
3309 };
3310
3311 static void
3312 radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer,
3313 const struct radv_dispatch_info *info)
3314 {
3315 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
3316 struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
3317 struct radeon_winsys *ws = cmd_buffer->device->ws;
3318 struct radeon_winsys_cs *cs = cmd_buffer->cs;
3319 struct ac_userdata_info *loc;
3320 unsigned dispatch_initiator;
3321 uint8_t grid_used;
3322
3323 grid_used = compute_shader->info.info.cs.grid_components_used;
3324
3325 loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE,
3326 AC_UD_CS_GRID_SIZE);
3327
3328 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(ws, cs, 25);
3329
3330 dispatch_initiator = S_00B800_COMPUTE_SHADER_EN(1) |
3331 S_00B800_FORCE_START_AT_000(1);
3332
3333 if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
3334 /* If the kernel driver allows it (it exposes a hw
3335 * register for this), launch waves out-of-order.
3336 */
3337 dispatch_initiator |= S_00B800_ORDER_MODE(1);
3338 }
3339
3340 if (info->indirect) {
3341 uint64_t va = radv_buffer_get_va(info->indirect->bo);
3342
3343 va += info->indirect->offset + info->indirect_offset;
3344
3345 ws->cs_add_buffer(cs, info->indirect->bo, 8);
3346
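/* If the shader uses the grid size (gl_NumWorkGroups), copy the
 * dispatch dimensions from the indirect buffer into the grid-size
 * user SGPRs, one dword per component the shader actually reads. */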
3347 if (loc->sgpr_idx != -1) {
3348 for (unsigned i = 0; i < grid_used; ++i) {
3349 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
3350 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
3351 COPY_DATA_DST_SEL(COPY_DATA_REG));
3352 radeon_emit(cs, (va + 4 * i));
3353 radeon_emit(cs, (va + 4 * i) >> 32);
3354 radeon_emit(cs, ((R_00B900_COMPUTE_USER_DATA_0
3355 + loc->sgpr_idx * 4) >> 2) + i);
3356 radeon_emit(cs, 0);
3357 }
3358 }
3359
3360 if (radv_cmd_buffer_uses_mec(cmd_buffer)) {
3361 radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 2, 0) |
3362 PKT3_SHADER_TYPE_S(1));
3363 radeon_emit(cs, va);
3364 radeon_emit(cs, va >> 32);
3365 radeon_emit(cs, dispatch_initiator);
3366 } else {
3367 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
3368 PKT3_SHADER_TYPE_S(1));
3369 radeon_emit(cs, 1); /* BASE_INDEX = 1 selects the indirect data base */
3370 radeon_emit(cs, va);
3371 radeon_emit(cs, va >> 32);
3372
3373 radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, 0) |
3374 PKT3_SHADER_TYPE_S(1));
3375 radeon_emit(cs, 0);
3376 radeon_emit(cs, dispatch_initiator);
3377 }
3378 } else {
3379 unsigned blocks[3] = { info->blocks[0], info->blocks[1], info->blocks[2] };
3380
3381 if (info->unaligned) {
3382 unsigned *cs_block_size = compute_shader->info.cs.block_size;
3383 unsigned remainder[3];
3384
3385 /* If a dimension is already aligned, the remainder
3386 * is a full block size, not 0.
3387 */
3388 remainder[0] = blocks[0] + cs_block_size[0] -
3389 align_u32_npot(blocks[0], cs_block_size[0]);
3390 remainder[1] = blocks[1] + cs_block_size[1] -
3391 align_u32_npot(blocks[1], cs_block_size[1]);
3392 remainder[2] = blocks[2] + cs_block_size[2] -
3393 align_u32_npot(blocks[2], cs_block_size[2]);
3394
3395 blocks[0] = round_up_u32(blocks[0], cs_block_size[0]);
3396 blocks[1] = round_up_u32(blocks[1], cs_block_size[1]);
3397 blocks[2] = round_up_u32(blocks[2], cs_block_size[2]);
3398
3399 radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
3400 radeon_emit(cs,
3401 S_00B81C_NUM_THREAD_FULL(cs_block_size[0]) |
3402 S_00B81C_NUM_THREAD_PARTIAL(remainder[0]));
3403 radeon_emit(cs,
3404 S_00B81C_NUM_THREAD_FULL(cs_block_size[1]) |
3405 S_00B81C_NUM_THREAD_PARTIAL(remainder[1]));
3406 radeon_emit(cs,
3407 S_00B81C_NUM_THREAD_FULL(cs_block_size[2]) |
3408 S_00B81C_NUM_THREAD_PARTIAL(remainder[2]));
3409
3410 dispatch_initiator |= S_00B800_PARTIAL_TG_EN(1);
3411 }
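
/* Worked example for the unaligned case above (illustrative):
 * blocks = {10, 1, 1} with cs_block_size = {8, 1, 1} gives
 * align_u32_npot(10, 8) = 16, so remainder[0] = 10 + 8 - 16 = 2 and
 * blocks[0] rounds up to 2 groups; with PARTIAL_TG_EN set, the
 * hardware launches one full 8-thread group and one partial
 * 2-thread group along X. */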
3412
3413 if (loc->sgpr_idx != -1) {
3414 assert(!loc->indirect);
3415 assert(loc->num_sgprs == grid_used);
3416
3417 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
3418 loc->sgpr_idx * 4, grid_used);
3419 radeon_emit(cs, blocks[0]);
3420 if (grid_used > 1)
3421 radeon_emit(cs, blocks[1]);
3422 if (grid_used > 2)
3423 radeon_emit(cs, blocks[2]);
3424 }
3425
3426 radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
3427 PKT3_SHADER_TYPE_S(1));
3428 radeon_emit(cs, blocks[0]);
3429 radeon_emit(cs, blocks[1]);
3430 radeon_emit(cs, blocks[2]);
3431 radeon_emit(cs, dispatch_initiator);
3432 }
3433
3434 assert(cmd_buffer->cs->cdw <= cdw_max);
3435 }
3436
3437 static void
3438 radv_dispatch(struct radv_cmd_buffer *cmd_buffer,
3439 const struct radv_dispatch_info *info)
3440 {
3441 radv_emit_compute_pipeline(cmd_buffer);
3442
3443 radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
3444 radv_flush_constants(cmd_buffer, cmd_buffer->state.compute_pipeline,
3445 VK_SHADER_STAGE_COMPUTE_BIT);
3446
3447 si_emit_cache_flush(cmd_buffer);
3448
3449 radv_emit_dispatch_packets(cmd_buffer, info);
3450
3451 radv_cmd_buffer_after_draw(cmd_buffer);
3452 }
3453
3454 void radv_CmdDispatch(
3455 VkCommandBuffer commandBuffer,
3456 uint32_t x,
3457 uint32_t y,
3458 uint32_t z)
3459 {
3460 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3461 struct radv_dispatch_info info = {};
3462
3463 info.blocks[0] = x;
3464 info.blocks[1] = y;
3465 info.blocks[2] = z;
3466
3467 radv_dispatch(cmd_buffer, &info);
3468 }
3469
3470 void radv_CmdDispatchIndirect(
3471 VkCommandBuffer commandBuffer,
3472 VkBuffer _buffer,
3473 VkDeviceSize offset)
3474 {
3475 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3476 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3477 struct radv_dispatch_info info = {};
3478
3479 info.indirect = buffer;
3480 info.indirect_offset = offset;
3481
3482 radv_dispatch(cmd_buffer, &info);
3483 }
3484
3485 void radv_unaligned_dispatch(
3486 struct radv_cmd_buffer *cmd_buffer,
3487 uint32_t x,
3488 uint32_t y,
3489 uint32_t z)
3490 {
3491 struct radv_dispatch_info info = {};
3492
3493 info.blocks[0] = x;
3494 info.blocks[1] = y;
3495 info.blocks[2] = z;
3496 info.unaligned = true;
3497
3498 radv_dispatch(cmd_buffer, &info);
3499 }
3500
3501 void radv_CmdEndRenderPass(
3502 VkCommandBuffer commandBuffer)
3503 {
3504 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3505
3506 radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier);
3507
3508 radv_cmd_buffer_resolve_subpass(cmd_buffer);
3509
3510 for (unsigned i = 0; i < cmd_buffer->state.framebuffer->attachment_count; ++i) {
3511 VkImageLayout layout = cmd_buffer->state.pass->attachments[i].final_layout;
3512 radv_handle_subpass_image_transition(cmd_buffer,
3513 (VkAttachmentReference){i, layout});
3514 }
3515
3516 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
3517
3518 cmd_buffer->state.pass = NULL;
3519 cmd_buffer->state.subpass = NULL;
3520 cmd_buffer->state.attachments = NULL;
3521 cmd_buffer->state.framebuffer = NULL;
3522 }
3523
3524 /*
3525 * For HTILE we have the following interesting clear words:
3526 * 0x0000030f: Uncompressed.
3527 * 0xfffffff0: Clear depth to 1.0.
3528 * 0x00000000: Clear depth to 0.0.
3529 */
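
/* Illustrative helper (a sketch, not an existing driver entry point):
 * mapping the two full-surface fast-clear cases above to their HTILE
 * clear words.
 */
static inline uint32_t radv_htile_fast_clear_word(bool depth_is_one)
{
return depth_is_one ? 0xfffffff0 : 0x00000000;
}
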
3530 static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
3531 struct radv_image *image,
3532 const VkImageSubresourceRange *range,
3533 uint32_t clear_word)
3534 {
3535 assert(range->baseMipLevel == 0);
3536 assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_MIP_LEVELS);
3537 unsigned layer_count = radv_get_layerCount(image, range);
3538 uint64_t size = image->surface.htile_slice_size * layer_count;
3539 uint64_t offset = image->offset + image->htile_offset +
3540 image->surface.htile_slice_size * range->baseArrayLayer;
3541
3542 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
3543 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
3544
3545 radv_fill_buffer(cmd_buffer, image->bo, offset, size, clear_word);
3546
3547 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
3548 RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
3549 RADV_CMD_FLAG_INV_VMEM_L1 |
3550 RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
3551 }
3552
3553 static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
3554 struct radv_image *image,
3555 VkImageLayout src_layout,
3556 VkImageLayout dst_layout,
3557 unsigned src_queue_mask,
3558 unsigned dst_queue_mask,
3559 const VkImageSubresourceRange *range,
3560 VkImageAspectFlags pending_clears)
3561 {
3562 if (dst_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL &&
3563 (pending_clears & vk_format_aspects(image->vk_format)) == vk_format_aspects(image->vk_format) &&
3564 cmd_buffer->state.render_area.offset.x == 0 && cmd_buffer->state.render_area.offset.y == 0 &&
3565 cmd_buffer->state.render_area.extent.width == image->info.width &&
3566 cmd_buffer->state.render_area.extent.height == image->info.height) {
3567 /* The clear will initialize htile. */
3568 return;
3569 } else if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
3570 radv_layout_has_htile(image, dst_layout, dst_queue_mask)) {
3571 /* TODO: merge with the clear if applicable */
3572 radv_initialize_htile(cmd_buffer, image, range, 0);
3573 } else if (!radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
3574 radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
3575 radv_initialize_htile(cmd_buffer, image, range, 0xffffffff);
3576 } else if (radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
3577 !radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
3578 VkImageSubresourceRange local_range = *range;
3579 local_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
3580 local_range.baseMipLevel = 0;
3581 local_range.levelCount = 1;
3582
3583 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
3584 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
3585
3586 radv_decompress_depth_image_inplace(cmd_buffer, image, &local_range);
3587
3588 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
3589 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
3590 }
3591 }
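
/* Summary of the HTILE transitions handled above:
 * UNDEFINED -> HTILE-enabled layout : initialize HTILE to 0
 * uncompressed layout -> HTILE-compressed layout : initialize HTILE to 0xffffffff
 * HTILE-compressed layout -> uncompressed layout : in-place depth decompress
 */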
3592
3593 void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
3594 struct radv_image *image, uint32_t value)
3595 {
3596 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
3597 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
3598
3599 radv_fill_buffer(cmd_buffer, image->bo, image->offset + image->cmask.offset,
3600 image->cmask.size, value);
3601
3602 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
3603 RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
3604 RADV_CMD_FLAG_INV_VMEM_L1 |
3605 RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
3606 }
3607
3608 static void radv_handle_cmask_image_transition(struct radv_cmd_buffer *cmd_buffer,
3609 struct radv_image *image,
3610 VkImageLayout src_layout,
3611 VkImageLayout dst_layout,
3612 unsigned src_queue_mask,
3613 unsigned dst_queue_mask,
3614 const VkImageSubresourceRange *range)
3615 {
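/* After UNDEFINED the CMASK contents are garbage, so bring them to a
 * known initial value on first use; the value chosen differs depending
 * on whether an FMASK surface exists. */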
3616 if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
3617 if (image->fmask.size)
3618 radv_initialise_cmask(cmd_buffer, image, 0xccccccccu);
3619 else
3620 radv_initialise_cmask(cmd_buffer, image, 0xffffffffu);
3621 } else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
3622 !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
3623 radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
3624 }
3625 }
3626
3627 void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
3628 struct radv_image *image, uint32_t value)
3629 {
3631 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
3632 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
3633
3634 radv_fill_buffer(cmd_buffer, image->bo, image->offset + image->dcc_offset,
3635 image->surface.dcc_size, value);
3636
3637 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
3638 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
3639 RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
3640 RADV_CMD_FLAG_INV_VMEM_L1 |
3641 RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
3642 }
3643
3644 static void radv_handle_dcc_image_transition(struct radv_cmd_buffer *cmd_buffer,
3645 struct radv_image *image,
3646 VkImageLayout src_layout,
3647 VkImageLayout dst_layout,
3648 unsigned src_queue_mask,
3649 unsigned dst_queue_mask,
3650 const VkImageSubresourceRange *range)
3651 {
3652 if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
3653 radv_initialize_dcc(cmd_buffer, image, 0x20202020u);
3654 } else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
3655 !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
3656 radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
3657 }
3658 }
3659
3660 static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
3661 struct radv_image *image,
3662 VkImageLayout src_layout,
3663 VkImageLayout dst_layout,
3664 uint32_t src_family,
3665 uint32_t dst_family,
3666 const VkImageSubresourceRange *range,
3667 VkImageAspectFlags pending_clears)
3668 {
3669 if (image->exclusive && src_family != dst_family) {
3670 /* This is an acquire or a release operation and there will be
3671 * a corresponding release/acquire. Do the transition in the
3672 * most flexible queue. */
3673
3674 assert(src_family == cmd_buffer->queue_family_index ||
3675 dst_family == cmd_buffer->queue_family_index);
3676
3677 if (cmd_buffer->queue_family_index == RADV_QUEUE_TRANSFER)
3678 return;
3679
3680 if (cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
3681 (src_family == RADV_QUEUE_GENERAL ||
3682 dst_family == RADV_QUEUE_GENERAL))
3683 return;
3684 }
3685
3686 unsigned src_queue_mask = radv_image_queue_family_mask(image, src_family, cmd_buffer->queue_family_index);
3687 unsigned dst_queue_mask = radv_image_queue_family_mask(image, dst_family, cmd_buffer->queue_family_index);
3688
3689 if (image->surface.htile_size)
3690 radv_handle_depth_image_transition(cmd_buffer, image, src_layout,
3691 dst_layout, src_queue_mask,
3692 dst_queue_mask, range,
3693 pending_clears);
3694
3695 if (image->cmask.size || image->fmask.size)
3696 radv_handle_cmask_image_transition(cmd_buffer, image, src_layout,
3697 dst_layout, src_queue_mask,
3698 dst_queue_mask, range);
3699
3700 if (image->surface.dcc_size)
3701 radv_handle_dcc_image_transition(cmd_buffer, image, src_layout,
3702 dst_layout, src_queue_mask,
3703 dst_queue_mask, range);
3704 }
3705
3706 void radv_CmdPipelineBarrier(
3707 VkCommandBuffer commandBuffer,
3708 VkPipelineStageFlags srcStageMask,
3709 VkPipelineStageFlags destStageMask,
3710 VkBool32 byRegion,
3711 uint32_t memoryBarrierCount,
3712 const VkMemoryBarrier* pMemoryBarriers,
3713 uint32_t bufferMemoryBarrierCount,
3714 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
3715 uint32_t imageMemoryBarrierCount,
3716 const VkImageMemoryBarrier* pImageMemoryBarriers)
3717 {
3718 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3719 enum radv_cmd_flush_bits src_flush_bits = 0;
3720 enum radv_cmd_flush_bits dst_flush_bits = 0;
3721
3722 for (uint32_t i = 0; i < memoryBarrierCount; i++) {
3723 src_flush_bits |= radv_src_access_flush(cmd_buffer, pMemoryBarriers[i].srcAccessMask);
3724 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pMemoryBarriers[i].dstAccessMask,
3725 NULL);
3726 }
3727
3728 for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
3729 src_flush_bits |= radv_src_access_flush(cmd_buffer, pBufferMemoryBarriers[i].srcAccessMask);
3730 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pBufferMemoryBarriers[i].dstAccessMask,
3731 NULL);
3732 }
3733
3734 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
3735 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
3736 src_flush_bits |= radv_src_access_flush(cmd_buffer, pImageMemoryBarriers[i].srcAccessMask);
3737 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pImageMemoryBarriers[i].dstAccessMask,
3738 image);
3739 }
3740
3741 radv_stage_flush(cmd_buffer, srcStageMask);
3742 cmd_buffer->state.flush_bits |= src_flush_bits;
3743
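/* Layout transitions below may themselves read and write metadata
 * (HTILE/CMASK/DCC), so they run after the source-side flushes and
 * before the destination-side flush bits are applied. */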
3744 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
3745 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
3746 radv_handle_image_transition(cmd_buffer, image,
3747 pImageMemoryBarriers[i].oldLayout,
3748 pImageMemoryBarriers[i].newLayout,
3749 pImageMemoryBarriers[i].srcQueueFamilyIndex,
3750 pImageMemoryBarriers[i].dstQueueFamilyIndex,
3751 &pImageMemoryBarriers[i].subresourceRange,
3752 0);
3753 }
3754
3755 cmd_buffer->state.flush_bits |= dst_flush_bits;
3756 }
3757
3759 static void write_event(struct radv_cmd_buffer *cmd_buffer,
3760 struct radv_event *event,
3761 VkPipelineStageFlags stageMask,
3762 unsigned value)
3763 {
3764 struct radeon_winsys_cs *cs = cmd_buffer->cs;
3765 uint64_t va = radv_buffer_get_va(event->bo);
3766
3767 cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8);
3768
3769 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 18);
3770
3771 /* TODO: a bottom-of-pipe EOP event is overkill. We could pick a
3772 * lighter event based on the stage mask. */
3773
3774 si_cs_emit_write_event_eop(cs,
3775 cmd_buffer->state.predicating,
3776 cmd_buffer->device->physical_device->rad_info.chip_class,
3777 false,
3778 V_028A90_BOTTOM_OF_PIPE_TS, 0,
3779 1, va, 2, value);
3780
3781 assert(cmd_buffer->cs->cdw <= cdw_max);
3782 }
3783
3784 void radv_CmdSetEvent(VkCommandBuffer commandBuffer,
3785 VkEvent _event,
3786 VkPipelineStageFlags stageMask)
3787 {
3788 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3789 RADV_FROM_HANDLE(radv_event, event, _event);
3790
3791 write_event(cmd_buffer, event, stageMask, 1);
3792 }
3793
3794 void radv_CmdResetEvent(VkCommandBuffer commandBuffer,
3795 VkEvent _event,
3796 VkPipelineStageFlags stageMask)
3797 {
3798 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3799 RADV_FROM_HANDLE(radv_event, event, _event);
3800
3801 write_event(cmd_buffer, event, stageMask, 0);
3802 }
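
/* Illustrative application-side pairing for the two entry points above
 * (a sketch; buffer/stage names are placeholders):
 *
 *     vkCmdSetEvent(cmd, event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
 *     ...
 *     vkCmdWaitEvents(cmd, 1, &event, srcStages, dstStages,
 *                     0, NULL, 0, NULL, 0, NULL);
 *
 * write_event() stores 1 (set) or 0 (reset) into the event BO at
 * bottom-of-pipe; radv_CmdWaitEvents() below polls that dword until it
 * reads 1.
 */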
3803
3804 void radv_CmdWaitEvents(VkCommandBuffer commandBuffer,
3805 uint32_t eventCount,
3806 const VkEvent* pEvents,
3807 VkPipelineStageFlags srcStageMask,
3808 VkPipelineStageFlags dstStageMask,
3809 uint32_t memoryBarrierCount,
3810 const VkMemoryBarrier* pMemoryBarriers,
3811 uint32_t bufferMemoryBarrierCount,
3812 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
3813 uint32_t imageMemoryBarrierCount,
3814 const VkImageMemoryBarrier* pImageMemoryBarriers)
3815 {
3816 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3817 struct radeon_winsys_cs *cs = cmd_buffer->cs;
3818
3819 for (unsigned i = 0; i < eventCount; ++i) {
3820 RADV_FROM_HANDLE(radv_event, event, pEvents[i]);
3821 uint64_t va = radv_buffer_get_va(event->bo);
3822
3823 cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8);
3824
3825 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
3826
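/* Poll the event dword with WAIT_REG_MEM until it equals 1, i.e. until
 * the event has been set. */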
3827 si_emit_wait_fence(cs, false, va, 1, 0xffffffff);
3828 assert(cmd_buffer->cs->cdw <= cdw_max);
3829 }
3830
3832 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
3833 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
3834
3835 radv_handle_image_transition(cmd_buffer, image,
3836 pImageMemoryBarriers[i].oldLayout,
3837 pImageMemoryBarriers[i].newLayout,
3838 pImageMemoryBarriers[i].srcQueueFamilyIndex,
3839 pImageMemoryBarriers[i].dstQueueFamilyIndex,
3840 &pImageMemoryBarriers[i].subresourceRange,
3841 0);
3842 }
3843
3844 /* TODO: figure out how to do memory barriers without waiting */
3845 cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER |
3846 RADV_CMD_FLAG_INV_GLOBAL_L2 |
3847 RADV_CMD_FLAG_INV_VMEM_L1 |
3848 RADV_CMD_FLAG_INV_SMEM_L1;
3849 }