radv: Don't set dynamic state for pipelines with rasterizer discard.
[mesa.git] src/amd/vulkan/radv_cmd_buffer.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "radv_private.h"
#include "radv_radeon_winsys.h"
#include "radv_cs.h"
#include "sid.h"
#include "vk_format.h"
#include "radv_meta.h"

#include "ac_debug.h"

static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
					 struct radv_image *image,
					 VkImageLayout src_layout,
					 VkImageLayout dst_layout,
					 uint32_t src_family,
					 uint32_t dst_family,
					 const VkImageSubresourceRange *range,
					 VkImageAspectFlags pending_clears);

const struct radv_dynamic_state default_dynamic_state = {
	.viewport = {
		.count = 0,
	},
	.scissor = {
		.count = 0,
	},
	.line_width = 1.0f,
	.depth_bias = {
		.bias = 0.0f,
		.clamp = 0.0f,
		.slope = 0.0f,
	},
	.blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
	.depth_bounds = {
		.min = 0.0f,
		.max = 1.0f,
	},
	.stencil_compare_mask = {
		.front = ~0u,
		.back = ~0u,
	},
	.stencil_write_mask = {
		.front = ~0u,
		.back = ~0u,
	},
	.stencil_reference = {
		.front = 0u,
		.back = 0u,
	},
};

void
radv_dynamic_state_copy(struct radv_dynamic_state *dest,
			const struct radv_dynamic_state *src,
			uint32_t copy_mask)
{
	if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
		dest->viewport.count = src->viewport.count;
		typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
			     src->viewport.count);
	}

	if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
		dest->scissor.count = src->scissor.count;
		typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
			     src->scissor.count);
	}

	if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
		dest->line_width = src->line_width;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
		dest->depth_bias = src->depth_bias;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
		typed_memcpy(dest->blend_constants, src->blend_constants, 4);

	if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
		dest->depth_bounds = src->depth_bounds;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
		dest->stencil_compare_mask = src->stencil_compare_mask;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
		dest->stencil_write_mask = src->stencil_write_mask;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
		dest->stencil_reference = src->stencil_reference;
}

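/* On CIK and newer, compute queues execute on the MEC (micro-engine compute)
 * instead of the graphics ME, which changes how some packets have to be
 * emitted.
 */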
bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer)
{
	return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
	       cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
}

enum ring_type radv_queue_family_to_ring(int f) {
	switch (f) {
	case RADV_QUEUE_GENERAL:
		return RING_GFX;
	case RADV_QUEUE_COMPUTE:
		return RING_COMPUTE;
	case RADV_QUEUE_TRANSFER:
		return RING_DMA;
	default:
		unreachable("Unknown queue family");
	}
}

static VkResult radv_create_cmd_buffer(
	struct radv_device *    device,
	struct radv_cmd_pool *  pool,
	VkCommandBufferLevel    level,
	VkCommandBuffer*        pCommandBuffer)
{
	struct radv_cmd_buffer *cmd_buffer;
	VkResult result;
	unsigned ring;
	cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
			      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (cmd_buffer == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(cmd_buffer, 0, sizeof(*cmd_buffer));
	cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
	cmd_buffer->device = device;
	cmd_buffer->pool = pool;
	cmd_buffer->level = level;

	if (pool) {
		list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
		cmd_buffer->queue_family_index = pool->queue_family_index;

	} else {
		/* Init the pool_link so we can safely call list_del when we destroy
		 * the command buffer
		 */
		list_inithead(&cmd_buffer->pool_link);
		cmd_buffer->queue_family_index = RADV_QUEUE_GENERAL;
	}

	ring = radv_queue_family_to_ring(cmd_buffer->queue_family_index);

	cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
	if (!cmd_buffer->cs) {
		result = VK_ERROR_OUT_OF_HOST_MEMORY;
		goto fail;
	}

	*pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);

	cmd_buffer->upload.offset = 0;
	cmd_buffer->upload.size = 0;
	list_inithead(&cmd_buffer->upload.list);

	return VK_SUCCESS;

fail:
	vk_free(&cmd_buffer->pool->alloc, cmd_buffer);

	return result;
}

static void
radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer)
{
	list_del(&cmd_buffer->pool_link);

	list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
				 &cmd_buffer->upload.list, list) {
		cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
		list_del(&up->list);
		free(up);
	}

	if (cmd_buffer->upload.upload_bo)
		cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
	cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
	free(cmd_buffer->push_descriptors.set.mapped_ptr);
	vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

static void radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
{
	cmd_buffer->device->ws->cs_reset(cmd_buffer->cs);

	list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
				 &cmd_buffer->upload.list, list) {
		cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
		list_del(&up->list);
		free(up);
	}

	cmd_buffer->scratch_size_needed = 0;
	cmd_buffer->compute_scratch_size_needed = 0;
	cmd_buffer->esgs_ring_size_needed = 0;
	cmd_buffer->gsvs_ring_size_needed = 0;
	cmd_buffer->tess_rings_needed = false;
	cmd_buffer->sample_positions_needed = false;

	if (cmd_buffer->upload.upload_bo)
		cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs,
						      cmd_buffer->upload.upload_bo, 8);
	cmd_buffer->upload.offset = 0;

	cmd_buffer->record_fail = false;

	cmd_buffer->ring_offsets_idx = -1;
}

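/* Grow the upload buffer to hold at least min_needed bytes: never less than
 * 16KB, and at least double the previous size. The old BO is kept on
 * upload.list until the command buffer is reset or destroyed, because the
 * GPU may still be reading from it.
 */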
static bool
radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer,
				  uint64_t min_needed)
{
	uint64_t new_size;
	struct radeon_winsys_bo *bo;
	struct radv_cmd_buffer_upload *upload;
	struct radv_device *device = cmd_buffer->device;

	new_size = MAX2(min_needed, 16 * 1024);
	new_size = MAX2(new_size, 2 * cmd_buffer->upload.size);

	bo = device->ws->buffer_create(device->ws,
				       new_size, 4096,
				       RADEON_DOMAIN_GTT,
				       RADEON_FLAG_CPU_ACCESS);

	if (!bo) {
		cmd_buffer->record_fail = true;
		return false;
	}

	device->ws->cs_add_buffer(cmd_buffer->cs, bo, 8);
	if (cmd_buffer->upload.upload_bo) {
		upload = malloc(sizeof(*upload));

		if (!upload) {
			cmd_buffer->record_fail = true;
			device->ws->buffer_destroy(bo);
			return false;
		}

		memcpy(upload, &cmd_buffer->upload, sizeof(*upload));
		list_add(&upload->list, &cmd_buffer->upload.list);
	}

	cmd_buffer->upload.upload_bo = bo;
	cmd_buffer->upload.size = new_size;
	cmd_buffer->upload.offset = 0;
	cmd_buffer->upload.map = device->ws->buffer_map(cmd_buffer->upload.upload_bo);

	if (!cmd_buffer->upload.map) {
		cmd_buffer->record_fail = true;
		return false;
	}

	return true;
}

bool
radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer,
			     unsigned size,
			     unsigned alignment,
			     unsigned *out_offset,
			     void **ptr)
{
	uint64_t offset = align(cmd_buffer->upload.offset, alignment);
	if (offset + size > cmd_buffer->upload.size) {
		if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size))
			return false;
		offset = 0;
	}

	*out_offset = offset;
	*ptr = cmd_buffer->upload.map + offset;

	cmd_buffer->upload.offset = offset + size;
	return true;
}

bool
radv_cmd_buffer_upload_data(struct radv_cmd_buffer *cmd_buffer,
			    unsigned size, unsigned alignment,
			    const void *data, unsigned *out_offset)
{
	uint8_t *ptr;

	if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size, alignment,
					  out_offset, (void **)&ptr))
		return false;

	if (ptr)
		memcpy(ptr, data, size);

	return true;
}

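/* Write an incrementing trace id into the device trace BO and emit a
 * matching trace-point NOP, so that a hang can be narrowed down to the last
 * marker that reached memory.
 */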
void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_device *device = cmd_buffer->device;
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va;

	if (!device->trace_bo)
		return;

	va = device->ws->buffer_get_va(device->trace_bo);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 7);

	++cmd_buffer->state.trace_id;
	device->ws->cs_add_buffer(cs, device->trace_bo, 8);
	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
	radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
			S_370_WR_CONFIRM(1) |
			S_370_ENGINE_SEL(V_370_ME));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit(cs, cmd_buffer->state.trace_id);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, AC_ENCODE_TRACE_POINT(cmd_buffer->state.trace_id));
}

static void
radv_emit_graphics_blend_state(struct radv_cmd_buffer *cmd_buffer,
			       struct radv_pipeline *pipeline)
{
	radeon_set_context_reg_seq(cmd_buffer->cs, R_028780_CB_BLEND0_CONTROL, 8);
	radeon_emit_array(cmd_buffer->cs, pipeline->graphics.blend.cb_blend_control,
			  8);
	radeon_set_context_reg(cmd_buffer->cs, R_028808_CB_COLOR_CONTROL, pipeline->graphics.blend.cb_color_control);
	radeon_set_context_reg(cmd_buffer->cs, R_028B70_DB_ALPHA_TO_MASK, pipeline->graphics.blend.db_alpha_to_mask);
}

static void
radv_emit_graphics_depth_stencil_state(struct radv_cmd_buffer *cmd_buffer,
				       struct radv_pipeline *pipeline)
{
	struct radv_depth_stencil_state *ds = &pipeline->graphics.ds;
	radeon_set_context_reg(cmd_buffer->cs, R_028800_DB_DEPTH_CONTROL, ds->db_depth_control);
	radeon_set_context_reg(cmd_buffer->cs, R_02842C_DB_STENCIL_CONTROL, ds->db_stencil_control);

	radeon_set_context_reg(cmd_buffer->cs, R_028000_DB_RENDER_CONTROL, ds->db_render_control);
	radeon_set_context_reg(cmd_buffer->cs, R_028010_DB_RENDER_OVERRIDE2, ds->db_render_override2);
}

/* Pack a float into unsigned 12.4 fixed point (12 integer bits, 4 fractional
 * bits), clamping to the representable range [0, 0xffff].
 */
static unsigned radv_pack_float_12p4(float x)
{
	return x <= 0    ? 0 :
	       x >= 4096 ? 0xffff : x * 16;
}

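/* Map an API shader stage to the base of its user-data SGPR registers for
 * the hardware stage it runs on: with tessellation the vertex shader runs
 * as LS, and with geometry the vertex or tess-eval shader runs as ES.
 */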
static uint32_t
shader_stage_to_user_data_0(gl_shader_stage stage, bool has_gs, bool has_tess)
{
	switch (stage) {
	case MESA_SHADER_FRAGMENT:
		return R_00B030_SPI_SHADER_USER_DATA_PS_0;
	case MESA_SHADER_VERTEX:
		if (has_tess)
			return R_00B530_SPI_SHADER_USER_DATA_LS_0;
		else
			return has_gs ? R_00B330_SPI_SHADER_USER_DATA_ES_0 : R_00B130_SPI_SHADER_USER_DATA_VS_0;
	case MESA_SHADER_GEOMETRY:
		return R_00B230_SPI_SHADER_USER_DATA_GS_0;
	case MESA_SHADER_COMPUTE:
		return R_00B900_COMPUTE_USER_DATA_0;
	case MESA_SHADER_TESS_CTRL:
		return R_00B430_SPI_SHADER_USER_DATA_HS_0;
	case MESA_SHADER_TESS_EVAL:
		if (has_gs)
			return R_00B330_SPI_SHADER_USER_DATA_ES_0;
		else
			return R_00B130_SPI_SHADER_USER_DATA_VS_0;
	default:
		unreachable("unknown shader");
	}
}

static struct ac_userdata_info *
radv_lookup_user_sgpr(struct radv_pipeline *pipeline,
		      gl_shader_stage stage,
		      int idx)
{
	return &pipeline->shaders[stage]->info.user_sgprs_locs.shader_data[idx];
}

static void
radv_emit_userdata_address(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_pipeline *pipeline,
			   gl_shader_stage stage,
			   int idx, uint64_t va)
{
	struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
	uint32_t base_reg = shader_stage_to_user_data_0(stage, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
	if (loc->sgpr_idx == -1)
		return;
	assert(loc->num_sgprs == 2);
	assert(!loc->indirect);
	radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, 2);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
}

static void
radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer,
			      struct radv_pipeline *pipeline)
{
	int num_samples = pipeline->graphics.ms.num_samples;
	struct radv_multisample_state *ms = &pipeline->graphics.ms;
	struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_mask[0]);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_mask[1]);

	radeon_set_context_reg(cmd_buffer->cs, CM_R_028804_DB_EQAA, ms->db_eqaa);
	radeon_set_context_reg(cmd_buffer->cs, EG_R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);

	if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples)
		return;

	radeon_set_context_reg_seq(cmd_buffer->cs, CM_R_028BDC_PA_SC_LINE_CNTL, 2);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_line_cntl);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_config);

	radv_cayman_emit_msaa_sample_locs(cmd_buffer->cs, num_samples);

	if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.needs_sample_positions) {
		uint32_t offset;
		struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_FRAGMENT, AC_UD_PS_SAMPLE_POS_OFFSET);
		uint32_t base_reg = shader_stage_to_user_data_0(MESA_SHADER_FRAGMENT, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
		if (loc->sgpr_idx == -1)
			return;
		assert(loc->num_sgprs == 1);
		assert(!loc->indirect);
		switch (num_samples) {
		default:
			offset = 0;
			break;
		case 2:
			offset = 1;
			break;
		case 4:
			offset = 3;
			break;
		case 8:
			offset = 7;
			break;
		case 16:
			offset = 15;
			break;
		}

		radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, offset);
		cmd_buffer->sample_positions_needed = true;
	}
}

static void
radv_emit_graphics_raster_state(struct radv_cmd_buffer *cmd_buffer,
				struct radv_pipeline *pipeline)
{
	struct radv_raster_state *raster = &pipeline->graphics.raster;

	radeon_set_context_reg(cmd_buffer->cs, R_028810_PA_CL_CLIP_CNTL,
			       raster->pa_cl_clip_cntl);

	radeon_set_context_reg(cmd_buffer->cs, R_0286D4_SPI_INTERP_CONTROL_0,
			       raster->spi_interp_control);

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028A00_PA_SU_POINT_SIZE, 2);
	unsigned tmp = (unsigned)(1.0 * 8.0);
	radeon_emit(cmd_buffer->cs, S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp));
	radeon_emit(cmd_buffer->cs, S_028A04_MIN_SIZE(radv_pack_float_12p4(0)) |
				    S_028A04_MAX_SIZE(radv_pack_float_12p4(8192/2))); /* R_028A04_PA_SU_POINT_MINMAX */

	radeon_set_context_reg(cmd_buffer->cs, R_028BE4_PA_SU_VTX_CNTL,
			       raster->pa_su_vtx_cntl);

	radeon_set_context_reg(cmd_buffer->cs, R_028814_PA_SU_SC_MODE_CNTL,
			       raster->pa_su_sc_mode_cntl);
}

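/* Program the hardware VS stage. Shader code addresses are 256-byte aligned,
 * so the PGM_LO/PGM_HI pair is written as va >> 8 and va >> 40.
 */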
static void
radv_emit_hw_vs(struct radv_cmd_buffer *cmd_buffer,
		struct radv_pipeline *pipeline,
		struct radv_shader_variant *shader,
		struct ac_vs_output_info *outinfo)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	uint64_t va = ws->buffer_get_va(shader->bo);
	unsigned export_count;

	ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
	si_cp_dma_prefetch(cmd_buffer, va, shader->code_size);

	export_count = MAX2(1, outinfo->param_exports);
	radeon_set_context_reg(cmd_buffer->cs, R_0286C4_SPI_VS_OUT_CONFIG,
			       S_0286C4_VS_EXPORT_COUNT(export_count - 1));

	radeon_set_context_reg(cmd_buffer->cs, R_02870C_SPI_SHADER_POS_FORMAT,
			       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
			       S_02870C_POS1_EXPORT_FORMAT(outinfo->pos_exports > 1 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS2_EXPORT_FORMAT(outinfo->pos_exports > 2 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS3_EXPORT_FORMAT(outinfo->pos_exports > 3 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE));


	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B120_SPI_SHADER_PGM_LO_VS, 4);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);
	radeon_emit(cmd_buffer->cs, shader->rsrc1);
	radeon_emit(cmd_buffer->cs, shader->rsrc2);

	radeon_set_context_reg(cmd_buffer->cs, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));


	radeon_set_context_reg(cmd_buffer->cs, R_02881C_PA_CL_VS_OUT_CNTL,
			       pipeline->graphics.pa_cl_vs_out_cntl);

	radeon_set_context_reg(cmd_buffer->cs, R_028AB4_VGT_REUSE_OFF,
			       S_028AB4_REUSE_OFF(outinfo->writes_viewport_index));
}

static void
radv_emit_hw_es(struct radv_cmd_buffer *cmd_buffer,
		struct radv_shader_variant *shader,
		struct ac_es_output_info *outinfo)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	uint64_t va = ws->buffer_get_va(shader->bo);

	ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
	si_cp_dma_prefetch(cmd_buffer, va, shader->code_size);

	radeon_set_context_reg(cmd_buffer->cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
			       outinfo->esgs_itemsize / 4);
	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B320_SPI_SHADER_PGM_LO_ES, 4);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);
	radeon_emit(cmd_buffer->cs, shader->rsrc1);
	radeon_emit(cmd_buffer->cs, shader->rsrc2);
}

static void
radv_emit_hw_ls(struct radv_cmd_buffer *cmd_buffer,
		struct radv_shader_variant *shader)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	uint64_t va = ws->buffer_get_va(shader->bo);
	uint32_t rsrc2 = shader->rsrc2;

	ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
	si_cp_dma_prefetch(cmd_buffer, va, shader->code_size);

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B520_SPI_SHADER_PGM_LO_LS, 2);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);

	rsrc2 |= S_00B52C_LDS_SIZE(cmd_buffer->state.pipeline->graphics.tess.lds_size);
	if (cmd_buffer->device->physical_device->rad_info.chip_class == CIK &&
	    cmd_buffer->device->physical_device->rad_info.family != CHIP_HAWAII)
		radeon_set_sh_reg(cmd_buffer->cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, rsrc2);

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
	radeon_emit(cmd_buffer->cs, shader->rsrc1);
	radeon_emit(cmd_buffer->cs, rsrc2);
}

static void
radv_emit_hw_hs(struct radv_cmd_buffer *cmd_buffer,
		struct radv_shader_variant *shader)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	uint64_t va = ws->buffer_get_va(shader->bo);

	ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
	si_cp_dma_prefetch(cmd_buffer, va, shader->code_size);

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B420_SPI_SHADER_PGM_LO_HS, 4);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);
	radeon_emit(cmd_buffer->cs, shader->rsrc1);
	radeon_emit(cmd_buffer->cs, shader->rsrc2);
}

static void
radv_emit_vertex_shader(struct radv_cmd_buffer *cmd_buffer,
			struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *vs;

	assert(pipeline->shaders[MESA_SHADER_VERTEX]);

	vs = pipeline->shaders[MESA_SHADER_VERTEX];

	if (vs->info.vs.as_ls)
		radv_emit_hw_ls(cmd_buffer, vs);
	else if (vs->info.vs.as_es)
		radv_emit_hw_es(cmd_buffer, vs, &vs->info.vs.es_info);
	else
		radv_emit_hw_vs(cmd_buffer, pipeline, vs, &vs->info.vs.outinfo);

	radeon_set_context_reg(cmd_buffer->cs, R_028A84_VGT_PRIMITIVEID_EN, 0);
}


static void
radv_emit_tess_shaders(struct radv_cmd_buffer *cmd_buffer,
		       struct radv_pipeline *pipeline)
{
	if (!radv_pipeline_has_tess(pipeline))
		return;

	struct radv_shader_variant *tes, *tcs;

	tcs = pipeline->shaders[MESA_SHADER_TESS_CTRL];
	tes = pipeline->shaders[MESA_SHADER_TESS_EVAL];

	if (tes->info.tes.as_es)
		radv_emit_hw_es(cmd_buffer, tes, &tes->info.tes.es_info);
	else
		radv_emit_hw_vs(cmd_buffer, pipeline, tes, &tes->info.tes.outinfo);

	radv_emit_hw_hs(cmd_buffer, tcs);

	radeon_set_context_reg(cmd_buffer->cs, R_028B6C_VGT_TF_PARAM,
			       pipeline->graphics.tess.tf_param);

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK)
		radeon_set_context_reg_idx(cmd_buffer->cs, R_028B58_VGT_LS_HS_CONFIG, 2,
					   pipeline->graphics.tess.ls_hs_config);
	else
		radeon_set_context_reg(cmd_buffer->cs, R_028B58_VGT_LS_HS_CONFIG,
				       pipeline->graphics.tess.ls_hs_config);

	struct ac_userdata_info *loc;

	loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_TESS_CTRL, AC_UD_TCS_OFFCHIP_LAYOUT);
	if (loc->sgpr_idx != -1) {
		uint32_t base_reg = shader_stage_to_user_data_0(MESA_SHADER_TESS_CTRL, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
		assert(loc->num_sgprs == 4);
		assert(!loc->indirect);
		radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, 4);
		radeon_emit(cmd_buffer->cs, pipeline->graphics.tess.offchip_layout);
		radeon_emit(cmd_buffer->cs, pipeline->graphics.tess.tcs_out_offsets);
		radeon_emit(cmd_buffer->cs, pipeline->graphics.tess.tcs_out_layout |
					    pipeline->graphics.tess.num_tcs_input_cp << 26);
		radeon_emit(cmd_buffer->cs, pipeline->graphics.tess.tcs_in_layout);
	}

	loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_TESS_EVAL, AC_UD_TES_OFFCHIP_LAYOUT);
	if (loc->sgpr_idx != -1) {
		uint32_t base_reg = shader_stage_to_user_data_0(MESA_SHADER_TESS_EVAL, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
		assert(loc->num_sgprs == 1);
		assert(!loc->indirect);

		radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4,
				  pipeline->graphics.tess.offchip_layout);
	}

	loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_VERTEX, AC_UD_VS_LS_TCS_IN_LAYOUT);
	if (loc->sgpr_idx != -1) {
		uint32_t base_reg = shader_stage_to_user_data_0(MESA_SHADER_VERTEX, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
		assert(loc->num_sgprs == 1);
		assert(!loc->indirect);

		radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4,
				  pipeline->graphics.tess.tcs_in_layout);
	}
}

static void
radv_emit_geometry_shader(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_pipeline *pipeline)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	struct radv_shader_variant *gs;
	uint64_t va;

	radeon_set_context_reg(cmd_buffer->cs, R_028A40_VGT_GS_MODE, pipeline->graphics.vgt_gs_mode);

	gs = pipeline->shaders[MESA_SHADER_GEOMETRY];
	if (!gs)
		return;

	uint32_t gsvs_itemsize = gs->info.gs.max_gsvs_emit_size >> 2;

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028A60_VGT_GSVS_RING_OFFSET_1, 3);
	radeon_emit(cmd_buffer->cs, gsvs_itemsize);
	radeon_emit(cmd_buffer->cs, gsvs_itemsize);
	radeon_emit(cmd_buffer->cs, gsvs_itemsize);

	radeon_set_context_reg(cmd_buffer->cs, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gsvs_itemsize);

	radeon_set_context_reg(cmd_buffer->cs, R_028B38_VGT_GS_MAX_VERT_OUT, gs->info.gs.vertices_out);

	uint32_t gs_vert_itemsize = gs->info.gs.gsvs_vertex_size;
	radeon_set_context_reg_seq(cmd_buffer->cs, R_028B5C_VGT_GS_VERT_ITEMSIZE, 4);
	radeon_emit(cmd_buffer->cs, gs_vert_itemsize >> 2);
	radeon_emit(cmd_buffer->cs, 0);
	radeon_emit(cmd_buffer->cs, 0);
	radeon_emit(cmd_buffer->cs, 0);

	uint32_t gs_num_invocations = gs->info.gs.invocations;
	radeon_set_context_reg(cmd_buffer->cs, R_028B90_VGT_GS_INSTANCE_CNT,
			       S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
			       S_028B90_ENABLE(gs_num_invocations > 0));

	va = ws->buffer_get_va(gs->bo);
	ws->cs_add_buffer(cmd_buffer->cs, gs->bo, 8);
	si_cp_dma_prefetch(cmd_buffer, va, gs->code_size);
	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B220_SPI_SHADER_PGM_LO_GS, 4);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);
	radeon_emit(cmd_buffer->cs, gs->rsrc1);
	radeon_emit(cmd_buffer->cs, gs->rsrc2);

	radv_emit_hw_vs(cmd_buffer, pipeline, pipeline->gs_copy_shader, &pipeline->gs_copy_shader->info.vs.outinfo);

	struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
							     AC_UD_GS_VS_RING_STRIDE_ENTRIES);
	if (loc->sgpr_idx != -1) {
		uint32_t stride = gs->info.gs.max_gsvs_emit_size;
		uint32_t num_entries = 64;
		bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI;

		if (is_vi)
			num_entries *= stride;

		stride = S_008F04_STRIDE(stride);
		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B230_SPI_SHADER_USER_DATA_GS_0 + loc->sgpr_idx * 4, 2);
		radeon_emit(cmd_buffer->cs, stride);
		radeon_emit(cmd_buffer->cs, num_entries);
	}
}

static void
radv_emit_fragment_shader(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_pipeline *pipeline)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	struct radv_shader_variant *ps;
	uint64_t va;
	unsigned spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
	struct radv_blend_state *blend = &pipeline->graphics.blend;
	assert(pipeline->shaders[MESA_SHADER_FRAGMENT]);

	ps = pipeline->shaders[MESA_SHADER_FRAGMENT];

	va = ws->buffer_get_va(ps->bo);
	ws->cs_add_buffer(cmd_buffer->cs, ps->bo, 8);
	si_cp_dma_prefetch(cmd_buffer, va, ps->code_size);

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B020_SPI_SHADER_PGM_LO_PS, 4);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);
	radeon_emit(cmd_buffer->cs, ps->rsrc1);
	radeon_emit(cmd_buffer->cs, ps->rsrc2);

	radeon_set_context_reg(cmd_buffer->cs, R_02880C_DB_SHADER_CONTROL,
			       pipeline->graphics.db_shader_control);

	radeon_set_context_reg(cmd_buffer->cs, R_0286CC_SPI_PS_INPUT_ENA,
			       ps->config.spi_ps_input_ena);

	radeon_set_context_reg(cmd_buffer->cs, R_0286D0_SPI_PS_INPUT_ADDR,
			       ps->config.spi_ps_input_addr);

	if (ps->info.fs.force_persample)
		spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);

	radeon_set_context_reg(cmd_buffer->cs, R_0286D8_SPI_PS_IN_CONTROL,
			       S_0286D8_NUM_INTERP(ps->info.fs.num_interp));

	radeon_set_context_reg(cmd_buffer->cs, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);

	radeon_set_context_reg(cmd_buffer->cs, R_028710_SPI_SHADER_Z_FORMAT,
			       pipeline->graphics.shader_z_format);

	radeon_set_context_reg(cmd_buffer->cs, R_028714_SPI_SHADER_COL_FORMAT, blend->spi_shader_col_format);

	radeon_set_context_reg(cmd_buffer->cs, R_028238_CB_TARGET_MASK, blend->cb_target_mask);
	radeon_set_context_reg(cmd_buffer->cs, R_02823C_CB_SHADER_MASK, blend->cb_shader_mask);

	if (pipeline->graphics.ps_input_cntl_num) {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028644_SPI_PS_INPUT_CNTL_0, pipeline->graphics.ps_input_cntl_num);
		for (unsigned i = 0; i < pipeline->graphics.ps_input_cntl_num; i++) {
			radeon_emit(cmd_buffer->cs, pipeline->graphics.ps_input_cntl[i]);
		}
	}
}

static void polaris_set_vgt_vertex_reuse(struct radv_cmd_buffer *cmd_buffer,
					 struct radv_pipeline *pipeline)
{
	uint32_t vtx_reuse_depth = 30;
	if (cmd_buffer->device->physical_device->rad_info.family < CHIP_POLARIS10)
		return;

	if (pipeline->shaders[MESA_SHADER_TESS_EVAL]) {
		if (pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.spacing == TESS_SPACING_FRACTIONAL_ODD)
			vtx_reuse_depth = 14;
	}
	radeon_set_context_reg(cmd_buffer->cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
			       vtx_reuse_depth);
}

static void
radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer,
			    struct radv_pipeline *pipeline)
{
	if (!pipeline || cmd_buffer->state.emitted_pipeline == pipeline)
		return;

	radv_emit_graphics_depth_stencil_state(cmd_buffer, pipeline);
	radv_emit_graphics_blend_state(cmd_buffer, pipeline);
	radv_emit_graphics_raster_state(cmd_buffer, pipeline);
	radv_update_multisample_state(cmd_buffer, pipeline);
	radv_emit_vertex_shader(cmd_buffer, pipeline);
	radv_emit_tess_shaders(cmd_buffer, pipeline);
	radv_emit_geometry_shader(cmd_buffer, pipeline);
	radv_emit_fragment_shader(cmd_buffer, pipeline);
	polaris_set_vgt_vertex_reuse(cmd_buffer, pipeline);

	cmd_buffer->scratch_size_needed =
		MAX2(cmd_buffer->scratch_size_needed,
		     pipeline->max_waves * pipeline->scratch_bytes_per_wave);

	radeon_set_context_reg(cmd_buffer->cs, R_0286E8_SPI_TMPRING_SIZE,
			       S_0286E8_WAVES(pipeline->max_waves) |
			       S_0286E8_WAVESIZE(pipeline->scratch_bytes_per_wave >> 10));

	if (!cmd_buffer->state.emitted_pipeline ||
	    cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband !=
	    pipeline->graphics.can_use_guardband)
		cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
	cmd_buffer->state.emitted_pipeline = pipeline;
}

static void
radv_emit_viewport(struct radv_cmd_buffer *cmd_buffer)
{
	si_write_viewport(cmd_buffer->cs, 0, cmd_buffer->state.dynamic.viewport.count,
			  cmd_buffer->state.dynamic.viewport.viewports);
}

static void
radv_emit_scissor(struct radv_cmd_buffer *cmd_buffer)
{
	uint32_t count = cmd_buffer->state.dynamic.scissor.count;
	si_write_scissors(cmd_buffer->cs, 0, count,
			  cmd_buffer->state.dynamic.scissor.scissors,
			  cmd_buffer->state.dynamic.viewport.viewports,
			  cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband);
	radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0,
			       cmd_buffer->state.pipeline->graphics.ms.pa_sc_mode_cntl_0 | S_028A48_VPORT_SCISSOR_ENABLE(count ? 1 : 0));
}

static void
radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
			 int index,
			 struct radv_color_buffer_info *cb)
{
	bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI;
	radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
	radeon_emit(cmd_buffer->cs, cb->cb_color_base);
	radeon_emit(cmd_buffer->cs, cb->cb_color_pitch);
	radeon_emit(cmd_buffer->cs, cb->cb_color_slice);
	radeon_emit(cmd_buffer->cs, cb->cb_color_view);
	radeon_emit(cmd_buffer->cs, cb->cb_color_info);
	radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
	radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
	radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
	radeon_emit(cmd_buffer->cs, cb->cb_color_cmask_slice);
	radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
	radeon_emit(cmd_buffer->cs, cb->cb_color_fmask_slice);

	if (is_vi) { /* DCC BASE */
		radeon_set_context_reg(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, cb->cb_dcc_base);
	}
}

static void
radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer,
		      struct radv_ds_buffer_info *ds,
		      struct radv_image *image,
		      VkImageLayout layout)
{
	uint32_t db_z_info = ds->db_z_info;

	if (!radv_layout_has_htile(image, layout))
		db_z_info &= C_028040_TILE_SURFACE_ENABLE;

	if (!radv_layout_can_expclear(image, layout))
		db_z_info &= C_028040_ALLOW_EXPCLEAR & C_028044_ALLOW_EXPCLEAR;

	radeon_set_context_reg(cmd_buffer->cs, R_028008_DB_DEPTH_VIEW, ds->db_depth_view);
	radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base);

	radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 9);
	radeon_emit(cmd_buffer->cs, ds->db_depth_info);		/* R_02803C_DB_DEPTH_INFO */
	radeon_emit(cmd_buffer->cs, db_z_info);			/* R_028040_DB_Z_INFO */
	radeon_emit(cmd_buffer->cs, ds->db_stencil_info);	/* R_028044_DB_STENCIL_INFO */
	radeon_emit(cmd_buffer->cs, ds->db_z_read_base);	/* R_028048_DB_Z_READ_BASE */
	radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base);	/* R_02804C_DB_STENCIL_READ_BASE */
	radeon_emit(cmd_buffer->cs, ds->db_z_write_base);	/* R_028050_DB_Z_WRITE_BASE */
	radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base);	/* R_028054_DB_STENCIL_WRITE_BASE */
	radeon_emit(cmd_buffer->cs, ds->db_depth_size);		/* R_028058_DB_DEPTH_SIZE */
	radeon_emit(cmd_buffer->cs, ds->db_depth_slice);	/* R_02805C_DB_DEPTH_SLICE */

	radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface);
	radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
			       ds->pa_su_poly_offset_db_fmt_cntl);
}

/*
 * To hardware-resolve multisample images, both src and dst need to have the
 * same micro tiling mode. However, we don't always know this in advance when
 * creating the images. This function gets called when we have a resolve
 * attachment: it checks whether the attachment image already uses the desired
 * tiling mode and updates it if not, then does the same for the generated
 * framebuffer state.
 */
static void radv_set_optimal_micro_tile_mode(struct radv_device *device,
					     struct radv_attachment_info *att,
					     uint32_t micro_tile_mode)
{
	struct radv_image *image = att->attachment->image;
	uint32_t tile_mode_index;
	if (image->info.samples <= 1)
		return;

	if (image->surface.micro_tile_mode != micro_tile_mode) {
		radv_image_set_optimal_micro_tile_mode(device, image, micro_tile_mode);
	}

	if (att->cb.micro_tile_mode != micro_tile_mode) {
		tile_mode_index = image->surface.tiling_index[0];

		att->cb.cb_color_attrib &= C_028C74_TILE_MODE_INDEX;
		att->cb.cb_color_attrib |= S_028C74_TILE_MODE_INDEX(tile_mode_index);
		att->cb.micro_tile_mode = micro_tile_mode;
	}
}

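/* Write a depth/stencil clear value both to the image's metadata (with
 * WRITE_DATA) and to the current DB clear registers, so that later command
 * buffers can reload it with radv_load_depth_clear_regs().
 */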
void
radv_set_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_image *image,
			  VkClearDepthStencilValue ds_clear_value,
			  VkImageAspectFlags aspects)
{
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;
	unsigned reg_offset = 0, reg_count = 0;

	if (!image->surface.htile_size || !aspects)
		return;

	if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
		++reg_count;
	} else {
		++reg_offset;
		va += 4;
	}
	if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
		++reg_count;

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
	radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
				    S_370_WR_CONFIRM(1) |
				    S_370_ENGINE_SEL(V_370_PFP));
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
		radeon_emit(cmd_buffer->cs, ds_clear_value.stencil);
	if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
		radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth));

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR + 4 * reg_offset, reg_count);
	if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
		radeon_emit(cmd_buffer->cs, ds_clear_value.stencil); /* R_028028_DB_STENCIL_CLEAR */
	if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
		radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth)); /* R_02802C_DB_DEPTH_CLEAR */
}

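/* Reload the saved clear values from the image's metadata into
 * DB_STENCIL_CLEAR/DB_DEPTH_CLEAR with COPY_DATA, then stall with
 * PFP_SYNC_ME so the PFP doesn't race ahead of the copy.
 */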
static void
radv_load_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_image *image)
{
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;

	if (!image->surface.htile_size)
		return;

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
	radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
				    COPY_DATA_DST_SEL(COPY_DATA_REG) |
				    COPY_DATA_COUNT_SEL);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR >> 2);
	radeon_emit(cmd_buffer->cs, 0);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
	radeon_emit(cmd_buffer->cs, 0);
}

void
radv_set_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_image *image,
			  int idx,
			  uint32_t color_values[2])
{
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;

	if (!image->cmask.size && !image->surface.dcc_size)
		return;

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
	radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
				    S_370_WR_CONFIRM(1) |
				    S_370_ENGINE_SEL(V_370_PFP));
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, color_values[0]);
	radeon_emit(cmd_buffer->cs, color_values[1]);

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c, 2);
	radeon_emit(cmd_buffer->cs, color_values[0]);
	radeon_emit(cmd_buffer->cs, color_values[1]);
}

static void
radv_load_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_image *image,
			   int idx)
{
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;

	if (!image->cmask.size && !image->surface.dcc_size)
		return;

	uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c;
	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
	radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
				    COPY_DATA_DST_SEL(COPY_DATA_REG) |
				    COPY_DATA_COUNT_SEL);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, reg >> 2);
	radeon_emit(cmd_buffer->cs, 0);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
	radeon_emit(cmd_buffer->cs, 0);
}

void
radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
{
	int i;
	struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
	const struct radv_subpass *subpass = cmd_buffer->state.subpass;
	int dst_resolve_micro_tile_mode = -1;

	if (subpass->has_resolve) {
		uint32_t a = subpass->resolve_attachments[0].attachment;
		const struct radv_image *image = framebuffer->attachments[a].attachment->image;
		dst_resolve_micro_tile_mode = image->surface.micro_tile_mode;
	}
	for (i = 0; i < subpass->color_count; ++i) {
		int idx = subpass->color_attachments[i].attachment;
		struct radv_attachment_info *att = &framebuffer->attachments[idx];

		if (dst_resolve_micro_tile_mode != -1) {
			radv_set_optimal_micro_tile_mode(cmd_buffer->device,
							 att, dst_resolve_micro_tile_mode);
		}
		cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, att->attachment->bo, 8);

		assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
		radv_emit_fb_color_state(cmd_buffer, i, &att->cb);

		radv_load_color_clear_regs(cmd_buffer, att->attachment->image, i);
	}

	for (i = subpass->color_count; i < 8; i++)
		radeon_set_context_reg(cmd_buffer->cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
				       S_028C70_FORMAT(V_028C70_COLOR_INVALID));

	if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
		int idx = subpass->depth_stencil_attachment.attachment;
		VkImageLayout layout = subpass->depth_stencil_attachment.layout;
		struct radv_attachment_info *att = &framebuffer->attachments[idx];
		struct radv_image *image = att->attachment->image;
		cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, att->attachment->bo, 8);

		radv_emit_fb_ds_state(cmd_buffer, &att->ds, image, layout);

		if (att->ds.offset_scale != cmd_buffer->state.offset_scale) {
			cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
			cmd_buffer->state.offset_scale = att->ds.offset_scale;
		}
		radv_load_depth_clear_regs(cmd_buffer, image);
	} else {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2);
		radeon_emit(cmd_buffer->cs, S_028040_FORMAT(V_028040_Z_INVALID)); /* R_028040_DB_Z_INFO */
		radeon_emit(cmd_buffer->cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* R_028044_DB_STENCIL_INFO */
	}
	radeon_set_context_reg(cmd_buffer->cs, R_028208_PA_SC_WINDOW_SCISSOR_BR,
			       S_028208_BR_X(framebuffer->width) |
			       S_028208_BR_Y(framebuffer->height));
}

void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
{
	uint32_t db_count_control;

	if (!cmd_buffer->state.active_occlusion_queries) {
		if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
			db_count_control = 0;
		} else {
			db_count_control = S_028004_ZPASS_INCREMENT_DISABLE(1);
		}
	} else {
		if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
			db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
				S_028004_SAMPLE_RATE(0) | /* TODO: set this to the number of samples of the current framebuffer */
				S_028004_ZPASS_ENABLE(1) |
				S_028004_SLICE_EVEN_ENABLE(1) |
				S_028004_SLICE_ODD_ENABLE(1);
		} else {
			db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
				S_028004_SAMPLE_RATE(0); /* TODO: set this to the number of samples of the current framebuffer */
		}
	}

	radeon_set_context_reg(cmd_buffer->cs, R_028004_DB_COUNT_CONTROL, db_count_control);
}

static void
radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;

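	/* With rasterizer discard enabled, none of the rasterizer-facing
	 * dynamic state can affect the rendered result, so don't bother
	 * emitting any of it.
	 */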
	if (G_028810_DX_RASTERIZATION_KILL(cmd_buffer->state.pipeline->graphics.raster.pa_cl_clip_cntl))
		return;

	if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
		radv_emit_viewport(cmd_buffer);

	if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR | RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
		radv_emit_scissor(cmd_buffer);

	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH) {
		unsigned width = cmd_buffer->state.dynamic.line_width * 8;
		radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL,
				       S_028A08_WIDTH(CLAMP(width, 0, 0xFFF)));
	}

	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028414_CB_BLEND_RED, 4);
		radeon_emit_array(cmd_buffer->cs, (uint32_t *)d->blend_constants, 4);
	}

	if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE |
				       RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
				       RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK)) {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028430_DB_STENCILREFMASK, 2);
		radeon_emit(cmd_buffer->cs, S_028430_STENCILTESTVAL(d->stencil_reference.front) |
					    S_028430_STENCILMASK(d->stencil_compare_mask.front) |
					    S_028430_STENCILWRITEMASK(d->stencil_write_mask.front) |
					    S_028430_STENCILOPVAL(1));
		radeon_emit(cmd_buffer->cs, S_028434_STENCILTESTVAL_BF(d->stencil_reference.back) |
					    S_028434_STENCILMASK_BF(d->stencil_compare_mask.back) |
					    S_028434_STENCILWRITEMASK_BF(d->stencil_write_mask.back) |
					    S_028434_STENCILOPVAL_BF(1));
	}

	if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_PIPELINE |
				       RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS)) {
		radeon_set_context_reg(cmd_buffer->cs, R_028020_DB_DEPTH_BOUNDS_MIN, fui(d->depth_bounds.min));
		radeon_set_context_reg(cmd_buffer->cs, R_028024_DB_DEPTH_BOUNDS_MAX, fui(d->depth_bounds.max));
	}

	if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_PIPELINE |
				       RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
		struct radv_raster_state *raster = &cmd_buffer->state.pipeline->graphics.raster;
		unsigned slope = fui(d->depth_bias.slope * 16.0f);
		unsigned bias = fui(d->depth_bias.bias * cmd_buffer->state.offset_scale);

		if (G_028814_POLY_OFFSET_FRONT_ENABLE(raster->pa_su_sc_mode_cntl)) {
			radeon_set_context_reg_seq(cmd_buffer->cs, R_028B7C_PA_SU_POLY_OFFSET_CLAMP, 5);
			radeon_emit(cmd_buffer->cs, fui(d->depth_bias.clamp)); /* CLAMP */
			radeon_emit(cmd_buffer->cs, slope); /* FRONT SCALE */
			radeon_emit(cmd_buffer->cs, bias); /* FRONT OFFSET */
			radeon_emit(cmd_buffer->cs, slope); /* BACK SCALE */
			radeon_emit(cmd_buffer->cs, bias); /* BACK OFFSET */
		}
	}

	cmd_buffer->state.dirty = 0;
}

static void
emit_stage_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer,
				   struct radv_pipeline *pipeline,
				   int idx,
				   uint64_t va,
				   gl_shader_stage stage)
{
	struct ac_userdata_info *desc_set_loc = &pipeline->shaders[stage]->info.user_sgprs_locs.descriptor_sets[idx];
	uint32_t base_reg = shader_stage_to_user_data_0(stage, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));

	if (desc_set_loc->sgpr_idx == -1 || desc_set_loc->indirect)
		return;

	assert(!desc_set_loc->indirect);
	assert(desc_set_loc->num_sgprs == 2);
	radeon_set_sh_reg_seq(cmd_buffer->cs,
			      base_reg + desc_set_loc->sgpr_idx * 4, 2);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
}

static void
radv_emit_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer,
				  struct radv_pipeline *pipeline,
				  VkShaderStageFlags stages,
				  struct radv_descriptor_set *set,
				  unsigned idx)
{
	if (stages & VK_SHADER_STAGE_FRAGMENT_BIT)
		emit_stage_descriptor_set_userdata(cmd_buffer, pipeline,
						   idx, set->va,
						   MESA_SHADER_FRAGMENT);

	if (stages & VK_SHADER_STAGE_VERTEX_BIT)
		emit_stage_descriptor_set_userdata(cmd_buffer, pipeline,
						   idx, set->va,
						   MESA_SHADER_VERTEX);

	if ((stages & VK_SHADER_STAGE_GEOMETRY_BIT) && radv_pipeline_has_gs(pipeline))
		emit_stage_descriptor_set_userdata(cmd_buffer, pipeline,
						   idx, set->va,
						   MESA_SHADER_GEOMETRY);

	if ((stages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) && radv_pipeline_has_tess(pipeline))
		emit_stage_descriptor_set_userdata(cmd_buffer, pipeline,
						   idx, set->va,
						   MESA_SHADER_TESS_CTRL);

	if ((stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) && radv_pipeline_has_tess(pipeline))
		emit_stage_descriptor_set_userdata(cmd_buffer, pipeline,
						   idx, set->va,
						   MESA_SHADER_TESS_EVAL);

	if (stages & VK_SHADER_STAGE_COMPUTE_BIT)
		emit_stage_descriptor_set_userdata(cmd_buffer, pipeline,
						   idx, set->va,
						   MESA_SHADER_COMPUTE);
}

static void
radv_flush_push_descriptors(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_descriptor_set *set = &cmd_buffer->push_descriptors.set;
	uint32_t *ptr = NULL;
	unsigned bo_offset;

	if (!radv_cmd_buffer_upload_alloc(cmd_buffer, set->size, 32,
					  &bo_offset,
					  (void **)&ptr))
		return;

	set->va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
	set->va += bo_offset;

	memcpy(ptr, set->mapped_ptr, set->size);
}

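/* Some stages don't have enough free user SGPRs to address every descriptor
 * set directly, so upload a table of 64-bit set addresses and point those
 * shaders at the table instead.
 */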
static void
radv_flush_indirect_descriptor_sets(struct radv_cmd_buffer *cmd_buffer,
				    struct radv_pipeline *pipeline)
{
	uint32_t size = MAX_SETS * 2 * 4;
	uint32_t offset;
	void *ptr;

	if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size,
					  256, &offset, &ptr))
		return;

	for (unsigned i = 0; i < MAX_SETS; i++) {
		uint32_t *uptr = ((uint32_t *)ptr) + i * 2;
		uint64_t set_va = 0;
		struct radv_descriptor_set *set = cmd_buffer->state.descriptors[i];
		if (set)
			set_va = set->va;
		uptr[0] = set_va & 0xffffffff;
		uptr[1] = set_va >> 32;
	}

	uint64_t va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
	va += offset;

	if (pipeline->shaders[MESA_SHADER_VERTEX])
		radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_VERTEX,
					   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);

	if (pipeline->shaders[MESA_SHADER_FRAGMENT])
		radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_FRAGMENT,
					   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);

	if (radv_pipeline_has_gs(pipeline))
		radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_GEOMETRY,
					   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);

	if (radv_pipeline_has_tess(pipeline))
		radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_TESS_CTRL,
					   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);

	if (radv_pipeline_has_tess(pipeline))
		radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_TESS_EVAL,
					   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);

	if (pipeline->shaders[MESA_SHADER_COMPUTE])
		radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_COMPUTE,
					   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
}

static void
radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer,
		       struct radv_pipeline *pipeline,
		       VkShaderStageFlags stages)
{
	unsigned i;

	if (!cmd_buffer->state.descriptors_dirty)
		return;

	if (cmd_buffer->state.push_descriptors_dirty)
		radv_flush_push_descriptors(cmd_buffer);

	if (pipeline->need_indirect_descriptor_sets) {
		radv_flush_indirect_descriptor_sets(cmd_buffer, pipeline);
	}

	for (i = 0; i < MAX_SETS; i++) {
		if (!(cmd_buffer->state.descriptors_dirty & (1u << i)))
			continue;
		struct radv_descriptor_set *set = cmd_buffer->state.descriptors[i];
		if (!set)
			continue;

		radv_emit_descriptor_set_userdata(cmd_buffer, pipeline, stages, set, i);
	}
	cmd_buffer->state.descriptors_dirty = 0;
	cmd_buffer->state.push_descriptors_dirty = false;
}

static void
radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
		     struct radv_pipeline *pipeline,
		     VkShaderStageFlags stages)
{
	struct radv_pipeline_layout *layout = pipeline->layout;
	unsigned offset;
	void *ptr;
	uint64_t va;

	stages &= cmd_buffer->push_constant_stages;
	if (!stages || !layout || (!layout->push_constant_size && !layout->dynamic_offset_count))
		return;

	if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
					  16 * layout->dynamic_offset_count,
					  256, &offset, &ptr))
		return;

	memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
	memcpy((char *)ptr + layout->push_constant_size, cmd_buffer->dynamic_buffers,
	       16 * layout->dynamic_offset_count);

	va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
	va += offset;

	if (stages & VK_SHADER_STAGE_VERTEX_BIT)
		radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_VERTEX,
					   AC_UD_PUSH_CONSTANTS, va);

	if (stages & VK_SHADER_STAGE_FRAGMENT_BIT)
		radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_FRAGMENT,
					   AC_UD_PUSH_CONSTANTS, va);

	if ((stages & VK_SHADER_STAGE_GEOMETRY_BIT) && radv_pipeline_has_gs(pipeline))
		radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_GEOMETRY,
					   AC_UD_PUSH_CONSTANTS, va);

	if ((stages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) && radv_pipeline_has_tess(pipeline))
		radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_TESS_CTRL,
					   AC_UD_PUSH_CONSTANTS, va);

	if ((stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) && radv_pipeline_has_tess(pipeline))
		radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_TESS_EVAL,
					   AC_UD_PUSH_CONSTANTS, va);

	if (stages & VK_SHADER_STAGE_COMPUTE_BIT)
		radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_COMPUTE,
					   AC_UD_PUSH_CONSTANTS, va);

	cmd_buffer->push_constant_stages &= ~stages;
}

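/* Primitive restart only applies to indexed draws; the restart index is
 * all-ones in the current index size (0xffff for 16-bit indices, 0xffffffff
 * for 32-bit ones).
 */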
static void radv_emit_primitive_reset_state(struct radv_cmd_buffer *cmd_buffer,
					    bool indexed_draw)
{
	int32_t primitive_reset_en = indexed_draw && cmd_buffer->state.pipeline->graphics.prim_restart_enable;

	if (primitive_reset_en != cmd_buffer->state.last_primitive_reset_en) {
		cmd_buffer->state.last_primitive_reset_en = primitive_reset_en;
		radeon_set_context_reg(cmd_buffer->cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
				       primitive_reset_en);
	}

	if (primitive_reset_en) {
		uint32_t primitive_reset_index = cmd_buffer->state.index_type ? 0xffffffffu : 0xffffu;

		if (primitive_reset_index != cmd_buffer->state.last_primitive_reset_index) {
			cmd_buffer->state.last_primitive_reset_index = primitive_reset_index;
			radeon_set_context_reg(cmd_buffer->cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
					       primitive_reset_index);
		}
	}
}

static void
radv_cmd_buffer_flush_state(struct radv_cmd_buffer *cmd_buffer,
			    bool indexed_draw, bool instanced_draw,
			    bool indirect_draw,
			    uint32_t draw_vertex_count)
{
	struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
	struct radv_device *device = cmd_buffer->device;
	uint32_t ia_multi_vgt_param;

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
							   cmd_buffer->cs, 4096);

	if ((cmd_buffer->state.vertex_descriptors_dirty || cmd_buffer->state.vb_dirty) &&
	    cmd_buffer->state.pipeline->num_vertex_attribs &&
	    cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX]->info.info.vs.has_vertex_buffers) {
		unsigned vb_offset;
		void *vb_ptr;
		uint32_t i = 0;
		uint32_t num_attribs = cmd_buffer->state.pipeline->num_vertex_attribs;
		uint64_t va;

		/* allocate some descriptor state for vertex buffers */
		radv_cmd_buffer_upload_alloc(cmd_buffer, num_attribs * 16, 256,
					     &vb_offset, &vb_ptr);

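		/* Build one 4-dword buffer resource descriptor (V#) per vertex
		 * attribute: base address, stride + high address bits, number
		 * of records, and the data-format word.
		 */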
		for (i = 0; i < num_attribs; i++) {
			uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
			uint32_t offset;
			int vb = cmd_buffer->state.pipeline->va_binding[i];
			struct radv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
			uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb];

			device->ws->cs_add_buffer(cmd_buffer->cs, buffer->bo, 8);
			va = device->ws->buffer_get_va(buffer->bo);

			offset = cmd_buffer->state.vertex_bindings[vb].offset + cmd_buffer->state.pipeline->va_offset[i];
			va += offset + buffer->offset;
			desc[0] = va;
			desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
			if (cmd_buffer->device->physical_device->rad_info.chip_class <= CIK && stride)
				desc[2] = (buffer->size - offset - cmd_buffer->state.pipeline->va_format_size[i]) / stride + 1;
			else
				desc[2] = buffer->size - offset;
			desc[3] = cmd_buffer->state.pipeline->va_rsrc_word3[i];
		}

		va = device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
		va += vb_offset;

		radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_VERTEX,
					   AC_UD_VS_VERTEX_BUFFERS, va);
	}

	cmd_buffer->state.vertex_descriptors_dirty = false;
	cmd_buffer->state.vb_dirty = 0;
	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE)
		radv_emit_graphics_pipeline(cmd_buffer, pipeline);

	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_RENDER_TARGETS)
		radv_emit_framebuffer_state(cmd_buffer);

	ia_multi_vgt_param = si_get_ia_multi_vgt_param(cmd_buffer, instanced_draw, indirect_draw, draw_vertex_count);
	if (cmd_buffer->state.last_ia_multi_vgt_param != ia_multi_vgt_param) {
		if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK)
			radeon_set_context_reg_idx(cmd_buffer->cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param);
		else
			radeon_set_context_reg(cmd_buffer->cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
		cmd_buffer->state.last_ia_multi_vgt_param = ia_multi_vgt_param;
	}

	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) {
		radeon_set_context_reg(cmd_buffer->cs, R_028B54_VGT_SHADER_STAGES_EN, pipeline->graphics.vgt_shader_stages_en);

		if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
			radeon_set_uconfig_reg_idx(cmd_buffer->cs, R_030908_VGT_PRIMITIVE_TYPE, 1, cmd_buffer->state.pipeline->graphics.prim);
		} else {
			radeon_set_config_reg(cmd_buffer->cs, R_008958_VGT_PRIMITIVE_TYPE, cmd_buffer->state.pipeline->graphics.prim);
		}
		radeon_set_context_reg(cmd_buffer->cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, cmd_buffer->state.pipeline->graphics.gs_out);
	}

	radv_cmd_buffer_flush_dynamic_state(cmd_buffer);

	radv_emit_primitive_reset_state(cmd_buffer, indexed_draw);

	radv_flush_descriptors(cmd_buffer, cmd_buffer->state.pipeline,
			       VK_SHADER_STAGE_ALL_GRAPHICS);
	radv_flush_constants(cmd_buffer, cmd_buffer->state.pipeline,
			     VK_SHADER_STAGE_ALL_GRAPHICS);

	assert(cmd_buffer->cs->cdw <= cdw_max);

	si_emit_cache_flush(cmd_buffer);
}

1582 static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer,
1583 VkPipelineStageFlags src_stage_mask)
1584 {
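	/* Map the source stages onto the coarse VS/PS/CS partial flush
	 * events that wait for the corresponding shader work to finish. */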
1585 if (src_stage_mask & (VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
1586 VK_PIPELINE_STAGE_TRANSFER_BIT |
1587 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
1588 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
1589 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
1590 }
1591
1592 if (src_stage_mask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
1593 VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
1594 VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
1595 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
1596 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
1597 VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
1598 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
1599 VK_PIPELINE_STAGE_TRANSFER_BIT |
1600 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
1601 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT |
1602 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
1603 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
1604 } else if (src_stage_mask & (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
1605 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
1606 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
1607 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT)) {
1608 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
1609 }
1610 }
1611
1612 static enum radv_cmd_flush_bits
1613 radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer,
1614 VkAccessFlags src_flags)
1615 {
1616 enum radv_cmd_flush_bits flush_bits = 0;
1617 uint32_t b;
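	/* Translate each write access into the cache flushes needed to
	 * make the written data visible to later accesses. */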
1618 for_each_bit(b, src_flags) {
1619 switch ((VkAccessFlagBits)(1 << b)) {
1620 case VK_ACCESS_SHADER_WRITE_BIT:
1621 flush_bits |= RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
1622 break;
1623 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
1624 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
1625 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
1626 break;
1627 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
1628 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
1629 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
1630 break;
1631 case VK_ACCESS_TRANSFER_WRITE_BIT:
1632 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
1633 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
1634 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
1635 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
1636 RADV_CMD_FLAG_INV_GLOBAL_L2;
1637 break;
1638 default:
1639 break;
1640 }
1641 }
1642 return flush_bits;
1643 }
1644
1645 static enum radv_cmd_flush_bits
1646 radv_dst_access_flush(struct radv_cmd_buffer *cmd_buffer,
1647 VkAccessFlags dst_flags,
1648 struct radv_image *image)
1649 {
1650 enum radv_cmd_flush_bits flush_bits = 0;
1651 uint32_t b;
1652 for_each_bit(b, dst_flags) {
1653 switch ((VkAccessFlagBits)(1 << b)) {
1654 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
1655 case VK_ACCESS_INDEX_READ_BIT:
1656 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
1657 break;
1658 case VK_ACCESS_UNIFORM_READ_BIT:
1659 flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 | RADV_CMD_FLAG_INV_SMEM_L1;
1660 break;
1661 case VK_ACCESS_SHADER_READ_BIT:
1662 case VK_ACCESS_TRANSFER_READ_BIT:
1663 case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
1664 flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 |
1665 RADV_CMD_FLAG_INV_GLOBAL_L2;
1666 break;
1667 case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
1668 /* TODO: change to image && when the image gets passed
1669 * through from the subpass. */
1670 if (!image || (image->usage & VK_IMAGE_USAGE_STORAGE_BIT))
1671 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
1672 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
1673 break;
1674 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT:
1675 if (!image || (image->usage & VK_IMAGE_USAGE_STORAGE_BIT))
1676 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
1677 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
1678 break;
1679 default:
1680 break;
1681 }
1682 }
1683 return flush_bits;
1684 }
1685
1686 static void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer, const struct radv_subpass_barrier *barrier)
1687 {
1688 cmd_buffer->state.flush_bits |= radv_src_access_flush(cmd_buffer, barrier->src_access_mask);
1689 radv_stage_flush(cmd_buffer, barrier->src_stage_mask);
1690 cmd_buffer->state.flush_bits |= radv_dst_access_flush(cmd_buffer, barrier->dst_access_mask,
1691 NULL);
1692 }
1693
1694 static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer,
1695 VkAttachmentReference att)
1696 {
1697 unsigned idx = att.attachment;
1698 struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment;
1699 VkImageSubresourceRange range;
1700 range.aspectMask = 0;
1701 range.baseMipLevel = view->base_mip;
1702 range.levelCount = 1;
1703 range.baseArrayLayer = view->base_layer;
1704 range.layerCount = cmd_buffer->state.framebuffer->layers;
1705
1706 radv_handle_image_transition(cmd_buffer,
1707 view->image,
1708 cmd_buffer->state.attachments[idx].current_layout,
1709 att.layout, 0, 0, &range,
1710 cmd_buffer->state.attachments[idx].pending_clear_aspects);
1711
1712 cmd_buffer->state.attachments[idx].current_layout = att.layout;
1715 }
1716
1717 void
1718 radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer,
1719 const struct radv_subpass *subpass, bool transitions)
1720 {
1721 if (transitions) {
1722 radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);
1723
1724 for (unsigned i = 0; i < subpass->color_count; ++i) {
1725 radv_handle_subpass_image_transition(cmd_buffer,
1726 subpass->color_attachments[i]);
1727 }
1728
1729 for (unsigned i = 0; i < subpass->input_count; ++i) {
1730 radv_handle_subpass_image_transition(cmd_buffer,
1731 subpass->input_attachments[i]);
1732 }
1733
1734 if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
1735 radv_handle_subpass_image_transition(cmd_buffer,
1736 subpass->depth_stencil_attachment);
1737 }
1738 }
1739
1740 cmd_buffer->state.subpass = subpass;
1741
1742 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_RENDER_TARGETS;
1743 }
1744
1745 static void
1746 radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer,
1747 struct radv_render_pass *pass,
1748 const VkRenderPassBeginInfo *info)
1749 {
1750 struct radv_cmd_state *state = &cmd_buffer->state;
1751
1752 if (pass->attachment_count == 0) {
1753 state->attachments = NULL;
1754 return;
1755 }
1756
1757 state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
1758 pass->attachment_count *
1759 sizeof(state->attachments[0]),
1760 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1761 if (state->attachments == NULL) {
1762 /* FIXME: Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
1763 abort();
1764 }
1765
1766 for (uint32_t i = 0; i < pass->attachment_count; ++i) {
1767 struct radv_render_pass_attachment *att = &pass->attachments[i];
1768 VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
1769 VkImageAspectFlags clear_aspects = 0;
1770
1771 if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
1772 /* color attachment */
1773 if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
1774 clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
1775 }
1776 } else {
1777 /* depthstencil attachment */
1778 if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
1779 att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
1780 clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
1781 }
1782 if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
1783 att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
1784 clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
1785 }
1786 }
1787
1788 state->attachments[i].pending_clear_aspects = clear_aspects;
1789 if (clear_aspects && info) {
1790 assert(info->clearValueCount > i);
1791 state->attachments[i].clear_value = info->pClearValues[i];
1792 }
1793
1794 state->attachments[i].current_layout = att->initial_layout;
1795 }
1796 }
1797
1798 VkResult radv_AllocateCommandBuffers(
1799 VkDevice _device,
1800 const VkCommandBufferAllocateInfo *pAllocateInfo,
1801 VkCommandBuffer *pCommandBuffers)
1802 {
1803 RADV_FROM_HANDLE(radv_device, device, _device);
1804 RADV_FROM_HANDLE(radv_cmd_pool, pool, pAllocateInfo->commandPool);
1805
1806 VkResult result = VK_SUCCESS;
1807 uint32_t i;
1808
1809 memset(pCommandBuffers, 0,
1810 sizeof(*pCommandBuffers)*pAllocateInfo->commandBufferCount);
1811
1812 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
1813
1814 if (!list_empty(&pool->free_cmd_buffers)) {
1815 struct radv_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct radv_cmd_buffer, pool_link);
1816
1817 list_del(&cmd_buffer->pool_link);
1818 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
1819
1820 radv_reset_cmd_buffer(cmd_buffer);
1821 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1822 cmd_buffer->level = pAllocateInfo->level;
1823
1824 pCommandBuffers[i] = radv_cmd_buffer_to_handle(cmd_buffer);
1825 result = VK_SUCCESS;
1826 } else {
1827 result = radv_create_cmd_buffer(device, pool, pAllocateInfo->level,
1828 &pCommandBuffers[i]);
1829 }
1830 if (result != VK_SUCCESS)
1831 break;
1832 }
1833
1834 if (result != VK_SUCCESS)
1835 radv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
1836 i, pCommandBuffers);
1837
1838 return result;
1839 }
1840
1841 void radv_FreeCommandBuffers(
1842 VkDevice device,
1843 VkCommandPool commandPool,
1844 uint32_t commandBufferCount,
1845 const VkCommandBuffer *pCommandBuffers)
1846 {
1847 for (uint32_t i = 0; i < commandBufferCount; i++) {
1848 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
1849
1850 if (cmd_buffer) {
1851 if (cmd_buffer->pool) {
1852 list_del(&cmd_buffer->pool_link);
1853 list_addtail(&cmd_buffer->pool_link, &cmd_buffer->pool->free_cmd_buffers);
1854 } else
1855 radv_cmd_buffer_destroy(cmd_buffer);
1857 }
1858 }
1859 }
1860
1861 VkResult radv_ResetCommandBuffer(
1862 VkCommandBuffer commandBuffer,
1863 VkCommandBufferResetFlags flags)
1864 {
1865 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1866 radv_reset_cmd_buffer(cmd_buffer);
1867 return VK_SUCCESS;
1868 }
1869
1870 static void emit_gfx_buffer_state(struct radv_cmd_buffer *cmd_buffer)
1871 {
1872 struct radv_device *device = cmd_buffer->device;
1873 if (device->gfx_init) {
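		/* Chain the pre-built initial-state IB via INDIRECT_BUFFER
		 * instead of re-emitting the config registers. */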
1874 uint64_t va = device->ws->buffer_get_va(device->gfx_init);
1875 device->ws->cs_add_buffer(cmd_buffer->cs, device->gfx_init, 8);
1876 radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
1877 radeon_emit(cmd_buffer->cs, va);
1878 radeon_emit(cmd_buffer->cs, (va >> 32) & 0xffff);
1879 radeon_emit(cmd_buffer->cs, device->gfx_init_size_dw & 0xffff);
1880 } else
1881 si_init_config(cmd_buffer);
1882 }
1883
1884 VkResult radv_BeginCommandBuffer(
1885 VkCommandBuffer commandBuffer,
1886 const VkCommandBufferBeginInfo *pBeginInfo)
1887 {
1888 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1889 radv_reset_cmd_buffer(cmd_buffer);
1890
1891 memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
1892 cmd_buffer->state.last_primitive_reset_en = -1;
1893
1894 	/* set up the initial configuration for the command buffer */
1895 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
1896 switch (cmd_buffer->queue_family_index) {
1897 case RADV_QUEUE_GENERAL:
1898 emit_gfx_buffer_state(cmd_buffer);
1899 radv_set_db_count_control(cmd_buffer);
1900 break;
1901 case RADV_QUEUE_COMPUTE:
1902 si_init_compute(cmd_buffer);
1903 break;
1904 case RADV_QUEUE_TRANSFER:
1905 default:
1906 break;
1907 }
1908 }
1909
1910 if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
1911 cmd_buffer->state.framebuffer = radv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
1912 cmd_buffer->state.pass = radv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
1913
1914 struct radv_subpass *subpass =
1915 &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
1916
1917 radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
1918 radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
1919 }
1920
1921 radv_cmd_buffer_trace_emit(cmd_buffer);
1922 return VK_SUCCESS;
1923 }
1924
1925 void radv_CmdBindVertexBuffers(
1926 VkCommandBuffer commandBuffer,
1927 uint32_t firstBinding,
1928 uint32_t bindingCount,
1929 const VkBuffer* pBuffers,
1930 const VkDeviceSize* pOffsets)
1931 {
1932 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1933 struct radv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;
1934
1935 	/* We have to defer setting up vertex buffers since we need the buffer
1936 	 * stride from the pipeline. */
1937 
1938 	assert(firstBinding + bindingCount <= MAX_VBS);
1939 for (uint32_t i = 0; i < bindingCount; i++) {
1940 vb[firstBinding + i].buffer = radv_buffer_from_handle(pBuffers[i]);
1941 vb[firstBinding + i].offset = pOffsets[i];
1942 cmd_buffer->state.vb_dirty |= 1 << (firstBinding + i);
1943 }
1944 }
1945
1946 void radv_CmdBindIndexBuffer(
1947 VkCommandBuffer commandBuffer,
1948 VkBuffer buffer,
1949 VkDeviceSize offset,
1950 VkIndexType indexType)
1951 {
1952 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1953
1954 cmd_buffer->state.index_buffer = radv_buffer_from_handle(buffer);
1955 cmd_buffer->state.index_offset = offset;
1956 cmd_buffer->state.index_type = indexType; /* vk matches hw */
1957 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
1958 cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, cmd_buffer->state.index_buffer->bo, 8);
1959 }
1960
1961
1962 void radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
1963 struct radv_descriptor_set *set,
1964 unsigned idx)
1965 {
1966 struct radeon_winsys *ws = cmd_buffer->device->ws;
1967
1968 cmd_buffer->state.descriptors[idx] = set;
1969 cmd_buffer->state.descriptors_dirty |= (1u << idx);
1970 if (!set)
1971 return;
1972
1973 for (unsigned j = 0; j < set->layout->buffer_count; ++j)
1974 if (set->descriptors[j])
1975 ws->cs_add_buffer(cmd_buffer->cs, set->descriptors[j], 7);
1976
1977 	if (set->bo)
1978 ws->cs_add_buffer(cmd_buffer->cs, set->bo, 8);
1979 }
1980
1981 void radv_CmdBindDescriptorSets(
1982 VkCommandBuffer commandBuffer,
1983 VkPipelineBindPoint pipelineBindPoint,
1984 VkPipelineLayout _layout,
1985 uint32_t firstSet,
1986 uint32_t descriptorSetCount,
1987 const VkDescriptorSet* pDescriptorSets,
1988 uint32_t dynamicOffsetCount,
1989 const uint32_t* pDynamicOffsets)
1990 {
1991 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1992 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
1993 unsigned dyn_idx = 0;
1994
1995 for (unsigned i = 0; i < descriptorSetCount; ++i) {
1996 unsigned idx = i + firstSet;
1997 RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);
1998 radv_bind_descriptor_set(cmd_buffer, set, idx);
1999
2000 		for (unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
2001 unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
2002 uint32_t *dst = cmd_buffer->dynamic_buffers + idx * 4;
2003 assert(dyn_idx < dynamicOffsetCount);
2004
2005 struct radv_descriptor_range *range = set->dynamic_descriptors + j;
2006 uint64_t va = range->va + pDynamicOffsets[dyn_idx];
2007 dst[0] = va;
2008 dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
2009 dst[2] = range->size;
2010 dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
2011 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
2012 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
2013 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
2014 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
2015 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
2016 cmd_buffer->push_constant_stages |=
2017 set->layout->dynamic_shader_stages;
2018 }
2019 }
2020 }
2021
2022 static bool radv_init_push_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
2023 struct radv_descriptor_set *set,
2024 struct radv_descriptor_set_layout *layout)
2025 {
2026 set->size = layout->size;
2027 set->layout = layout;
2028
2029 if (cmd_buffer->push_descriptors.capacity < set->size) {
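		/* Grow geometrically: at least 1024 bytes, at least double
		 * the previous capacity, capped at the worst case of
		 * MAX_PUSH_DESCRIPTORS descriptors of 96 bytes each. */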
2030 size_t new_size = MAX2(set->size, 1024);
2031 new_size = MAX2(new_size, 2 * cmd_buffer->push_descriptors.capacity);
2032 new_size = MIN2(new_size, 96 * MAX_PUSH_DESCRIPTORS);
2033
2034 free(set->mapped_ptr);
2035 set->mapped_ptr = malloc(new_size);
2036
2037 if (!set->mapped_ptr) {
2038 cmd_buffer->push_descriptors.capacity = 0;
2039 cmd_buffer->record_fail = true;
2040 return false;
2041 }
2042
2043 cmd_buffer->push_descriptors.capacity = new_size;
2044 }
2045
2046 return true;
2047 }
2048
2049 void radv_meta_push_descriptor_set(
2050 struct radv_cmd_buffer* cmd_buffer,
2051 VkPipelineBindPoint pipelineBindPoint,
2052 VkPipelineLayout _layout,
2053 uint32_t set,
2054 uint32_t descriptorWriteCount,
2055 const VkWriteDescriptorSet* pDescriptorWrites)
2056 {
2057 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2058 struct radv_descriptor_set *push_set = &cmd_buffer->meta_push_descriptors;
2059 unsigned bo_offset;
2060
2061 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
2062
2063 push_set->size = layout->set[set].layout->size;
2064 push_set->layout = layout->set[set].layout;
2065
2066 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, push_set->size, 32,
2067 &bo_offset,
2068 (void**) &push_set->mapped_ptr))
2069 return;
2070
2071 push_set->va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
2072 push_set->va += bo_offset;
2073
2074 radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
2075 radv_descriptor_set_to_handle(push_set),
2076 descriptorWriteCount, pDescriptorWrites, 0, NULL);
2077
2078 cmd_buffer->state.descriptors[set] = push_set;
2079 cmd_buffer->state.descriptors_dirty |= (1u << set);
2080 }
2081
2082 void radv_CmdPushDescriptorSetKHR(
2083 VkCommandBuffer commandBuffer,
2084 VkPipelineBindPoint pipelineBindPoint,
2085 VkPipelineLayout _layout,
2086 uint32_t set,
2087 uint32_t descriptorWriteCount,
2088 const VkWriteDescriptorSet* pDescriptorWrites)
2089 {
2090 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2091 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2092 struct radv_descriptor_set *push_set = &cmd_buffer->push_descriptors.set;
2093
2094 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
2095
2096 if (!radv_init_push_descriptor_set(cmd_buffer, push_set, layout->set[set].layout))
2097 return;
2098
2099 radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
2100 radv_descriptor_set_to_handle(push_set),
2101 descriptorWriteCount, pDescriptorWrites, 0, NULL);
2102
2103 cmd_buffer->state.descriptors[set] = push_set;
2104 cmd_buffer->state.descriptors_dirty |= (1u << set);
2105 cmd_buffer->state.push_descriptors_dirty = true;
2106 }
2107
2108 void radv_CmdPushDescriptorSetWithTemplateKHR(
2109 VkCommandBuffer commandBuffer,
2110 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
2111 VkPipelineLayout _layout,
2112 uint32_t set,
2113 const void* pData)
2114 {
2115 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2116 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2117 struct radv_descriptor_set *push_set = &cmd_buffer->push_descriptors.set;
2118
2119 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
2120
2121 if (!radv_init_push_descriptor_set(cmd_buffer, push_set, layout->set[set].layout))
2122 return;
2123
2124 radv_update_descriptor_set_with_template(cmd_buffer->device, cmd_buffer, push_set,
2125 descriptorUpdateTemplate, pData);
2126
2127 cmd_buffer->state.descriptors[set] = push_set;
2128 cmd_buffer->state.descriptors_dirty |= (1u << set);
2129 cmd_buffer->state.push_descriptors_dirty = true;
2130 }
2131
2132 void radv_CmdPushConstants(VkCommandBuffer commandBuffer,
2133 VkPipelineLayout layout,
2134 VkShaderStageFlags stageFlags,
2135 uint32_t offset,
2136 uint32_t size,
2137 const void* pValues)
2138 {
2139 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2140 memcpy(cmd_buffer->push_constants + offset, pValues, size);
2141 cmd_buffer->push_constant_stages |= stageFlags;
2142 }
2143
2144 VkResult radv_EndCommandBuffer(
2145 VkCommandBuffer commandBuffer)
2146 {
2147 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2148
2149 if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER)
2150 si_emit_cache_flush(cmd_buffer);
2151
2152 if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs) ||
2153 cmd_buffer->record_fail)
2154 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2155 return VK_SUCCESS;
2156 }
2157
2158 static void
2159 radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer)
2160 {
2161 struct radeon_winsys *ws = cmd_buffer->device->ws;
2162 struct radv_shader_variant *compute_shader;
2163 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
2164 uint64_t va;
2165
2166 if (!pipeline || pipeline == cmd_buffer->state.emitted_compute_pipeline)
2167 return;
2168
2169 cmd_buffer->state.emitted_compute_pipeline = pipeline;
2170
2171 compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
2172 va = ws->buffer_get_va(compute_shader->bo);
2173
2174 ws->cs_add_buffer(cmd_buffer->cs, compute_shader->bo, 8);
2175 si_cp_dma_prefetch(cmd_buffer, va, compute_shader->code_size);
2176
2177 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
2178 cmd_buffer->cs, 16);
2179
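	/* The shader VA is 256-byte aligned: PGM_LO takes bits [39:8]
	 * and PGM_HI the bits above. */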
2180 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B830_COMPUTE_PGM_LO, 2);
2181 radeon_emit(cmd_buffer->cs, va >> 8);
2182 radeon_emit(cmd_buffer->cs, va >> 40);
2183
2184 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
2185 radeon_emit(cmd_buffer->cs, compute_shader->rsrc1);
2186 radeon_emit(cmd_buffer->cs, compute_shader->rsrc2);
2187
2189 cmd_buffer->compute_scratch_size_needed =
2190 MAX2(cmd_buffer->compute_scratch_size_needed,
2191 pipeline->max_waves * pipeline->scratch_bytes_per_wave);
2192
2193 /* change these once we have scratch support */
2194 radeon_set_sh_reg(cmd_buffer->cs, R_00B860_COMPUTE_TMPRING_SIZE,
2195 S_00B860_WAVES(pipeline->max_waves) |
2196 S_00B860_WAVESIZE(pipeline->scratch_bytes_per_wave >> 10));
2197
2198 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
2199 radeon_emit(cmd_buffer->cs,
2200 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]));
2201 radeon_emit(cmd_buffer->cs,
2202 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[1]));
2203 radeon_emit(cmd_buffer->cs,
2204 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]));
2205
2206 assert(cmd_buffer->cs->cdw <= cdw_max);
2207 }
2208
2209
2210 void radv_CmdBindPipeline(
2211 VkCommandBuffer commandBuffer,
2212 VkPipelineBindPoint pipelineBindPoint,
2213 VkPipeline _pipeline)
2214 {
2215 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2216 RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
2217
2218 for (unsigned i = 0; i < MAX_SETS; i++) {
2219 if (cmd_buffer->state.descriptors[i])
2220 cmd_buffer->state.descriptors_dirty |= (1u << i);
2221 }
2222
2223 switch (pipelineBindPoint) {
2224 case VK_PIPELINE_BIND_POINT_COMPUTE:
2225 cmd_buffer->state.compute_pipeline = pipeline;
2226 cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
2227 break;
2228 case VK_PIPELINE_BIND_POINT_GRAPHICS:
2229 cmd_buffer->state.pipeline = pipeline;
2230 cmd_buffer->state.vertex_descriptors_dirty = true;
2231 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
2232 cmd_buffer->push_constant_stages |= pipeline->active_stages;
2233
2234 /* Apply the dynamic state from the pipeline */
2235 cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
2236 radv_dynamic_state_copy(&cmd_buffer->state.dynamic,
2237 &pipeline->dynamic_state,
2238 pipeline->dynamic_state_mask);
2239
2240 if (pipeline->graphics.esgs_ring_size > cmd_buffer->esgs_ring_size_needed)
2241 cmd_buffer->esgs_ring_size_needed = pipeline->graphics.esgs_ring_size;
2242 if (pipeline->graphics.gsvs_ring_size > cmd_buffer->gsvs_ring_size_needed)
2243 cmd_buffer->gsvs_ring_size_needed = pipeline->graphics.gsvs_ring_size;
2244
2245 if (radv_pipeline_has_tess(pipeline))
2246 cmd_buffer->tess_rings_needed = true;
2247
2248 if (radv_pipeline_has_gs(pipeline)) {
2249 struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
2250 AC_UD_SCRATCH_RING_OFFSETS);
2251 if (cmd_buffer->ring_offsets_idx == -1)
2252 cmd_buffer->ring_offsets_idx = loc->sgpr_idx;
2253 else if (loc->sgpr_idx != -1)
2254 assert(loc->sgpr_idx == cmd_buffer->ring_offsets_idx);
2255 }
2256 break;
2257 default:
2258 assert(!"invalid bind point");
2259 break;
2260 }
2261 }
2262
2263 void radv_CmdSetViewport(
2264 VkCommandBuffer commandBuffer,
2265 uint32_t firstViewport,
2266 uint32_t viewportCount,
2267 const VkViewport* pViewports)
2268 {
2269 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2270
2271 const uint32_t total_count = firstViewport + viewportCount;
2272 if (cmd_buffer->state.dynamic.viewport.count < total_count)
2273 cmd_buffer->state.dynamic.viewport.count = total_count;
2274
2275 memcpy(cmd_buffer->state.dynamic.viewport.viewports + firstViewport,
2276 pViewports, viewportCount * sizeof(*pViewports));
2277
2278 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_VIEWPORT;
2279 }
2280
2281 void radv_CmdSetScissor(
2282 VkCommandBuffer commandBuffer,
2283 uint32_t firstScissor,
2284 uint32_t scissorCount,
2285 const VkRect2D* pScissors)
2286 {
2287 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2288
2289 const uint32_t total_count = firstScissor + scissorCount;
2290 if (cmd_buffer->state.dynamic.scissor.count < total_count)
2291 cmd_buffer->state.dynamic.scissor.count = total_count;
2292
2293 memcpy(cmd_buffer->state.dynamic.scissor.scissors + firstScissor,
2294 pScissors, scissorCount * sizeof(*pScissors));
2295 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
2296 }
2297
2298 void radv_CmdSetLineWidth(
2299 VkCommandBuffer commandBuffer,
2300 float lineWidth)
2301 {
2302 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2303 cmd_buffer->state.dynamic.line_width = lineWidth;
2304 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
2305 }
2306
2307 void radv_CmdSetDepthBias(
2308 VkCommandBuffer commandBuffer,
2309 float depthBiasConstantFactor,
2310 float depthBiasClamp,
2311 float depthBiasSlopeFactor)
2312 {
2313 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2314
2315 cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
2316 cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
2317 cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;
2318
2319 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
2320 }
2321
2322 void radv_CmdSetBlendConstants(
2323 VkCommandBuffer commandBuffer,
2324 const float blendConstants[4])
2325 {
2326 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2327
2328 memcpy(cmd_buffer->state.dynamic.blend_constants,
2329 blendConstants, sizeof(float) * 4);
2330
2331 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
2332 }
2333
2334 void radv_CmdSetDepthBounds(
2335 VkCommandBuffer commandBuffer,
2336 float minDepthBounds,
2337 float maxDepthBounds)
2338 {
2339 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2340
2341 cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
2342 cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;
2343
2344 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
2345 }
2346
2347 void radv_CmdSetStencilCompareMask(
2348 VkCommandBuffer commandBuffer,
2349 VkStencilFaceFlags faceMask,
2350 uint32_t compareMask)
2351 {
2352 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2353
2354 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2355 cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
2356 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2357 cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;
2358
2359 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
2360 }
2361
2362 void radv_CmdSetStencilWriteMask(
2363 VkCommandBuffer commandBuffer,
2364 VkStencilFaceFlags faceMask,
2365 uint32_t writeMask)
2366 {
2367 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2368
2369 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2370 cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
2371 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2372 cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;
2373
2374 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
2375 }
2376
2377 void radv_CmdSetStencilReference(
2378 VkCommandBuffer commandBuffer,
2379 VkStencilFaceFlags faceMask,
2380 uint32_t reference)
2381 {
2382 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2383
2384 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2385 cmd_buffer->state.dynamic.stencil_reference.front = reference;
2386 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2387 cmd_buffer->state.dynamic.stencil_reference.back = reference;
2388
2389 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
2390 }
2391
2392
2393 void radv_CmdExecuteCommands(
2394 VkCommandBuffer commandBuffer,
2395 uint32_t commandBufferCount,
2396 const VkCommandBuffer* pCmdBuffers)
2397 {
2398 RADV_FROM_HANDLE(radv_cmd_buffer, primary, commandBuffer);
2399
2400 	/* Emit pending flushes on the primary prior to executing the secondaries */
2401 si_emit_cache_flush(primary);
2402
2403 for (uint32_t i = 0; i < commandBufferCount; i++) {
2404 RADV_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]);
2405
2406 primary->scratch_size_needed = MAX2(primary->scratch_size_needed,
2407 secondary->scratch_size_needed);
2408 primary->compute_scratch_size_needed = MAX2(primary->compute_scratch_size_needed,
2409 secondary->compute_scratch_size_needed);
2410
2411 if (secondary->esgs_ring_size_needed > primary->esgs_ring_size_needed)
2412 primary->esgs_ring_size_needed = secondary->esgs_ring_size_needed;
2413 if (secondary->gsvs_ring_size_needed > primary->gsvs_ring_size_needed)
2414 primary->gsvs_ring_size_needed = secondary->gsvs_ring_size_needed;
2415 if (secondary->tess_rings_needed)
2416 primary->tess_rings_needed = true;
2417 if (secondary->sample_positions_needed)
2418 primary->sample_positions_needed = true;
2419
2420 if (secondary->ring_offsets_idx != -1) {
2421 if (primary->ring_offsets_idx == -1)
2422 primary->ring_offsets_idx = secondary->ring_offsets_idx;
2423 else
2424 assert(secondary->ring_offsets_idx == primary->ring_offsets_idx);
2425 }
2426 primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs);
2427 }
2428
2429 	/* if we executed secondaries we need to re-emit our pipelines */
2430 if (commandBufferCount) {
2431 primary->state.emitted_pipeline = NULL;
2432 primary->state.emitted_compute_pipeline = NULL;
2433 primary->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
2434 primary->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_ALL;
2435 primary->state.last_primitive_reset_en = -1;
2436 primary->state.last_primitive_reset_index = 0;
2437 }
2438 }
2439
2440 VkResult radv_CreateCommandPool(
2441 VkDevice _device,
2442 const VkCommandPoolCreateInfo* pCreateInfo,
2443 const VkAllocationCallbacks* pAllocator,
2444 VkCommandPool* pCmdPool)
2445 {
2446 RADV_FROM_HANDLE(radv_device, device, _device);
2447 struct radv_cmd_pool *pool;
2448
2449 pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
2450 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2451 if (pool == NULL)
2452 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2453
2454 if (pAllocator)
2455 pool->alloc = *pAllocator;
2456 else
2457 pool->alloc = device->alloc;
2458
2459 list_inithead(&pool->cmd_buffers);
2460 list_inithead(&pool->free_cmd_buffers);
2461
2462 pool->queue_family_index = pCreateInfo->queueFamilyIndex;
2463
2464 *pCmdPool = radv_cmd_pool_to_handle(pool);
2465
2466 return VK_SUCCESS;
2468 }
2469
2470 void radv_DestroyCommandPool(
2471 VkDevice _device,
2472 VkCommandPool commandPool,
2473 const VkAllocationCallbacks* pAllocator)
2474 {
2475 RADV_FROM_HANDLE(radv_device, device, _device);
2476 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
2477
2478 if (!pool)
2479 return;
2480
2481 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
2482 &pool->cmd_buffers, pool_link) {
2483 radv_cmd_buffer_destroy(cmd_buffer);
2484 }
2485
2486 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
2487 &pool->free_cmd_buffers, pool_link) {
2488 radv_cmd_buffer_destroy(cmd_buffer);
2489 }
2490
2491 vk_free2(&device->alloc, pAllocator, pool);
2492 }
2493
2494 VkResult radv_ResetCommandPool(
2495 VkDevice device,
2496 VkCommandPool commandPool,
2497 VkCommandPoolResetFlags flags)
2498 {
2499 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
2500
2501 list_for_each_entry(struct radv_cmd_buffer, cmd_buffer,
2502 &pool->cmd_buffers, pool_link) {
2503 radv_reset_cmd_buffer(cmd_buffer);
2504 }
2505
2506 return VK_SUCCESS;
2507 }
2508
2509 void radv_TrimCommandPoolKHR(
2510 VkDevice device,
2511 VkCommandPool commandPool,
2512 VkCommandPoolTrimFlagsKHR flags)
2513 {
2514 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
2515
2516 if (!pool)
2517 return;
2518
2519 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
2520 &pool->free_cmd_buffers, pool_link) {
2521 radv_cmd_buffer_destroy(cmd_buffer);
2522 }
2523 }
2524
2525 void radv_CmdBeginRenderPass(
2526 VkCommandBuffer commandBuffer,
2527 const VkRenderPassBeginInfo* pRenderPassBegin,
2528 VkSubpassContents contents)
2529 {
2530 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2531 RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass);
2532 RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
2533
2534 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
2535 cmd_buffer->cs, 2048);
2536
2537 cmd_buffer->state.framebuffer = framebuffer;
2538 cmd_buffer->state.pass = pass;
2539 cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
2540 radv_cmd_state_setup_attachments(cmd_buffer, pass, pRenderPassBegin);
2541
2542 radv_cmd_buffer_set_subpass(cmd_buffer, pass->subpasses, true);
2543 assert(cmd_buffer->cs->cdw <= cdw_max);
2544
2545 radv_cmd_buffer_clear_subpass(cmd_buffer);
2546 }
2547
2548 void radv_CmdNextSubpass(
2549 VkCommandBuffer commandBuffer,
2550 VkSubpassContents contents)
2551 {
2552 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2553
2554 radv_cmd_buffer_resolve_subpass(cmd_buffer);
2555
2556 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs,
2557 2048);
2558
2559 radv_cmd_buffer_set_subpass(cmd_buffer, cmd_buffer->state.subpass + 1, true);
2560 radv_cmd_buffer_clear_subpass(cmd_buffer);
2561 }
2562
2563 void radv_CmdDraw(
2564 VkCommandBuffer commandBuffer,
2565 uint32_t vertexCount,
2566 uint32_t instanceCount,
2567 uint32_t firstVertex,
2568 uint32_t firstInstance)
2569 {
2570 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2571
2572 radv_cmd_buffer_flush_state(cmd_buffer, false, (instanceCount > 1), false, vertexCount);
2573
2574 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 10);
2575
2576 struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
2577 AC_UD_VS_BASE_VERTEX_START_INSTANCE);
2578 if (loc->sgpr_idx != -1) {
2579 uint32_t base_reg = shader_stage_to_user_data_0(MESA_SHADER_VERTEX, radv_pipeline_has_gs(cmd_buffer->state.pipeline),
2580 radv_pipeline_has_tess(cmd_buffer->state.pipeline));
2581 int vs_num = 2;
2582 if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX]->info.info.vs.needs_draw_id)
2583 vs_num = 3;
2584
2585 	assert(loc->num_sgprs == vs_num);
2586 radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, vs_num);
2587 radeon_emit(cmd_buffer->cs, firstVertex);
2588 radeon_emit(cmd_buffer->cs, firstInstance);
2589 if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX]->info.info.vs.needs_draw_id)
2590 radeon_emit(cmd_buffer->cs, 0);
2591 }
2592 radeon_emit(cmd_buffer->cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
2593 radeon_emit(cmd_buffer->cs, instanceCount);
2594
2595 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, 0));
2596 radeon_emit(cmd_buffer->cs, vertexCount);
2597 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
2598 S_0287F0_USE_OPAQUE(0));
2599
2600 assert(cmd_buffer->cs->cdw <= cdw_max);
2601
2602 radv_cmd_buffer_trace_emit(cmd_buffer);
2603 }
2604
2605 void radv_CmdDrawIndexed(
2606 VkCommandBuffer commandBuffer,
2607 uint32_t indexCount,
2608 uint32_t instanceCount,
2609 uint32_t firstIndex,
2610 int32_t vertexOffset,
2611 uint32_t firstInstance)
2612 {
2613 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2614 int index_size = cmd_buffer->state.index_type ? 4 : 2;
2615 uint32_t index_max_size = (cmd_buffer->state.index_buffer->size - cmd_buffer->state.index_offset) / index_size;
2616 uint64_t index_va;
2617
2618 radv_cmd_buffer_flush_state(cmd_buffer, true, (instanceCount > 1), false, indexCount);
2619
2620 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 15);
2621
2622 radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
2623 radeon_emit(cmd_buffer->cs, cmd_buffer->state.index_type);
2624
2625 struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
2626 AC_UD_VS_BASE_VERTEX_START_INSTANCE);
2627 if (loc->sgpr_idx != -1) {
2628 uint32_t base_reg = shader_stage_to_user_data_0(MESA_SHADER_VERTEX, radv_pipeline_has_gs(cmd_buffer->state.pipeline),
2629 radv_pipeline_has_tess(cmd_buffer->state.pipeline));
2630 int vs_num = 2;
2631 if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX]->info.info.vs.needs_draw_id)
2632 vs_num = 3;
2633
2634 	assert(loc->num_sgprs == vs_num);
2635 radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, vs_num);
2636 radeon_emit(cmd_buffer->cs, vertexOffset);
2637 radeon_emit(cmd_buffer->cs, firstInstance);
2638 if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX]->info.info.vs.needs_draw_id)
2639 radeon_emit(cmd_buffer->cs, 0);
2640 }
2641 radeon_emit(cmd_buffer->cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
2642 radeon_emit(cmd_buffer->cs, instanceCount);
2643
2644 index_va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->state.index_buffer->bo);
2645 index_va += firstIndex * index_size + cmd_buffer->state.index_buffer->offset + cmd_buffer->state.index_offset;
2646 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, false));
2647 radeon_emit(cmd_buffer->cs, index_max_size);
2648 radeon_emit(cmd_buffer->cs, index_va);
2649 radeon_emit(cmd_buffer->cs, (index_va >> 32UL) & 0xFF);
2650 radeon_emit(cmd_buffer->cs, indexCount);
2651 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_DMA);
2652
2653 assert(cmd_buffer->cs->cdw <= cdw_max);
2654 radv_cmd_buffer_trace_emit(cmd_buffer);
2655 }
2656
2657 static void
2658 radv_emit_indirect_draw(struct radv_cmd_buffer *cmd_buffer,
2659 VkBuffer _buffer,
2660 VkDeviceSize offset,
2661 VkBuffer _count_buffer,
2662 VkDeviceSize count_offset,
2663 uint32_t draw_count,
2664 uint32_t stride,
2665 bool indexed)
2666 {
2667 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
2668 RADV_FROM_HANDLE(radv_buffer, count_buffer, _count_buffer);
2669 struct radeon_winsys_cs *cs = cmd_buffer->cs;
2670 unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
2671 : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
2672 uint64_t indirect_va = cmd_buffer->device->ws->buffer_get_va(buffer->bo);
2673 indirect_va += offset + buffer->offset;
2674 uint64_t count_va = 0;
2675
2676 if (count_buffer) {
2677 count_va = cmd_buffer->device->ws->buffer_get_va(count_buffer->bo);
2678 count_va += count_offset + count_buffer->offset;
2679 }
2680
2681 if (!draw_count)
2682 return;
2683
2684 cmd_buffer->device->ws->cs_add_buffer(cs, buffer->bo, 8);
2685
2686 struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
2687 AC_UD_VS_BASE_VERTEX_START_INSTANCE);
2688 uint32_t base_reg = shader_stage_to_user_data_0(MESA_SHADER_VERTEX, radv_pipeline_has_gs(cmd_buffer->state.pipeline),
2689 radv_pipeline_has_tess(cmd_buffer->state.pipeline));
2690 bool draw_id_enable = cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX]->info.info.vs.needs_draw_id;
2691 assert(loc->sgpr_idx != -1);
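	/* Point the CP at the indirect arguments with SET_BASE, then issue
	 * a multi-draw that loads base vertex/start instance (and
	 * optionally the draw id) into the VS user SGPRs for every draw. */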
2692 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
2693 radeon_emit(cs, 1);
2694 radeon_emit(cs, indirect_va);
2695 radeon_emit(cs, indirect_va >> 32);
2696
2697 radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
2698 PKT3_DRAW_INDIRECT_MULTI,
2699 8, false));
2700 radeon_emit(cs, 0);
2701 radeon_emit(cs, ((base_reg + loc->sgpr_idx * 4) - SI_SH_REG_OFFSET) >> 2);
2702 radeon_emit(cs, ((base_reg + (loc->sgpr_idx + 1) * 4) - SI_SH_REG_OFFSET) >> 2);
2703 radeon_emit(cs, (((base_reg + (loc->sgpr_idx + 2) * 4) - SI_SH_REG_OFFSET) >> 2) |
2704 S_2C3_DRAW_INDEX_ENABLE(draw_id_enable) |
2705 S_2C3_COUNT_INDIRECT_ENABLE(!!count_va));
2706 radeon_emit(cs, draw_count); /* count */
2707 radeon_emit(cs, count_va); /* count_addr */
2708 radeon_emit(cs, count_va >> 32);
2709 radeon_emit(cs, stride); /* stride */
2710 radeon_emit(cs, di_src_sel);
2711 radv_cmd_buffer_trace_emit(cmd_buffer);
2712 }
2713
2714 static void
2715 radv_cmd_draw_indirect_count(VkCommandBuffer commandBuffer,
2716 VkBuffer buffer,
2717 VkDeviceSize offset,
2718 VkBuffer countBuffer,
2719 VkDeviceSize countBufferOffset,
2720 uint32_t maxDrawCount,
2721 uint32_t stride)
2722 {
2723 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2724 radv_cmd_buffer_flush_state(cmd_buffer, false, false, true, 0);
2725
2726 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
2727 cmd_buffer->cs, 14);
2728
2729 radv_emit_indirect_draw(cmd_buffer, buffer, offset,
2730 countBuffer, countBufferOffset, maxDrawCount, stride, false);
2731
2732 assert(cmd_buffer->cs->cdw <= cdw_max);
2733 }
2734
2735 static void
2736 radv_cmd_draw_indexed_indirect_count(
2737 VkCommandBuffer commandBuffer,
2738 VkBuffer buffer,
2739 VkDeviceSize offset,
2740 VkBuffer countBuffer,
2741 VkDeviceSize countBufferOffset,
2742 uint32_t maxDrawCount,
2743 uint32_t stride)
2744 {
2745 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2746 int index_size = cmd_buffer->state.index_type ? 4 : 2;
2747 uint32_t index_max_size = (cmd_buffer->state.index_buffer->size - cmd_buffer->state.index_offset) / index_size;
2748 uint64_t index_va;
2749 radv_cmd_buffer_flush_state(cmd_buffer, true, false, true, 0);
2750
2751 index_va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->state.index_buffer->bo);
2752 index_va += cmd_buffer->state.index_buffer->offset + cmd_buffer->state.index_offset;
2753
2754 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 21);
2755
2756 radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
2757 radeon_emit(cmd_buffer->cs, cmd_buffer->state.index_type);
2758
2759 radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_BASE, 1, 0));
2760 radeon_emit(cmd_buffer->cs, index_va);
2761 radeon_emit(cmd_buffer->cs, index_va >> 32);
2762
2763 radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
2764 radeon_emit(cmd_buffer->cs, index_max_size);
2765
2766 radv_emit_indirect_draw(cmd_buffer, buffer, offset,
2767 countBuffer, countBufferOffset, maxDrawCount, stride, true);
2768
2769 assert(cmd_buffer->cs->cdw <= cdw_max);
2770 }
2771
2772 void radv_CmdDrawIndirect(
2773 VkCommandBuffer commandBuffer,
2774 VkBuffer buffer,
2775 VkDeviceSize offset,
2776 uint32_t drawCount,
2777 uint32_t stride)
2778 {
2779 radv_cmd_draw_indirect_count(commandBuffer, buffer, offset,
2780 VK_NULL_HANDLE, 0, drawCount, stride);
2781 }
2782
2783 void radv_CmdDrawIndexedIndirect(
2784 VkCommandBuffer commandBuffer,
2785 VkBuffer buffer,
2786 VkDeviceSize offset,
2787 uint32_t drawCount,
2788 uint32_t stride)
2789 {
2790 radv_cmd_draw_indexed_indirect_count(commandBuffer, buffer, offset,
2791 VK_NULL_HANDLE, 0, drawCount, stride);
2792 }
2793
2794 void radv_CmdDrawIndirectCountAMD(
2795 VkCommandBuffer commandBuffer,
2796 VkBuffer buffer,
2797 VkDeviceSize offset,
2798 VkBuffer countBuffer,
2799 VkDeviceSize countBufferOffset,
2800 uint32_t maxDrawCount,
2801 uint32_t stride)
2802 {
2803 radv_cmd_draw_indirect_count(commandBuffer, buffer, offset,
2804 countBuffer, countBufferOffset,
2805 maxDrawCount, stride);
2806 }
2807
2808 void radv_CmdDrawIndexedIndirectCountAMD(
2809 VkCommandBuffer commandBuffer,
2810 VkBuffer buffer,
2811 VkDeviceSize offset,
2812 VkBuffer countBuffer,
2813 VkDeviceSize countBufferOffset,
2814 uint32_t maxDrawCount,
2815 uint32_t stride)
2816 {
2817 radv_cmd_draw_indexed_indirect_count(commandBuffer, buffer, offset,
2818 countBuffer, countBufferOffset,
2819 maxDrawCount, stride);
2820 }
2821
2822 static void
2823 radv_flush_compute_state(struct radv_cmd_buffer *cmd_buffer)
2824 {
2825 radv_emit_compute_pipeline(cmd_buffer);
2826 radv_flush_descriptors(cmd_buffer, cmd_buffer->state.compute_pipeline,
2827 VK_SHADER_STAGE_COMPUTE_BIT);
2828 radv_flush_constants(cmd_buffer, cmd_buffer->state.compute_pipeline,
2829 VK_SHADER_STAGE_COMPUTE_BIT);
2830 si_emit_cache_flush(cmd_buffer);
2831 }
2832
2833 void radv_CmdDispatch(
2834 VkCommandBuffer commandBuffer,
2835 uint32_t x,
2836 uint32_t y,
2837 uint32_t z)
2838 {
2839 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2840
2841 radv_flush_compute_state(cmd_buffer);
2842
2843 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 10);
2844
2845 struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.compute_pipeline,
2846 MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE);
2847 if (loc->sgpr_idx != -1) {
2848 assert(!loc->indirect);
2849 uint8_t grid_used = cmd_buffer->state.compute_pipeline->shaders[MESA_SHADER_COMPUTE]->info.info.cs.grid_components_used;
2850 assert(loc->num_sgprs == grid_used);
2851 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B900_COMPUTE_USER_DATA_0 + loc->sgpr_idx * 4, grid_used);
2852 radeon_emit(cmd_buffer->cs, x);
2853 if (grid_used > 1)
2854 radeon_emit(cmd_buffer->cs, y);
2855 if (grid_used > 2)
2856 radeon_emit(cmd_buffer->cs, z);
2857 }
2858
2859 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
2860 PKT3_SHADER_TYPE_S(1));
2861 radeon_emit(cmd_buffer->cs, x);
2862 radeon_emit(cmd_buffer->cs, y);
2863 radeon_emit(cmd_buffer->cs, z);
2864 radeon_emit(cmd_buffer->cs, 1);
2865
2866 assert(cmd_buffer->cs->cdw <= cdw_max);
2867 radv_cmd_buffer_trace_emit(cmd_buffer);
2868 }
2869
2870 void radv_CmdDispatchIndirect(
2871 VkCommandBuffer commandBuffer,
2872 VkBuffer _buffer,
2873 VkDeviceSize offset)
2874 {
2875 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2876 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
2877 uint64_t va = cmd_buffer->device->ws->buffer_get_va(buffer->bo);
2878 va += buffer->offset + offset;
2879
2880 cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, buffer->bo, 8);
2881
2882 radv_flush_compute_state(cmd_buffer);
2883
2884 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 25);
2885 struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.compute_pipeline,
2886 MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE);
2887 if (loc->sgpr_idx != -1) {
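		/* The grid size lives in GPU memory, so copy each component
		 * into the CS user SGPRs with CP COPY_DATA. */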
2888 uint8_t grid_used = cmd_buffer->state.compute_pipeline->shaders[MESA_SHADER_COMPUTE]->info.info.cs.grid_components_used;
2889 for (unsigned i = 0; i < grid_used; ++i) {
2890 radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
2891 radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
2892 COPY_DATA_DST_SEL(COPY_DATA_REG));
2893 radeon_emit(cmd_buffer->cs, (va + 4 * i));
2894 radeon_emit(cmd_buffer->cs, (va + 4 * i) >> 32);
2895 radeon_emit(cmd_buffer->cs, ((R_00B900_COMPUTE_USER_DATA_0 + loc->sgpr_idx * 4) >> 2) + i);
2896 radeon_emit(cmd_buffer->cs, 0);
2897 }
2898 }
2899
2900 if (radv_cmd_buffer_uses_mec(cmd_buffer)) {
2901 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_INDIRECT, 2, 0) |
2902 PKT3_SHADER_TYPE_S(1));
2903 radeon_emit(cmd_buffer->cs, va);
2904 radeon_emit(cmd_buffer->cs, va >> 32);
2905 radeon_emit(cmd_buffer->cs, 1);
2906 } else {
2907 radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_BASE, 2, 0) |
2908 PKT3_SHADER_TYPE_S(1));
2909 radeon_emit(cmd_buffer->cs, 1);
2910 radeon_emit(cmd_buffer->cs, va);
2911 radeon_emit(cmd_buffer->cs, va >> 32);
2912
2913 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, 0) |
2914 PKT3_SHADER_TYPE_S(1));
2915 radeon_emit(cmd_buffer->cs, 0);
2916 radeon_emit(cmd_buffer->cs, 1);
2917 }
2918
2919 assert(cmd_buffer->cs->cdw <= cdw_max);
2920 radv_cmd_buffer_trace_emit(cmd_buffer);
2921 }
2922
2923 void radv_unaligned_dispatch(
2924 struct radv_cmd_buffer *cmd_buffer,
2925 uint32_t x,
2926 uint32_t y,
2927 uint32_t z)
2928 {
2929 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
2930 struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
2931 uint32_t blocks[3], remainder[3];
2932
2933 blocks[0] = round_up_u32(x, compute_shader->info.cs.block_size[0]);
2934 blocks[1] = round_up_u32(y, compute_shader->info.cs.block_size[1]);
2935 blocks[2] = round_up_u32(z, compute_shader->info.cs.block_size[2]);
2936
2937 /* If aligned, these should be an entire block size, not 0 */
2938 remainder[0] = x + compute_shader->info.cs.block_size[0] - align_u32_npot(x, compute_shader->info.cs.block_size[0]);
2939 remainder[1] = y + compute_shader->info.cs.block_size[1] - align_u32_npot(y, compute_shader->info.cs.block_size[1]);
2940 remainder[2] = z + compute_shader->info.cs.block_size[2] - align_u32_npot(z, compute_shader->info.cs.block_size[2]);
2941
2942 radv_flush_compute_state(cmd_buffer);
2943
2944 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 15);
2945
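	/* Reprogram the block size with partial thread counts for the
	 * trailing thread group in each dimension. */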
2946 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
2947 radeon_emit(cmd_buffer->cs,
2948 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]) |
2949 S_00B81C_NUM_THREAD_PARTIAL(remainder[0]));
2950 radeon_emit(cmd_buffer->cs,
2951 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[1]) |
2952 S_00B81C_NUM_THREAD_PARTIAL(remainder[1]));
2953 radeon_emit(cmd_buffer->cs,
2954 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]) |
2955 S_00B81C_NUM_THREAD_PARTIAL(remainder[2]));
2956
2957 struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.compute_pipeline,
2958 MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE);
2959 if (loc->sgpr_idx != -1) {
2960 uint8_t grid_used = cmd_buffer->state.compute_pipeline->shaders[MESA_SHADER_COMPUTE]->info.info.cs.grid_components_used;
2961 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B900_COMPUTE_USER_DATA_0 + loc->sgpr_idx * 4, grid_used);
2962 radeon_emit(cmd_buffer->cs, blocks[0]);
2963 if (grid_used > 1)
2964 radeon_emit(cmd_buffer->cs, blocks[1]);
2965 if (grid_used > 2)
2966 radeon_emit(cmd_buffer->cs, blocks[2]);
2967 }
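	/* PARTIAL_TG_EN makes the last thread group in each dimension use
	 * the NUM_THREAD_PARTIAL counts programmed above. */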
2968 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
2969 PKT3_SHADER_TYPE_S(1));
2970 radeon_emit(cmd_buffer->cs, blocks[0]);
2971 radeon_emit(cmd_buffer->cs, blocks[1]);
2972 radeon_emit(cmd_buffer->cs, blocks[2]);
2973 radeon_emit(cmd_buffer->cs, S_00B800_COMPUTE_SHADER_EN(1) |
2974 S_00B800_PARTIAL_TG_EN(1));
2975
2976 assert(cmd_buffer->cs->cdw <= cdw_max);
2977 radv_cmd_buffer_trace_emit(cmd_buffer);
2978 }
2979
2980 void radv_CmdEndRenderPass(
2981 VkCommandBuffer commandBuffer)
2982 {
2983 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2984
2985 radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier);
2986
2987 radv_cmd_buffer_resolve_subpass(cmd_buffer);
2988
2989 for (unsigned i = 0; i < cmd_buffer->state.framebuffer->attachment_count; ++i) {
2990 VkImageLayout layout = cmd_buffer->state.pass->attachments[i].final_layout;
2991 radv_handle_subpass_image_transition(cmd_buffer,
2992 (VkAttachmentReference){i, layout});
2993 }
2994
2995 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
2996
2997 cmd_buffer->state.pass = NULL;
2998 cmd_buffer->state.subpass = NULL;
2999 cmd_buffer->state.attachments = NULL;
3000 cmd_buffer->state.framebuffer = NULL;
3001 }
3002
3003
3004 static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
3005 struct radv_image *image,
3006 const VkImageSubresourceRange *range)
3007 {
3008 assert(range->baseMipLevel == 0);
3009 	assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_MIP_LEVELS);
3010 unsigned layer_count = radv_get_layerCount(image, range);
3011 uint64_t size = image->surface.htile_slice_size * layer_count;
3012 uint64_t offset = image->offset + image->htile_offset +
3013 image->surface.htile_slice_size * range->baseArrayLayer;
3014
3015 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
3016 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
3017
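	/* Fill HTILE with 0xffffffff, which marks the whole range as
	 * expanded (fully decompressed). */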
3018 radv_fill_buffer(cmd_buffer, image->bo, offset, size, 0xffffffff);
3019
3020 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
3021 RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
3022 RADV_CMD_FLAG_INV_VMEM_L1 |
3023 RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
3024 }
3025
3026 static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
3027 struct radv_image *image,
3028 VkImageLayout src_layout,
3029 VkImageLayout dst_layout,
3030 const VkImageSubresourceRange *range,
3031 VkImageAspectFlags pending_clears)
3032 {
3033 if (dst_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL &&
3034 (pending_clears & vk_format_aspects(image->vk_format)) == vk_format_aspects(image->vk_format) &&
3035 cmd_buffer->state.render_area.offset.x == 0 && cmd_buffer->state.render_area.offset.y == 0 &&
3036 cmd_buffer->state.render_area.extent.width == image->info.width &&
3037 cmd_buffer->state.render_area.extent.height == image->info.height) {
3038 /* The clear will initialize htile. */
3039 return;
3040 } else if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
3041 radv_layout_has_htile(image, dst_layout)) {
3042 /* TODO: merge with the clear if applicable */
3043 radv_initialize_htile(cmd_buffer, image, range);
3044 } else if (!radv_layout_has_htile(image, src_layout) &&
3045 radv_layout_has_htile(image, dst_layout)) {
3046 radv_initialize_htile(cmd_buffer, image, range);
3047 } else if ((radv_layout_has_htile(image, src_layout) &&
3048 !radv_layout_has_htile(image, dst_layout)) ||
3049 (radv_layout_is_htile_compressed(image, src_layout) &&
3050 !radv_layout_is_htile_compressed(image, dst_layout))) {
3051 VkImageSubresourceRange local_range = *range;
3052 local_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
3053 local_range.baseMipLevel = 0;
3054 local_range.levelCount = 1;
3055
3056 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
3057 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
3058
3059 radv_decompress_depth_image_inplace(cmd_buffer, image, &local_range);
3060
3061 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
3062 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
3063 }
3064 }
3065
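/* Fill the CMASK buffer with the given value, bracketed by CB flushes.
 * Callers use 0xcccccccc when the image has FMASK and 0xffffffff
 * otherwise, since the CMASK encoding differs when FMASK is in use.
 */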
3066 void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
3067 struct radv_image *image, uint32_t value)
3068 {
3069 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
3070 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
3071
3072 radv_fill_buffer(cmd_buffer, image->bo, image->offset + image->cmask.offset,
3073 image->cmask.size, value);
3074
3075 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
3076 RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
3077 RADV_CMD_FLAG_INV_VMEM_L1 |
3078 RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
3079 }
3080
3081 static void radv_handle_cmask_image_transition(struct radv_cmd_buffer *cmd_buffer,
3082 struct radv_image *image,
3083 VkImageLayout src_layout,
3084 VkImageLayout dst_layout,
3085 unsigned src_queue_mask,
3086 unsigned dst_queue_mask,
3087 const VkImageSubresourceRange *range,
3088 VkImageAspectFlags pending_clears)
3089 {
3090 if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
3091 if (image->fmask.size)
3092 radv_initialise_cmask(cmd_buffer, image, 0xccccccccu);
3093 else
3094 radv_initialise_cmask(cmd_buffer, image, 0xffffffffu);
3095 } else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
3096 !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
3097 radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
3098 }
3099 }
3100
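/* Fill the DCC buffer with the given reset value so that the first use
 * after an UNDEFINED transition sees consistent compression keys rather
 * than garbage; the flush bracketing mirrors the CMASK path.
 */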
3101 void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
3102 struct radv_image *image, uint32_t value)
3103 {
3105 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
3106 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
3107
3108 radv_fill_buffer(cmd_buffer, image->bo, image->offset + image->dcc_offset,
3109 image->surface.dcc_size, value);
3110
3111 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
3112 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
3113 RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
3114 RADV_CMD_FLAG_INV_VMEM_L1 |
3115 RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
3116 }
3117
3118 static void radv_handle_dcc_image_transition(struct radv_cmd_buffer *cmd_buffer,
3119 struct radv_image *image,
3120 VkImageLayout src_layout,
3121 VkImageLayout dst_layout,
3122 unsigned src_queue_mask,
3123 unsigned dst_queue_mask,
3124 const VkImageSubresourceRange *range,
3125 VkImageAspectFlags pending_clears)
3126 {
3127 if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
3128 radv_initialize_dcc(cmd_buffer, image, 0x20202020u);
3129 } else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
3130 !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
3131 radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
3132 }
3133 }
3134
3135 static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
3136 struct radv_image *image,
3137 VkImageLayout src_layout,
3138 VkImageLayout dst_layout,
3139 uint32_t src_family,
3140 uint32_t dst_family,
3141 const VkImageSubresourceRange *range,
3142 VkImageAspectFlags pending_clears)
3143 {
3144 if (image->exclusive && src_family != dst_family) {
3145 /* This is an acquire or a release operation and there will be
3146 * a corresponding release/acquire. Do the transition in the
3147 * most flexible queue. */
3148
3149 assert(src_family == cmd_buffer->queue_family_index ||
3150 dst_family == cmd_buffer->queue_family_index);
3151
3152 if (cmd_buffer->queue_family_index == RADV_QUEUE_TRANSFER)
3153 return;
3154
3155 if (cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
3156 (src_family == RADV_QUEUE_GENERAL ||
3157 dst_family == RADV_QUEUE_GENERAL))
3158 return;
3159 }
3160
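/* Compute which queue families may access the image in the source and
 * destination layouts; the handlers below use these masks to decide
 * whether fast-clear metadata must be flushed out.
 */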
3161 unsigned src_queue_mask = radv_image_queue_family_mask(image, src_family, cmd_buffer->queue_family_index);
3162 unsigned dst_queue_mask = radv_image_queue_family_mask(image, dst_family, cmd_buffer->queue_family_index);
3163
3164 if (image->surface.htile_size)
3165 radv_handle_depth_image_transition(cmd_buffer, image, src_layout,
3166 dst_layout, range, pending_clears);
3167
3168 if (image->cmask.size)
3169 radv_handle_cmask_image_transition(cmd_buffer, image, src_layout,
3170 dst_layout, src_queue_mask,
3171 dst_queue_mask, range,
3172 pending_clears);
3173
3174 if (image->surface.dcc_size)
3175 radv_handle_dcc_image_transition(cmd_buffer, image, src_layout,
3176 dst_layout, src_queue_mask,
3177 dst_queue_mask, range,
3178 pending_clears);
3179 }
3180
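/* vkCmdPipelineBarrier: translate barriers into cache flush bits and image
 * layout transitions. Source-side flushes and the stage flush are recorded
 * before the transitions and destination-side invalidations after them, so
 * the metadata fills done by the transitions are ordered against both
 * sides of the barrier.
 */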
3181 void radv_CmdPipelineBarrier(
3182 VkCommandBuffer commandBuffer,
3183 VkPipelineStageFlags srcStageMask,
3184 VkPipelineStageFlags destStageMask,
3185 VkBool32 byRegion,
3186 uint32_t memoryBarrierCount,
3187 const VkMemoryBarrier* pMemoryBarriers,
3188 uint32_t bufferMemoryBarrierCount,
3189 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
3190 uint32_t imageMemoryBarrierCount,
3191 const VkImageMemoryBarrier* pImageMemoryBarriers)
3192 {
3193 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3194 enum radv_cmd_flush_bits src_flush_bits = 0;
3195 enum radv_cmd_flush_bits dst_flush_bits = 0;
3196
3197 for (uint32_t i = 0; i < memoryBarrierCount; i++) {
3198 src_flush_bits |= radv_src_access_flush(cmd_buffer, pMemoryBarriers[i].srcAccessMask);
3199 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pMemoryBarriers[i].dstAccessMask,
3200 NULL);
3201 }
3202
3203 for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
3204 src_flush_bits |= radv_src_access_flush(cmd_buffer, pBufferMemoryBarriers[i].srcAccessMask);
3205 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pBufferMemoryBarriers[i].dstAccessMask,
3206 NULL);
3207 }
3208
3209 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
3210 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
3211 src_flush_bits |= radv_src_access_flush(cmd_buffer, pImageMemoryBarriers[i].srcAccessMask);
3212 dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pImageMemoryBarriers[i].dstAccessMask,
3213 image);
3214 }
3215
3216 radv_stage_flush(cmd_buffer, srcStageMask);
3217 cmd_buffer->state.flush_bits |= src_flush_bits;
3218
3219 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
3220 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
3221 radv_handle_image_transition(cmd_buffer, image,
3222 pImageMemoryBarriers[i].oldLayout,
3223 pImageMemoryBarriers[i].newLayout,
3224 pImageMemoryBarriers[i].srcQueueFamilyIndex,
3225 pImageMemoryBarriers[i].dstQueueFamilyIndex,
3226 &pImageMemoryBarriers[i].subresourceRange,
3227 0);
3228 }
3229
3230 cmd_buffer->state.flush_bits |= dst_flush_bits;
3231 }
3232
3233
3234 static void write_event(struct radv_cmd_buffer *cmd_buffer,
3235 struct radv_event *event,
3236 VkPipelineStageFlags stageMask,
3237 unsigned value)
3238 {
3239 struct radeon_winsys_cs *cs = cmd_buffer->cs;
3240 uint64_t va = cmd_buffer->device->ws->buffer_get_va(event->bo);
3241
3242 cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8);
3243
3244 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 12);
3245
3246 /* TODO: this is overkill; a bottom-of-pipe EOP event is emitted
3247 * regardless of the stage mask. Derive a lighter event from stageMask. */
3248
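/* Two EOP events are required on CIK to make all engines go idle
 * before the data is written, so emit a throwaway write (value 2)
 * first; the second packet below carries the real value.
 */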
3249 if (cmd_buffer->device->physical_device->rad_info.chip_class == CIK) {
3250 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
3251 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) |
3252 EVENT_INDEX(5));
3253 radeon_emit(cs, va);
3254 radeon_emit(cs, (va >> 32) | EOP_DATA_SEL(1));
3255 radeon_emit(cs, 2);
3256 radeon_emit(cs, 0);
3257 }
3258
3259 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
3260 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) |
3261 EVENT_INDEX(5));
3262 radeon_emit(cs, va);
3263 radeon_emit(cs, (va >> 32) | EOP_DATA_SEL(1));
3264 radeon_emit(cs, value);
3265 radeon_emit(cs, 0);
3266
3267 assert(cmd_buffer->cs->cdw <= cdw_max);
3268 }
3269
3270 void radv_CmdSetEvent(VkCommandBuffer commandBuffer,
3271 VkEvent _event,
3272 VkPipelineStageFlags stageMask)
3273 {
3274 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3275 RADV_FROM_HANDLE(radv_event, event, _event);
3276
3277 write_event(cmd_buffer, event, stageMask, 1);
3278 }
3279
3280 void radv_CmdResetEvent(VkCommandBuffer commandBuffer,
3281 VkEvent _event,
3282 VkPipelineStageFlags stageMask)
3283 {
3284 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3285 RADV_FROM_HANDLE(radv_event, event, _event);
3286
3287 write_event(cmd_buffer, event, stageMask, 0);
3288 }
3289
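/* vkCmdSetEvent/vkCmdResetEvent/vkCmdWaitEvents implement split barriers:
 * the set writes 1 into the event BO from the bottom of the pipe and the
 * wait polls that word. Illustrative API-level usage (cmd_buf and event
 * are placeholder handles):
 *
 *     vkCmdSetEvent(cmd_buf, event, VK_PIPELINE_STAGE_TRANSFER_BIT);
 *     ... record work that does not depend on the transfer ...
 *     vkCmdWaitEvents(cmd_buf, 1, &event,
 *                     VK_PIPELINE_STAGE_TRANSFER_BIT,
 *                     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
 *                     0, NULL, 0, NULL, 0, NULL);
 */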
3290 void radv_CmdWaitEvents(VkCommandBuffer commandBuffer,
3291 uint32_t eventCount,
3292 const VkEvent* pEvents,
3293 VkPipelineStageFlags srcStageMask,
3294 VkPipelineStageFlags dstStageMask,
3295 uint32_t memoryBarrierCount,
3296 const VkMemoryBarrier* pMemoryBarriers,
3297 uint32_t bufferMemoryBarrierCount,
3298 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
3299 uint32_t imageMemoryBarrierCount,
3300 const VkImageMemoryBarrier* pImageMemoryBarriers)
3301 {
3302 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3303 struct radeon_winsys_cs *cs = cmd_buffer->cs;
3304
3305 for (unsigned i = 0; i < eventCount; ++i) {
3306 RADV_FROM_HANDLE(radv_event, event, pEvents[i]);
3307 uint64_t va = cmd_buffer->device->ws->buffer_get_va(event->bo);
3308
3309 cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8);
3310
3311 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
3312
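/* Stall the CP until the 32-bit word at va equals 1, i.e. until
 * the EOP write from vkCmdSetEvent has landed (memory space 1 =
 * memory, function = equal, re-checked every poll interval).
 */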
3313 radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
3314 radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
3315 radeon_emit(cs, va);
3316 radeon_emit(cs, va >> 32);
3317 radeon_emit(cs, 1); /* reference value */
3318 radeon_emit(cs, 0xffffffff); /* mask */
3319 radeon_emit(cs, 4); /* poll interval */
3320
3321 assert(cmd_buffer->cs->cdw <= cdw_max);
3322 }
3323
3324
3325 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
3326 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
3327
3328 radv_handle_image_transition(cmd_buffer, image,
3329 pImageMemoryBarriers[i].oldLayout,
3330 pImageMemoryBarriers[i].newLayout,
3331 pImageMemoryBarriers[i].srcQueueFamilyIndex,
3332 pImageMemoryBarriers[i].dstQueueFamilyIndex,
3333 &pImageMemoryBarriers[i].subresourceRange,
3334 0);
3335 }
3336
3337 /* TODO: figure out how to do memory barriers without waiting */
3338 cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER |
3339 RADV_CMD_FLAG_INV_GLOBAL_L2 |
3340 RADV_CMD_FLAG_INV_VMEM_L1 |
3341 RADV_CMD_FLAG_INV_SMEM_L1;
3342 }