/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "radv_private.h"
#include "radv_radeon_winsys.h"
#include "radv_cs.h"
#include "sid.h"
#include "vk_format.h"
#include "radv_meta.h"

#include "ac_debug.h"

static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
					 struct radv_image *image,
					 VkImageLayout src_layout,
					 VkImageLayout dst_layout,
					 uint32_t src_family,
					 uint32_t dst_family,
					 const VkImageSubresourceRange *range,
					 VkImageAspectFlags pending_clears);

const struct radv_dynamic_state default_dynamic_state = {
	.viewport = {
		.count = 0,
	},
	.scissor = {
		.count = 0,
	},
	.line_width = 1.0f,
	.depth_bias = {
		.bias = 0.0f,
		.clamp = 0.0f,
		.slope = 0.0f,
	},
	.blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
	.depth_bounds = {
		.min = 0.0f,
		.max = 1.0f,
	},
	.stencil_compare_mask = {
		.front = ~0u,
		.back = ~0u,
	},
	.stencil_write_mask = {
		.front = ~0u,
		.back = ~0u,
	},
	.stencil_reference = {
		.front = 0u,
		.back = 0u,
	},
};

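/* Copy only the dynamic state selected by copy_mask, which is built from
 * (1 << VK_DYNAMIC_STATE_*) bits; everything else in dest is left untouched.
 */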
void
radv_dynamic_state_copy(struct radv_dynamic_state *dest,
			const struct radv_dynamic_state *src,
			uint32_t copy_mask)
{
	if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
		dest->viewport.count = src->viewport.count;
		typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
			     src->viewport.count);
	}

	if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
		dest->scissor.count = src->scissor.count;
		typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
			     src->scissor.count);
	}

	if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
		dest->line_width = src->line_width;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
		dest->depth_bias = src->depth_bias;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
		typed_memcpy(dest->blend_constants, src->blend_constants, 4);

	if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
		dest->depth_bounds = src->depth_bounds;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
		dest->stencil_compare_mask = src->stencil_compare_mask;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
		dest->stencil_write_mask = src->stencil_write_mask;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
		dest->stencil_reference = src->stencil_reference;
}

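/* Compute queues on CIK and newer are executed by the MEC (micro-engine
 * compute) firmware rather than the graphics ME, which changes the packet
 * formats we have to emit.
 */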
bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer)
{
	return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
	       cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
}

enum ring_type radv_queue_family_to_ring(int f) {
	switch (f) {
	case RADV_QUEUE_GENERAL:
		return RING_GFX;
	case RADV_QUEUE_COMPUTE:
		return RING_COMPUTE;
	case RADV_QUEUE_TRANSFER:
		return RING_DMA;
	default:
		unreachable("Unknown queue family");
	}
}

static VkResult radv_create_cmd_buffer(
	struct radv_device *		device,
	struct radv_cmd_pool *		pool,
	VkCommandBufferLevel		level,
	VkCommandBuffer*		pCommandBuffer)
{
	struct radv_cmd_buffer *cmd_buffer;
	VkResult result;
	unsigned ring;
	cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
			      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (cmd_buffer == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(cmd_buffer, 0, sizeof(*cmd_buffer));
	cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
	cmd_buffer->device = device;
	cmd_buffer->pool = pool;
	cmd_buffer->level = level;

	if (pool) {
		list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
		cmd_buffer->queue_family_index = pool->queue_family_index;
	} else {
		/* Initialize pool_link so we can safely call list_del when we
		 * destroy the command buffer.
		 */
		list_inithead(&cmd_buffer->pool_link);
		cmd_buffer->queue_family_index = RADV_QUEUE_GENERAL;
	}

	ring = radv_queue_family_to_ring(cmd_buffer->queue_family_index);

	cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
	if (!cmd_buffer->cs) {
		result = VK_ERROR_OUT_OF_HOST_MEMORY;
		goto fail;
	}

	*pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);

	cmd_buffer->upload.offset = 0;
	cmd_buffer->upload.size = 0;
	list_inithead(&cmd_buffer->upload.list);

	return VK_SUCCESS;

fail:
	vk_free(&cmd_buffer->pool->alloc, cmd_buffer);

	return result;
}

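/* Grow the per-command-buffer upload buffer: allocate at least 16 KiB, at
 * least double the current size, and at least min_needed. The previous BO
 * (if any) is parked on upload.list because the GPU may still read from it;
 * it is only freed when the command buffer is reset or destroyed.
 */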
static bool
radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer,
				  uint64_t min_needed)
{
	uint64_t new_size;
	struct radeon_winsys_bo *bo;
	struct radv_cmd_buffer_upload *upload;
	struct radv_device *device = cmd_buffer->device;

	new_size = MAX2(min_needed, 16 * 1024);
	new_size = MAX2(new_size, 2 * cmd_buffer->upload.size);

	bo = device->ws->buffer_create(device->ws,
				       new_size, 4096,
				       RADEON_DOMAIN_GTT,
				       RADEON_FLAG_CPU_ACCESS);

	if (!bo) {
		cmd_buffer->record_fail = true;
		return false;
	}

	device->ws->cs_add_buffer(cmd_buffer->cs, bo, 8);
	if (cmd_buffer->upload.upload_bo) {
		upload = malloc(sizeof(*upload));

		if (!upload) {
			cmd_buffer->record_fail = true;
			device->ws->buffer_destroy(bo);
			return false;
		}

		memcpy(upload, &cmd_buffer->upload, sizeof(*upload));
		list_add(&upload->list, &cmd_buffer->upload.list);
	}

	cmd_buffer->upload.upload_bo = bo;
	cmd_buffer->upload.size = new_size;
	cmd_buffer->upload.offset = 0;
	cmd_buffer->upload.map = device->ws->buffer_map(cmd_buffer->upload.upload_bo);

	if (!cmd_buffer->upload.map) {
		cmd_buffer->record_fail = true;
		return false;
	}

	return true;
}

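/* Carve 'size' bytes (aligned to 'alignment') out of the upload buffer,
 * growing it if needed. A minimal usage sketch (hypothetical sizes; the
 * pattern mirrors the sample-locations upload further below):
 *
 *	unsigned offset;
 *	void *ptr;
 *	if (radv_cmd_buffer_upload_alloc(cmd_buffer, 64, 256, &offset, &ptr))
 *		memcpy(ptr, data, 64);
 *
 * The returned offset is relative to cmd_buffer->upload.upload_bo, so the
 * GPU address is buffer_get_va(upload_bo) + offset.
 */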
bool
radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer,
			     unsigned size,
			     unsigned alignment,
			     unsigned *out_offset,
			     void **ptr)
{
	uint64_t offset = align(cmd_buffer->upload.offset, alignment);
	if (offset + size > cmd_buffer->upload.size) {
		if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size))
			return false;
		offset = 0;
	}

	*out_offset = offset;
	*ptr = cmd_buffer->upload.map + offset;

	cmd_buffer->upload.offset = offset + size;
	return true;
}

bool
radv_cmd_buffer_upload_data(struct radv_cmd_buffer *cmd_buffer,
			    unsigned size, unsigned alignment,
			    const void *data, unsigned *out_offset)
{
	uint8_t *ptr;

	if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size, alignment,
					  out_offset, (void **)&ptr))
		return false;

	if (ptr)
		memcpy(ptr, data, size);

	return true;
}

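/* When the device has allocated a trace BO (GPU-hang debugging), bump the
 * per-command-buffer trace id, write it to the trace BO with WRITE_DATA and
 * follow it with a NOP tracepoint that the ac_debug CS dumper can match
 * against the last id the GPU actually reached.
 */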
void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_device *device = cmd_buffer->device;
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va;

	if (!device->trace_bo)
		return;

	va = device->ws->buffer_get_va(device->trace_bo);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 7);

	++cmd_buffer->state.trace_id;
	device->ws->cs_add_buffer(cs, device->trace_bo, 8);
	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
	radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_ME));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit(cs, cmd_buffer->state.trace_id);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, AC_ENCODE_TRACE_POINT(cmd_buffer->state.trace_id));
}

static void
radv_emit_graphics_blend_state(struct radv_cmd_buffer *cmd_buffer,
			       struct radv_pipeline *pipeline)
{
	radeon_set_context_reg_seq(cmd_buffer->cs, R_028780_CB_BLEND0_CONTROL, 8);
	radeon_emit_array(cmd_buffer->cs, pipeline->graphics.blend.cb_blend_control,
			  8);
	radeon_set_context_reg(cmd_buffer->cs, R_028808_CB_COLOR_CONTROL, pipeline->graphics.blend.cb_color_control);
	radeon_set_context_reg(cmd_buffer->cs, R_028B70_DB_ALPHA_TO_MASK, pipeline->graphics.blend.db_alpha_to_mask);
}

static void
radv_emit_graphics_depth_stencil_state(struct radv_cmd_buffer *cmd_buffer,
				       struct radv_pipeline *pipeline)
{
	struct radv_depth_stencil_state *ds = &pipeline->graphics.ds;
	radeon_set_context_reg(cmd_buffer->cs, R_028800_DB_DEPTH_CONTROL, ds->db_depth_control);
	radeon_set_context_reg(cmd_buffer->cs, R_02842C_DB_STENCIL_CONTROL, ds->db_stencil_control);

	radeon_set_context_reg(cmd_buffer->cs, R_028000_DB_RENDER_CONTROL, ds->db_render_control);
	radeon_set_context_reg(cmd_buffer->cs, R_028010_DB_RENDER_OVERRIDE2, ds->db_render_override2);
}

/* Pack a float into 12.4 fixed point (4 fractional bits), clamped to
 * [0, 0xffff]: e.g. 1.0f packs to 16 (0x0010), and anything >= 4096.0f
 * saturates to 0xffff.
 */
static unsigned radv_pack_float_12p4(float x)
{
	return x <= 0    ? 0 :
	       x >= 4096 ? 0xffff : x * 16;
}

static uint32_t
shader_stage_to_user_data_0(gl_shader_stage stage, bool has_gs)
{
	switch (stage) {
	case MESA_SHADER_FRAGMENT:
		return R_00B030_SPI_SHADER_USER_DATA_PS_0;
	case MESA_SHADER_VERTEX:
		return has_gs ? R_00B330_SPI_SHADER_USER_DATA_ES_0 : R_00B130_SPI_SHADER_USER_DATA_VS_0;
	case MESA_SHADER_GEOMETRY:
		return R_00B230_SPI_SHADER_USER_DATA_GS_0;
	case MESA_SHADER_COMPUTE:
		return R_00B900_COMPUTE_USER_DATA_0;
	default:
		unreachable("unknown shader");
	}
}

static struct ac_userdata_info *
radv_lookup_user_sgpr(struct radv_pipeline *pipeline,
		      gl_shader_stage stage,
		      int idx)
{
	return &pipeline->shaders[stage]->info.user_sgprs_locs.shader_data[idx];
}

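/* Load a 64-bit address into the pair of user SGPRs that the compiler
 * assigned to user-data slot 'idx' for this stage; a no-op when the shader
 * doesn't actually use the slot (sgpr_idx == -1).
 */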
static void
radv_emit_userdata_address(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_pipeline *pipeline,
			   gl_shader_stage stage,
			   int idx, uint64_t va)
{
	struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
	uint32_t base_reg = shader_stage_to_user_data_0(stage, radv_pipeline_has_gs(pipeline));
	if (loc->sgpr_idx == -1)
		return;
	assert(loc->num_sgprs == 2);
	assert(!loc->indirect);
	radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, 2);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
}

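/* Emit MSAA state. The sample-count dependent part (line control, AA config
 * and the sample-locations table) is skipped when the previously emitted
 * pipeline used the same sample count. The locations table (8 bytes per
 * sample) is copied into upload space and its address handed to the fragment
 * shader through the AC_UD_PS_SAMPLE_POS user SGPR.
 */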
static void
radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer,
			      struct radv_pipeline *pipeline)
{
	int num_samples = pipeline->graphics.ms.num_samples;
	struct radv_multisample_state *ms = &pipeline->graphics.ms;
	struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_mask[0]);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_mask[1]);

	radeon_set_context_reg(cmd_buffer->cs, CM_R_028804_DB_EQAA, ms->db_eqaa);
	radeon_set_context_reg(cmd_buffer->cs, EG_R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);

	if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples)
		return;

	radeon_set_context_reg_seq(cmd_buffer->cs, CM_R_028BDC_PA_SC_LINE_CNTL, 2);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_line_cntl);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_config);

	radv_cayman_emit_msaa_sample_locs(cmd_buffer->cs, num_samples);

	uint32_t samples_offset;
	void *samples_ptr;
	void *src;
	radv_cmd_buffer_upload_alloc(cmd_buffer, num_samples * 4 * 2, 256, &samples_offset,
				     &samples_ptr);
	switch (num_samples) {
	case 1:
		src = cmd_buffer->device->sample_locations_1x;
		break;
	case 2:
		src = cmd_buffer->device->sample_locations_2x;
		break;
	case 4:
		src = cmd_buffer->device->sample_locations_4x;
		break;
	case 8:
		src = cmd_buffer->device->sample_locations_8x;
		break;
	case 16:
		src = cmd_buffer->device->sample_locations_16x;
		break;
	default:
		unreachable("unknown number of samples");
	}
	memcpy(samples_ptr, src, num_samples * 4 * 2);

	uint64_t va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
	va += samples_offset;

	radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_FRAGMENT,
				   AC_UD_PS_SAMPLE_POS, va);
}

static void
radv_emit_graphics_raster_state(struct radv_cmd_buffer *cmd_buffer,
				struct radv_pipeline *pipeline)
{
	struct radv_raster_state *raster = &pipeline->graphics.raster;

	radeon_set_context_reg(cmd_buffer->cs, R_028810_PA_CL_CLIP_CNTL,
			       raster->pa_cl_clip_cntl);

	radeon_set_context_reg(cmd_buffer->cs, R_0286D4_SPI_INTERP_CONTROL_0,
			       raster->spi_interp_control);

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028A00_PA_SU_POINT_SIZE, 2);
	unsigned tmp = (unsigned)(1.0 * 8.0);
	radeon_emit(cmd_buffer->cs, S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp));
	radeon_emit(cmd_buffer->cs, S_028A04_MIN_SIZE(radv_pack_float_12p4(0)) |
		    S_028A04_MAX_SIZE(radv_pack_float_12p4(8192/2))); /* R_028A04_PA_SU_POINT_MINMAX */

	radeon_set_context_reg(cmd_buffer->cs, R_028BE4_PA_SU_VTX_CNTL,
			       raster->pa_su_vtx_cntl);

	radeon_set_context_reg(cmd_buffer->cs, R_028814_PA_SU_SC_MODE_CNTL,
			       raster->pa_su_sc_mode_cntl);
}

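/* Program the hardware VS stage from a shader variant (either a real vertex
 * shader or the GS copy shader). Shader code addresses are 256-byte aligned,
 * which is why the VA is split as va >> 8 into PGM_LO and va >> 40 into
 * PGM_HI.
 */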
static void
radv_emit_hw_vs(struct radv_cmd_buffer *cmd_buffer,
		struct radv_pipeline *pipeline,
		struct radv_shader_variant *shader)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	uint64_t va = ws->buffer_get_va(shader->bo);
	unsigned export_count;

	ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);

	export_count = MAX2(1, shader->info.vs.param_exports);
	radeon_set_context_reg(cmd_buffer->cs, R_0286C4_SPI_VS_OUT_CONFIG,
			       S_0286C4_VS_EXPORT_COUNT(export_count - 1));

	radeon_set_context_reg(cmd_buffer->cs, R_02870C_SPI_SHADER_POS_FORMAT,
			       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
			       S_02870C_POS1_EXPORT_FORMAT(shader->info.vs.pos_exports > 1 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS2_EXPORT_FORMAT(shader->info.vs.pos_exports > 2 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS3_EXPORT_FORMAT(shader->info.vs.pos_exports > 3 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE));

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B120_SPI_SHADER_PGM_LO_VS, 4);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);
	radeon_emit(cmd_buffer->cs, shader->rsrc1);
	radeon_emit(cmd_buffer->cs, shader->rsrc2);

	radeon_set_context_reg(cmd_buffer->cs, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));

	unsigned clip_dist_mask, cull_dist_mask, total_mask;
	clip_dist_mask = shader->info.vs.clip_dist_mask;
	cull_dist_mask = shader->info.vs.cull_dist_mask;
	total_mask = clip_dist_mask | cull_dist_mask;

	radeon_set_context_reg(cmd_buffer->cs, R_02881C_PA_CL_VS_OUT_CNTL,
			       S_02881C_USE_VTX_POINT_SIZE(shader->info.vs.writes_pointsize) |
			       S_02881C_USE_VTX_RENDER_TARGET_INDX(shader->info.vs.writes_layer) |
			       S_02881C_USE_VTX_VIEWPORT_INDX(shader->info.vs.writes_viewport_index) |
			       S_02881C_VS_OUT_MISC_VEC_ENA(shader->info.vs.writes_pointsize ||
							    shader->info.vs.writes_layer ||
							    shader->info.vs.writes_viewport_index) |
			       S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0f) != 0) |
			       S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xf0) != 0) |
			       pipeline->graphics.raster.pa_cl_vs_out_cntl |
			       cull_dist_mask << 8 |
			       clip_dist_mask);

	radeon_set_context_reg(cmd_buffer->cs, R_028AB4_VGT_REUSE_OFF,
			       S_028AB4_REUSE_OFF(shader->info.vs.writes_viewport_index));
}

static void
radv_emit_hw_es(struct radv_cmd_buffer *cmd_buffer,
		struct radv_shader_variant *shader)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	uint64_t va = ws->buffer_get_va(shader->bo);

	ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);

	radeon_set_context_reg(cmd_buffer->cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
			       shader->info.vs.esgs_itemsize / 4);
	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B320_SPI_SHADER_PGM_LO_ES, 4);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);
	radeon_emit(cmd_buffer->cs, shader->rsrc1);
	radeon_emit(cmd_buffer->cs, shader->rsrc2);
}

static void
radv_emit_vertex_shader(struct radv_cmd_buffer *cmd_buffer,
			struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *vs;

	assert(pipeline->shaders[MESA_SHADER_VERTEX]);

	vs = pipeline->shaders[MESA_SHADER_VERTEX];

	if (vs->info.vs.as_es)
		radv_emit_hw_es(cmd_buffer, vs);
	else
		radv_emit_hw_vs(cmd_buffer, pipeline, vs);

	radeon_set_context_reg(cmd_buffer->cs, R_028A84_VGT_PRIMITIVEID_EN, 0);
}

static uint32_t si_vgt_gs_mode(struct radv_shader_variant *gs)
{
	unsigned gs_max_vert_out = gs->info.gs.vertices_out;
	unsigned cut_mode;

	if (gs_max_vert_out <= 128) {
		cut_mode = V_028A40_GS_CUT_128;
	} else if (gs_max_vert_out <= 256) {
		cut_mode = V_028A40_GS_CUT_256;
	} else if (gs_max_vert_out <= 512) {
		cut_mode = V_028A40_GS_CUT_512;
	} else {
		assert(gs_max_vert_out <= 1024);
		cut_mode = V_028A40_GS_CUT_1024;
	}

	return S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
	       S_028A40_CUT_MODE(cut_mode) |
	       S_028A40_ES_WRITE_OPTIMIZE(1) |
	       S_028A40_GS_WRITE_OPTIMIZE(1);
}

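/* Program the GS stage: GSVS ring item sizes and VGT state, the GS shader
 * itself, the copy shader (which runs on the hardware VS stage) and, when
 * the shader uses it, the GSVS ring stride/entries user SGPR pair. Note that
 * the VI path scales the entry count by the stride before writing it.
 */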
static void
radv_emit_geometry_shader(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_pipeline *pipeline)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	struct radv_shader_variant *gs;
	uint64_t va;

	gs = pipeline->shaders[MESA_SHADER_GEOMETRY];
	if (!gs) {
		radeon_set_context_reg(cmd_buffer->cs, R_028A40_VGT_GS_MODE, 0);
		return;
	}

	radeon_set_context_reg(cmd_buffer->cs, R_028A40_VGT_GS_MODE, si_vgt_gs_mode(gs));

	uint32_t gsvs_itemsize = gs->info.gs.max_gsvs_emit_size >> 2;

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028A60_VGT_GSVS_RING_OFFSET_1, 3);
	radeon_emit(cmd_buffer->cs, gsvs_itemsize);
	radeon_emit(cmd_buffer->cs, gsvs_itemsize);
	radeon_emit(cmd_buffer->cs, gsvs_itemsize);

	radeon_set_context_reg(cmd_buffer->cs, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gsvs_itemsize);

	radeon_set_context_reg(cmd_buffer->cs, R_028B38_VGT_GS_MAX_VERT_OUT, gs->info.gs.vertices_out);

	uint32_t gs_vert_itemsize = gs->info.gs.gsvs_vertex_size;
	radeon_set_context_reg_seq(cmd_buffer->cs, R_028B5C_VGT_GS_VERT_ITEMSIZE, 4);
	radeon_emit(cmd_buffer->cs, gs_vert_itemsize >> 2);
	radeon_emit(cmd_buffer->cs, 0);
	radeon_emit(cmd_buffer->cs, 0);
	radeon_emit(cmd_buffer->cs, 0);

	uint32_t gs_num_invocations = gs->info.gs.invocations;
	radeon_set_context_reg(cmd_buffer->cs, R_028B90_VGT_GS_INSTANCE_CNT,
			       S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
			       S_028B90_ENABLE(gs_num_invocations > 0));

	va = ws->buffer_get_va(gs->bo);
	ws->cs_add_buffer(cmd_buffer->cs, gs->bo, 8);
	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B220_SPI_SHADER_PGM_LO_GS, 4);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);
	radeon_emit(cmd_buffer->cs, gs->rsrc1);
	radeon_emit(cmd_buffer->cs, gs->rsrc2);

	radv_emit_hw_vs(cmd_buffer, pipeline, pipeline->gs_copy_shader);

	struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
							     AC_UD_GS_VS_RING_STRIDE_ENTRIES);
	if (loc->sgpr_idx != -1) {
		uint32_t stride = gs->info.gs.max_gsvs_emit_size;
		uint32_t num_entries = 64;
		bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI;

		if (is_vi)
			num_entries *= stride;

		stride = S_008F04_STRIDE(stride);
		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B230_SPI_SHADER_USER_DATA_GS_0 + loc->sgpr_idx * 4, 2);
		radeon_emit(cmd_buffer->cs, stride);
		radeon_emit(cmd_buffer->cs, num_entries);
	}
}

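/* Program the PS stage and build the SPI_PS_INPUT_CNTL_* table that maps
 * each fragment shader input to the matching VS (or GS copy shader) export:
 * point coord and flat-shaded prim-id/layer inputs get dedicated entries,
 * inputs with no matching export fall back to the default offset 0x20, and
 * regular exports are counted with the prim-id/layer slots skipped over.
 */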
static void
radv_emit_fragment_shader(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_pipeline *pipeline)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	struct radv_shader_variant *ps, *vs;
	uint64_t va;
	unsigned spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
	struct radv_blend_state *blend = &pipeline->graphics.blend;
	unsigned ps_offset = 0;
	unsigned z_order;
	assert(pipeline->shaders[MESA_SHADER_FRAGMENT]);

	ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
	vs = radv_pipeline_has_gs(pipeline) ? pipeline->gs_copy_shader : pipeline->shaders[MESA_SHADER_VERTEX];
	va = ws->buffer_get_va(ps->bo);
	ws->cs_add_buffer(cmd_buffer->cs, ps->bo, 8);

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B020_SPI_SHADER_PGM_LO_PS, 4);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);
	radeon_emit(cmd_buffer->cs, ps->rsrc1);
	radeon_emit(cmd_buffer->cs, ps->rsrc2);

	if (ps->info.fs.early_fragment_test || !ps->info.fs.writes_memory)
		z_order = V_02880C_EARLY_Z_THEN_LATE_Z;
	else
		z_order = V_02880C_LATE_Z;

	radeon_set_context_reg(cmd_buffer->cs, R_02880C_DB_SHADER_CONTROL,
			       S_02880C_Z_EXPORT_ENABLE(ps->info.fs.writes_z) |
			       S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(ps->info.fs.writes_stencil) |
			       S_02880C_KILL_ENABLE(!!ps->info.fs.can_discard) |
			       S_02880C_Z_ORDER(z_order) |
			       S_02880C_DEPTH_BEFORE_SHADER(ps->info.fs.early_fragment_test) |
			       S_02880C_EXEC_ON_HIER_FAIL(ps->info.fs.writes_memory) |
			       S_02880C_EXEC_ON_NOOP(ps->info.fs.writes_memory));

	radeon_set_context_reg(cmd_buffer->cs, R_0286CC_SPI_PS_INPUT_ENA,
			       ps->config.spi_ps_input_ena);

	radeon_set_context_reg(cmd_buffer->cs, R_0286D0_SPI_PS_INPUT_ADDR,
			       ps->config.spi_ps_input_addr);

	spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(0);
	radeon_set_context_reg(cmd_buffer->cs, R_0286D8_SPI_PS_IN_CONTROL,
			       S_0286D8_NUM_INTERP(ps->info.fs.num_interp));

	radeon_set_context_reg(cmd_buffer->cs, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);

	radeon_set_context_reg(cmd_buffer->cs, R_028710_SPI_SHADER_Z_FORMAT,
			       ps->info.fs.writes_stencil ? V_028710_SPI_SHADER_32_GR :
			       ps->info.fs.writes_z ? V_028710_SPI_SHADER_32_R :
			       V_028710_SPI_SHADER_ZERO);

	radeon_set_context_reg(cmd_buffer->cs, R_028714_SPI_SHADER_COL_FORMAT, blend->spi_shader_col_format);

	radeon_set_context_reg(cmd_buffer->cs, R_028238_CB_TARGET_MASK, blend->cb_target_mask);
	radeon_set_context_reg(cmd_buffer->cs, R_02823C_CB_SHADER_MASK, blend->cb_shader_mask);

	if (ps->info.fs.has_pcoord) {
		unsigned val;
		val = S_028644_PT_SPRITE_TEX(1) | S_028644_OFFSET(0x20);
		radeon_set_context_reg(cmd_buffer->cs, R_028644_SPI_PS_INPUT_CNTL_0 + 4 * ps_offset, val);
		ps_offset++;
	}

	if (ps->info.fs.prim_id_input && (vs->info.vs.prim_id_output != 0xffffffff)) {
		unsigned vs_offset, flat_shade;
		unsigned val;
		vs_offset = vs->info.vs.prim_id_output;
		flat_shade = true;
		val = S_028644_OFFSET(vs_offset) | S_028644_FLAT_SHADE(flat_shade);
		radeon_set_context_reg(cmd_buffer->cs, R_028644_SPI_PS_INPUT_CNTL_0 + 4 * ps_offset, val);
		++ps_offset;
	}

	if (ps->info.fs.layer_input && (vs->info.vs.layer_output != 0xffffffff)) {
		unsigned vs_offset, flat_shade;
		unsigned val;
		vs_offset = vs->info.vs.layer_output;
		flat_shade = true;
		val = S_028644_OFFSET(vs_offset) | S_028644_FLAT_SHADE(flat_shade);
		radeon_set_context_reg(cmd_buffer->cs, R_028644_SPI_PS_INPUT_CNTL_0 + 4 * ps_offset, val);
		++ps_offset;
	}

	for (unsigned i = 0; i < 32 && (1u << i) <= ps->info.fs.input_mask; ++i) {
		unsigned vs_offset, flat_shade;
		unsigned val;

		if (!(ps->info.fs.input_mask & (1u << i)))
			continue;

		if (!(vs->info.vs.export_mask & (1u << i))) {
			radeon_set_context_reg(cmd_buffer->cs, R_028644_SPI_PS_INPUT_CNTL_0 + 4 * ps_offset,
					       S_028644_OFFSET(0x20));
			++ps_offset;
			continue;
		}

		vs_offset = util_bitcount(vs->info.vs.export_mask & ((1u << i) - 1));
		if (vs->info.vs.prim_id_output != 0xffffffff) {
			if (vs_offset >= vs->info.vs.prim_id_output)
				vs_offset++;
		}
		if (vs->info.vs.layer_output != 0xffffffff) {
			if (vs_offset >= vs->info.vs.layer_output)
				vs_offset++;
		}
		flat_shade = !!(ps->info.fs.flat_shaded_mask & (1u << ps_offset));

		val = S_028644_OFFSET(vs_offset) | S_028644_FLAT_SHADE(flat_shade);
		radeon_set_context_reg(cmd_buffer->cs, R_028644_SPI_PS_INPUT_CNTL_0 + 4 * ps_offset, val);
		++ps_offset;
	}
}

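/* Re-emit all pipeline-derived state; a no-op when the bound pipeline is the
 * one already emitted. Also folds the pipeline's scratch requirement into
 * the command buffer's running maximum so the scratch buffer can be sized at
 * submit time.
 */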
static void
radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer,
			    struct radv_pipeline *pipeline)
{
	if (!pipeline || cmd_buffer->state.emitted_pipeline == pipeline)
		return;

	radv_emit_graphics_depth_stencil_state(cmd_buffer, pipeline);
	radv_emit_graphics_blend_state(cmd_buffer, pipeline);
	radv_emit_graphics_raster_state(cmd_buffer, pipeline);
	radv_update_multisample_state(cmd_buffer, pipeline);
	radv_emit_vertex_shader(cmd_buffer, pipeline);
	radv_emit_geometry_shader(cmd_buffer, pipeline);
	radv_emit_fragment_shader(cmd_buffer, pipeline);

	radeon_set_context_reg(cmd_buffer->cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
			       pipeline->graphics.prim_restart_enable);

	cmd_buffer->scratch_size_needed =
		MAX2(cmd_buffer->scratch_size_needed,
		     pipeline->max_waves * pipeline->scratch_bytes_per_wave);

	radeon_set_context_reg(cmd_buffer->cs, R_0286E8_SPI_TMPRING_SIZE,
			       S_0286E8_WAVES(pipeline->max_waves) |
			       S_0286E8_WAVESIZE(pipeline->scratch_bytes_per_wave >> 10));
	cmd_buffer->state.emitted_pipeline = pipeline;
}

static void
radv_emit_viewport(struct radv_cmd_buffer *cmd_buffer)
{
	si_write_viewport(cmd_buffer->cs, 0, cmd_buffer->state.dynamic.viewport.count,
			  cmd_buffer->state.dynamic.viewport.viewports);
}

static void
radv_emit_scissor(struct radv_cmd_buffer *cmd_buffer)
{
	uint32_t count = cmd_buffer->state.dynamic.scissor.count;
	si_write_scissors(cmd_buffer->cs, 0, count,
			  cmd_buffer->state.dynamic.scissor.scissors);
	radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0,
			       cmd_buffer->state.pipeline->graphics.ms.pa_sc_mode_cntl_0 | S_028A48_VPORT_SCISSOR_ENABLE(count ? 1 : 0));
}

static void
radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
			 int index,
			 struct radv_color_buffer_info *cb)
{
	bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI;
	radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
	radeon_emit(cmd_buffer->cs, cb->cb_color_base);
	radeon_emit(cmd_buffer->cs, cb->cb_color_pitch);
	radeon_emit(cmd_buffer->cs, cb->cb_color_slice);
	radeon_emit(cmd_buffer->cs, cb->cb_color_view);
	radeon_emit(cmd_buffer->cs, cb->cb_color_info);
	radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
	radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
	radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
	radeon_emit(cmd_buffer->cs, cb->cb_color_cmask_slice);
	radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
	radeon_emit(cmd_buffer->cs, cb->cb_color_fmask_slice);

	if (is_vi) { /* DCC BASE */
		radeon_set_context_reg(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, cb->cb_dcc_base);
	}
}

static void
radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer,
		      struct radv_ds_buffer_info *ds,
		      struct radv_image *image,
		      VkImageLayout layout)
{
	uint32_t db_z_info = ds->db_z_info;

	if (!radv_layout_has_htile(image, layout))
		db_z_info &= C_028040_TILE_SURFACE_ENABLE;

	if (!radv_layout_can_expclear(image, layout))
		db_z_info &= C_028040_ALLOW_EXPCLEAR & C_028044_ALLOW_EXPCLEAR;

	radeon_set_context_reg(cmd_buffer->cs, R_028008_DB_DEPTH_VIEW, ds->db_depth_view);
	radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base);

	radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 9);
	radeon_emit(cmd_buffer->cs, ds->db_depth_info);		/* R_02803C_DB_DEPTH_INFO */
	radeon_emit(cmd_buffer->cs, db_z_info);			/* R_028040_DB_Z_INFO */
	radeon_emit(cmd_buffer->cs, ds->db_stencil_info);	/* R_028044_DB_STENCIL_INFO */
	radeon_emit(cmd_buffer->cs, ds->db_z_read_base);	/* R_028048_DB_Z_READ_BASE */
	radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base);	/* R_02804C_DB_STENCIL_READ_BASE */
	radeon_emit(cmd_buffer->cs, ds->db_z_write_base);	/* R_028050_DB_Z_WRITE_BASE */
	radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base);	/* R_028054_DB_STENCIL_WRITE_BASE */
	radeon_emit(cmd_buffer->cs, ds->db_depth_size);		/* R_028058_DB_DEPTH_SIZE */
	radeon_emit(cmd_buffer->cs, ds->db_depth_slice);	/* R_02805C_DB_DEPTH_SLICE */

	radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface);
	radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
			       ds->pa_su_poly_offset_db_fmt_cntl);
}

/*
 * To HW-resolve multisample images, both the source and the destination need
 * the same micro tile mode, but we don't always know that in advance when
 * creating the images. This function is called when we have a resolve
 * attachment: it switches the attachment image to the requested tiling mode
 * if it doesn't already use it, and then updates the generated framebuffer
 * state to match.
 */
static void radv_set_optimal_micro_tile_mode(struct radv_device *device,
					     struct radv_attachment_info *att,
					     uint32_t micro_tile_mode)
{
	struct radv_image *image = att->attachment->image;
	uint32_t tile_mode_index;
	if (image->surface.nsamples <= 1)
		return;

	if (image->surface.micro_tile_mode != micro_tile_mode) {
		radv_image_set_optimal_micro_tile_mode(device, image, micro_tile_mode);
	}

	if (att->cb.micro_tile_mode != micro_tile_mode) {
		tile_mode_index = image->surface.tiling_index[0];

		att->cb.cb_color_attrib &= C_028C74_TILE_MODE_INDEX;
		att->cb.cb_color_attrib |= S_028C74_TILE_MODE_INDEX(tile_mode_index);
		att->cb.micro_tile_mode = micro_tile_mode;
	}
}

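/* Write the depth/stencil clear values both to the image's in-memory
 * clear-value slot (via WRITE_DATA, so radv_load_depth_clear_regs can read
 * them back later) and to the DB_STENCIL_CLEAR/DB_DEPTH_CLEAR context
 * registers. reg_offset/reg_count select stencil only, depth only, or both,
 * depending on the requested aspects.
 */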
void
radv_set_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_image *image,
			  VkClearDepthStencilValue ds_clear_value,
			  VkImageAspectFlags aspects)
{
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;
	unsigned reg_offset = 0, reg_count = 0;

	if (!image->htile.size || !aspects)
		return;

	if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
		++reg_count;
	} else {
		++reg_offset;
		va += 4;
	}
	if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
		++reg_count;

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
	radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_PFP));
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
		radeon_emit(cmd_buffer->cs, ds_clear_value.stencil);
	if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
		radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth));

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR + 4 * reg_offset, reg_count);
	if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
		radeon_emit(cmd_buffer->cs, ds_clear_value.stencil); /* R_028028_DB_STENCIL_CLEAR */
	if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
		radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth)); /* R_02802C_DB_DEPTH_CLEAR */
}

static void
radv_load_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_image *image)
{
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;

	if (!image->htile.size)
		return;

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
	radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
		    COPY_DATA_DST_SEL(COPY_DATA_REG) |
		    COPY_DATA_COUNT_SEL);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR >> 2);
	radeon_emit(cmd_buffer->cs, 0);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
	radeon_emit(cmd_buffer->cs, 0);
}

void
radv_set_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_image *image,
			  int idx,
			  uint32_t color_values[2])
{
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;

	if (!image->cmask.size && !image->surface.dcc_size)
		return;

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
	radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_PFP));
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, color_values[0]);
	radeon_emit(cmd_buffer->cs, color_values[1]);

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c, 2);
	radeon_emit(cmd_buffer->cs, color_values[0]);
	radeon_emit(cmd_buffer->cs, color_values[1]);
}

static void
radv_load_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_image *image,
			   int idx)
{
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;

	if (!image->cmask.size && !image->surface.dcc_size)
		return;

	uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c;
	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
	radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
		    COPY_DATA_DST_SEL(COPY_DATA_REG) |
		    COPY_DATA_COUNT_SEL);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, reg >> 2);
	radeon_emit(cmd_buffer->cs, 0);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
	radeon_emit(cmd_buffer->cs, 0);
}

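/* Emit all framebuffer state for the current subpass: CB state for each
 * color attachment (re-tiling resolve destinations first so HW resolves can
 * work), clear-value loads, DB state for the depth-stencil attachment (or an
 * explicit Z_INVALID/STENCIL_INVALID pair when there is none) and finally
 * the window scissor.
 */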
void
radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
{
	int i;
	struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
	const struct radv_subpass *subpass = cmd_buffer->state.subpass;
	int dst_resolve_micro_tile_mode = -1;

	if (subpass->has_resolve) {
		uint32_t a = subpass->resolve_attachments[0].attachment;
		const struct radv_image *image = framebuffer->attachments[a].attachment->image;
		dst_resolve_micro_tile_mode = image->surface.micro_tile_mode;
	}
	for (i = 0; i < subpass->color_count; ++i) {
		int idx = subpass->color_attachments[i].attachment;
		struct radv_attachment_info *att = &framebuffer->attachments[idx];

		if (dst_resolve_micro_tile_mode != -1) {
			radv_set_optimal_micro_tile_mode(cmd_buffer->device,
							 att, dst_resolve_micro_tile_mode);
		}
		cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, att->attachment->bo, 8);

		assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
		radv_emit_fb_color_state(cmd_buffer, i, &att->cb);

		radv_load_color_clear_regs(cmd_buffer, att->attachment->image, i);
	}

	for (i = subpass->color_count; i < 8; i++)
		radeon_set_context_reg(cmd_buffer->cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
				       S_028C70_FORMAT(V_028C70_COLOR_INVALID));

	if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
		int idx = subpass->depth_stencil_attachment.attachment;
		VkImageLayout layout = subpass->depth_stencil_attachment.layout;
		struct radv_attachment_info *att = &framebuffer->attachments[idx];
		struct radv_image *image = att->attachment->image;
		cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, att->attachment->bo, 8);

		radv_emit_fb_ds_state(cmd_buffer, &att->ds, image, layout);

		if (att->ds.offset_scale != cmd_buffer->state.offset_scale) {
			cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
			cmd_buffer->state.offset_scale = att->ds.offset_scale;
		}
		radv_load_depth_clear_regs(cmd_buffer, image);
	} else {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2);
		radeon_emit(cmd_buffer->cs, S_028040_FORMAT(V_028040_Z_INVALID)); /* R_028040_DB_Z_INFO */
		radeon_emit(cmd_buffer->cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* R_028044_DB_STENCIL_INFO */
	}
	radeon_set_context_reg(cmd_buffer->cs, R_028208_PA_SC_WINDOW_SCISSOR_BR,
			       S_028208_BR_X(framebuffer->width) |
			       S_028208_BR_Y(framebuffer->height));
}

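/* Configure Z-pass counting: with no occlusion query active, counting is
 * disabled (pre-CIK via ZPASS_INCREMENT_DISABLE); while queries are active,
 * PERFECT_ZPASS_COUNTS is enabled so the counts are exact.
 */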
void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
{
	uint32_t db_count_control;

	if (!cmd_buffer->state.active_occlusion_queries) {
		if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
			db_count_control = 0;
		} else {
			db_count_control = S_028004_ZPASS_INCREMENT_DISABLE(1);
		}
	} else {
		if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
			db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
				S_028004_SAMPLE_RATE(0) | /* TODO: set this to the number of samples of the current framebuffer */
				S_028004_ZPASS_ENABLE(1) |
				S_028004_SLICE_EVEN_ENABLE(1) |
				S_028004_SLICE_ODD_ENABLE(1);
		} else {
			db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
				S_028004_SAMPLE_RATE(0); /* TODO: set this to the number of samples of the current framebuffer */
		}
	}

	radeon_set_context_reg(cmd_buffer->cs, R_028004_DB_COUNT_CONTROL, db_count_control);
}

static void
radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;

	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH) {
		unsigned width = cmd_buffer->state.dynamic.line_width * 8;
		radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL,
				       S_028A08_WIDTH(CLAMP(width, 0, 0xFFF)));
	}

	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028414_CB_BLEND_RED, 4);
		radeon_emit_array(cmd_buffer->cs, (uint32_t *)d->blend_constants, 4);
	}

	if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE |
				       RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
				       RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK)) {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028430_DB_STENCILREFMASK, 2);
		radeon_emit(cmd_buffer->cs, S_028430_STENCILTESTVAL(d->stencil_reference.front) |
			    S_028430_STENCILMASK(d->stencil_compare_mask.front) |
			    S_028430_STENCILWRITEMASK(d->stencil_write_mask.front) |
			    S_028430_STENCILOPVAL(1));
		radeon_emit(cmd_buffer->cs, S_028434_STENCILTESTVAL_BF(d->stencil_reference.back) |
			    S_028434_STENCILMASK_BF(d->stencil_compare_mask.back) |
			    S_028434_STENCILWRITEMASK_BF(d->stencil_write_mask.back) |
			    S_028434_STENCILOPVAL_BF(1));
	}

	if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_PIPELINE |
				       RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS)) {
		radeon_set_context_reg(cmd_buffer->cs, R_028020_DB_DEPTH_BOUNDS_MIN, fui(d->depth_bounds.min));
		radeon_set_context_reg(cmd_buffer->cs, R_028024_DB_DEPTH_BOUNDS_MAX, fui(d->depth_bounds.max));
	}

	if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_PIPELINE |
				       RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
		struct radv_raster_state *raster = &cmd_buffer->state.pipeline->graphics.raster;
		unsigned slope = fui(d->depth_bias.slope * 16.0f);
		unsigned bias = fui(d->depth_bias.bias * cmd_buffer->state.offset_scale);

		if (G_028814_POLY_OFFSET_FRONT_ENABLE(raster->pa_su_sc_mode_cntl)) {
			radeon_set_context_reg_seq(cmd_buffer->cs, R_028B7C_PA_SU_POLY_OFFSET_CLAMP, 5);
			radeon_emit(cmd_buffer->cs, fui(d->depth_bias.clamp)); /* CLAMP */
			radeon_emit(cmd_buffer->cs, slope); /* FRONT SCALE */
			radeon_emit(cmd_buffer->cs, bias); /* FRONT OFFSET */
			radeon_emit(cmd_buffer->cs, slope); /* BACK SCALE */
			radeon_emit(cmd_buffer->cs, bias); /* BACK OFFSET */
		}
	}

	cmd_buffer->state.dirty = 0;
}

static void
emit_stage_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer,
				   struct radv_pipeline *pipeline,
				   int idx,
				   uint64_t va,
				   gl_shader_stage stage)
{
	struct ac_userdata_info *desc_set_loc = &pipeline->shaders[stage]->info.user_sgprs_locs.descriptor_sets[idx];
	uint32_t base_reg = shader_stage_to_user_data_0(stage, radv_pipeline_has_gs(pipeline));

	if (desc_set_loc->sgpr_idx == -1)
		return;

	assert(!desc_set_loc->indirect);
	assert(desc_set_loc->num_sgprs == 2);
	radeon_set_sh_reg_seq(cmd_buffer->cs,
			      base_reg + desc_set_loc->sgpr_idx * 4, 2);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
}

static void
radv_emit_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer,
				  struct radv_pipeline *pipeline,
				  VkShaderStageFlags stages,
				  struct radv_descriptor_set *set,
				  unsigned idx)
{
	if (stages & VK_SHADER_STAGE_FRAGMENT_BIT)
		emit_stage_descriptor_set_userdata(cmd_buffer, pipeline,
						   idx, set->va,
						   MESA_SHADER_FRAGMENT);

	if (stages & VK_SHADER_STAGE_VERTEX_BIT)
		emit_stage_descriptor_set_userdata(cmd_buffer, pipeline,
						   idx, set->va,
						   MESA_SHADER_VERTEX);

	if ((stages & VK_SHADER_STAGE_GEOMETRY_BIT) && radv_pipeline_has_gs(pipeline))
		emit_stage_descriptor_set_userdata(cmd_buffer, pipeline,
						   idx, set->va,
						   MESA_SHADER_GEOMETRY);

	if (stages & VK_SHADER_STAGE_COMPUTE_BIT)
		emit_stage_descriptor_set_userdata(cmd_buffer, pipeline,
						   idx, set->va,
						   MESA_SHADER_COMPUTE);
}

static void
radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer,
		       struct radv_pipeline *pipeline,
		       VkShaderStageFlags stages)
{
	unsigned i;
	if (!cmd_buffer->state.descriptors_dirty)
		return;

	for (i = 0; i < MAX_SETS; i++) {
		if (!(cmd_buffer->state.descriptors_dirty & (1 << i)))
			continue;
		struct radv_descriptor_set *set = cmd_buffer->state.descriptors[i];
		if (!set)
			continue;

		radv_emit_descriptor_set_userdata(cmd_buffer, pipeline, stages, set, i);
	}
	cmd_buffer->state.descriptors_dirty = 0;
}

static void
radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
		     struct radv_pipeline *pipeline,
		     VkShaderStageFlags stages)
{
	struct radv_pipeline_layout *layout = pipeline->layout;
	unsigned offset;
	void *ptr;
	uint64_t va;

	stages &= cmd_buffer->push_constant_stages;
	if (!stages || !layout || (!layout->push_constant_size && !layout->dynamic_offset_count))
		return;

	radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
				     16 * layout->dynamic_offset_count,
				     256, &offset, &ptr);

	memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
	memcpy((char *)ptr + layout->push_constant_size, cmd_buffer->dynamic_buffers,
	       16 * layout->dynamic_offset_count);

	va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
	va += offset;

	if (stages & VK_SHADER_STAGE_VERTEX_BIT)
		radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_VERTEX,
					   AC_UD_PUSH_CONSTANTS, va);

	if (stages & VK_SHADER_STAGE_FRAGMENT_BIT)
		radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_FRAGMENT,
					   AC_UD_PUSH_CONSTANTS, va);

	if ((stages & VK_SHADER_STAGE_GEOMETRY_BIT) && radv_pipeline_has_gs(pipeline))
		radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_GEOMETRY,
					   AC_UD_PUSH_CONSTANTS, va);

	if (stages & VK_SHADER_STAGE_COMPUTE_BIT)
		radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_COMPUTE,
					   AC_UD_PUSH_CONSTANTS, va);

	cmd_buffer->push_constant_stages &= ~stages;
}

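/* Flush all dirty state before a draw. When the vertex buffers changed, a
 * fresh descriptor table is built in upload space with one 16-byte buffer
 * descriptor per attribute (roughly: base address, stride, record count,
 * format word) and its address is passed through AC_UD_VS_VERTEX_BUFFERS.
 * Then pipeline, framebuffer, viewport/scissor, IA_MULTI_VGT_PARAM, dynamic
 * state, descriptors, push constants and pending cache flushes are emitted.
 */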
static void
radv_cmd_buffer_flush_state(struct radv_cmd_buffer *cmd_buffer, bool instanced_or_indirect_draw,
			    uint32_t draw_vertex_count)
{
	struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
	struct radv_device *device = cmd_buffer->device;
	uint32_t ia_multi_vgt_param;
	uint32_t ls_hs_config = 0;

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
							   cmd_buffer->cs, 4096);

	if ((cmd_buffer->state.vertex_descriptors_dirty || cmd_buffer->state.vb_dirty) &&
	    cmd_buffer->state.pipeline->num_vertex_attribs) {
		unsigned vb_offset;
		void *vb_ptr;
		uint32_t i = 0;
		uint32_t num_attribs = cmd_buffer->state.pipeline->num_vertex_attribs;
		uint64_t va;

		/* allocate some descriptor state for vertex buffers */
		radv_cmd_buffer_upload_alloc(cmd_buffer, num_attribs * 16, 256,
					     &vb_offset, &vb_ptr);

		for (i = 0; i < num_attribs; i++) {
			uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
			uint32_t offset;
			int vb = cmd_buffer->state.pipeline->va_binding[i];
			struct radv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
			uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb];

			device->ws->cs_add_buffer(cmd_buffer->cs, buffer->bo, 8);
			va = device->ws->buffer_get_va(buffer->bo);

			offset = cmd_buffer->state.vertex_bindings[vb].offset + cmd_buffer->state.pipeline->va_offset[i];
			va += offset + buffer->offset;
			desc[0] = va;
			desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
			if (cmd_buffer->device->physical_device->rad_info.chip_class <= CIK && stride)
				desc[2] = (buffer->size - offset - cmd_buffer->state.pipeline->va_format_size[i]) / stride + 1;
			else
				desc[2] = buffer->size - offset;
			desc[3] = cmd_buffer->state.pipeline->va_rsrc_word3[i];
		}

		va = device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
		va += vb_offset;

		radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_VERTEX,
					   AC_UD_VS_VERTEX_BUFFERS, va);
	}

	cmd_buffer->state.vertex_descriptors_dirty = false;
	cmd_buffer->state.vb_dirty = 0;
	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE)
		radv_emit_graphics_pipeline(cmd_buffer, pipeline);

	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_RENDER_TARGETS)
		radv_emit_framebuffer_state(cmd_buffer);

	if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
		radv_emit_viewport(cmd_buffer);

	if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR))
		radv_emit_scissor(cmd_buffer);

	ia_multi_vgt_param = si_get_ia_multi_vgt_param(cmd_buffer, instanced_or_indirect_draw, draw_vertex_count);
	if (cmd_buffer->state.last_ia_multi_vgt_param != ia_multi_vgt_param) {
		if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK)
			radeon_set_context_reg_idx(cmd_buffer->cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param);
		else
			radeon_set_context_reg(cmd_buffer->cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
		cmd_buffer->state.last_ia_multi_vgt_param = ia_multi_vgt_param;
	}

	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) {
		uint32_t stages = 0;

		if (radv_pipeline_has_gs(cmd_buffer->state.pipeline))
			stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
				S_028B54_GS_EN(1) |
				S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);

		radeon_set_context_reg(cmd_buffer->cs, R_028B54_VGT_SHADER_STAGES_EN, stages);

		if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
			radeon_set_context_reg_idx(cmd_buffer->cs, R_028B58_VGT_LS_HS_CONFIG, 2, ls_hs_config);
			radeon_set_uconfig_reg_idx(cmd_buffer->cs, R_030908_VGT_PRIMITIVE_TYPE, 1, cmd_buffer->state.pipeline->graphics.prim);
		} else {
			radeon_set_config_reg(cmd_buffer->cs, R_008958_VGT_PRIMITIVE_TYPE, cmd_buffer->state.pipeline->graphics.prim);
			radeon_set_context_reg(cmd_buffer->cs, R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
		}
		radeon_set_context_reg(cmd_buffer->cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, cmd_buffer->state.pipeline->graphics.gs_out);
	}

	radv_cmd_buffer_flush_dynamic_state(cmd_buffer);

	radv_flush_descriptors(cmd_buffer, cmd_buffer->state.pipeline,
			       VK_SHADER_STAGE_ALL_GRAPHICS);
	radv_flush_constants(cmd_buffer, cmd_buffer->state.pipeline,
			     VK_SHADER_STAGE_ALL_GRAPHICS);

	assert(cmd_buffer->cs->cdw <= cdw_max);

	si_emit_cache_flush(cmd_buffer);
}

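/* Translate a barrier's source stage mask into the narrowest partial flush
 * that still orders against those stages: CS_PARTIAL_FLUSH for compute,
 * PS_PARTIAL_FLUSH for anything reaching the pixel stages, and
 * VS_PARTIAL_FLUSH when only the vertex-side stages are involved.
 */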
static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer,
			     VkPipelineStageFlags src_stage_mask)
{
	if (src_stage_mask & (VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
			      VK_PIPELINE_STAGE_TRANSFER_BIT |
			      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
			      VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
		cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
	}

	if (src_stage_mask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
			      VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
			      VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
			      VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
			      VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
			      VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
			      VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
			      VK_PIPELINE_STAGE_TRANSFER_BIT |
			      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
			      VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT |
			      VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
		cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
	} else if (src_stage_mask & (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
				     VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
				     VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
				     VK_PIPELINE_STAGE_VERTEX_SHADER_BIT)) {
		cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
	}
}

static void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer, const struct radv_subpass_barrier *barrier)
{
	radv_stage_flush(cmd_buffer, barrier->src_stage_mask);

	/* TODO: actual cache flushes */
}

static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer,
						 VkAttachmentReference att)
{
	unsigned idx = att.attachment;
	struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment;
	VkImageSubresourceRange range;
	range.aspectMask = 0;
	range.baseMipLevel = view->base_mip;
	range.levelCount = 1;
	range.baseArrayLayer = view->base_layer;
	range.layerCount = cmd_buffer->state.framebuffer->layers;

	radv_handle_image_transition(cmd_buffer,
				     view->image,
				     cmd_buffer->state.attachments[idx].current_layout,
				     att.layout, 0, 0, &range,
				     cmd_buffer->state.attachments[idx].pending_clear_aspects);

	cmd_buffer->state.attachments[idx].current_layout = att.layout;
}

void
radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer,
			    const struct radv_subpass *subpass, bool transitions)
{
	if (transitions) {
		radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);

		for (unsigned i = 0; i < subpass->color_count; ++i) {
			radv_handle_subpass_image_transition(cmd_buffer,
							     subpass->color_attachments[i]);
		}

		for (unsigned i = 0; i < subpass->input_count; ++i) {
			radv_handle_subpass_image_transition(cmd_buffer,
							     subpass->input_attachments[i]);
		}

		if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
			radv_handle_subpass_image_transition(cmd_buffer,
							     subpass->depth_stencil_attachment);
		}
	}

	cmd_buffer->state.subpass = subpass;

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_RENDER_TARGETS;
}

static void
radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer,
				 struct radv_render_pass *pass,
				 const VkRenderPassBeginInfo *info)
{
	struct radv_cmd_state *state = &cmd_buffer->state;

	if (pass->attachment_count == 0) {
		state->attachments = NULL;
		return;
	}

	state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
				      pass->attachment_count *
				      sizeof(state->attachments[0]),
				      8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (state->attachments == NULL) {
		/* FIXME: Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
		abort();
	}

	for (uint32_t i = 0; i < pass->attachment_count; ++i) {
		struct radv_render_pass_attachment *att = &pass->attachments[i];
		VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
		VkImageAspectFlags clear_aspects = 0;

		if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
			/* color attachment */
			if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
				clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
			}
		} else {
			/* depthstencil attachment */
			if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
			    att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
				clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
			}
			if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
			    att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
				clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
			}
		}

		state->attachments[i].pending_clear_aspects = clear_aspects;
		if (clear_aspects && info) {
			assert(info->clearValueCount > i);
			state->attachments[i].clear_value = info->pClearValues[i];
		}

		state->attachments[i].current_layout = att->initial_layout;
	}
}

1517 VkResult radv_AllocateCommandBuffers(
1518 VkDevice _device,
1519 const VkCommandBufferAllocateInfo *pAllocateInfo,
1520 VkCommandBuffer *pCommandBuffers)
1521 {
1522 RADV_FROM_HANDLE(radv_device, device, _device);
1523 RADV_FROM_HANDLE(radv_cmd_pool, pool, pAllocateInfo->commandPool);
1524
1525 VkResult result = VK_SUCCESS;
1526 uint32_t i;
1527
1528 memset(pCommandBuffers, 0,
1529 sizeof(*pCommandBuffers)*pAllocateInfo->commandBufferCount);
1530
1531 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
1532 result = radv_create_cmd_buffer(device, pool, pAllocateInfo->level,
1533 &pCommandBuffers[i]);
1534 if (result != VK_SUCCESS)
1535 break;
1536 }
1537
1538 if (result != VK_SUCCESS)
1539 radv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
1540 i, pCommandBuffers);
1541
1542 return result;
1543 }
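
/* Example (API-side sketch, placeholder handles): allocating two primary
 * command buffers from a pool. On partial failure the code above frees the
 * i buffers that were created before returning the error:
 *
 *    VkCommandBufferAllocateInfo alloc_info = {
 *       .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
 *       .commandPool = pool,
 *       .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
 *       .commandBufferCount = 2,
 *    };
 *    VkCommandBuffer cmd[2];
 *    VkResult res = vkAllocateCommandBuffers(device, &alloc_info, cmd);
 */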
1544
1545 static void
1546 radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer)
1547 {
1548 list_del(&cmd_buffer->pool_link);
1549
1550 list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
1551 &cmd_buffer->upload.list, list) {
1552 cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
1553 list_del(&up->list);
1554 free(up);
1555 }
1556
1557 if (cmd_buffer->upload.upload_bo)
1558 cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
1559 cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
1560 vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
1561 }
1562
1563 void radv_FreeCommandBuffers(
1564 VkDevice device,
1565 VkCommandPool commandPool,
1566 uint32_t commandBufferCount,
1567 const VkCommandBuffer *pCommandBuffers)
1568 {
1569 for (uint32_t i = 0; i < commandBufferCount; i++) {
1570 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
1571
1572 if (cmd_buffer)
1573 radv_cmd_buffer_destroy(cmd_buffer);
1574 }
1575 }
1576
1577 static void radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
1578 {
1579
1580 cmd_buffer->device->ws->cs_reset(cmd_buffer->cs);
1581
1582 list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
1583 &cmd_buffer->upload.list, list) {
1584 cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
1585 list_del(&up->list);
1586 free(up);
1587 }
1588
1589 cmd_buffer->scratch_size_needed = 0;
1590 cmd_buffer->compute_scratch_size_needed = 0;
1591 cmd_buffer->esgs_ring_size_needed = 0;
1592 cmd_buffer->gsvs_ring_size_needed = 0;
1593
1594 if (cmd_buffer->upload.upload_bo)
1595 cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs,
1596 cmd_buffer->upload.upload_bo, 8);
1597 cmd_buffer->upload.offset = 0;
1598
1599 cmd_buffer->record_fail = false;
1600
1601 cmd_buffer->ring_offsets_idx = -1;
1602 }
1603
1604 VkResult radv_ResetCommandBuffer(
1605 VkCommandBuffer commandBuffer,
1606 VkCommandBufferResetFlags flags)
1607 {
1608 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1609 radv_reset_cmd_buffer(cmd_buffer);
1610 return VK_SUCCESS;
1611 }
1612
1613 static void emit_gfx_buffer_state(struct radv_cmd_buffer *cmd_buffer)
1614 {
1615 struct radv_device *device = cmd_buffer->device;
1616 if (device->gfx_init) {
1617 uint64_t va = device->ws->buffer_get_va(device->gfx_init);
1618 device->ws->cs_add_buffer(cmd_buffer->cs, device->gfx_init, 8);
1619 radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
1620 radeon_emit(cmd_buffer->cs, va);
1621 radeon_emit(cmd_buffer->cs, (va >> 32) & 0xffff);
1622 radeon_emit(cmd_buffer->cs, device->gfx_init_size_dw & 0xffff);
1623 } else
1624 si_init_config(cmd_buffer);
1625 }
1626
1627 VkResult radv_BeginCommandBuffer(
1628 VkCommandBuffer commandBuffer,
1629 const VkCommandBufferBeginInfo *pBeginInfo)
1630 {
1631 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1632 radv_reset_cmd_buffer(cmd_buffer);
1633
1634 memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
1635
1636 	/* Emit the initial configuration into the command buffer. */
1637 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
1638 switch (cmd_buffer->queue_family_index) {
1639 case RADV_QUEUE_GENERAL:
1640 			/* Flush read caches at the beginning of the CS that are not flushed by the kernel. */
1641 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_ICACHE |
1642 RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
1643 RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
1644 RADV_CMD_FLAG_INV_VMEM_L1 |
1645 RADV_CMD_FLAG_INV_SMEM_L1 |
1646 RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER |
1647 RADV_CMD_FLAG_INV_GLOBAL_L2;
1648 emit_gfx_buffer_state(cmd_buffer);
1649 radv_set_db_count_control(cmd_buffer);
1650 si_emit_cache_flush(cmd_buffer);
1651 break;
1652 case RADV_QUEUE_COMPUTE:
1653 cmd_buffer->state.flush_bits = RADV_CMD_FLAG_INV_ICACHE |
1654 RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
1655 RADV_CMD_FLAG_INV_VMEM_L1 |
1656 RADV_CMD_FLAG_INV_SMEM_L1 |
1657 RADV_CMD_FLAG_INV_GLOBAL_L2;
1658 si_init_compute(cmd_buffer);
1659 si_emit_cache_flush(cmd_buffer);
1660 break;
1661 case RADV_QUEUE_TRANSFER:
1662 default:
1663 break;
1664 }
1665 }
1666
1667 if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
1668 cmd_buffer->state.framebuffer = radv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
1669 cmd_buffer->state.pass = radv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
1670
1671 struct radv_subpass *subpass =
1672 &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
1673
1674 radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
1675 radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
1676 }
1677
1678 return VK_SUCCESS;
1679 }
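
/* Example (API-side sketch, placeholder handles): beginning a secondary
 * command buffer that continues a render pass, which exercises the
 * pInheritanceInfo path above:
 *
 *    VkCommandBufferInheritanceInfo inherit = {
 *       .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
 *       .renderPass = pass,
 *       .subpass = 0,
 *       .framebuffer = fb,
 *    };
 *    VkCommandBufferBeginInfo begin = {
 *       .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
 *       .flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT,
 *       .pInheritanceInfo = &inherit,
 *    };
 *    vkBeginCommandBuffer(secondary, &begin);
 */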
1680
1681 void radv_CmdBindVertexBuffers(
1682 VkCommandBuffer commandBuffer,
1683 uint32_t firstBinding,
1684 uint32_t bindingCount,
1685 const VkBuffer* pBuffers,
1686 const VkDeviceSize* pOffsets)
1687 {
1688 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1689 struct radv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;
1690
1691 	/* We have to defer setting up the vertex buffers since we need the
1692 	 * buffer stride from the pipeline. */
1693
1694 	assert(firstBinding + bindingCount <= MAX_VBS);
1695 for (uint32_t i = 0; i < bindingCount; i++) {
1696 vb[firstBinding + i].buffer = radv_buffer_from_handle(pBuffers[i]);
1697 vb[firstBinding + i].offset = pOffsets[i];
1698 cmd_buffer->state.vb_dirty |= 1 << (firstBinding + i);
1699 }
1700 }
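
/* Example (API-side sketch, placeholder handles): binding two vertex
 * buffers at bindings 0..1. Only the (buffer, offset) pairs and dirty bits
 * are recorded here; the actual descriptors are built at draw time, once
 * the bound pipeline supplies the per-binding stride:
 *
 *    VkBuffer bufs[2] = { positions, colors };
 *    VkDeviceSize offs[2] = { 0, 0 };
 *    vkCmdBindVertexBuffers(cmd, 0, 2, bufs, offs);
 */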
1701
1702 void radv_CmdBindIndexBuffer(
1703 VkCommandBuffer commandBuffer,
1704 VkBuffer buffer,
1705 VkDeviceSize offset,
1706 VkIndexType indexType)
1707 {
1708 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1709
1710 cmd_buffer->state.index_buffer = radv_buffer_from_handle(buffer);
1711 cmd_buffer->state.index_offset = offset;
1712 cmd_buffer->state.index_type = indexType; /* vk matches hw */
1713 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
1714 cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, cmd_buffer->state.index_buffer->bo, 8);
1715 }
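
/* The "vk matches hw" note relies on VK_INDEX_TYPE_UINT16 == 0 and
 * VK_INDEX_TYPE_UINT32 == 1 lining up with the hardware INDEX_TYPE
 * encoding, so indexType is stored and later emitted unmodified.
 * Example (API-side sketch, placeholder handles):
 *
 *    vkCmdBindIndexBuffer(cmd, index_buf, 0, VK_INDEX_TYPE_UINT16);
 */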
1716
1717
1718 void radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
1719 struct radv_descriptor_set *set,
1720 unsigned idx)
1721 {
1722 struct radeon_winsys *ws = cmd_buffer->device->ws;
1723
1724 cmd_buffer->state.descriptors[idx] = set;
1725 cmd_buffer->state.descriptors_dirty |= (1 << idx);
1726 if (!set)
1727 return;
1728
1729 for (unsigned j = 0; j < set->layout->buffer_count; ++j)
1730 if (set->descriptors[j])
1731 ws->cs_add_buffer(cmd_buffer->cs, set->descriptors[j], 7);
1732
1733 if(set->bo)
1734 ws->cs_add_buffer(cmd_buffer->cs, set->bo, 8);
1735 }
1736
1737 void radv_CmdBindDescriptorSets(
1738 VkCommandBuffer commandBuffer,
1739 VkPipelineBindPoint pipelineBindPoint,
1740 VkPipelineLayout _layout,
1741 uint32_t firstSet,
1742 uint32_t descriptorSetCount,
1743 const VkDescriptorSet* pDescriptorSets,
1744 uint32_t dynamicOffsetCount,
1745 const uint32_t* pDynamicOffsets)
1746 {
1747 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1748 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
1749 unsigned dyn_idx = 0;
1750
1751 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
1752 cmd_buffer->cs, MAX_SETS * 4 * 6);
1753
1754 for (unsigned i = 0; i < descriptorSetCount; ++i) {
1755 unsigned idx = i + firstSet;
1756 RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);
1757 radv_bind_descriptor_set(cmd_buffer, set, idx);
1758
1759 for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
1760 unsigned idx = j + layout->set[i].dynamic_offset_start;
1761 uint32_t *dst = cmd_buffer->dynamic_buffers + idx * 4;
1762 assert(dyn_idx < dynamicOffsetCount);
1763
1764 struct radv_descriptor_range *range = set->dynamic_descriptors + j;
1765 uint64_t va = range->va + pDynamicOffsets[dyn_idx];
1766 dst[0] = va;
1767 dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
1768 dst[2] = range->size;
1769 dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1770 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1771 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1772 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1773 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1774 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1775 cmd_buffer->push_constant_stages |=
1776 set->layout->dynamic_shader_stages;
1777 }
1778 }
1779
1780 assert(cmd_buffer->cs->cdw <= cdw_max);
1781 }
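
/* The dynamic-offset loop above packs each dynamic buffer into a 4-dword
 * buffer resource descriptor (V#): dwords 0-1 hold the base address with
 * the dynamic offset already folded in, dword 2 the range size, dword 3
 * the swizzle/format controls. Worked example (illustrative values):
 *
 *    range->va = 0x100001000, pDynamicOffsets[dyn_idx] = 0x100
 *    va     = 0x100001100
 *    dst[0] = 0x00001100             (low 32 bits of va)
 *    dst[1] = BASE_ADDRESS_HI(0x1)   (bits 32 and up)
 *    dst[2] = range->size
 */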
1782
1783 void radv_CmdPushConstants(VkCommandBuffer commandBuffer,
1784 VkPipelineLayout layout,
1785 VkShaderStageFlags stageFlags,
1786 uint32_t offset,
1787 uint32_t size,
1788 const void* pValues)
1789 {
1790 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1791 memcpy(cmd_buffer->push_constants + offset, pValues, size);
1792 cmd_buffer->push_constant_stages |= stageFlags;
1793 }
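
/* Push constants are only staged into CPU-side storage here; they are
 * uploaded and pointed at by user SGPRs when the next draw or dispatch
 * flushes constants for the dirtied stages. Example (API-side sketch,
 * placeholder handles):
 *
 *    float mvp[16] = {0};
 *    vkCmdPushConstants(cmd, layout, VK_SHADER_STAGE_VERTEX_BIT,
 *                       0, sizeof(mvp), mvp);
 */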
1794
1795 VkResult radv_EndCommandBuffer(
1796 VkCommandBuffer commandBuffer)
1797 {
1798 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1799
1800 if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER)
1801 si_emit_cache_flush(cmd_buffer);
1802
1803 if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs) ||
1804 cmd_buffer->record_fail)
1805 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
1806 return VK_SUCCESS;
1807 }
1808
1809 static void
1810 radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer)
1811 {
1812 struct radeon_winsys *ws = cmd_buffer->device->ws;
1813 struct radv_shader_variant *compute_shader;
1814 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
1815 uint64_t va;
1816
1817 if (!pipeline || pipeline == cmd_buffer->state.emitted_compute_pipeline)
1818 return;
1819
1820 cmd_buffer->state.emitted_compute_pipeline = pipeline;
1821
1822 compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
1823 va = ws->buffer_get_va(compute_shader->bo);
1824
1825 ws->cs_add_buffer(cmd_buffer->cs, compute_shader->bo, 8);
1826
1827 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
1828 cmd_buffer->cs, 16);
1829
1830 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B830_COMPUTE_PGM_LO, 2);
1831 radeon_emit(cmd_buffer->cs, va >> 8);
1832 radeon_emit(cmd_buffer->cs, va >> 40);
1833
1834 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
1835 radeon_emit(cmd_buffer->cs, compute_shader->rsrc1);
1836 radeon_emit(cmd_buffer->cs, compute_shader->rsrc2);
1837
1838
1839 cmd_buffer->compute_scratch_size_needed =
1840 MAX2(cmd_buffer->compute_scratch_size_needed,
1841 pipeline->max_waves * pipeline->scratch_bytes_per_wave);
1842
1843 /* change these once we have scratch support */
1844 radeon_set_sh_reg(cmd_buffer->cs, R_00B860_COMPUTE_TMPRING_SIZE,
1845 S_00B860_WAVES(pipeline->max_waves) |
1846 S_00B860_WAVESIZE(pipeline->scratch_bytes_per_wave >> 10));
1847
1848 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
1849 radeon_emit(cmd_buffer->cs,
1850 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]));
1851 radeon_emit(cmd_buffer->cs,
1852 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[1]));
1853 radeon_emit(cmd_buffer->cs,
1854 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]));
1855
1856 assert(cmd_buffer->cs->cdw <= cdw_max);
1857 }
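
/* COMPUTE_PGM_LO/HI take a 256-byte-aligned shader address: PGM_LO holds
 * va >> 8 and PGM_HI the bits above that, va >> 40. Worked example with an
 * illustrative address:
 *
 *    va       = 0x0000800012345600
 *    va >> 8  = 0x0000008000123456   -> PGM_LO (emitted low 32 bits:
 *                                       0x00123456)
 *    va >> 40 = 0x0000000000000080   -> PGM_HI
 */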
1858
1859
1860 void radv_CmdBindPipeline(
1861 VkCommandBuffer commandBuffer,
1862 VkPipelineBindPoint pipelineBindPoint,
1863 VkPipeline _pipeline)
1864 {
1865 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1866 RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
1867
1868 for (unsigned i = 0; i < MAX_SETS; i++) {
1869 if (cmd_buffer->state.descriptors[i])
1870 cmd_buffer->state.descriptors_dirty |= (1 << i);
1871 }
1872
1873 switch (pipelineBindPoint) {
1874 case VK_PIPELINE_BIND_POINT_COMPUTE:
1875 cmd_buffer->state.compute_pipeline = pipeline;
1876 cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
1877 break;
1878 case VK_PIPELINE_BIND_POINT_GRAPHICS:
1879 cmd_buffer->state.pipeline = pipeline;
1880 cmd_buffer->state.vertex_descriptors_dirty = true;
1881 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
1882 cmd_buffer->push_constant_stages |= pipeline->active_stages;
1883
1884 /* Apply the dynamic state from the pipeline */
1885 cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
1886 radv_dynamic_state_copy(&cmd_buffer->state.dynamic,
1887 &pipeline->dynamic_state,
1888 pipeline->dynamic_state_mask);
1889
1890 if (pipeline->graphics.esgs_ring_size > cmd_buffer->esgs_ring_size_needed)
1891 cmd_buffer->esgs_ring_size_needed = pipeline->graphics.esgs_ring_size;
1892 if (pipeline->graphics.gsvs_ring_size > cmd_buffer->gsvs_ring_size_needed)
1893 cmd_buffer->gsvs_ring_size_needed = pipeline->graphics.gsvs_ring_size;
1894
1895 if (radv_pipeline_has_gs(pipeline)) {
1896 struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
1897 AC_UD_SCRATCH_RING_OFFSETS);
1898 if (cmd_buffer->ring_offsets_idx == -1)
1899 cmd_buffer->ring_offsets_idx = loc->sgpr_idx;
1900 else if (loc->sgpr_idx != -1)
1901 assert(loc->sgpr_idx == cmd_buffer->ring_offsets_idx);
1902 }
1903 break;
1904 default:
1905 assert(!"invalid bind point");
1906 break;
1907 }
1908 }
1909
1910 void radv_CmdSetViewport(
1911 VkCommandBuffer commandBuffer,
1912 uint32_t firstViewport,
1913 uint32_t viewportCount,
1914 const VkViewport* pViewports)
1915 {
1916 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1917
1918 const uint32_t total_count = firstViewport + viewportCount;
1919 if (cmd_buffer->state.dynamic.viewport.count < total_count)
1920 cmd_buffer->state.dynamic.viewport.count = total_count;
1921
1922 memcpy(cmd_buffer->state.dynamic.viewport.viewports + firstViewport,
1923 pViewports, viewportCount * sizeof(*pViewports));
1924
1925 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_VIEWPORT;
1926 }
1927
1928 void radv_CmdSetScissor(
1929 VkCommandBuffer commandBuffer,
1930 uint32_t firstScissor,
1931 uint32_t scissorCount,
1932 const VkRect2D* pScissors)
1933 {
1934 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1935
1936 const uint32_t total_count = firstScissor + scissorCount;
1937 if (cmd_buffer->state.dynamic.scissor.count < total_count)
1938 cmd_buffer->state.dynamic.scissor.count = total_count;
1939
1940 memcpy(cmd_buffer->state.dynamic.scissor.scissors + firstScissor,
1941 pScissors, scissorCount * sizeof(*pScissors));
1942 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
1943 }
1944
1945 void radv_CmdSetLineWidth(
1946 VkCommandBuffer commandBuffer,
1947 float lineWidth)
1948 {
1949 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1950 cmd_buffer->state.dynamic.line_width = lineWidth;
1951 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
1952 }
1953
1954 void radv_CmdSetDepthBias(
1955 VkCommandBuffer commandBuffer,
1956 float depthBiasConstantFactor,
1957 float depthBiasClamp,
1958 float depthBiasSlopeFactor)
1959 {
1960 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1961
1962 cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
1963 cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
1964 cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;
1965
1966 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
1967 }
1968
1969 void radv_CmdSetBlendConstants(
1970 VkCommandBuffer commandBuffer,
1971 const float blendConstants[4])
1972 {
1973 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1974
1975 memcpy(cmd_buffer->state.dynamic.blend_constants,
1976 blendConstants, sizeof(float) * 4);
1977
1978 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
1979 }
1980
1981 void radv_CmdSetDepthBounds(
1982 VkCommandBuffer commandBuffer,
1983 float minDepthBounds,
1984 float maxDepthBounds)
1985 {
1986 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1987
1988 cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
1989 cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;
1990
1991 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
1992 }
1993
1994 void radv_CmdSetStencilCompareMask(
1995 VkCommandBuffer commandBuffer,
1996 VkStencilFaceFlags faceMask,
1997 uint32_t compareMask)
1998 {
1999 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2000
2001 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2002 cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
2003 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2004 cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;
2005
2006 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
2007 }
2008
2009 void radv_CmdSetStencilWriteMask(
2010 VkCommandBuffer commandBuffer,
2011 VkStencilFaceFlags faceMask,
2012 uint32_t writeMask)
2013 {
2014 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2015
2016 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2017 cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
2018 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2019 cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;
2020
2021 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
2022 }
2023
2024 void radv_CmdSetStencilReference(
2025 VkCommandBuffer commandBuffer,
2026 VkStencilFaceFlags faceMask,
2027 uint32_t reference)
2028 {
2029 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2030
2031 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2032 cmd_buffer->state.dynamic.stencil_reference.front = reference;
2033 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2034 cmd_buffer->state.dynamic.stencil_reference.back = reference;
2035
2036 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
2037 }
2038
2039
2040 void radv_CmdExecuteCommands(
2041 VkCommandBuffer commandBuffer,
2042 uint32_t commandBufferCount,
2043 const VkCommandBuffer* pCmdBuffers)
2044 {
2045 RADV_FROM_HANDLE(radv_cmd_buffer, primary, commandBuffer);
2046
2047 for (uint32_t i = 0; i < commandBufferCount; i++) {
2048 RADV_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]);
2049
2050 primary->scratch_size_needed = MAX2(primary->scratch_size_needed,
2051 secondary->scratch_size_needed);
2052 primary->compute_scratch_size_needed = MAX2(primary->compute_scratch_size_needed,
2053 secondary->compute_scratch_size_needed);
2054
2055 if (secondary->esgs_ring_size_needed > primary->esgs_ring_size_needed)
2056 primary->esgs_ring_size_needed = secondary->esgs_ring_size_needed;
2057 if (secondary->gsvs_ring_size_needed > primary->gsvs_ring_size_needed)
2058 primary->gsvs_ring_size_needed = secondary->gsvs_ring_size_needed;
2059
2060 if (secondary->ring_offsets_idx != -1) {
2061 if (primary->ring_offsets_idx == -1)
2062 primary->ring_offsets_idx = secondary->ring_offsets_idx;
2063 else
2064 assert(secondary->ring_offsets_idx == primary->ring_offsets_idx);
2065 }
2066 primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs);
2067 }
2068
2069 	/* If we executed secondaries we need to re-emit our pipeline state. */
2070 if (commandBufferCount) {
2071 primary->state.emitted_pipeline = NULL;
2072 primary->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
2073 primary->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_ALL;
2074 }
2075 }
2076
2077 VkResult radv_CreateCommandPool(
2078 VkDevice _device,
2079 const VkCommandPoolCreateInfo* pCreateInfo,
2080 const VkAllocationCallbacks* pAllocator,
2081 VkCommandPool* pCmdPool)
2082 {
2083 RADV_FROM_HANDLE(radv_device, device, _device);
2084 struct radv_cmd_pool *pool;
2085
2086 pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
2087 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2088 if (pool == NULL)
2089 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2090
2091 if (pAllocator)
2092 pool->alloc = *pAllocator;
2093 else
2094 pool->alloc = device->alloc;
2095
2096 list_inithead(&pool->cmd_buffers);
2097
2098 pool->queue_family_index = pCreateInfo->queueFamilyIndex;
2099
2100 *pCmdPool = radv_cmd_pool_to_handle(pool);
2101
2102 return VK_SUCCESS;
2103
2104 }
2105
2106 void radv_DestroyCommandPool(
2107 VkDevice _device,
2108 VkCommandPool commandPool,
2109 const VkAllocationCallbacks* pAllocator)
2110 {
2111 RADV_FROM_HANDLE(radv_device, device, _device);
2112 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
2113
2114 if (!pool)
2115 return;
2116
2117 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
2118 &pool->cmd_buffers, pool_link) {
2119 radv_cmd_buffer_destroy(cmd_buffer);
2120 }
2121
2122 vk_free2(&device->alloc, pAllocator, pool);
2123 }
2124
2125 VkResult radv_ResetCommandPool(
2126 VkDevice device,
2127 VkCommandPool commandPool,
2128 VkCommandPoolResetFlags flags)
2129 {
2130 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
2131
2132 list_for_each_entry(struct radv_cmd_buffer, cmd_buffer,
2133 &pool->cmd_buffers, pool_link) {
2134 radv_reset_cmd_buffer(cmd_buffer);
2135 }
2136
2137 return VK_SUCCESS;
2138 }
2139
2140 void radv_TrimCommandPoolKHR(
2141 VkDevice device,
2142 VkCommandPool commandPool,
2143 VkCommandPoolTrimFlagsKHR flags)
2144 {
2145 }
2146
2147 void radv_CmdBeginRenderPass(
2148 VkCommandBuffer commandBuffer,
2149 const VkRenderPassBeginInfo* pRenderPassBegin,
2150 VkSubpassContents contents)
2151 {
2152 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2153 RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass);
2154 RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
2155
2156 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
2157 cmd_buffer->cs, 2048);
2158
2159 cmd_buffer->state.framebuffer = framebuffer;
2160 cmd_buffer->state.pass = pass;
2161 cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
2162 radv_cmd_state_setup_attachments(cmd_buffer, pass, pRenderPassBegin);
2163
2164 si_emit_cache_flush(cmd_buffer);
2165
2166 radv_cmd_buffer_set_subpass(cmd_buffer, pass->subpasses, true);
2167 assert(cmd_buffer->cs->cdw <= cdw_max);
2168
2169 radv_cmd_buffer_clear_subpass(cmd_buffer);
2170 }
2171
2172 void radv_CmdNextSubpass(
2173 VkCommandBuffer commandBuffer,
2174 VkSubpassContents contents)
2175 {
2176 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2177
2178 si_emit_cache_flush(cmd_buffer);
2179 radv_cmd_buffer_resolve_subpass(cmd_buffer);
2180
2181 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs,
2182 2048);
2183
2184 radv_cmd_buffer_set_subpass(cmd_buffer, cmd_buffer->state.subpass + 1, true);
2185 radv_cmd_buffer_clear_subpass(cmd_buffer);
2186 }
2187
2188 void radv_CmdDraw(
2189 VkCommandBuffer commandBuffer,
2190 uint32_t vertexCount,
2191 uint32_t instanceCount,
2192 uint32_t firstVertex,
2193 uint32_t firstInstance)
2194 {
2195 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2196
2197 radv_cmd_buffer_flush_state(cmd_buffer, (instanceCount > 1), vertexCount);
2198
2199 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 10);
2200
2201 struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
2202 AC_UD_VS_BASE_VERTEX_START_INSTANCE);
2203 if (loc->sgpr_idx != -1) {
2204 uint32_t base_reg = shader_stage_to_user_data_0(MESA_SHADER_VERTEX, radv_pipeline_has_gs(cmd_buffer->state.pipeline));
2205 radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, 3);
2206 radeon_emit(cmd_buffer->cs, firstVertex);
2207 radeon_emit(cmd_buffer->cs, firstInstance);
2208 radeon_emit(cmd_buffer->cs, 0);
2209 }
2210 radeon_emit(cmd_buffer->cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
2211 radeon_emit(cmd_buffer->cs, instanceCount);
2212
2213 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, 0));
2214 radeon_emit(cmd_buffer->cs, vertexCount);
2215 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
2216 S_0287F0_USE_OPAQUE(0));
2217
2218 assert(cmd_buffer->cs->cdw <= cdw_max);
2219
2220 radv_cmd_buffer_trace_emit(cmd_buffer);
2221 }
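
/* Example (API-side sketch): a non-indexed draw of a single triangle maps
 * onto the packet sequence above -- base vertex/instance user SGPRs, then
 * NUM_INSTANCES, then DRAW_INDEX_AUTO carrying the vertex count:
 *
 *    vkCmdDraw(cmd, 3, 1, 0, 0);
 */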
2222
2223 static void radv_emit_primitive_reset_index(struct radv_cmd_buffer *cmd_buffer)
2224 {
2225 uint32_t primitive_reset_index = cmd_buffer->state.last_primitive_reset_index ? 0xffffffffu : 0xffffu;
2226
2227 if (cmd_buffer->state.pipeline->graphics.prim_restart_enable &&
2228 primitive_reset_index != cmd_buffer->state.last_primitive_reset_index) {
2229 cmd_buffer->state.last_primitive_reset_index = primitive_reset_index;
2230 radeon_set_context_reg(cmd_buffer->cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
2231 primitive_reset_index);
2232 }
2233 }
2234
2235 void radv_CmdDrawIndexed(
2236 VkCommandBuffer commandBuffer,
2237 uint32_t indexCount,
2238 uint32_t instanceCount,
2239 uint32_t firstIndex,
2240 int32_t vertexOffset,
2241 uint32_t firstInstance)
2242 {
2243 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2244 int index_size = cmd_buffer->state.index_type ? 4 : 2;
2245 uint32_t index_max_size = (cmd_buffer->state.index_buffer->size - cmd_buffer->state.index_offset) / index_size;
2246 uint64_t index_va;
2247
2248 radv_cmd_buffer_flush_state(cmd_buffer, (instanceCount > 1), indexCount);
2249 radv_emit_primitive_reset_index(cmd_buffer);
2250
2251 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 15);
2252
2253 radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
2254 radeon_emit(cmd_buffer->cs, cmd_buffer->state.index_type);
2255
2256 struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
2257 AC_UD_VS_BASE_VERTEX_START_INSTANCE);
2258 if (loc->sgpr_idx != -1) {
2259 uint32_t base_reg = shader_stage_to_user_data_0(MESA_SHADER_VERTEX, radv_pipeline_has_gs(cmd_buffer->state.pipeline));
2260 radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, 3);
2261 radeon_emit(cmd_buffer->cs, vertexOffset);
2262 radeon_emit(cmd_buffer->cs, firstInstance);
2263 radeon_emit(cmd_buffer->cs, 0);
2264 }
2265 radeon_emit(cmd_buffer->cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
2266 radeon_emit(cmd_buffer->cs, instanceCount);
2267
2268 index_va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->state.index_buffer->bo);
2269 index_va += firstIndex * index_size + cmd_buffer->state.index_buffer->offset + cmd_buffer->state.index_offset;
2270 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, false));
2271 radeon_emit(cmd_buffer->cs, index_max_size);
2272 radeon_emit(cmd_buffer->cs, index_va);
2273 radeon_emit(cmd_buffer->cs, (index_va >> 32UL) & 0xFF);
2274 radeon_emit(cmd_buffer->cs, indexCount);
2275 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_DMA);
2276
2277 assert(cmd_buffer->cs->cdw <= cdw_max);
2278 radv_cmd_buffer_trace_emit(cmd_buffer);
2279 }
2280
2281 static void
2282 radv_emit_indirect_draw(struct radv_cmd_buffer *cmd_buffer,
2283 VkBuffer _buffer,
2284 VkDeviceSize offset,
2285 VkBuffer _count_buffer,
2286 VkDeviceSize count_offset,
2287 uint32_t draw_count,
2288 uint32_t stride,
2289 bool indexed)
2290 {
2291 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
2292 RADV_FROM_HANDLE(radv_buffer, count_buffer, _count_buffer);
2293 struct radeon_winsys_cs *cs = cmd_buffer->cs;
2294 unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
2295 : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
2296 uint64_t indirect_va = cmd_buffer->device->ws->buffer_get_va(buffer->bo);
2297 indirect_va += offset + buffer->offset;
2298 uint64_t count_va = 0;
2299
2300 	if (count_buffer) {
2301 		count_va = cmd_buffer->device->ws->buffer_get_va(count_buffer->bo);
2302 		count_va += count_offset + count_buffer->offset;
		/* The GPU fetches the draw count from this BO, so it must be resident. */
		cmd_buffer->device->ws->cs_add_buffer(cs, count_buffer->bo, 8);
2303 	}
2304
2305 if (!draw_count)
2306 return;
2307
2308 cmd_buffer->device->ws->cs_add_buffer(cs, buffer->bo, 8);
2309
2310 struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
2311 AC_UD_VS_BASE_VERTEX_START_INSTANCE);
2312 uint32_t base_reg = shader_stage_to_user_data_0(MESA_SHADER_VERTEX, radv_pipeline_has_gs(cmd_buffer->state.pipeline));
2313 assert(loc->sgpr_idx != -1);
2314 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
2315 radeon_emit(cs, 1);
2316 radeon_emit(cs, indirect_va);
2317 radeon_emit(cs, indirect_va >> 32);
2318
2319 radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
2320 PKT3_DRAW_INDIRECT_MULTI,
2321 8, false));
2322 radeon_emit(cs, 0);
2323 radeon_emit(cs, ((base_reg + loc->sgpr_idx * 4) - SI_SH_REG_OFFSET) >> 2);
2324 radeon_emit(cs, ((base_reg + (loc->sgpr_idx + 1) * 4) - SI_SH_REG_OFFSET) >> 2);
2325 radeon_emit(cs, (((base_reg + (loc->sgpr_idx + 2) * 4) - SI_SH_REG_OFFSET) >> 2) |
2326 S_2C3_DRAW_INDEX_ENABLE(1) |
2327 S_2C3_COUNT_INDIRECT_ENABLE(!!count_va));
2328 radeon_emit(cs, draw_count); /* count */
2329 radeon_emit(cs, count_va); /* count_addr */
2330 radeon_emit(cs, count_va >> 32);
2331 radeon_emit(cs, stride); /* stride */
2332 radeon_emit(cs, di_src_sel);
2333 radv_cmd_buffer_trace_emit(cmd_buffer);
2334 }
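
/* The MULTI packets consume VkDrawIndirectCommand records straight from
 * GPU memory; the Vulkan struct layout was designed to match what the
 * hardware expects, so no repacking is needed. Example (API-side sketch)
 * of what the application stores at `offset' in the indirect buffer:
 *
 *    VkDrawIndirectCommand draw = {
 *       .vertexCount   = 3,
 *       .instanceCount = 1,
 *       .firstVertex   = 0,
 *       .firstInstance = 0,
 *    };
 */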
2335
2336 static void
2337 radv_cmd_draw_indirect_count(VkCommandBuffer commandBuffer,
2338 VkBuffer buffer,
2339 VkDeviceSize offset,
2340 VkBuffer countBuffer,
2341 VkDeviceSize countBufferOffset,
2342 uint32_t maxDrawCount,
2343 uint32_t stride)
2344 {
2345 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2346 radv_cmd_buffer_flush_state(cmd_buffer, true, 0);
2347
2348 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
2349 cmd_buffer->cs, 14);
2350
2351 radv_emit_indirect_draw(cmd_buffer, buffer, offset,
2352 countBuffer, countBufferOffset, maxDrawCount, stride, false);
2353
2354 assert(cmd_buffer->cs->cdw <= cdw_max);
2355 }
2356
2357 static void
2358 radv_cmd_draw_indexed_indirect_count(
2359 VkCommandBuffer commandBuffer,
2360 VkBuffer buffer,
2361 VkDeviceSize offset,
2362 VkBuffer countBuffer,
2363 VkDeviceSize countBufferOffset,
2364 uint32_t maxDrawCount,
2365 uint32_t stride)
2366 {
2367 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2368 int index_size = cmd_buffer->state.index_type ? 4 : 2;
2369 uint32_t index_max_size = (cmd_buffer->state.index_buffer->size - cmd_buffer->state.index_offset) / index_size;
2370 uint64_t index_va;
2371 radv_cmd_buffer_flush_state(cmd_buffer, true, 0);
2372 radv_emit_primitive_reset_index(cmd_buffer);
2373
2374 index_va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->state.index_buffer->bo);
2375 index_va += cmd_buffer->state.index_buffer->offset + cmd_buffer->state.index_offset;
2376
2377 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 21);
2378
2379 radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
2380 radeon_emit(cmd_buffer->cs, cmd_buffer->state.index_type);
2381
2382 radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_BASE, 1, 0));
2383 radeon_emit(cmd_buffer->cs, index_va);
2384 radeon_emit(cmd_buffer->cs, index_va >> 32);
2385
2386 radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
2387 radeon_emit(cmd_buffer->cs, index_max_size);
2388
2389 radv_emit_indirect_draw(cmd_buffer, buffer, offset,
2390 countBuffer, countBufferOffset, maxDrawCount, stride, true);
2391
2392 assert(cmd_buffer->cs->cdw <= cdw_max);
2393 }
2394
2395 void radv_CmdDrawIndirect(
2396 VkCommandBuffer commandBuffer,
2397 VkBuffer buffer,
2398 VkDeviceSize offset,
2399 uint32_t drawCount,
2400 uint32_t stride)
2401 {
2402 radv_cmd_draw_indirect_count(commandBuffer, buffer, offset,
2403 VK_NULL_HANDLE, 0, drawCount, stride);
2404 }
2405
2406 void radv_CmdDrawIndexedIndirect(
2407 VkCommandBuffer commandBuffer,
2408 VkBuffer buffer,
2409 VkDeviceSize offset,
2410 uint32_t drawCount,
2411 uint32_t stride)
2412 {
2413 radv_cmd_draw_indexed_indirect_count(commandBuffer, buffer, offset,
2414 VK_NULL_HANDLE, 0, drawCount, stride);
2415 }
2416
2417 void radv_CmdDrawIndirectCountAMD(
2418 VkCommandBuffer commandBuffer,
2419 VkBuffer buffer,
2420 VkDeviceSize offset,
2421 VkBuffer countBuffer,
2422 VkDeviceSize countBufferOffset,
2423 uint32_t maxDrawCount,
2424 uint32_t stride)
2425 {
2426 radv_cmd_draw_indirect_count(commandBuffer, buffer, offset,
2427 countBuffer, countBufferOffset,
2428 maxDrawCount, stride);
2429 }
2430
2431 void radv_CmdDrawIndexedIndirectCountAMD(
2432 VkCommandBuffer commandBuffer,
2433 VkBuffer buffer,
2434 VkDeviceSize offset,
2435 VkBuffer countBuffer,
2436 VkDeviceSize countBufferOffset,
2437 uint32_t maxDrawCount,
2438 uint32_t stride)
2439 {
2440 radv_cmd_draw_indexed_indirect_count(commandBuffer, buffer, offset,
2441 countBuffer, countBufferOffset,
2442 maxDrawCount, stride);
2443 }
2444
2445 static void
2446 radv_flush_compute_state(struct radv_cmd_buffer *cmd_buffer)
2447 {
2448 radv_emit_compute_pipeline(cmd_buffer);
2449 radv_flush_descriptors(cmd_buffer, cmd_buffer->state.compute_pipeline,
2450 VK_SHADER_STAGE_COMPUTE_BIT);
2451 radv_flush_constants(cmd_buffer, cmd_buffer->state.compute_pipeline,
2452 VK_SHADER_STAGE_COMPUTE_BIT);
2453 si_emit_cache_flush(cmd_buffer);
2454 }
2455
2456 void radv_CmdDispatch(
2457 VkCommandBuffer commandBuffer,
2458 uint32_t x,
2459 uint32_t y,
2460 uint32_t z)
2461 {
2462 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2463
2464 radv_flush_compute_state(cmd_buffer);
2465
2466 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 10);
2467
2468 struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.compute_pipeline,
2469 MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE);
2470 if (loc->sgpr_idx != -1) {
2471 assert(!loc->indirect);
2472 assert(loc->num_sgprs == 3);
2473 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B900_COMPUTE_USER_DATA_0 + loc->sgpr_idx * 4, 3);
2474 radeon_emit(cmd_buffer->cs, x);
2475 radeon_emit(cmd_buffer->cs, y);
2476 radeon_emit(cmd_buffer->cs, z);
2477 }
2478
2479 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
2480 PKT3_SHADER_TYPE_S(1));
2481 radeon_emit(cmd_buffer->cs, x);
2482 radeon_emit(cmd_buffer->cs, y);
2483 radeon_emit(cmd_buffer->cs, z);
2484 radeon_emit(cmd_buffer->cs, 1);
2485
2486 assert(cmd_buffer->cs->cdw <= cdw_max);
2487 radv_cmd_buffer_trace_emit(cmd_buffer);
2488 }
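
/* Example (API-side sketch): x/y/z are workgroup counts, not thread
 * counts; the per-group size comes from the shader's local size and was
 * programmed into COMPUTE_NUM_THREAD_* when the pipeline was emitted:
 *
 *    vkCmdDispatch(cmd, 64, 64, 1);   // launches 4096 workgroups
 */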
2489
2490 void radv_CmdDispatchIndirect(
2491 VkCommandBuffer commandBuffer,
2492 VkBuffer _buffer,
2493 VkDeviceSize offset)
2494 {
2495 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2496 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
2497 uint64_t va = cmd_buffer->device->ws->buffer_get_va(buffer->bo);
2498 va += buffer->offset + offset;
2499
2500 cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, buffer->bo, 8);
2501
2502 radv_flush_compute_state(cmd_buffer);
2503
2504 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 25);
2505 struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.compute_pipeline,
2506 MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE);
2507 if (loc->sgpr_idx != -1) {
2508 for (unsigned i = 0; i < 3; ++i) {
2509 radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
2510 radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
2511 COPY_DATA_DST_SEL(COPY_DATA_REG));
2512 radeon_emit(cmd_buffer->cs, (va + 4 * i));
2513 radeon_emit(cmd_buffer->cs, (va + 4 * i) >> 32);
2514 radeon_emit(cmd_buffer->cs, ((R_00B900_COMPUTE_USER_DATA_0 + loc->sgpr_idx * 4) >> 2) + i);
2515 radeon_emit(cmd_buffer->cs, 0);
2516 }
2517 }
2518
2519 if (radv_cmd_buffer_uses_mec(cmd_buffer)) {
2520 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_INDIRECT, 2, 0) |
2521 PKT3_SHADER_TYPE_S(1));
2522 radeon_emit(cmd_buffer->cs, va);
2523 radeon_emit(cmd_buffer->cs, va >> 32);
2524 radeon_emit(cmd_buffer->cs, 1);
2525 } else {
2526 radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_BASE, 2, 0) |
2527 PKT3_SHADER_TYPE_S(1));
2528 radeon_emit(cmd_buffer->cs, 1);
2529 radeon_emit(cmd_buffer->cs, va);
2530 radeon_emit(cmd_buffer->cs, va >> 32);
2531
2532 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, 0) |
2533 PKT3_SHADER_TYPE_S(1));
2534 radeon_emit(cmd_buffer->cs, 0);
2535 radeon_emit(cmd_buffer->cs, 1);
2536 }
2537
2538 assert(cmd_buffer->cs->cdw <= cdw_max);
2539 radv_cmd_buffer_trace_emit(cmd_buffer);
2540 }
2541
2542 void radv_unaligned_dispatch(
2543 struct radv_cmd_buffer *cmd_buffer,
2544 uint32_t x,
2545 uint32_t y,
2546 uint32_t z)
2547 {
2548 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
2549 struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
2550 uint32_t blocks[3], remainder[3];
2551
2552 blocks[0] = round_up_u32(x, compute_shader->info.cs.block_size[0]);
2553 blocks[1] = round_up_u32(y, compute_shader->info.cs.block_size[1]);
2554 blocks[2] = round_up_u32(z, compute_shader->info.cs.block_size[2]);
2555
2556 /* If aligned, these should be an entire block size, not 0 */
2557 remainder[0] = x + compute_shader->info.cs.block_size[0] - align_u32_npot(x, compute_shader->info.cs.block_size[0]);
2558 remainder[1] = y + compute_shader->info.cs.block_size[1] - align_u32_npot(y, compute_shader->info.cs.block_size[1]);
2559 remainder[2] = z + compute_shader->info.cs.block_size[2] - align_u32_npot(z, compute_shader->info.cs.block_size[2]);
2560
2561 radv_flush_compute_state(cmd_buffer);
2562
2563 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 15);
2564
2565 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
2566 radeon_emit(cmd_buffer->cs,
2567 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]) |
2568 S_00B81C_NUM_THREAD_PARTIAL(remainder[0]));
2569 radeon_emit(cmd_buffer->cs,
2570 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[1]) |
2571 S_00B81C_NUM_THREAD_PARTIAL(remainder[1]));
2572 radeon_emit(cmd_buffer->cs,
2573 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]) |
2574 S_00B81C_NUM_THREAD_PARTIAL(remainder[2]));
2575
2576 struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.compute_pipeline,
2577 MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE);
2578 if (loc->sgpr_idx != -1) {
2579 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B900_COMPUTE_USER_DATA_0 + loc->sgpr_idx * 4, 3);
2580 radeon_emit(cmd_buffer->cs, blocks[0]);
2581 radeon_emit(cmd_buffer->cs, blocks[1]);
2582 radeon_emit(cmd_buffer->cs, blocks[2]);
2583 }
2584 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
2585 PKT3_SHADER_TYPE_S(1));
2586 radeon_emit(cmd_buffer->cs, blocks[0]);
2587 radeon_emit(cmd_buffer->cs, blocks[1]);
2588 radeon_emit(cmd_buffer->cs, blocks[2]);
2589 radeon_emit(cmd_buffer->cs, S_00B800_COMPUTE_SHADER_EN(1) |
2590 S_00B800_PARTIAL_TG_EN(1));
2591
2592 assert(cmd_buffer->cs->cdw <= cdw_max);
2593 radv_cmd_buffer_trace_emit(cmd_buffer);
2594 }
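
/* Worked example of the blocks/remainder math above (illustrative
 * numbers): with x = 100 and block_size[0] = 64,
 *
 *    blocks[0]    = ceil(100 / 64)             = 2
 *    remainder[0] = 100 + 64 - align(100, 64)  = 100 + 64 - 128 = 36
 *
 * so two groups are launched in X and PARTIAL_TG_EN trims the last one to
 * 36 threads. For an exactly aligned x the remainder works out to a full
 * block (64), matching the "not 0" note above.
 */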
2595
2596 void radv_CmdEndRenderPass(
2597 VkCommandBuffer commandBuffer)
2598 {
2599 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2600
2601 radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier);
2602
2603 si_emit_cache_flush(cmd_buffer);
2604 radv_cmd_buffer_resolve_subpass(cmd_buffer);
2605
2606 for (unsigned i = 0; i < cmd_buffer->state.framebuffer->attachment_count; ++i) {
2607 VkImageLayout layout = cmd_buffer->state.pass->attachments[i].final_layout;
2608 radv_handle_subpass_image_transition(cmd_buffer,
2609 (VkAttachmentReference){i, layout});
2610 }
2611
2612 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
2613
2614 cmd_buffer->state.pass = NULL;
2615 cmd_buffer->state.subpass = NULL;
2616 cmd_buffer->state.attachments = NULL;
2617 cmd_buffer->state.framebuffer = NULL;
2618 }
2619
2620
2621 static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
2622 struct radv_image *image)
2623 {
2624
2625 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
2626 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
2627
2628 radv_fill_buffer(cmd_buffer, image->bo, image->offset + image->htile.offset,
2629 image->htile.size, 0xffffffff);
2630
2631 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
2632 RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
2633 RADV_CMD_FLAG_INV_VMEM_L1 |
2634 RADV_CMD_FLAG_INV_GLOBAL_L2;
2635 }
2636
2637 static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
2638 struct radv_image *image,
2639 VkImageLayout src_layout,
2640 VkImageLayout dst_layout,
2641 const VkImageSubresourceRange *range,
2642 VkImageAspectFlags pending_clears)
2643 {
2644 if (dst_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL &&
2645 (pending_clears & vk_format_aspects(image->vk_format)) == vk_format_aspects(image->vk_format) &&
2646 cmd_buffer->state.render_area.offset.x == 0 && cmd_buffer->state.render_area.offset.y == 0 &&
2647 cmd_buffer->state.render_area.extent.width == image->extent.width &&
2648 cmd_buffer->state.render_area.extent.height == image->extent.height) {
2649 /* The clear will initialize htile. */
2650 return;
2651 } else if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
2652 radv_layout_has_htile(image, dst_layout)) {
2653 /* TODO: merge with the clear if applicable */
2654 radv_initialize_htile(cmd_buffer, image);
2655 } else if (!radv_layout_has_htile(image, src_layout) &&
2656 radv_layout_has_htile(image, dst_layout)) {
2657 radv_initialize_htile(cmd_buffer, image);
2658 } else if ((radv_layout_has_htile(image, src_layout) &&
2659 !radv_layout_has_htile(image, dst_layout)) ||
2660 (radv_layout_is_htile_compressed(image, src_layout) &&
2661 !radv_layout_is_htile_compressed(image, dst_layout))) {
2662 VkImageSubresourceRange local_range = *range;
2663 local_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
2664 local_range.baseMipLevel = 0;
2665 local_range.levelCount = 1;
2666
2667 radv_decompress_depth_image_inplace(cmd_buffer, image, &local_range);
2668 }
2669 }
2670
2671 void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
2672 struct radv_image *image, uint32_t value)
2673 {
2674 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
2675 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
2676
2677 radv_fill_buffer(cmd_buffer, image->bo, image->offset + image->cmask.offset,
2678 image->cmask.size, value);
2679
2680 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
2681 RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
2682 RADV_CMD_FLAG_INV_VMEM_L1 |
2683 RADV_CMD_FLAG_INV_GLOBAL_L2;
2684 }
2685
2686 static void radv_handle_cmask_image_transition(struct radv_cmd_buffer *cmd_buffer,
2687 struct radv_image *image,
2688 VkImageLayout src_layout,
2689 VkImageLayout dst_layout,
2690 unsigned src_queue_mask,
2691 unsigned dst_queue_mask,
2692 const VkImageSubresourceRange *range,
2693 VkImageAspectFlags pending_clears)
2694 {
2695 if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
2696 if (image->fmask.size)
2697 radv_initialise_cmask(cmd_buffer, image, 0xccccccccu);
2698 else
2699 radv_initialise_cmask(cmd_buffer, image, 0xffffffffu);
2700 } else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
2701 !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
2702 radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
2703 }
2704 }
2705
2706 void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
2707 struct radv_image *image, uint32_t value)
2708 {
2709
2710 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
2711 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
2712
2713 radv_fill_buffer(cmd_buffer, image->bo, image->offset + image->dcc_offset,
2714 image->surface.dcc_size, value);
2715
2716 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
2717 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
2718 RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
2719 RADV_CMD_FLAG_INV_VMEM_L1 |
2720 RADV_CMD_FLAG_INV_GLOBAL_L2;
2721 }
2722
2723 static void radv_handle_dcc_image_transition(struct radv_cmd_buffer *cmd_buffer,
2724 struct radv_image *image,
2725 VkImageLayout src_layout,
2726 VkImageLayout dst_layout,
2727 unsigned src_queue_mask,
2728 unsigned dst_queue_mask,
2729 const VkImageSubresourceRange *range,
2730 VkImageAspectFlags pending_clears)
2731 {
2732 if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
2733 radv_initialize_dcc(cmd_buffer, image, 0x20202020u);
2734 } else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
2735 !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
2736 radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
2737 }
2738 }
2739
2740 static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
2741 struct radv_image *image,
2742 VkImageLayout src_layout,
2743 VkImageLayout dst_layout,
2744 uint32_t src_family,
2745 uint32_t dst_family,
2746 const VkImageSubresourceRange *range,
2747 VkImageAspectFlags pending_clears)
2748 {
2749 if (image->exclusive && src_family != dst_family) {
2750 /* This is an acquire or a release operation and there will be
2751 * a corresponding release/acquire. Do the transition in the
2752 * most flexible queue. */
2753
2754 assert(src_family == cmd_buffer->queue_family_index ||
2755 dst_family == cmd_buffer->queue_family_index);
2756
2757 if (cmd_buffer->queue_family_index == RADV_QUEUE_TRANSFER)
2758 return;
2759
2760 if (cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
2761 (src_family == RADV_QUEUE_GENERAL ||
2762 dst_family == RADV_QUEUE_GENERAL))
2763 return;
2764 }
2765
2766 unsigned src_queue_mask = radv_image_queue_family_mask(image, src_family, cmd_buffer->queue_family_index);
2767 unsigned dst_queue_mask = radv_image_queue_family_mask(image, dst_family, cmd_buffer->queue_family_index);
2768
2769 if (image->htile.size)
2770 radv_handle_depth_image_transition(cmd_buffer, image, src_layout,
2771 dst_layout, range, pending_clears);
2772
2773 if (image->cmask.size)
2774 radv_handle_cmask_image_transition(cmd_buffer, image, src_layout,
2775 dst_layout, src_queue_mask,
2776 dst_queue_mask, range,
2777 pending_clears);
2778
2779 if (image->surface.dcc_size)
2780 radv_handle_dcc_image_transition(cmd_buffer, image, src_layout,
2781 dst_layout, src_queue_mask,
2782 dst_queue_mask, range,
2783 pending_clears);
2784 }
2785
2786 void radv_CmdPipelineBarrier(
2787 VkCommandBuffer commandBuffer,
2788 VkPipelineStageFlags srcStageMask,
2789 VkPipelineStageFlags destStageMask,
2790 VkBool32 byRegion,
2791 uint32_t memoryBarrierCount,
2792 const VkMemoryBarrier* pMemoryBarriers,
2793 uint32_t bufferMemoryBarrierCount,
2794 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
2795 uint32_t imageMemoryBarrierCount,
2796 const VkImageMemoryBarrier* pImageMemoryBarriers)
2797 {
2798 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2799 VkAccessFlags src_flags = 0;
2800 VkAccessFlags dst_flags = 0;
2801 uint32_t b;
2802 for (uint32_t i = 0; i < memoryBarrierCount; i++) {
2803 src_flags |= pMemoryBarriers[i].srcAccessMask;
2804 dst_flags |= pMemoryBarriers[i].dstAccessMask;
2805 }
2806
2807 for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
2808 src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
2809 dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
2810 }
2811
2812 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
2813 src_flags |= pImageMemoryBarriers[i].srcAccessMask;
2814 dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
2815 }
2816
2817 enum radv_cmd_flush_bits flush_bits = 0;
2818 for_each_bit(b, src_flags) {
2819 switch ((VkAccessFlagBits)(1 << b)) {
2820 case VK_ACCESS_SHADER_WRITE_BIT:
2821 flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2;
2822 break;
2823 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
2824 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
2825 break;
2826 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
2827 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
2828 break;
2829 case VK_ACCESS_TRANSFER_WRITE_BIT:
2830 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
2831 break;
2832 default:
2833 break;
2834 }
2835 }
2836 cmd_buffer->state.flush_bits |= flush_bits;
2837
2838 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
2839 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
2840 radv_handle_image_transition(cmd_buffer, image,
2841 pImageMemoryBarriers[i].oldLayout,
2842 pImageMemoryBarriers[i].newLayout,
2843 pImageMemoryBarriers[i].srcQueueFamilyIndex,
2844 pImageMemoryBarriers[i].dstQueueFamilyIndex,
2845 &pImageMemoryBarriers[i].subresourceRange,
2846 0);
2847 }
2848
2849 flush_bits = 0;
2850
2851 for_each_bit(b, dst_flags) {
2852 switch ((VkAccessFlagBits)(1 << b)) {
2853 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
2854 case VK_ACCESS_INDEX_READ_BIT:
2855 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
2856 flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1;
2857 break;
2858 case VK_ACCESS_UNIFORM_READ_BIT:
2859 flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 | RADV_CMD_FLAG_INV_SMEM_L1;
2860 break;
2861 case VK_ACCESS_SHADER_READ_BIT:
2862 flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2;
2863 break;
2864 case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
2865 case VK_ACCESS_TRANSFER_READ_BIT:
2866 case VK_ACCESS_TRANSFER_WRITE_BIT:
2867 case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
2868 		flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER | RADV_CMD_FLAG_INV_GLOBAL_L2;
		break;
2869 default:
2870 break;
2871 }
2872 }
2873
2874 flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
2875 RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
2876
2877 cmd_buffer->state.flush_bits |= flush_bits;
2878 }
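
/* Example (API-side sketch, placeholder handles): a typical barrier that
 * exercises both halves of the logic above -- flush bits derived from the
 * access masks, plus a layout transition routed through
 * radv_handle_image_transition():
 *
 *    VkImageMemoryBarrier b = {
 *       .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
 *       .srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
 *       .dstAccessMask = VK_ACCESS_SHADER_READ_BIT,
 *       .oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
 *       .newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
 *       .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
 *       .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
 *       .image = img,
 *       .subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 },
 *    };
 *    vkCmdPipelineBarrier(cmd,
 *                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
 *                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0,
 *                         0, NULL, 0, NULL, 1, &b);
 */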
2879
2880
2881 static void write_event(struct radv_cmd_buffer *cmd_buffer,
2882 struct radv_event *event,
2883 VkPipelineStageFlags stageMask,
2884 unsigned value)
2885 {
2886 struct radeon_winsys_cs *cs = cmd_buffer->cs;
2887 uint64_t va = cmd_buffer->device->ws->buffer_get_va(event->bo);
2888
2889 cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8);
2890
2891 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 12);
2892
2893 	/* TODO: this is overkill. We could probably pick a cheaper event
2894 	 * based on the stage mask. */
2895
2896 if (cmd_buffer->device->physical_device->rad_info.chip_class == CIK) {
2897 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
2898 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) |
2899 EVENT_INDEX(5));
2900 radeon_emit(cs, va);
2901 radeon_emit(cs, (va >> 32) | EOP_DATA_SEL(1));
2902 radeon_emit(cs, 2);
2903 radeon_emit(cs, 0);
2904 }
2905
2906 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
2907 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) |
2908 EVENT_INDEX(5));
2909 radeon_emit(cs, va);
2910 radeon_emit(cs, (va >> 32) | EOP_DATA_SEL(1));
2911 radeon_emit(cs, value);
2912 radeon_emit(cs, 0);
2913
2914 assert(cmd_buffer->cs->cdw <= cdw_max);
2915 }
2916
2917 void radv_CmdSetEvent(VkCommandBuffer commandBuffer,
2918 VkEvent _event,
2919 VkPipelineStageFlags stageMask)
2920 {
2921 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2922 RADV_FROM_HANDLE(radv_event, event, _event);
2923
2924 write_event(cmd_buffer, event, stageMask, 1);
2925 }
2926
2927 void radv_CmdResetEvent(VkCommandBuffer commandBuffer,
2928 VkEvent _event,
2929 VkPipelineStageFlags stageMask)
2930 {
2931 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2932 RADV_FROM_HANDLE(radv_event, event, _event);
2933
2934 write_event(cmd_buffer, event, stageMask, 0);
2935 }
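
/* Example (API-side sketch, placeholder handles): the set/wait pair as an
 * application records it. The set side becomes the EOP write in
 * write_event() above and the wait side becomes the WAIT_REG_MEM poll in
 * radv_CmdWaitEvents() below:
 *
 *    vkCmdSetEvent(cmd, event, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
 *    // ... other work ...
 *    vkCmdWaitEvents(cmd, 1, &event,
 *                    VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
 *                    VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
 *                    0, NULL, 0, NULL, 0, NULL);
 */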
2936
2937 void radv_CmdWaitEvents(VkCommandBuffer commandBuffer,
2938 uint32_t eventCount,
2939 const VkEvent* pEvents,
2940 VkPipelineStageFlags srcStageMask,
2941 VkPipelineStageFlags dstStageMask,
2942 uint32_t memoryBarrierCount,
2943 const VkMemoryBarrier* pMemoryBarriers,
2944 uint32_t bufferMemoryBarrierCount,
2945 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
2946 uint32_t imageMemoryBarrierCount,
2947 const VkImageMemoryBarrier* pImageMemoryBarriers)
2948 {
2949 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2950 struct radeon_winsys_cs *cs = cmd_buffer->cs;
2951
2952 for (unsigned i = 0; i < eventCount; ++i) {
2953 RADV_FROM_HANDLE(radv_event, event, pEvents[i]);
2954 uint64_t va = cmd_buffer->device->ws->buffer_get_va(event->bo);
2955
2956 cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8);
2957
2958 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
2959
2960 radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
2961 radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
2962 radeon_emit(cs, va);
2963 radeon_emit(cs, va >> 32);
2964 radeon_emit(cs, 1); /* reference value */
2965 radeon_emit(cs, 0xffffffff); /* mask */
2966 radeon_emit(cs, 4); /* poll interval */
2967
2968 assert(cmd_buffer->cs->cdw <= cdw_max);
2969 }
2970
2971
2972 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
2973 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
2974
2975 radv_handle_image_transition(cmd_buffer, image,
2976 pImageMemoryBarriers[i].oldLayout,
2977 pImageMemoryBarriers[i].newLayout,
2978 pImageMemoryBarriers[i].srcQueueFamilyIndex,
2979 pImageMemoryBarriers[i].dstQueueFamilyIndex,
2980 &pImageMemoryBarriers[i].subresourceRange,
2981 0);
2982 }
2983
2984 /* TODO: figure out how to do memory barriers without waiting */
2985 cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER |
2986 RADV_CMD_FLAG_INV_GLOBAL_L2 |
2987 RADV_CMD_FLAG_INV_VMEM_L1 |
2988 RADV_CMD_FLAG_INV_SMEM_L1;
2989 }