turnip: use DIRTY SDS bit to avoid making copies of pipeline load state ib
src/freedreno/vulkan/tu_cmd_buffer.c (mesa.git)
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include "registers/adreno_pm4.xml.h"
31 #include "registers/adreno_common.xml.h"
32
33 #include "vk_format.h"
34
35 #include "tu_cs.h"
36
37 void
38 tu_bo_list_init(struct tu_bo_list *list)
39 {
40 list->count = list->capacity = 0;
41 list->bo_infos = NULL;
42 }
43
44 void
45 tu_bo_list_destroy(struct tu_bo_list *list)
46 {
47 free(list->bo_infos);
48 }
49
50 void
51 tu_bo_list_reset(struct tu_bo_list *list)
52 {
53 list->count = 0;
54 }
55
56 /**
57 * \a flags consists of MSM_SUBMIT_BO_FLAGS.
58 */
59 static uint32_t
60 tu_bo_list_add_info(struct tu_bo_list *list,
61 const struct drm_msm_gem_submit_bo *bo_info)
62 {
63 assert(bo_info->handle != 0);
64
65 for (uint32_t i = 0; i < list->count; ++i) {
66 if (list->bo_infos[i].handle == bo_info->handle) {
67 assert(list->bo_infos[i].presumed == bo_info->presumed);
68 list->bo_infos[i].flags |= bo_info->flags;
69 return i;
70 }
71 }
72
73 /* grow list->bo_infos if needed */
74 if (list->count == list->capacity) {
75 uint32_t new_capacity = MAX2(2 * list->count, 16);
76 struct drm_msm_gem_submit_bo *new_bo_infos = realloc(
77 list->bo_infos, new_capacity * sizeof(struct drm_msm_gem_submit_bo));
78 if (!new_bo_infos)
79 return TU_BO_LIST_FAILED;
80 list->bo_infos = new_bo_infos;
81 list->capacity = new_capacity;
82 }
83
84 list->bo_infos[list->count] = *bo_info;
85 return list->count++;
86 }
87
88 uint32_t
89 tu_bo_list_add(struct tu_bo_list *list,
90 const struct tu_bo *bo,
91 uint32_t flags)
92 {
93 return tu_bo_list_add_info(list, &(struct drm_msm_gem_submit_bo) {
94 .flags = flags,
95 .handle = bo->gem_handle,
96 .presumed = bo->iova,
97 });
98 }
99
100 VkResult
101 tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other)
102 {
103 for (uint32_t i = 0; i < other->count; i++) {
104 if (tu_bo_list_add_info(list, other->bo_infos + i) == TU_BO_LIST_FAILED)
105 return VK_ERROR_OUT_OF_HOST_MEMORY;
106 }
107
108 return VK_SUCCESS;
109 }
110
111 void
112 tu6_emit_event_write(struct tu_cmd_buffer *cmd,
113 struct tu_cs *cs,
114 enum vgt_event_type event)
115 {
116 bool need_seqno = false;
117 switch (event) {
118 case CACHE_FLUSH_TS:
119 case WT_DONE_TS:
120 case RB_DONE_TS:
121 case PC_CCU_FLUSH_DEPTH_TS:
122 case PC_CCU_FLUSH_COLOR_TS:
123 case PC_CCU_RESOLVE_TS:
124 need_seqno = true;
125 break;
126 default:
127 break;
128 }
129
130 tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
131 tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
132 if (need_seqno) {
133 tu_cs_emit_qw(cs, global_iova(cmd, seqno_dummy));
134 tu_cs_emit(cs, 0);
135 }
136 }
137
138 static void
139 tu6_emit_flushes(struct tu_cmd_buffer *cmd_buffer,
140 struct tu_cs *cs,
141 enum tu_cmd_flush_bits flushes)
142 {
143 /* Experiments show that invalidating CCU while it still has data in it
144 * doesn't work, so make sure to always flush before invalidating in case
145 * any data remains that hasn't yet been made available through a barrier.
146 * However it does seem to work for UCHE.
147 */
148 if (flushes & (TU_CMD_FLAG_CCU_FLUSH_COLOR |
149 TU_CMD_FLAG_CCU_INVALIDATE_COLOR))
150 tu6_emit_event_write(cmd_buffer, cs, PC_CCU_FLUSH_COLOR_TS);
151 if (flushes & (TU_CMD_FLAG_CCU_FLUSH_DEPTH |
152 TU_CMD_FLAG_CCU_INVALIDATE_DEPTH))
153 tu6_emit_event_write(cmd_buffer, cs, PC_CCU_FLUSH_DEPTH_TS);
154 if (flushes & TU_CMD_FLAG_CCU_INVALIDATE_COLOR)
155 tu6_emit_event_write(cmd_buffer, cs, PC_CCU_INVALIDATE_COLOR);
156 if (flushes & TU_CMD_FLAG_CCU_INVALIDATE_DEPTH)
157 tu6_emit_event_write(cmd_buffer, cs, PC_CCU_INVALIDATE_DEPTH);
158 if (flushes & TU_CMD_FLAG_CACHE_FLUSH)
159 tu6_emit_event_write(cmd_buffer, cs, CACHE_FLUSH_TS);
160 if (flushes & TU_CMD_FLAG_CACHE_INVALIDATE)
161 tu6_emit_event_write(cmd_buffer, cs, CACHE_INVALIDATE);
162 if (flushes & TU_CMD_FLAG_WFI)
163 tu_cs_emit_wfi(cs);
164 }
165
166 /* "Normal" cache flushes, that don't require any special handling */
167
168 static void
169 tu_emit_cache_flush(struct tu_cmd_buffer *cmd_buffer,
170 struct tu_cs *cs)
171 {
172 tu6_emit_flushes(cmd_buffer, cs, cmd_buffer->state.cache.flush_bits);
173 cmd_buffer->state.cache.flush_bits = 0;
174 }
175
176 /* Renderpass cache flushes */
177
178 void
179 tu_emit_cache_flush_renderpass(struct tu_cmd_buffer *cmd_buffer,
180 struct tu_cs *cs)
181 {
182 tu6_emit_flushes(cmd_buffer, cs, cmd_buffer->state.renderpass_cache.flush_bits);
183 cmd_buffer->state.renderpass_cache.flush_bits = 0;
184 }
185
186 /* Cache flushes for things that use the color/depth read/write path (i.e.
187 * blits and draws). This deals with changing CCU state as well as the usual
188 * cache flushing.
189 */
190
191 void
192 tu_emit_cache_flush_ccu(struct tu_cmd_buffer *cmd_buffer,
193 struct tu_cs *cs,
194 enum tu_cmd_ccu_state ccu_state)
195 {
196 enum tu_cmd_flush_bits flushes = cmd_buffer->state.cache.flush_bits;
197
198 assert(ccu_state != TU_CMD_CCU_UNKNOWN);
199
200 /* Changing CCU state must involve invalidating the CCU. In sysmem mode,
201 * the CCU may also contain data that we haven't flushed out yet, so we
202 * also need to flush. Also, in order to program RB_CCU_CNTL, we need to
203 * emit a WFI as it isn't pipelined.
204 */
205 if (ccu_state != cmd_buffer->state.ccu_state) {
206 if (cmd_buffer->state.ccu_state != TU_CMD_CCU_GMEM) {
207 flushes |=
208 TU_CMD_FLAG_CCU_FLUSH_COLOR |
209 TU_CMD_FLAG_CCU_FLUSH_DEPTH;
210 cmd_buffer->state.cache.pending_flush_bits &= ~(
211 TU_CMD_FLAG_CCU_FLUSH_COLOR |
212 TU_CMD_FLAG_CCU_FLUSH_DEPTH);
213 }
214 flushes |=
215 TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
216 TU_CMD_FLAG_CCU_INVALIDATE_DEPTH |
217 TU_CMD_FLAG_WFI;
218 cmd_buffer->state.cache.pending_flush_bits &= ~(
219 TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
220 TU_CMD_FLAG_CCU_INVALIDATE_DEPTH);
221 }
222
223 tu6_emit_flushes(cmd_buffer, cs, flushes);
224 cmd_buffer->state.cache.flush_bits = 0;
225
226 if (ccu_state != cmd_buffer->state.ccu_state) {
227 struct tu_physical_device *phys_dev = cmd_buffer->device->physical_device;
228 tu_cs_emit_regs(cs,
229 A6XX_RB_CCU_CNTL(.offset =
230 ccu_state == TU_CMD_CCU_GMEM ?
231 phys_dev->ccu_offset_gmem :
232 phys_dev->ccu_offset_bypass,
233 .gmem = ccu_state == TU_CMD_CCU_GMEM));
234 cmd_buffer->state.ccu_state = ccu_state;
235 }
236 }
237
238 static void
239 tu6_emit_zs(struct tu_cmd_buffer *cmd,
240 const struct tu_subpass *subpass,
241 struct tu_cs *cs)
242 {
243 const struct tu_framebuffer *fb = cmd->state.framebuffer;
244
245 const uint32_t a = subpass->depth_stencil_attachment.attachment;
246 if (a == VK_ATTACHMENT_UNUSED) {
247 tu_cs_emit_regs(cs,
248 A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE),
249 A6XX_RB_DEPTH_BUFFER_PITCH(0),
250 A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(0),
251 A6XX_RB_DEPTH_BUFFER_BASE(0),
252 A6XX_RB_DEPTH_BUFFER_BASE_GMEM(0));
253
254 tu_cs_emit_regs(cs,
255 A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE));
256
257 tu_cs_emit_regs(cs,
258 A6XX_GRAS_LRZ_BUFFER_BASE(0),
259 A6XX_GRAS_LRZ_BUFFER_PITCH(0),
260 A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));
261
262 tu_cs_emit_regs(cs, A6XX_RB_STENCIL_INFO(0));
263
264 return;
265 }
266
267 const struct tu_image_view *iview = fb->attachments[a].attachment;
268 const struct tu_render_pass_attachment *attachment =
269 &cmd->state.pass->attachments[a];
270 enum a6xx_depth_format fmt = tu6_pipe2depth(attachment->format);
271
272 tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
273 tu_cs_emit(cs, A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt).value);
274 tu_cs_image_ref(cs, iview, 0);
275 tu_cs_emit(cs, attachment->gmem_offset);
276
277 tu_cs_emit_regs(cs,
278 A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = fmt));
279
280 tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
281 tu_cs_image_flag_ref(cs, iview, 0);
282
283 tu_cs_emit_regs(cs,
284 A6XX_GRAS_LRZ_BUFFER_BASE(0),
285 A6XX_GRAS_LRZ_BUFFER_PITCH(0),
286 A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));
287
288 if (attachment->format == VK_FORMAT_S8_UINT) {
289 tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCIL_INFO, 6);
290 tu_cs_emit(cs, A6XX_RB_STENCIL_INFO(.separate_stencil = true).value);
291 tu_cs_image_ref(cs, iview, 0);
292 tu_cs_emit(cs, attachment->gmem_offset);
293 } else {
294 tu_cs_emit_regs(cs,
295 A6XX_RB_STENCIL_INFO(0));
296 }
297 }
298
299 static void
300 tu6_emit_mrt(struct tu_cmd_buffer *cmd,
301 const struct tu_subpass *subpass,
302 struct tu_cs *cs)
303 {
304 const struct tu_framebuffer *fb = cmd->state.framebuffer;
305
306 for (uint32_t i = 0; i < subpass->color_count; ++i) {
307 uint32_t a = subpass->color_attachments[i].attachment;
308 if (a == VK_ATTACHMENT_UNUSED)
309 continue;
310
311 const struct tu_image_view *iview = fb->attachments[a].attachment;
312
313 tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_BUF_INFO(i), 6);
314 tu_cs_emit(cs, iview->RB_MRT_BUF_INFO);
315 tu_cs_image_ref(cs, iview, 0);
316 tu_cs_emit(cs, cmd->state.pass->attachments[a].gmem_offset);
317
318 tu_cs_emit_regs(cs,
319 A6XX_SP_FS_MRT_REG(i, .dword = iview->SP_FS_MRT_REG));
320
321 tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_LO(i), 3);
322 tu_cs_image_flag_ref(cs, iview, 0);
323 }
324
325 tu_cs_emit_regs(cs,
326 A6XX_RB_SRGB_CNTL(.dword = subpass->srgb_cntl));
327 tu_cs_emit_regs(cs,
328 A6XX_SP_SRGB_CNTL(.dword = subpass->srgb_cntl));
329
330 tu_cs_emit_regs(cs, A6XX_GRAS_MAX_LAYER_INDEX(fb->layers - 1));
331 }
332
333 void
334 tu6_emit_msaa(struct tu_cs *cs, VkSampleCountFlagBits vk_samples)
335 {
336 const enum a3xx_msaa_samples samples = tu_msaa_samples(vk_samples);
337 bool msaa_disable = samples == MSAA_ONE;
338
339 tu_cs_emit_regs(cs,
340 A6XX_SP_TP_RAS_MSAA_CNTL(samples),
341 A6XX_SP_TP_DEST_MSAA_CNTL(.samples = samples,
342 .msaa_disable = msaa_disable));
343
344 tu_cs_emit_regs(cs,
345 A6XX_GRAS_RAS_MSAA_CNTL(samples),
346 A6XX_GRAS_DEST_MSAA_CNTL(.samples = samples,
347 .msaa_disable = msaa_disable));
348
349 tu_cs_emit_regs(cs,
350 A6XX_RB_RAS_MSAA_CNTL(samples),
351 A6XX_RB_DEST_MSAA_CNTL(.samples = samples,
352 .msaa_disable = msaa_disable));
353
354 tu_cs_emit_regs(cs,
355 A6XX_RB_MSAA_CNTL(samples));
356 }
357
358 static void
359 tu6_emit_bin_size(struct tu_cs *cs,
360 uint32_t bin_w, uint32_t bin_h, uint32_t flags)
361 {
362 tu_cs_emit_regs(cs,
363 A6XX_GRAS_BIN_CONTROL(.binw = bin_w,
364 .binh = bin_h,
365 .dword = flags));
366
367 tu_cs_emit_regs(cs,
368 A6XX_RB_BIN_CONTROL(.binw = bin_w,
369 .binh = bin_h,
370 .dword = flags));
371
372 /* no flag for RB_BIN_CONTROL2... */
373 tu_cs_emit_regs(cs,
374 A6XX_RB_BIN_CONTROL2(.binw = bin_w,
375 .binh = bin_h));
376 }
377
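/* Program RB_RENDER_CNTL through CP_REG_WRITE (with the RENDER_CNTL tracker).
 * For the draw pass this fills in the per-MRT and depth UBWC flag bits; in the
 * !binning case the write is guarded with CP_COND_REG_EXEC so it is skipped at
 * binning time.
 */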
378 static void
379 tu6_emit_render_cntl(struct tu_cmd_buffer *cmd,
380 const struct tu_subpass *subpass,
381 struct tu_cs *cs,
382 bool binning)
383 {
384 const struct tu_framebuffer *fb = cmd->state.framebuffer;
385 uint32_t cntl = 0;
386 cntl |= A6XX_RB_RENDER_CNTL_UNK4;
387 if (binning) {
388 cntl |= A6XX_RB_RENDER_CNTL_BINNING;
389 } else {
390 uint32_t mrts_ubwc_enable = 0;
391 for (uint32_t i = 0; i < subpass->color_count; ++i) {
392 uint32_t a = subpass->color_attachments[i].attachment;
393 if (a == VK_ATTACHMENT_UNUSED)
394 continue;
395
396 const struct tu_image_view *iview = fb->attachments[a].attachment;
397 if (iview->ubwc_enabled)
398 mrts_ubwc_enable |= 1 << i;
399 }
400
401 cntl |= A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable);
402
403 const uint32_t a = subpass->depth_stencil_attachment.attachment;
404 if (a != VK_ATTACHMENT_UNUSED) {
405 const struct tu_image_view *iview = fb->attachments[a].attachment;
406 if (iview->ubwc_enabled)
407 cntl |= A6XX_RB_RENDER_CNTL_FLAG_DEPTH;
408 }
409
410 /* In the !binning case, we need to set RB_RENDER_CNTL in the draw_cs
411 * in order to set it correctly for the different subpasses. However,
412 * that means the packets we're emitting also happen during binning. So
413 * we need to guard the write on !BINNING at CP execution time.
414 */
415 tu_cs_reserve(cs, 3 + 4);
416 tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
417 tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
418 CP_COND_REG_EXEC_0_GMEM | CP_COND_REG_EXEC_0_SYSMEM);
419 tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(4));
420 }
421
422 tu_cs_emit_pkt7(cs, CP_REG_WRITE, 3);
423 tu_cs_emit(cs, CP_REG_WRITE_0_TRACKER(TRACK_RENDER_CNTL));
424 tu_cs_emit(cs, REG_A6XX_RB_RENDER_CNTL);
425 tu_cs_emit(cs, cntl);
426 }
427
428 static void
429 tu6_emit_blit_scissor(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool align)
430 {
431 const VkRect2D *render_area = &cmd->state.render_area;
432 uint32_t x1 = render_area->offset.x;
433 uint32_t y1 = render_area->offset.y;
434 uint32_t x2 = x1 + render_area->extent.width - 1;
435 uint32_t y2 = y1 + render_area->extent.height - 1;
436
437 if (align) {
438 x1 = x1 & ~(GMEM_ALIGN_W - 1);
439 y1 = y1 & ~(GMEM_ALIGN_H - 1);
440 x2 = ALIGN_POT(x2 + 1, GMEM_ALIGN_W) - 1;
441 y2 = ALIGN_POT(y2 + 1, GMEM_ALIGN_H) - 1;
442 }
443
444 tu_cs_emit_regs(cs,
445 A6XX_RB_BLIT_SCISSOR_TL(.x = x1, .y = y1),
446 A6XX_RB_BLIT_SCISSOR_BR(.x = x2, .y = y2));
447 }
448
449 void
450 tu6_emit_window_scissor(struct tu_cs *cs,
451 uint32_t x1,
452 uint32_t y1,
453 uint32_t x2,
454 uint32_t y2)
455 {
456 tu_cs_emit_regs(cs,
457 A6XX_GRAS_SC_WINDOW_SCISSOR_TL(.x = x1, .y = y1),
458 A6XX_GRAS_SC_WINDOW_SCISSOR_BR(.x = x2, .y = y2));
459
460 tu_cs_emit_regs(cs,
461 A6XX_GRAS_RESOLVE_CNTL_1(.x = x1, .y = y1),
462 A6XX_GRAS_RESOLVE_CNTL_2(.x = x2, .y = y2));
463 }
464
465 void
466 tu6_emit_window_offset(struct tu_cs *cs, uint32_t x1, uint32_t y1)
467 {
468 tu_cs_emit_regs(cs,
469 A6XX_RB_WINDOW_OFFSET(.x = x1, .y = y1));
470
471 tu_cs_emit_regs(cs,
472 A6XX_RB_WINDOW_OFFSET2(.x = x1, .y = y1));
473
474 tu_cs_emit_regs(cs,
475 A6XX_SP_WINDOW_OFFSET(.x = x1, .y = y1));
476
477 tu_cs_emit_regs(cs,
478 A6XX_SP_TP_WINDOW_OFFSET(.x = x1, .y = y1));
479 }
480
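/* Emit a single CP_SET_DRAW_STATE group (header dword plus 64-bit address) for
 * the given group id, picking the GMEM/SYSMEM/BINNING enable bits per group.
 * The caller must have already emitted a CP_SET_DRAW_STATE packet header sized
 * for all of the groups it is going to emit.
 */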
481 static void
482 tu_cs_emit_draw_state(struct tu_cs *cs, uint32_t id, struct tu_draw_state state)
483 {
484 uint32_t enable_mask;
485 switch (id) {
486 case TU_DRAW_STATE_PROGRAM:
487 case TU_DRAW_STATE_VI:
488 case TU_DRAW_STATE_FS_CONST:
489 /* The blob seems to not enable this (DESC_SETS_LOAD) for binning, even
490 * when resources would actually be used in the binning shader.
491 * Presumably the overhead of prefetching the resources isn't
492 * worth it.
493 */
494 case TU_DRAW_STATE_DESC_SETS_LOAD:
495 enable_mask = CP_SET_DRAW_STATE__0_GMEM |
496 CP_SET_DRAW_STATE__0_SYSMEM;
497 break;
498 case TU_DRAW_STATE_PROGRAM_BINNING:
499 case TU_DRAW_STATE_VI_BINNING:
500 enable_mask = CP_SET_DRAW_STATE__0_BINNING;
501 break;
502 case TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM:
503 enable_mask = CP_SET_DRAW_STATE__0_GMEM;
504 break;
505 case TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM:
506 enable_mask = CP_SET_DRAW_STATE__0_SYSMEM;
507 break;
508 default:
509 enable_mask = CP_SET_DRAW_STATE__0_GMEM |
510 CP_SET_DRAW_STATE__0_SYSMEM |
511 CP_SET_DRAW_STATE__0_BINNING;
512 break;
513 }
514
515 /* We need to reload the descriptors every time the descriptor sets
516 * change. However, the commands we send only depend on the pipeline
517 * because the whole point is to cache descriptors which are used by the
518 * pipeline. There's a problem here, in that the firmware has an
519 * "optimization" which skips executing groups that are set to the same
520 * value as the last draw. This means that if the descriptor sets change
521 * but not the pipeline, we'd try to re-execute the same buffer, which
522 * the firmware would ignore, and we wouldn't pre-load the new
523 * descriptors. Set the DIRTY bit to avoid this optimization.
524 */
525 if (id == TU_DRAW_STATE_DESC_SETS_LOAD)
526 enable_mask |= CP_SET_DRAW_STATE__0_DIRTY;
527
528 tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(state.size) |
529 enable_mask |
530 CP_SET_DRAW_STATE__0_GROUP_ID(id) |
531 COND(!state.size, CP_SET_DRAW_STATE__0_DISABLE));
532 tu_cs_emit_qw(cs, state.iova);
533 }
534
535 /* note: get rid of this eventually */
536 static void
537 tu_cs_emit_sds_ib(struct tu_cs *cs, uint32_t id, struct tu_cs_entry entry)
538 {
539 tu_cs_emit_draw_state(cs, id, (struct tu_draw_state) {
540 .iova = entry.size ? entry.bo->iova + entry.offset : 0,
541 .size = entry.size / 4,
542 });
543 }
544
545 static bool
546 use_hw_binning(struct tu_cmd_buffer *cmd)
547 {
548 const struct tu_framebuffer *fb = cmd->state.framebuffer;
549
550 /* XFB commands are emitted for BINNING || SYSMEM, which makes them incompatible
551 * with non-hw binning GMEM rendering. This is required because some of the
552 * XFB commands need to only be executed once.
553 */
554 if (cmd->state.xfb_used)
555 return true;
556
557 if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_NOBIN))
558 return false;
559
560 if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN))
561 return true;
562
563 return (fb->tile_count.width * fb->tile_count.height) > 2;
564 }
565
566 static bool
567 use_sysmem_rendering(struct tu_cmd_buffer *cmd)
568 {
569 if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_SYSMEM))
570 return true;
571
572 /* can't fit attachments into gmem */
573 if (!cmd->state.pass->gmem_pixels)
574 return true;
575
576 if (cmd->state.framebuffer->layers > 1)
577 return true;
578
579 if (cmd->has_tess)
580 return true;
581
582 return false;
583 }
584
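/* Select one tile for rendering: program the window scissor/offset for the tile
 * and, when HW binning is used, point the CP at this tile's slot in the
 * visibility stream (CP_SET_BIN_DATA5_OFFSET) so that draws not visible in the
 * tile can be skipped.
 */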
585 static void
586 tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
587 struct tu_cs *cs,
588 uint32_t tx, uint32_t ty, uint32_t pipe, uint32_t slot)
589 {
590 const struct tu_framebuffer *fb = cmd->state.framebuffer;
591
592 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
593 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_YIELD));
594
595 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
596 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));
597
598 const uint32_t x1 = fb->tile0.width * tx;
599 const uint32_t y1 = fb->tile0.height * ty;
600 const uint32_t x2 = x1 + fb->tile0.width - 1;
601 const uint32_t y2 = y1 + fb->tile0.height - 1;
602 tu6_emit_window_scissor(cs, x1, y1, x2, y2);
603 tu6_emit_window_offset(cs, x1, y1);
604
605 tu_cs_emit_regs(cs,
606 A6XX_VPC_SO_OVERRIDE(.so_disable = false));
607
608 if (use_hw_binning(cmd)) {
609 tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
610
611 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
612 tu_cs_emit(cs, 0x0);
613
614 tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5_OFFSET, 4);
615 tu_cs_emit(cs, fb->pipe_sizes[pipe] |
616 CP_SET_BIN_DATA5_0_VSC_N(slot));
617 tu_cs_emit(cs, pipe * cmd->vsc_draw_strm_pitch);
618 tu_cs_emit(cs, pipe * 4);
619 tu_cs_emit(cs, pipe * cmd->vsc_prim_strm_pitch);
620
621 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
622 tu_cs_emit(cs, 0x0);
623
624 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
625 tu_cs_emit(cs, 0x0);
626 } else {
627 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
628 tu_cs_emit(cs, 0x1);
629
630 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
631 tu_cs_emit(cs, 0x0);
632 }
633 }
634
635 static void
636 tu6_emit_sysmem_resolve(struct tu_cmd_buffer *cmd,
637 struct tu_cs *cs,
638 uint32_t a,
639 uint32_t gmem_a)
640 {
641 const struct tu_framebuffer *fb = cmd->state.framebuffer;
642 struct tu_image_view *dst = fb->attachments[a].attachment;
643 struct tu_image_view *src = fb->attachments[gmem_a].attachment;
644
645 tu_resolve_sysmem(cmd, cs, src, dst, fb->layers, &cmd->state.render_area);
646 }
647
648 static void
649 tu6_emit_sysmem_resolves(struct tu_cmd_buffer *cmd,
650 struct tu_cs *cs,
651 const struct tu_subpass *subpass)
652 {
653 if (subpass->resolve_attachments) {
654 /* From the documentation for vkCmdNextSubpass, section 7.4 "Render Pass
655 * Commands":
656 *
657 * End-of-subpass multisample resolves are treated as color
658 * attachment writes for the purposes of synchronization. That is,
659 * they are considered to execute in the
660 * VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT pipeline stage and
661 * their writes are synchronized with
662 * VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT. Synchronization between
663 * rendering within a subpass and any resolve operations at the end
664 * of the subpass occurs automatically, without need for explicit
665 * dependencies or pipeline barriers. However, if the resolve
666 * attachment is also used in a different subpass, an explicit
667 * dependency is needed.
668 *
669 * We use the CP_BLIT path for sysmem resolves, which is really a
670 * transfer command, so we have to manually flush similar to the gmem
671 * resolve case. However, a flush afterwards isn't needed because of the
672 * last sentence and the fact that we're in sysmem mode.
673 */
674 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
675 tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
676
677 /* Wait for the flushes to land before using the 2D engine */
678 tu_cs_emit_wfi(cs);
679
680 for (unsigned i = 0; i < subpass->color_count; i++) {
681 uint32_t a = subpass->resolve_attachments[i].attachment;
682 if (a == VK_ATTACHMENT_UNUSED)
683 continue;
684
685 tu6_emit_sysmem_resolve(cmd, cs, a,
686 subpass->color_attachments[i].attachment);
687 }
688 }
689 }
690
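/* Recorded once into a sub-stream (see tu_cmd_prepare_tile_store_ib) and
 * replayed after each tile's draws: resolves the GMEM contents of every
 * attachment that has a GMEM allocation back to system memory.
 */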
691 static void
692 tu6_emit_tile_store(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
693 {
694 const struct tu_render_pass *pass = cmd->state.pass;
695 const struct tu_subpass *subpass = &pass->subpasses[pass->subpass_count-1];
696
697 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
698 tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
699 CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
700 CP_SET_DRAW_STATE__0_GROUP_ID(0));
701 tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
702 tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));
703
704 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
705 tu_cs_emit(cs, 0x0);
706
707 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
708 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE));
709
710 tu6_emit_blit_scissor(cmd, cs, true);
711
712 for (uint32_t a = 0; a < pass->attachment_count; ++a) {
713 if (pass->attachments[a].gmem_offset >= 0)
714 tu_store_gmem_attachment(cmd, cs, a, a);
715 }
716
717 if (subpass->resolve_attachments) {
718 for (unsigned i = 0; i < subpass->color_count; i++) {
719 uint32_t a = subpass->resolve_attachments[i].attachment;
720 if (a != VK_ATTACHMENT_UNUSED)
721 tu_store_gmem_attachment(cmd, cs, a,
722 subpass->color_attachments[i].attachment);
723 }
724 }
725 }
726
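/* Static register setup emitted once at the start of each primary command
 * buffer, plus (re)sizing of the per-device VSC streams based on any overflow
 * recorded by previously submitted work.
 */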
727 static void
728 tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
729 {
730 struct tu_device *dev = cmd->device;
731 const struct tu_physical_device *phys_dev = dev->physical_device;
732
733 tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
734
735 tu_cs_emit_regs(cs, A6XX_HLSQ_INVALIDATE_CMD(
736 .vs_state = true,
737 .hs_state = true,
738 .ds_state = true,
739 .gs_state = true,
740 .fs_state = true,
741 .cs_state = true,
742 .gfx_ibo = true,
743 .cs_ibo = true,
744 .gfx_shared_const = true,
745 .cs_shared_const = true,
746 .gfx_bindless = 0x1f,
747 .cs_bindless = 0x1f));
748
749 tu_cs_emit_regs(cs,
750 A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_bypass));
751 cmd->state.ccu_state = TU_CMD_CCU_SYSMEM;
752 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
753 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
754 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
755 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE0F, 0x3f);
756 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B605, 0x44);
757 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B600, 0x100000);
758 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE00, 0x80);
759 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE01, 0);
760
761 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9600, 0);
762 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8600, 0x880);
763 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE04, 0);
764 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE03, 0x00000410);
765 tu_cs_emit_write_reg(cs, REG_A6XX_SP_IBO_COUNT, 0);
766 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B182, 0);
767 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_SHARED_CONSTS, 0);
768 tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
769 tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
770 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
771 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A982, 0);
772 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A9A8, 0);
773 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AB00, 0x5);
774
775 /* TODO: set A6XX_VFD_ADD_OFFSET_INSTANCE and fix ir3 to avoid adding base instance */
776 tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
777 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
778 tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x1f);
779
780 tu_cs_emit_write_reg(cs, REG_A6XX_RB_SRGB_CNTL, 0);
781
782 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8110, 0);
783
784 tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL0, 0x401);
785 tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL1, 0);
786 tu_cs_emit_write_reg(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 0);
787 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8818, 0);
788 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8819, 0);
789 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881A, 0);
790 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881B, 0);
791 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881C, 0);
792 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881D, 0);
793 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881E, 0);
794 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_88F0, 0);
795
796 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9107, 0);
797
798 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9236,
799 A6XX_VPC_UNKNOWN_9236_POINT_COORD_INVERT(0));
800 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);
801
802 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_SO_OVERRIDE,
803 A6XX_VPC_SO_OVERRIDE_SO_DISABLE);
804
805 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9801, 0);
806 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9980, 0);
807 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9990, 0);
808
809 tu_cs_emit_write_reg(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 0);
810 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9B07, 0);
811
812 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A81B, 0);
813
814 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B183, 0);
815
816 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8099, 0);
817 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A0, 2);
818 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80AF, 0);
819 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
820 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9211, 0);
821 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9602, 0);
822 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9E72, 0);
823 tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B309, 0x000000a2);
824 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);
825
826 tu_cs_emit_write_reg(cs, REG_A6XX_VFD_MODE_CNTL, 0x00000000);
827
828 tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A008, 0);
829
830 tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x0000001f);
831
832 /* we don't use this yet.. probably best to disable.. */
833 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
834 tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
835 CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
836 CP_SET_DRAW_STATE__0_GROUP_ID(0));
837 tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
838 tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));
839
840 tu_cs_emit_regs(cs,
841 A6XX_SP_HS_CTRL_REG0(0));
842
843 tu_cs_emit_regs(cs,
844 A6XX_SP_GS_CTRL_REG0(0));
845
846 tu_cs_emit_regs(cs,
847 A6XX_GRAS_LRZ_CNTL(0));
848
849 tu_cs_emit_regs(cs,
850 A6XX_RB_LRZ_CNTL(0));
851
852 tu_cs_emit_regs(cs,
853 A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(.bo = &dev->global_bo,
854 .bo_offset = gb_offset(border_color)));
855 tu_cs_emit_regs(cs,
856 A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(.bo = &dev->global_bo,
857 .bo_offset = gb_offset(border_color)));
858
859 /* VSC buffers:
860 * Use the largest VSC pitches seen so far with this device. If there
861 * hasn't been an overflow, a scratch BO of the right size will already
862 * have been allocated for them.
863 *
864 * If overflow is detected, the stream size is increased by 2x.
865 */
866 mtx_lock(&dev->vsc_pitch_mtx);
867
868 struct tu6_global *global = dev->global_bo.map;
869
870 uint32_t vsc_draw_overflow = global->vsc_draw_overflow;
871 uint32_t vsc_prim_overflow = global->vsc_prim_overflow;
872
873 if (vsc_draw_overflow >= dev->vsc_draw_strm_pitch)
874 dev->vsc_draw_strm_pitch = (dev->vsc_draw_strm_pitch - VSC_PAD) * 2 + VSC_PAD;
875
876 if (vsc_prim_overflow >= dev->vsc_prim_strm_pitch)
877 dev->vsc_prim_strm_pitch = (dev->vsc_prim_strm_pitch - VSC_PAD) * 2 + VSC_PAD;
878
879 cmd->vsc_prim_strm_pitch = dev->vsc_prim_strm_pitch;
880 cmd->vsc_draw_strm_pitch = dev->vsc_draw_strm_pitch;
881
882 mtx_unlock(&dev->vsc_pitch_mtx);
883
884 struct tu_bo *vsc_bo;
885 uint32_t size0 = cmd->vsc_prim_strm_pitch * MAX_VSC_PIPES +
886 cmd->vsc_draw_strm_pitch * MAX_VSC_PIPES;
887
888 tu_get_scratch_bo(dev, size0 + MAX_VSC_PIPES * 4, &vsc_bo);
889
890 tu_cs_emit_regs(cs,
891 A6XX_VSC_DRAW_STRM_SIZE_ADDRESS(.bo = vsc_bo, .bo_offset = size0));
892 tu_cs_emit_regs(cs,
893 A6XX_VSC_PRIM_STRM_ADDRESS(.bo = vsc_bo));
894 tu_cs_emit_regs(cs,
895 A6XX_VSC_DRAW_STRM_ADDRESS(.bo = vsc_bo,
896 .bo_offset = cmd->vsc_prim_strm_pitch * MAX_VSC_PIPES));
897
898 tu_bo_list_add(&cmd->bo_list, vsc_bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
899
900 tu_cs_sanity_check(cs);
901 }
902
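/* Program the VSC bin size/count, visibility pipe layout and stream pitches
 * used by the binning pass.
 */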
903 static void
904 update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
905 {
906 const struct tu_framebuffer *fb = cmd->state.framebuffer;
907
908 tu_cs_emit_regs(cs,
909 A6XX_VSC_BIN_SIZE(.width = fb->tile0.width,
910 .height = fb->tile0.height));
911
912 tu_cs_emit_regs(cs,
913 A6XX_VSC_BIN_COUNT(.nx = fb->tile_count.width,
914 .ny = fb->tile_count.height));
915
916 tu_cs_emit_pkt4(cs, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
917 tu_cs_emit_array(cs, fb->pipe_config, 32);
918
919 tu_cs_emit_regs(cs,
920 A6XX_VSC_PRIM_STRM_PITCH(cmd->vsc_prim_strm_pitch),
921 A6XX_VSC_PRIM_STRM_LIMIT(cmd->vsc_prim_strm_pitch - VSC_PAD));
922
923 tu_cs_emit_regs(cs,
924 A6XX_VSC_DRAW_STRM_PITCH(cmd->vsc_draw_strm_pitch),
925 A6XX_VSC_DRAW_STRM_LIMIT(cmd->vsc_draw_strm_pitch - VSC_PAD));
926 }
927
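/* For each used visibility pipe, compare the stream size written back by the
 * hardware against the configured pitch and, on overflow, record the pitch in
 * the global buffer so the next tu6_init_hw() can grow the streams.
 */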
928 static void
929 emit_vsc_overflow_test(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
930 {
931 const struct tu_framebuffer *fb = cmd->state.framebuffer;
932 const uint32_t used_pipe_count =
933 fb->pipe_count.width * fb->pipe_count.height;
934
935 for (int i = 0; i < used_pipe_count; i++) {
936 tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
937 tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
938 CP_COND_WRITE5_0_WRITE_MEMORY);
939 tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_DRAW_STRM_SIZE_REG(i)));
940 tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
941 tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_draw_strm_pitch - VSC_PAD));
942 tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
943 tu_cs_emit_qw(cs, global_iova(cmd, vsc_draw_overflow));
944 tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(cmd->vsc_draw_strm_pitch));
945
946 tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
947 tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
948 CP_COND_WRITE5_0_WRITE_MEMORY);
949 tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_PRIM_STRM_SIZE_REG(i)));
950 tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
951 tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_prim_strm_pitch - VSC_PAD));
952 tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
953 tu_cs_emit_qw(cs, global_iova(cmd, vsc_prim_overflow));
954 tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(cmd->vsc_prim_strm_pitch));
955 }
956
957 tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
958 }
959
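/* Generate the visibility streams: replay the draw_cs with
 * VFD_MODE_CNTL.BINNING_PASS set, then flush/wait and check the VSC streams
 * for overflow before the per-tile rendering starts.
 */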
960 static void
961 tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
962 {
963 struct tu_physical_device *phys_dev = cmd->device->physical_device;
964 const struct tu_framebuffer *fb = cmd->state.framebuffer;
965
966 tu6_emit_window_scissor(cs, 0, 0, fb->width - 1, fb->height - 1);
967
968 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
969 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
970
971 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
972 tu_cs_emit(cs, 0x1);
973
974 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
975 tu_cs_emit(cs, 0x1);
976
977 tu_cs_emit_wfi(cs);
978
979 tu_cs_emit_regs(cs,
980 A6XX_VFD_MODE_CNTL(.binning_pass = true));
981
982 update_vsc_pipe(cmd, cs);
983
984 tu_cs_emit_regs(cs,
985 A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));
986
987 tu_cs_emit_regs(cs,
988 A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));
989
990 tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
991 tu_cs_emit(cs, UNK_2C);
992
993 tu_cs_emit_regs(cs,
994 A6XX_RB_WINDOW_OFFSET(.x = 0, .y = 0));
995
996 tu_cs_emit_regs(cs,
997 A6XX_SP_TP_WINDOW_OFFSET(.x = 0, .y = 0));
998
999 /* emit IB to binning drawcmds: */
1000 tu_cs_emit_call(cs, &cmd->draw_cs);
1001
1002 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
1003 tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
1004 CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
1005 CP_SET_DRAW_STATE__0_GROUP_ID(0));
1006 tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
1007 tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));
1008
1009 tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
1010 tu_cs_emit(cs, UNK_2D);
1011
1012 /* This flush is probably required because the VSC, which produces the
1013 * visibility stream, is a client of UCHE, whereas the CP needs to read the
1014 * visibility stream (without caching) to do draw skipping. The
1015 * WFI+WAIT_FOR_ME combination guarantees that the binning commands
1016 * submitted are finished before reading the VSC regs (in
1017 * emit_vsc_overflow_test) or the VSC_DATA buffer directly (implicitly as
1018 * part of draws).
1019 */
1020 tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS);
1021
1022 tu_cs_emit_wfi(cs);
1023
1024 tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
1025
1026 emit_vsc_overflow_test(cmd, cs);
1027
1028 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
1029 tu_cs_emit(cs, 0x0);
1030
1031 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
1032 tu_cs_emit(cs, 0x0);
1033 }
1034
1035 static void
1036 tu_emit_input_attachments(struct tu_cmd_buffer *cmd,
1037 const struct tu_subpass *subpass,
1038 struct tu_cs_entry *ib,
1039 bool gmem)
1040 {
1041 /* note: we can probably emit input attachments just once for the whole
1042 * renderpass, which would avoid emitting both sysmem/gmem versions.
1043 *
1044 * We emit two texture descriptors for each input, as a workaround for
1045 * d24s8, which can be sampled as both float (depth) and integer (stencil);
1046 * tu_shader lowers uint input attachment loads to use the 2nd descriptor
1047 * in the pair.
1048 * TODO: a smarter workaround
1049 */
1050
1051 if (!subpass->input_count)
1052 return;
1053
1054 struct tu_cs_memory texture;
1055 VkResult result = tu_cs_alloc(&cmd->sub_cs, subpass->input_count * 2,
1056 A6XX_TEX_CONST_DWORDS, &texture);
1057 assert(result == VK_SUCCESS);
1058
1059 for (unsigned i = 0; i < subpass->input_count * 2; i++) {
1060 uint32_t a = subpass->input_attachments[i / 2].attachment;
1061 if (a == VK_ATTACHMENT_UNUSED)
1062 continue;
1063
1064 struct tu_image_view *iview =
1065 cmd->state.framebuffer->attachments[a].attachment;
1066 const struct tu_render_pass_attachment *att =
1067 &cmd->state.pass->attachments[a];
1068 uint32_t *dst = &texture.map[A6XX_TEX_CONST_DWORDS * i];
1069
1070 memcpy(dst, iview->descriptor, A6XX_TEX_CONST_DWORDS * 4);
1071
1072 if (i % 2 == 1 && att->format == VK_FORMAT_D24_UNORM_S8_UINT) {
1073 /* note this works because spec says fb and input attachments
1074 * must use identity swizzle
1075 */
1076 dst[0] &= ~(A6XX_TEX_CONST_0_FMT__MASK |
1077 A6XX_TEX_CONST_0_SWIZ_X__MASK | A6XX_TEX_CONST_0_SWIZ_Y__MASK |
1078 A6XX_TEX_CONST_0_SWIZ_Z__MASK | A6XX_TEX_CONST_0_SWIZ_W__MASK);
1079 dst[0] |= A6XX_TEX_CONST_0_FMT(FMT6_S8Z24_UINT) |
1080 A6XX_TEX_CONST_0_SWIZ_X(A6XX_TEX_Y) |
1081 A6XX_TEX_CONST_0_SWIZ_Y(A6XX_TEX_ZERO) |
1082 A6XX_TEX_CONST_0_SWIZ_Z(A6XX_TEX_ZERO) |
1083 A6XX_TEX_CONST_0_SWIZ_W(A6XX_TEX_ONE);
1084 }
1085
1086 if (!gmem)
1087 continue;
1088
1089 /* patched for gmem */
1090 dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
1091 dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
1092 dst[2] =
1093 A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
1094 A6XX_TEX_CONST_2_PITCH(cmd->state.framebuffer->tile0.width * att->cpp);
1095 dst[3] = 0;
1096 dst[4] = cmd->device->physical_device->gmem_base + att->gmem_offset;
1097 dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
1098 for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
1099 dst[i] = 0;
1100 }
1101
1102 struct tu_cs cs;
1103 tu_cs_begin_sub_stream(&cmd->sub_cs, 9, &cs);
1104
1105 tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_FRAG, 3);
1106 tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
1107 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
1108 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
1109 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_TEX) |
1110 CP_LOAD_STATE6_0_NUM_UNIT(subpass->input_count * 2));
1111 tu_cs_emit_qw(&cs, texture.iova);
1112
1113 tu_cs_emit_pkt4(&cs, REG_A6XX_SP_FS_TEX_CONST_LO, 2);
1114 tu_cs_emit_qw(&cs, texture.iova);
1115
1116 tu_cs_emit_regs(&cs, A6XX_SP_FS_TEX_COUNT(subpass->input_count * 2));
1117
1118 *ib = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
1119 }
1120
1121 static void
1122 tu_set_input_attachments(struct tu_cmd_buffer *cmd, const struct tu_subpass *subpass)
1123 {
1124 struct tu_cs *cs = &cmd->draw_cs;
1125
1126 tu_emit_input_attachments(cmd, subpass, &cmd->state.ia_gmem_ib, true);
1127 tu_emit_input_attachments(cmd, subpass, &cmd->state.ia_sysmem_ib, false);
1128
1129 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 6);
1130 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM, cmd->state.ia_gmem_ib);
1131 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM, cmd->state.ia_sysmem_ib);
1132 }
1133
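/* Emit the attachment loads and clears into the draw_cs, guarded by cond-exec
 * so that the GMEM load/clear path and the sysmem clear path only run for the
 * rendering mode that ends up being used.
 */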
1134 static void
1135 tu_emit_renderpass_begin(struct tu_cmd_buffer *cmd,
1136 const VkRenderPassBeginInfo *info)
1137 {
1138 struct tu_cs *cs = &cmd->draw_cs;
1139
1140 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);
1141
1142 tu6_emit_blit_scissor(cmd, cs, true);
1143
1144 for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
1145 tu_load_gmem_attachment(cmd, cs, i, false);
1146
1147 tu6_emit_blit_scissor(cmd, cs, false);
1148
1149 for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
1150 tu_clear_gmem_attachment(cmd, cs, i, info);
1151
1152 tu_cond_exec_end(cs);
1153
1154 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
1155
1156 for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
1157 tu_clear_sysmem_attachment(cmd, cs, i, info);
1158
1159 tu_cond_exec_end(cs);
1160 }
1161
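/* Set up direct (sysmem/bypass) rendering: whole-framebuffer window scissor,
 * bypass bin size, and the CCU switched to its sysmem layout.
 */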
1162 static void
1163 tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1164 {
1165 const struct tu_framebuffer *fb = cmd->state.framebuffer;
1166
1167 assert(fb->width > 0 && fb->height > 0);
1168 tu6_emit_window_scissor(cs, 0, 0, fb->width - 1, fb->height - 1);
1169 tu6_emit_window_offset(cs, 0, 0);
1170
1171 tu6_emit_bin_size(cs, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */
1172
1173 tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
1174
1175 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
1176 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
1177
1178 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1179 tu_cs_emit(cs, 0x0);
1180
1181 tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_SYSMEM);
1182
1183 /* enable stream-out; with sysmem there is only one pass: */
1184 tu_cs_emit_regs(cs,
1185 A6XX_VPC_SO_OVERRIDE(.so_disable = false));
1186
1187 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
1188 tu_cs_emit(cs, 0x1);
1189
1190 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
1191 tu_cs_emit(cs, 0x0);
1192
1193 tu_cs_sanity_check(cs);
1194 }
1195
1196 static void
1197 tu6_sysmem_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1198 {
1199 /* Do any resolves of the last subpass. These are handled in the
1200 * tile_store_ib in the gmem path.
1201 */
1202 tu6_emit_sysmem_resolves(cmd, cs, cmd->state.subpass);
1203
1204 tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);
1205
1206 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1207 tu_cs_emit(cs, 0x0);
1208
1209 tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
1210
1211 tu_cs_sanity_check(cs);
1212 }
1213
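/* Set up GMEM (tiled) rendering: switch the CCU to its GMEM layout and, when
 * HW binning is used, run the binning pass and enable use of the visibility
 * stream for the draw pass.
 */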
1214 static void
1215 tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1216 {
1217 struct tu_physical_device *phys_dev = cmd->device->physical_device;
1218
1219 tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
1220
1221 /* lrz clear? */
1222
1223 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1224 tu_cs_emit(cs, 0x0);
1225
1226 tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_GMEM);
1227
1228 const struct tu_framebuffer *fb = cmd->state.framebuffer;
1229 if (use_hw_binning(cmd)) {
1230 /* enable stream-out during binning pass: */
1231 tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=false));
1232
1233 tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height,
1234 A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
1235
1236 tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, true);
1237
1238 tu6_emit_binning_pass(cmd, cs);
1239
1240 /* and disable stream-out for draw pass: */
1241 tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=true));
1242
1243 tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height,
1244 A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);
1245
1246 tu_cs_emit_regs(cs,
1247 A6XX_VFD_MODE_CNTL(0));
1248
1249 tu_cs_emit_regs(cs, A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));
1250
1251 tu_cs_emit_regs(cs, A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));
1252
1253 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1254 tu_cs_emit(cs, 0x1);
1255 } else {
1256 /* no binning pass, so enable stream-out for draw pass: */
1257 tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=false));
1258
1259 tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height, 0x6000000);
1260 }
1261
1262 tu_cs_sanity_check(cs);
1263 }
1264
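/* Replay the renderpass draws for the currently selected tile and then resolve
 * its GMEM contents via the tile store IB.
 */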
1265 static void
1266 tu6_render_tile(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1267 {
1268 tu_cs_emit_call(cs, &cmd->draw_cs);
1269
1270 if (use_hw_binning(cmd)) {
1271 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
1272 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
1273 }
1274
1275 tu_cs_emit_ib(cs, &cmd->state.tile_store_ib);
1276
1277 tu_cs_sanity_check(cs);
1278 }
1279
1280 static void
1281 tu6_tile_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1282 {
1283 tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);
1284
1285 tu_cs_emit_regs(cs,
1286 A6XX_GRAS_LRZ_CNTL(0));
1287
1288 tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
1289
1290 tu6_emit_event_write(cmd, cs, PC_CCU_RESOLVE_TS);
1291
1292 tu_cs_sanity_check(cs);
1293 }
1294
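/* GMEM path: iterate over every visibility pipe and every tile slot within it,
 * selecting the tile and replaying the draws once per tile.
 */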
1295 static void
1296 tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
1297 {
1298 const struct tu_framebuffer *fb = cmd->state.framebuffer;
1299
1300 tu6_tile_render_begin(cmd, &cmd->cs);
1301
1302 uint32_t pipe = 0;
1303 for (uint32_t py = 0; py < fb->pipe_count.height; py++) {
1304 for (uint32_t px = 0; px < fb->pipe_count.width; px++, pipe++) {
1305 uint32_t tx1 = px * fb->pipe0.width;
1306 uint32_t ty1 = py * fb->pipe0.height;
1307 uint32_t tx2 = MIN2(tx1 + fb->pipe0.width, fb->tile_count.width);
1308 uint32_t ty2 = MIN2(ty1 + fb->pipe0.height, fb->tile_count.height);
1309 uint32_t slot = 0;
1310 for (uint32_t ty = ty1; ty < ty2; ty++) {
1311 for (uint32_t tx = tx1; tx < tx2; tx++, slot++) {
1312 tu6_emit_tile_select(cmd, &cmd->cs, tx, ty, pipe, slot);
1313 tu6_render_tile(cmd, &cmd->cs);
1314 }
1315 }
1316 }
1317 }
1318
1319 tu6_tile_render_end(cmd, &cmd->cs);
1320 }
1321
1322 static void
1323 tu_cmd_render_sysmem(struct tu_cmd_buffer *cmd)
1324 {
1325 tu6_sysmem_render_begin(cmd, &cmd->cs);
1326
1327 tu_cs_emit_call(&cmd->cs, &cmd->draw_cs);
1328
1329 tu6_sysmem_render_end(cmd, &cmd->cs);
1330 }
1331
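/* Record the tile store (GMEM resolve) commands into a sub-stream so they can
 * be replayed cheaply after every tile; the space reserved is a worst-case
 * estimate in dwords.
 */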
1332 static void
1333 tu_cmd_prepare_tile_store_ib(struct tu_cmd_buffer *cmd)
1334 {
1335 const uint32_t tile_store_space = 11 + (35 * 2) * cmd->state.pass->attachment_count;
1336 struct tu_cs sub_cs;
1337
1338 VkResult result =
1339 tu_cs_begin_sub_stream(&cmd->sub_cs, tile_store_space, &sub_cs);
1340 if (result != VK_SUCCESS) {
1341 cmd->record_result = result;
1342 return;
1343 }
1344
1345 /* emit to tile-store sub_cs */
1346 tu6_emit_tile_store(cmd, &sub_cs);
1347
1348 cmd->state.tile_store_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
1349 }
1350
1351 static VkResult
1352 tu_create_cmd_buffer(struct tu_device *device,
1353 struct tu_cmd_pool *pool,
1354 VkCommandBufferLevel level,
1355 VkCommandBuffer *pCommandBuffer)
1356 {
1357 struct tu_cmd_buffer *cmd_buffer;
1358
1359 cmd_buffer = vk_object_zalloc(&device->vk, NULL, sizeof(*cmd_buffer),
1360 VK_OBJECT_TYPE_COMMAND_BUFFER);
1361 if (cmd_buffer == NULL)
1362 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1363
1364 cmd_buffer->device = device;
1365 cmd_buffer->pool = pool;
1366 cmd_buffer->level = level;
1367
1368 if (pool) {
1369 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
1370 cmd_buffer->queue_family_index = pool->queue_family_index;
1371
1372 } else {
1373 /* Init the pool_link so we can safely call list_del when we destroy
1374 * the command buffer
1375 */
1376 list_inithead(&cmd_buffer->pool_link);
1377 cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
1378 }
1379
1380 tu_bo_list_init(&cmd_buffer->bo_list);
1381 tu_cs_init(&cmd_buffer->cs, device, TU_CS_MODE_GROW, 4096);
1382 tu_cs_init(&cmd_buffer->draw_cs, device, TU_CS_MODE_GROW, 4096);
1383 tu_cs_init(&cmd_buffer->draw_epilogue_cs, device, TU_CS_MODE_GROW, 4096);
1384 tu_cs_init(&cmd_buffer->sub_cs, device, TU_CS_MODE_SUB_STREAM, 2048);
1385
1386 *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);
1387
1388 list_inithead(&cmd_buffer->upload.list);
1389
1390 return VK_SUCCESS;
1391 }
1392
1393 static void
1394 tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
1395 {
1396 list_del(&cmd_buffer->pool_link);
1397
1398 tu_cs_finish(&cmd_buffer->cs);
1399 tu_cs_finish(&cmd_buffer->draw_cs);
1400 tu_cs_finish(&cmd_buffer->draw_epilogue_cs);
1401 tu_cs_finish(&cmd_buffer->sub_cs);
1402
1403 tu_bo_list_destroy(&cmd_buffer->bo_list);
1404 vk_object_free(&cmd_buffer->device->vk, &cmd_buffer->pool->alloc, cmd_buffer);
1405 }
1406
1407 static VkResult
1408 tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
1409 {
1410 cmd_buffer->record_result = VK_SUCCESS;
1411
1412 tu_bo_list_reset(&cmd_buffer->bo_list);
1413 tu_cs_reset(&cmd_buffer->cs);
1414 tu_cs_reset(&cmd_buffer->draw_cs);
1415 tu_cs_reset(&cmd_buffer->draw_epilogue_cs);
1416 tu_cs_reset(&cmd_buffer->sub_cs);
1417
1418 for (unsigned i = 0; i < MAX_BIND_POINTS; i++)
1419 memset(&cmd_buffer->descriptors[i].sets, 0, sizeof(cmd_buffer->descriptors[i].sets));
1420
1421 cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;
1422
1423 return cmd_buffer->record_result;
1424 }
1425
1426 VkResult
1427 tu_AllocateCommandBuffers(VkDevice _device,
1428 const VkCommandBufferAllocateInfo *pAllocateInfo,
1429 VkCommandBuffer *pCommandBuffers)
1430 {
1431 TU_FROM_HANDLE(tu_device, device, _device);
1432 TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);
1433
1434 VkResult result = VK_SUCCESS;
1435 uint32_t i;
1436
1437 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
1438
1439 if (!list_is_empty(&pool->free_cmd_buffers)) {
1440 struct tu_cmd_buffer *cmd_buffer = list_first_entry(
1441 &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);
1442
1443 list_del(&cmd_buffer->pool_link);
1444 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
1445
1446 result = tu_reset_cmd_buffer(cmd_buffer);
1447 cmd_buffer->level = pAllocateInfo->level;
1448
1449 pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
1450 } else {
1451 result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
1452 &pCommandBuffers[i]);
1453 }
1454 if (result != VK_SUCCESS)
1455 break;
1456 }
1457
1458 if (result != VK_SUCCESS) {
1459 tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
1460 pCommandBuffers);
1461
1462 /* From the Vulkan 1.0.66 spec:
1463 *
1464 * "vkAllocateCommandBuffers can be used to create multiple
1465 * command buffers. If the creation of any of those command
1466 * buffers fails, the implementation must destroy all
1467 * successfully created command buffer objects from this
1468 * command, set all entries of the pCommandBuffers array to
1469 * NULL and return the error."
1470 */
1471 memset(pCommandBuffers, 0,
1472 sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
1473 }
1474
1475 return result;
1476 }
1477
1478 void
1479 tu_FreeCommandBuffers(VkDevice device,
1480 VkCommandPool commandPool,
1481 uint32_t commandBufferCount,
1482 const VkCommandBuffer *pCommandBuffers)
1483 {
1484 for (uint32_t i = 0; i < commandBufferCount; i++) {
1485 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
1486
1487 if (cmd_buffer) {
1488 if (cmd_buffer->pool) {
1489 list_del(&cmd_buffer->pool_link);
1490 list_addtail(&cmd_buffer->pool_link,
1491 &cmd_buffer->pool->free_cmd_buffers);
1492 } else
1493 tu_cmd_buffer_destroy(cmd_buffer);
1494 }
1495 }
1496 }
1497
1498 VkResult
1499 tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
1500 VkCommandBufferResetFlags flags)
1501 {
1502 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
1503 return tu_reset_cmd_buffer(cmd_buffer);
1504 }
1505
1506 /* Initialize the cache, assuming all necessary flushes have happened but *not*
1507 * invalidations.
1508 */
1509 static void
1510 tu_cache_init(struct tu_cache_state *cache)
1511 {
1512 cache->flush_bits = 0;
1513 cache->pending_flush_bits = TU_CMD_FLAG_ALL_INVALIDATE;
1514 }
1515
1516 VkResult
1517 tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
1518 const VkCommandBufferBeginInfo *pBeginInfo)
1519 {
1520 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
1521 VkResult result = VK_SUCCESS;
1522
1523 if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
1524 /* If the command buffer has already been reset with
1525 * vkResetCommandBuffer, no need to do it again.
1526 */
1527 result = tu_reset_cmd_buffer(cmd_buffer);
1528 if (result != VK_SUCCESS)
1529 return result;
1530 }
1531
1532 memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
1533 cmd_buffer->state.index_size = 0xff; /* dirty restart index */
1534
1535 tu_cache_init(&cmd_buffer->state.cache);
1536 tu_cache_init(&cmd_buffer->state.renderpass_cache);
1537 cmd_buffer->usage_flags = pBeginInfo->flags;
1538
1539 tu_cs_begin(&cmd_buffer->cs);
1540 tu_cs_begin(&cmd_buffer->draw_cs);
1541 tu_cs_begin(&cmd_buffer->draw_epilogue_cs);
1542
1543 /* setup initial configuration into command buffer */
1544 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
1545 switch (cmd_buffer->queue_family_index) {
1546 case TU_QUEUE_GENERAL:
1547 tu6_init_hw(cmd_buffer, &cmd_buffer->cs);
1548 break;
1549 default:
1550 break;
1551 }
1552 } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
1553 if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
1554 assert(pBeginInfo->pInheritanceInfo);
1555 cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
1556 cmd_buffer->state.subpass =
1557 &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
1558 } else {
1559 /* When executing in the middle of another command buffer, the CCU
1560 * state is unknown.
1561 */
1562 cmd_buffer->state.ccu_state = TU_CMD_CCU_UNKNOWN;
1563 }
1564 }
1565
1566 cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;
1567
1568 return VK_SUCCESS;
1569 }
1570
1571 /* Sets vertex buffers to HW binding points. We emit VBs in SDS (so that bin
1572 * rendering can skip over unused state), so we need to collect all the
1573 * bindings together into a single state emit at draw time.
1574 */
1575 void
1576 tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
1577 uint32_t firstBinding,
1578 uint32_t bindingCount,
1579 const VkBuffer *pBuffers,
1580 const VkDeviceSize *pOffsets)
1581 {
1582 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1583
1584 assert(firstBinding + bindingCount <= MAX_VBS);
1585
1586 for (uint32_t i = 0; i < bindingCount; i++) {
1587 struct tu_buffer *buf = tu_buffer_from_handle(pBuffers[i]);
1588
1589 cmd->state.vb.buffers[firstBinding + i] = buf;
1590 cmd->state.vb.offsets[firstBinding + i] = pOffsets[i];
1591
1592 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
1593 }
1594
1595 cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
1596 }
1597
1598 void
1599 tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
1600 VkBuffer buffer,
1601 VkDeviceSize offset,
1602 VkIndexType indexType)
1603 {
1604 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1605 TU_FROM_HANDLE(tu_buffer, buf, buffer);
1606
1609 uint32_t index_size, index_shift, restart_index;
1610
1611 switch (indexType) {
1612 case VK_INDEX_TYPE_UINT16:
1613 index_size = INDEX4_SIZE_16_BIT;
1614 index_shift = 1;
1615 restart_index = 0xffff;
1616 break;
1617 case VK_INDEX_TYPE_UINT32:
1618 index_size = INDEX4_SIZE_32_BIT;
1619 index_shift = 2;
1620 restart_index = 0xffffffff;
1621 break;
1622 case VK_INDEX_TYPE_UINT8_EXT:
1623 index_size = INDEX4_SIZE_8_BIT;
1624 index_shift = 0;
1625 restart_index = 0xff;
1626 break;
1627 default:
1628 unreachable("invalid VkIndexType");
1629 }
1630
1631 /* initialize/update the restart index */
1632 if (cmd->state.index_size != index_size)
1633 tu_cs_emit_regs(&cmd->draw_cs, A6XX_PC_RESTART_INDEX(restart_index));
1634
1635 assert(buf->size >= offset);
1636
1637 cmd->state.index_va = buf->bo->iova + buf->bo_offset + offset;
1638 cmd->state.max_index_count = (buf->size - offset) >> index_shift;
1639 cmd->state.index_size = index_size;
1640
1641 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
1642 }
1643
1644 void
1645 tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
1646 VkPipelineBindPoint pipelineBindPoint,
1647 VkPipelineLayout _layout,
1648 uint32_t firstSet,
1649 uint32_t descriptorSetCount,
1650 const VkDescriptorSet *pDescriptorSets,
1651 uint32_t dynamicOffsetCount,
1652 const uint32_t *pDynamicOffsets)
1653 {
1654 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1655 TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
1656 unsigned dyn_idx = 0;
1657
1658 struct tu_descriptor_state *descriptors_state =
1659 tu_get_descriptors_state(cmd, pipelineBindPoint);
1660
1661 for (unsigned i = 0; i < descriptorSetCount; ++i) {
1662 unsigned idx = i + firstSet;
1663 TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);
1664
1665 descriptors_state->sets[idx] = set;
1666
1667 for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
1668 /* update the contents of the dynamic descriptor set */
1669 unsigned src_idx = j;
1670 unsigned dst_idx = j + layout->set[idx].dynamic_offset_start;
1671 assert(dyn_idx < dynamicOffsetCount);
1672
1673 uint32_t *dst =
1674 &descriptors_state->dynamic_descriptors[dst_idx * A6XX_TEX_CONST_DWORDS];
1675 uint32_t *src =
1676 &set->dynamic_descriptors[src_idx * A6XX_TEX_CONST_DWORDS];
1677 uint32_t offset = pDynamicOffsets[dyn_idx];
1678
1679 /* Patch the storage/uniform descriptors right away. */
1680 if (layout->set[idx].layout->dynamic_ubo & (1 << j)) {
1681 /* Note: we can assume here that the addition won't roll over and
1682 * change the SIZE field.
1683 */
1684 uint64_t va = src[0] | ((uint64_t)src[1] << 32);
1685 va += offset;
1686 dst[0] = va;
1687 dst[1] = va >> 32;
1688 } else {
1689 memcpy(dst, src, A6XX_TEX_CONST_DWORDS * 4);
1690 /* Note: A6XX_IBO_5_DEPTH is always 0 */
1691 uint64_t va = dst[4] | ((uint64_t)dst[5] << 32);
1692 va += offset;
1693 dst[4] = va;
1694 dst[5] = va >> 32;
1695 }
1696 }
1697
1698 for (unsigned j = 0; j < set->layout->buffer_count; ++j) {
1699 if (set->buffers[j]) {
1700 tu_bo_list_add(&cmd->bo_list, set->buffers[j],
1701 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
1702 }
1703 }
1704
1705 if (set->size > 0) {
1706 tu_bo_list_add(&cmd->bo_list, &set->pool->bo,
1707 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
1708 }
1709 }
1710 assert(dyn_idx == dynamicOffsetCount);
1711
1712 uint32_t sp_bindless_base_reg, hlsq_bindless_base_reg, hlsq_invalidate_value;
1713 uint64_t addr[MAX_SETS + 1] = {};
1714 struct tu_cs cs;
1715
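/* Collect the GPU address of every bound set; the dynamic descriptors
 * copied above live in a separate sub_cs allocation and are exposed to
 * the shaders as an extra set at index MAX_SETS. The low bits OR'd into
 * each address presumably encode the bindless descriptor size expected
 * by the HW.
 */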
1716 for (uint32_t i = 0; i < MAX_SETS; i++) {
1717 struct tu_descriptor_set *set = descriptors_state->sets[i];
1718 if (set)
1719 addr[i] = set->va | 3;
1720 }
1721
1722 if (layout->dynamic_offset_count) {
1723 /* allocate and fill out dynamic descriptor set */
1724 struct tu_cs_memory dynamic_desc_set;
1725 VkResult result = tu_cs_alloc(&cmd->sub_cs, layout->dynamic_offset_count,
1726 A6XX_TEX_CONST_DWORDS, &dynamic_desc_set);
1727 assert(result == VK_SUCCESS);
1728
1729 memcpy(dynamic_desc_set.map, descriptors_state->dynamic_descriptors,
1730 layout->dynamic_offset_count * A6XX_TEX_CONST_DWORDS * 4);
1731 addr[MAX_SETS] = dynamic_desc_set.iova | 3;
1732 }
1733
1734 if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
1735 sp_bindless_base_reg = REG_A6XX_SP_BINDLESS_BASE(0);
1736 hlsq_bindless_base_reg = REG_A6XX_HLSQ_BINDLESS_BASE(0);
1737 hlsq_invalidate_value = A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS(0x1f);
1738
1739 cmd->state.dirty |= TU_CMD_DIRTY_DESC_SETS_LOAD | TU_CMD_DIRTY_SHADER_CONSTS;
1740 } else {
1741 assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE);
1742
1743 sp_bindless_base_reg = REG_A6XX_SP_CS_BINDLESS_BASE(0);
1744 hlsq_bindless_base_reg = REG_A6XX_HLSQ_CS_BINDLESS_BASE(0);
1745 hlsq_invalidate_value = A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS(0x1f);
1746
1747 cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS;
1748 }
1749
1750 tu_cs_begin_sub_stream(&cmd->sub_cs, 24, &cs);
1751
1752 tu_cs_emit_pkt4(&cs, sp_bindless_base_reg, 10);
1753 tu_cs_emit_array(&cs, (const uint32_t*) addr, 10);
1754 tu_cs_emit_pkt4(&cs, hlsq_bindless_base_reg, 10);
1755 tu_cs_emit_array(&cs, (const uint32_t*) addr, 10);
1756 tu_cs_emit_regs(&cs, A6XX_HLSQ_INVALIDATE_CMD(.dword = hlsq_invalidate_value));
1757
1758 struct tu_cs_entry ib = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
1759 if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
1760 tu_cs_emit_pkt7(&cmd->draw_cs, CP_SET_DRAW_STATE, 3);
1761 tu_cs_emit_sds_ib(&cmd->draw_cs, TU_DRAW_STATE_DESC_SETS, ib);
1762 cmd->state.desc_sets_ib = ib;
1763 } else {
1764 /* note: for compute we could emit directly instead of using a CP_INDIRECT;
1765 * however, the blob uses draw states for compute
1766 */
1767 tu_cs_emit_ib(&cmd->cs, &ib);
1768 }
1769 }
1770
1771 void tu_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer,
1772 uint32_t firstBinding,
1773 uint32_t bindingCount,
1774 const VkBuffer *pBuffers,
1775 const VkDeviceSize *pOffsets,
1776 const VkDeviceSize *pSizes)
1777 {
1778 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1779 struct tu_cs *cs = &cmd->draw_cs;
1780
1781 /* using COND_REG_EXEC for xfb commands matches the blob behavior
1782 * presumably there isn't any benefit to using a draw state when the
1783 * condition is (SYSMEM | BINNING)
1784 */
1785 tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
1786 CP_COND_REG_EXEC_0_SYSMEM |
1787 CP_COND_REG_EXEC_0_BINNING);
1788
1789 for (uint32_t i = 0; i < bindingCount; i++) {
1790 TU_FROM_HANDLE(tu_buffer, buf, pBuffers[i]);
1791 uint64_t iova = buf->bo->iova + pOffsets[i];
1792 uint32_t size = buf->bo->size - pOffsets[i];
1793 uint32_t idx = i + firstBinding;
1794
1795 if (pSizes && pSizes[i] != VK_WHOLE_SIZE)
1796 size = pSizes[i];
1797
1798 /* BUFFER_BASE is 32-byte aligned, add remaining offset to BUFFER_OFFSET */
1799 uint32_t offset = iova & 0x1f;
1800 iova &= ~(uint64_t) 0x1f;
1801
1802 tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_BASE(idx), 3);
1803 tu_cs_emit_qw(cs, iova);
1804 tu_cs_emit(cs, size + offset);
1805
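/* remember the unaligned part of the base so it can be folded back into
 * VPC_SO_BUFFER_OFFSET when transform feedback begins
 */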
1806 cmd->state.streamout_offset[idx] = offset;
1807
1808 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_WRITE);
1809 }
1810
1811 tu_cond_exec_end(cs);
1812 }
1813
1814 void
1815 tu_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer,
1816 uint32_t firstCounterBuffer,
1817 uint32_t counterBufferCount,
1818 const VkBuffer *pCounterBuffers,
1819 const VkDeviceSize *pCounterBufferOffsets)
1820 {
1821 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1822 struct tu_cs *cs = &cmd->draw_cs;
1823
1824 tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
1825 CP_COND_REG_EXEC_0_SYSMEM |
1826 CP_COND_REG_EXEC_0_BINNING);
1827
1828 /* TODO: only update offset for active buffers */
1829 for (uint32_t i = 0; i < IR3_MAX_SO_BUFFERS; i++)
1830 tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_OFFSET(i, cmd->state.streamout_offset[i]));
1831
1832 for (uint32_t i = 0; i < counterBufferCount; i++) {
1833 uint32_t idx = firstCounterBuffer + i;
1834 uint32_t offset = cmd->state.streamout_offset[idx];
1835
1836 if (!pCounterBuffers[i])
1837 continue;
1838
1839 TU_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]);
1840
1841 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
1842
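/* have the CP load the saved counter value straight into
 * VPC_SO_BUFFER_OFFSET; if the buffer base was unaligned at bind time,
 * add the remembered offset back in with CP_REG_RMW
 */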
1843 tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
1844 tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(idx)) |
1845 CP_MEM_TO_REG_0_UNK31 |
1846 CP_MEM_TO_REG_0_CNT(1));
1847 tu_cs_emit_qw(cs, buf->bo->iova + pCounterBufferOffsets[i]);
1848
1849 if (offset) {
1850 tu_cs_emit_pkt7(cs, CP_REG_RMW, 3);
1851 tu_cs_emit(cs, CP_REG_RMW_0_DST_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(idx)) |
1852 CP_REG_RMW_0_SRC1_ADD);
1853 tu_cs_emit_qw(cs, 0xffffffff);
1854 tu_cs_emit_qw(cs, offset);
1855 }
1856 }
1857
1858 tu_cond_exec_end(cs);
1859 }
1860
1861 void tu_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer,
1862 uint32_t firstCounterBuffer,
1863 uint32_t counterBufferCount,
1864 const VkBuffer *pCounterBuffers,
1865 const VkDeviceSize *pCounterBufferOffsets)
1866 {
1867 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1868 struct tu_cs *cs = &cmd->draw_cs;
1869
1870 tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
1871 CP_COND_REG_EXEC_0_SYSMEM |
1872 CP_COND_REG_EXEC_0_BINNING);
1873
1874 /* TODO: only flush buffers that need to be flushed */
1875 for (uint32_t i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
1876 /* note: FLUSH_BASE is always the same, so it could go in init_hw()? */
1877 tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_FLUSH_BASE(i), 2);
1878 tu_cs_emit_qw(cs, global_iova(cmd, flush_base[i]));
1879 tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i);
1880 }
1881
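/* for each counter buffer: read back the flushed dword count, convert
 * it to bytes, undo the offset added at begin time, and write the
 * result out to the counter buffer
 */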
1882 for (uint32_t i = 0; i < counterBufferCount; i++) {
1883 uint32_t idx = firstCounterBuffer + i;
1884 uint32_t offset = cmd->state.streamout_offset[idx];
1885
1886 if (!pCounterBuffers[i])
1887 continue;
1888
1889 TU_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]);
1890
1891 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_WRITE);
1892
1893 /* VPC_SO_FLUSH_BASE holds a dword count, but the counter buffer should be in bytes */
1894 tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
1895 tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
1896 CP_MEM_TO_REG_0_SHIFT_BY_2 |
1897 0x40000 | /* ??? */
1898 CP_MEM_TO_REG_0_UNK31 |
1899 CP_MEM_TO_REG_0_CNT(1));
1900 tu_cs_emit_qw(cs, global_iova(cmd, flush_base[idx]));
1901
1902 if (offset) {
1903 tu_cs_emit_pkt7(cs, CP_REG_RMW, 3);
1904 tu_cs_emit(cs, CP_REG_RMW_0_DST_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
1905 CP_REG_RMW_0_SRC1_ADD);
1906 tu_cs_emit_qw(cs, 0xffffffff);
1907 tu_cs_emit_qw(cs, -offset);
1908 }
1909
1910 tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
1911 tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
1912 CP_REG_TO_MEM_0_CNT(1));
1913 tu_cs_emit_qw(cs, buf->bo->iova + pCounterBufferOffsets[i]);
1914 }
1915
1916 tu_cond_exec_end(cs);
1917
1918 cmd->state.xfb_used = true;
1919 }
1920
1921 void
1922 tu_CmdPushConstants(VkCommandBuffer commandBuffer,
1923 VkPipelineLayout layout,
1924 VkShaderStageFlags stageFlags,
1925 uint32_t offset,
1926 uint32_t size,
1927 const void *pValues)
1928 {
1929 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1930 memcpy((void*) cmd->push_constants + offset, pValues, size);
1931 cmd->state.dirty |= TU_CMD_DIRTY_SHADER_CONSTS;
1932 }
1933
1934 /* Flush everything which has been made available but we haven't actually
1935 * flushed yet.
1936 */
1937 static void
1938 tu_flush_all_pending(struct tu_cache_state *cache)
1939 {
1940 cache->flush_bits |= cache->pending_flush_bits & TU_CMD_FLAG_ALL_FLUSH;
1941 cache->pending_flush_bits &= ~TU_CMD_FLAG_ALL_FLUSH;
1942 }
1943
1944 VkResult
1945 tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
1946 {
1947 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
1948
1949 /* We currently flush CCU at the end of the command buffer, like
1950 * what the blob does. There's implicit synchronization around every
1951 * vkQueueSubmit, but the kernel only flushes the UCHE, and we don't
1952 * know yet if this command buffer will be the last in the submit so we
1953 * have to defensively flush everything else.
1954 *
1955 * TODO: We could definitely do better than this, since these flushes
1956 * aren't required by Vulkan, but we'd need kernel support to do that.
1957 * Ideally, we'd like the kernel to flush everything afterwards, so that we
1958 * wouldn't have to do any flushes here, and when submitting multiple
1959 * command buffers there wouldn't be any unnecessary flushes in between.
1960 */
1961 if (cmd_buffer->state.pass) {
1962 tu_flush_all_pending(&cmd_buffer->state.renderpass_cache);
1963 tu_emit_cache_flush_renderpass(cmd_buffer, &cmd_buffer->draw_cs);
1964 } else {
1965 tu_flush_all_pending(&cmd_buffer->state.cache);
1966 cmd_buffer->state.cache.flush_bits |=
1967 TU_CMD_FLAG_CCU_FLUSH_COLOR |
1968 TU_CMD_FLAG_CCU_FLUSH_DEPTH;
1969 tu_emit_cache_flush(cmd_buffer, &cmd_buffer->cs);
1970 }
1971
1972 tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->device->global_bo,
1973 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
1974
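/* every BO backing the recorded command streams has to be referenced in
 * the submit's BO list; DUMP presumably makes them show up in GPU crash
 * dumps
 */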
1975 for (uint32_t i = 0; i < cmd_buffer->draw_cs.bo_count; i++) {
1976 tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_cs.bos[i],
1977 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
1978 }
1979
1980 for (uint32_t i = 0; i < cmd_buffer->draw_epilogue_cs.bo_count; i++) {
1981 tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_epilogue_cs.bos[i],
1982 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
1983 }
1984
1985 for (uint32_t i = 0; i < cmd_buffer->sub_cs.bo_count; i++) {
1986 tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->sub_cs.bos[i],
1987 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
1988 }
1989
1990 tu_cs_end(&cmd_buffer->cs);
1991 tu_cs_end(&cmd_buffer->draw_cs);
1992 tu_cs_end(&cmd_buffer->draw_epilogue_cs);
1993
1994 cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;
1995
1996 return cmd_buffer->record_result;
1997 }
1998
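/* Allocate space for a dynamic state group in sub_cs, record its
 * iova/size so later draws can re-emit it, and register it with
 * CP_SET_DRAW_STATE right away.
 */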
1999 static struct tu_cs
2000 tu_cmd_dynamic_state(struct tu_cmd_buffer *cmd, uint32_t id, uint32_t size)
2001 {
2002 struct tu_cs_memory memory;
2003 struct tu_cs cs;
2004
2005 /* TODO: share this logic with tu_pipeline_static_state */
2006 tu_cs_alloc(&cmd->sub_cs, size, 1, &memory);
2007 tu_cs_init_external(&cs, memory.map, memory.map + size);
2008 tu_cs_begin(&cs);
2009 tu_cs_reserve_space(&cs, size);
2010
2011 assert(id < ARRAY_SIZE(cmd->state.dynamic_state));
2012 cmd->state.dynamic_state[id].iova = memory.iova;
2013 cmd->state.dynamic_state[id].size = size;
2014
2015 tu_cs_emit_pkt7(&cmd->draw_cs, CP_SET_DRAW_STATE, 3);
2016 tu_cs_emit_draw_state(&cmd->draw_cs, TU_DRAW_STATE_DYNAMIC + id, cmd->state.dynamic_state[id]);
2017
2018 return cs;
2019 }
2020
2021 void
2022 tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
2023 VkPipelineBindPoint pipelineBindPoint,
2024 VkPipeline _pipeline)
2025 {
2026 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2027 TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);
2028
2029 for (uint32_t i = 0; i < pipeline->cs.bo_count; i++) {
2030 tu_bo_list_add(&cmd->bo_list, pipeline->cs.bos[i],
2031 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2032 }
2033
2034 if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) {
2035 cmd->state.compute_pipeline = pipeline;
2036 cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_PIPELINE;
2037 return;
2038 }
2039
2040 assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS);
2041
2042 cmd->state.pipeline = pipeline;
2043 cmd->state.dirty |= TU_CMD_DIRTY_DESC_SETS_LOAD | TU_CMD_DIRTY_SHADER_CONSTS;
2044
2045 struct tu_cs *cs = &cmd->draw_cs;
2046 uint32_t mask = ~pipeline->dynamic_state_mask & BITFIELD_MASK(TU_DYNAMIC_STATE_COUNT);
2047 uint32_t i;
2048
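/* 7 fixed groups below, plus one group for every state the pipeline
 * does not mark as dynamic (those are emitted from the pipeline's own
 * baked copy)
 */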
2049 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (7 + util_bitcount(mask)));
2050 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state_ib);
2051 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state_ib);
2052 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI, pipeline->vi.state_ib);
2053 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state_ib);
2054 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_RAST, pipeline->rast.state_ib);
2055 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS, pipeline->ds.state_ib);
2056 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_BLEND, pipeline->blend.state_ib);
2057 for_each_bit(i, mask)
2058 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DYNAMIC + i, pipeline->dynamic_state[i]);
2059
2060 /* If the new pipeline requires more VBs than we had previously set up, we
2061 * need to re-emit them in SDS. If it requires the same set or fewer, we
2062 * can just re-use the old SDS.
2063 */
2064 if (pipeline->vi.bindings_used & ~cmd->vertex_bindings_set)
2065 cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
2066
2067 /* the dynamic line width state depends on the pipeline state's gras_su_cntl,
2068 * so the dynamic state ib must be updated when the pipeline changes
2069 */
2070 if (pipeline->dynamic_state_mask & BIT(VK_DYNAMIC_STATE_LINE_WIDTH)) {
2071 struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_LINE_WIDTH, 2);
2072
2073 cmd->state.dynamic_gras_su_cntl &= A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
2074 cmd->state.dynamic_gras_su_cntl |= pipeline->gras_su_cntl;
2075
2076 tu_cs_emit_regs(&cs, A6XX_GRAS_SU_CNTL(.dword = cmd->state.dynamic_gras_su_cntl));
2077 }
2078 }
2079
2080 void
2081 tu_CmdSetViewport(VkCommandBuffer commandBuffer,
2082 uint32_t firstViewport,
2083 uint32_t viewportCount,
2084 const VkViewport *pViewports)
2085 {
2086 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2087 struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_VIEWPORT, 18);
2088
2089 assert(firstViewport == 0 && viewportCount == 1);
2090
2091 tu6_emit_viewport(&cs, pViewports);
2092 }
2093
2094 void
2095 tu_CmdSetScissor(VkCommandBuffer commandBuffer,
2096 uint32_t firstScissor,
2097 uint32_t scissorCount,
2098 const VkRect2D *pScissors)
2099 {
2100 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2101 struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_SCISSOR, 3);
2102
2103 assert(firstScissor == 0 && scissorCount == 1);
2104
2105 tu6_emit_scissor(&cs, pScissors);
2106 }
2107
2108 void
2109 tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
2110 {
2111 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2112 struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_LINE_WIDTH, 2);
2113
2114 cmd->state.dynamic_gras_su_cntl &= ~A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
2115 cmd->state.dynamic_gras_su_cntl |= A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(lineWidth / 2.0f);
2116
2117 tu_cs_emit_regs(&cs, A6XX_GRAS_SU_CNTL(.dword = cmd->state.dynamic_gras_su_cntl));
2118 }
2119
2120 void
2121 tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
2122 float depthBiasConstantFactor,
2123 float depthBiasClamp,
2124 float depthBiasSlopeFactor)
2125 {
2126 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2127 struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_DEPTH_BIAS, 4);
2128
2129 tu6_emit_depth_bias(&cs, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
2130 }
2131
2132 void
2133 tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
2134 const float blendConstants[4])
2135 {
2136 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2137 struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_BLEND_CONSTANTS, 5);
2138
2139 tu_cs_emit_pkt4(&cs, REG_A6XX_RB_BLEND_RED_F32, 4);
2140 tu_cs_emit_array(&cs, (const uint32_t *) blendConstants, 4);
2141 }
2142
2143 void
2144 tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
2145 float minDepthBounds,
2146 float maxDepthBounds)
2147 {
2148 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2149 struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_DEPTH_BOUNDS, 3);
2150
2151 tu_cs_emit_regs(&cs,
2152 A6XX_RB_Z_BOUNDS_MIN(minDepthBounds),
2153 A6XX_RB_Z_BOUNDS_MAX(maxDepthBounds));
2154 }
2155
2156 static void
2157 update_stencil_mask(uint32_t *value, VkStencilFaceFlags face, uint32_t mask)
2158 {
2159 if (face & VK_STENCIL_FACE_FRONT_BIT)
2160 *value = (*value & 0xff00) | (mask & 0xff);
2161 if (face & VK_STENCIL_FACE_BACK_BIT)
2162 *value = (*value & 0xff) | (mask & 0xff) << 8;
2163 }
2164
2165 void
2166 tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
2167 VkStencilFaceFlags faceMask,
2168 uint32_t compareMask)
2169 {
2170 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2171 struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK, 2);
2172
2173 update_stencil_mask(&cmd->state.dynamic_stencil_mask, faceMask, compareMask);
2174
2175 tu_cs_emit_regs(&cs, A6XX_RB_STENCILMASK(.dword = cmd->state.dynamic_stencil_mask));
2176 }
2177
2178 void
2179 tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
2180 VkStencilFaceFlags faceMask,
2181 uint32_t writeMask)
2182 {
2183 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2184 struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, 2);
2185
2186 update_stencil_mask(&cmd->state.dynamic_stencil_wrmask, faceMask, writeMask);
2187
2188 tu_cs_emit_regs(&cs, A6XX_RB_STENCILWRMASK(.dword = cmd->state.dynamic_stencil_wrmask));
2189 }
2190
2191 void
2192 tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
2193 VkStencilFaceFlags faceMask,
2194 uint32_t reference)
2195 {
2196 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2197 struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_REFERENCE, 2);
2198
2199 update_stencil_mask(&cmd->state.dynamic_stencil_ref, faceMask, reference);
2200
2201 tu_cs_emit_regs(&cs, A6XX_RB_STENCILREF(.dword = cmd->state.dynamic_stencil_ref));
2202 }
2203
2204 void
2205 tu_CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
2206 const VkSampleLocationsInfoEXT* pSampleLocationsInfo)
2207 {
2208 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2209 struct tu_cs cs = tu_cmd_dynamic_state(cmd, TU_DYNAMIC_STATE_SAMPLE_LOCATIONS, 9);
2210
2211 assert(pSampleLocationsInfo);
2212
2213 tu6_emit_sample_locations(&cs, pSampleLocationsInfo);
2214 }
2215
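/* Work out which flushes are needed to make writes in src_mask visible
 * to accesses in dst_mask. Coherent writes only set pending bits and
 * the real flush/invalidate is pulled in by a later access from a
 * different domain, so back-to-back accesses in the same domain don't
 * flush at all; incoherent (attachment) writes are flushed right away.
 */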
2216 static void
2217 tu_flush_for_access(struct tu_cache_state *cache,
2218 enum tu_cmd_access_mask src_mask,
2219 enum tu_cmd_access_mask dst_mask)
2220 {
2221 enum tu_cmd_flush_bits flush_bits = 0;
2222
2223 if (src_mask & TU_ACCESS_SYSMEM_WRITE) {
2224 cache->pending_flush_bits |= TU_CMD_FLAG_ALL_INVALIDATE;
2225 }
2226
2227 #define SRC_FLUSH(domain, flush, invalidate) \
2228 if (src_mask & TU_ACCESS_##domain##_WRITE) { \
2229 cache->pending_flush_bits |= TU_CMD_FLAG_##flush | \
2230 (TU_CMD_FLAG_ALL_INVALIDATE & ~TU_CMD_FLAG_##invalidate); \
2231 }
2232
2233 SRC_FLUSH(UCHE, CACHE_FLUSH, CACHE_INVALIDATE)
2234 SRC_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
2235 SRC_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
2236
2237 #undef SRC_FLUSH
2238
2239 #define SRC_INCOHERENT_FLUSH(domain, flush, invalidate) \
2240 if (src_mask & TU_ACCESS_##domain##_INCOHERENT_WRITE) { \
2241 flush_bits |= TU_CMD_FLAG_##flush; \
2242 cache->pending_flush_bits |= \
2243 (TU_CMD_FLAG_ALL_INVALIDATE & ~TU_CMD_FLAG_##invalidate); \
2244 }
2245
2246 SRC_INCOHERENT_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
2247 SRC_INCOHERENT_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
2248
2249 #undef SRC_INCOHERENT_FLUSH
2250
2251 if (dst_mask & (TU_ACCESS_SYSMEM_READ | TU_ACCESS_SYSMEM_WRITE)) {
2252 flush_bits |= cache->pending_flush_bits & TU_CMD_FLAG_ALL_FLUSH;
2253 }
2254
2255 #define DST_FLUSH(domain, flush, invalidate) \
2256 if (dst_mask & (TU_ACCESS_##domain##_READ | \
2257 TU_ACCESS_##domain##_WRITE)) { \
2258 flush_bits |= cache->pending_flush_bits & \
2259 (TU_CMD_FLAG_##invalidate | \
2260 (TU_CMD_FLAG_ALL_FLUSH & ~TU_CMD_FLAG_##flush)); \
2261 }
2262
2263 DST_FLUSH(UCHE, CACHE_FLUSH, CACHE_INVALIDATE)
2264 DST_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
2265 DST_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
2266
2267 #undef DST_FLUSH
2268
2269 #define DST_INCOHERENT_FLUSH(domain, flush, invalidate) \
2270 if (dst_mask & (TU_ACCESS_##domain##_READ | \
2271 TU_ACCESS_##domain##_WRITE)) { \
2272 flush_bits |= TU_CMD_FLAG_##invalidate | \
2273 (cache->pending_flush_bits & \
2274 (TU_CMD_FLAG_ALL_FLUSH & ~TU_CMD_FLAG_##flush)); \
2275 }
2276
2277 DST_INCOHERENT_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
2278 DST_INCOHERENT_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
2279
2280 #undef DST_INCOHERENT_FLUSH
2281
2282 if (dst_mask & TU_ACCESS_WFI_READ) {
2283 flush_bits |= TU_CMD_FLAG_WFI;
2284 }
2285
2286 cache->flush_bits |= flush_bits;
2287 cache->pending_flush_bits &= ~flush_bits;
2288 }
2289
2290 static enum tu_cmd_access_mask
2291 vk2tu_access(VkAccessFlags flags, bool gmem)
2292 {
2293 enum tu_cmd_access_mask mask = 0;
2294
2295 /* If the GPU writes a buffer that is then read by an indirect draw
2296 * command, we theoretically need a WFI + WAIT_FOR_ME combination to
2297 * wait for the writes to complete. The WAIT_FOR_ME is performed as part
2298 * of the draw by the firmware, so we just need to execute a WFI.
2299 */
2300 if (flags &
2301 (VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
2302 VK_ACCESS_MEMORY_READ_BIT)) {
2303 mask |= TU_ACCESS_WFI_READ;
2304 }
2305
2306 if (flags &
2307 (VK_ACCESS_INDIRECT_COMMAND_READ_BIT | /* Read performed by CP */
2308 VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT | /* Read performed by CP, I think */
2309 VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT | /* Read performed by CP */
2310 VK_ACCESS_HOST_READ_BIT | /* sysmem by definition */
2311 VK_ACCESS_MEMORY_READ_BIT)) {
2312 mask |= TU_ACCESS_SYSMEM_READ;
2313 }
2314
2315 if (flags &
2316 (VK_ACCESS_HOST_WRITE_BIT |
2317 VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT | /* Write performed by CP, I think */
2318 VK_ACCESS_MEMORY_WRITE_BIT)) {
2319 mask |= TU_ACCESS_SYSMEM_WRITE;
2320 }
2321
2322 if (flags &
2323 (VK_ACCESS_INDEX_READ_BIT | /* Read performed by PC, I think */
2324 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | /* Read performed by VFD */
2325 VK_ACCESS_UNIFORM_READ_BIT | /* Read performed by SP */
2326 /* TODO: Is there a no-cache bit for textures so that we can ignore
2327 * these?
2328 */
2329 VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | /* Read performed by TP */
2330 VK_ACCESS_SHADER_READ_BIT | /* Read performed by SP/TP */
2331 VK_ACCESS_MEMORY_READ_BIT)) {
2332 mask |= TU_ACCESS_UCHE_READ;
2333 }
2334
2335 if (flags &
2336 (VK_ACCESS_SHADER_WRITE_BIT | /* Write performed by SP */
2337 VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT | /* Write performed by VPC */
2338 VK_ACCESS_MEMORY_WRITE_BIT)) {
2339 mask |= TU_ACCESS_UCHE_WRITE;
2340 }
2341
2342 /* When using GMEM, the CCU is always flushed automatically to GMEM, and
2343 * then GMEM is flushed to sysmem. Furthermore, we already had to flush any
2344 * previous writes in sysmem mode when transitioning to GMEM. Therefore we
2345 * can ignore CCU and pretend that color attachments and transfers use
2346 * sysmem directly.
2347 */
2348
2349 if (flags &
2350 (VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
2351 VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
2352 VK_ACCESS_MEMORY_READ_BIT)) {
2353 if (gmem)
2354 mask |= TU_ACCESS_SYSMEM_READ;
2355 else
2356 mask |= TU_ACCESS_CCU_COLOR_INCOHERENT_READ;
2357 }
2358
2359 if (flags &
2360 (VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
2361 VK_ACCESS_MEMORY_READ_BIT)) {
2362 if (gmem)
2363 mask |= TU_ACCESS_SYSMEM_READ;
2364 else
2365 mask |= TU_ACCESS_CCU_DEPTH_INCOHERENT_READ;
2366 }
2367
2368 if (flags &
2369 (VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
2370 VK_ACCESS_MEMORY_WRITE_BIT)) {
2371 if (gmem) {
2372 mask |= TU_ACCESS_SYSMEM_WRITE;
2373 } else {
2374 mask |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
2375 }
2376 }
2377
2378 if (flags &
2379 (VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
2380 VK_ACCESS_MEMORY_WRITE_BIT)) {
2381 if (gmem) {
2382 mask |= TU_ACCESS_SYSMEM_WRITE;
2383 } else {
2384 mask |= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE;
2385 }
2386 }
2387
2388 /* When the dst access is a transfer read/write, it seems we sometimes need
2389 * to insert a WFI after any flushes, to guarantee that the flushes finish
2390 * before the 2D engine starts. However the opposite (i.e. a WFI after
2391 * CP_BLIT and before any subsequent flush) does not seem to be needed, and
2392 * the blob doesn't emit such a WFI.
2393 */
2394
2395 if (flags &
2396 (VK_ACCESS_TRANSFER_WRITE_BIT |
2397 VK_ACCESS_MEMORY_WRITE_BIT)) {
2398 if (gmem) {
2399 mask |= TU_ACCESS_SYSMEM_WRITE;
2400 } else {
2401 mask |= TU_ACCESS_CCU_COLOR_WRITE;
2402 }
2403 mask |= TU_ACCESS_WFI_READ;
2404 }
2405
2406 if (flags &
2407 (VK_ACCESS_TRANSFER_READ_BIT | /* Access performed by TP */
2408 VK_ACCESS_MEMORY_READ_BIT)) {
2409 mask |= TU_ACCESS_UCHE_READ | TU_ACCESS_WFI_READ;
2410 }
2411
2412 return mask;
2413 }
2414
2415
2416 void
2417 tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
2418 uint32_t commandBufferCount,
2419 const VkCommandBuffer *pCmdBuffers)
2420 {
2421 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2422 VkResult result;
2423
2424 assert(commandBufferCount > 0);
2425
2426 /* Emit any pending flushes. */
2427 if (cmd->state.pass) {
2428 tu_flush_all_pending(&cmd->state.renderpass_cache);
2429 tu_emit_cache_flush_renderpass(cmd, &cmd->draw_cs);
2430 } else {
2431 tu_flush_all_pending(&cmd->state.cache);
2432 tu_emit_cache_flush(cmd, &cmd->cs);
2433 }
2434
2435 for (uint32_t i = 0; i < commandBufferCount; i++) {
2436 TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);
2437
2438 result = tu_bo_list_merge(&cmd->bo_list, &secondary->bo_list);
2439 if (result != VK_SUCCESS) {
2440 cmd->record_result = result;
2441 break;
2442 }
2443
2444 if (secondary->usage_flags &
2445 VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
2446 assert(tu_cs_is_empty(&secondary->cs));
2447
2448 result = tu_cs_add_entries(&cmd->draw_cs, &secondary->draw_cs);
2449 if (result != VK_SUCCESS) {
2450 cmd->record_result = result;
2451 break;
2452 }
2453
2454 result = tu_cs_add_entries(&cmd->draw_epilogue_cs,
2455 &secondary->draw_epilogue_cs);
2456 if (result != VK_SUCCESS) {
2457 cmd->record_result = result;
2458 break;
2459 }
2460
2461 if (secondary->has_tess)
2462 cmd->has_tess = true;
2463 } else {
2464 assert(tu_cs_is_empty(&secondary->draw_cs));
2465 assert(tu_cs_is_empty(&secondary->draw_epilogue_cs));
2466
2467 for (uint32_t j = 0; j < secondary->cs.bo_count; j++) {
2468 tu_bo_list_add(&cmd->bo_list, secondary->cs.bos[j],
2469 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2470 }
2471
2472 tu_cs_add_entries(&cmd->cs, &secondary->cs);
2473 }
2474
2475 cmd->state.index_size = secondary->state.index_size; /* for restart index update */
2476 }
2477 cmd->state.dirty = ~0u; /* TODO: set dirty only what needs to be */
2478
2479 /* After executing secondary command buffers, there may have been arbitrary
2480 * flushes executed, so when we encounter a pipeline barrier with a
2481 * srcMask, we have to assume that we need to invalidate. Therefore we need
2482 * to re-initialize the cache with all pending invalidate bits set.
2483 */
2484 if (cmd->state.pass) {
2485 tu_cache_init(&cmd->state.renderpass_cache);
2486 } else {
2487 tu_cache_init(&cmd->state.cache);
2488 }
2489 }
2490
2491 VkResult
2492 tu_CreateCommandPool(VkDevice _device,
2493 const VkCommandPoolCreateInfo *pCreateInfo,
2494 const VkAllocationCallbacks *pAllocator,
2495 VkCommandPool *pCmdPool)
2496 {
2497 TU_FROM_HANDLE(tu_device, device, _device);
2498 struct tu_cmd_pool *pool;
2499
2500 pool = vk_object_alloc(&device->vk, pAllocator, sizeof(*pool),
2501 VK_OBJECT_TYPE_COMMAND_POOL);
2502 if (pool == NULL)
2503 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2504
2505 if (pAllocator)
2506 pool->alloc = *pAllocator;
2507 else
2508 pool->alloc = device->vk.alloc;
2509
2510 list_inithead(&pool->cmd_buffers);
2511 list_inithead(&pool->free_cmd_buffers);
2512
2513 pool->queue_family_index = pCreateInfo->queueFamilyIndex;
2514
2515 *pCmdPool = tu_cmd_pool_to_handle(pool);
2516
2517 return VK_SUCCESS;
2518 }
2519
2520 void
2521 tu_DestroyCommandPool(VkDevice _device,
2522 VkCommandPool commandPool,
2523 const VkAllocationCallbacks *pAllocator)
2524 {
2525 TU_FROM_HANDLE(tu_device, device, _device);
2526 TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
2527
2528 if (!pool)
2529 return;
2530
2531 list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
2532 &pool->cmd_buffers, pool_link)
2533 {
2534 tu_cmd_buffer_destroy(cmd_buffer);
2535 }
2536
2537 list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
2538 &pool->free_cmd_buffers, pool_link)
2539 {
2540 tu_cmd_buffer_destroy(cmd_buffer);
2541 }
2542
2543 vk_object_free(&device->vk, pAllocator, pool);
2544 }
2545
2546 VkResult
2547 tu_ResetCommandPool(VkDevice device,
2548 VkCommandPool commandPool,
2549 VkCommandPoolResetFlags flags)
2550 {
2551 TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
2552 VkResult result;
2553
2554 list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
2555 pool_link)
2556 {
2557 result = tu_reset_cmd_buffer(cmd_buffer);
2558 if (result != VK_SUCCESS)
2559 return result;
2560 }
2561
2562 return VK_SUCCESS;
2563 }
2564
2565 void
2566 tu_TrimCommandPool(VkDevice device,
2567 VkCommandPool commandPool,
2568 VkCommandPoolTrimFlags flags)
2569 {
2570 TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
2571
2572 if (!pool)
2573 return;
2574
2575 list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
2576 &pool->free_cmd_buffers, pool_link)
2577 {
2578 tu_cmd_buffer_destroy(cmd_buffer);
2579 }
2580 }
2581
2582 static void
2583 tu_subpass_barrier(struct tu_cmd_buffer *cmd_buffer,
2584 const struct tu_subpass_barrier *barrier,
2585 bool external)
2586 {
2587 /* Note: we don't know until the end of the subpass whether we'll use
2588 * sysmem, so assume sysmem here to be safe.
2589 */
2590 struct tu_cache_state *cache =
2591 external ? &cmd_buffer->state.cache : &cmd_buffer->state.renderpass_cache;
2592 enum tu_cmd_access_mask src_flags =
2593 vk2tu_access(barrier->src_access_mask, false);
2594 enum tu_cmd_access_mask dst_flags =
2595 vk2tu_access(barrier->dst_access_mask, false);
2596
2597 if (barrier->incoherent_ccu_color)
2598 src_flags |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
2599 if (barrier->incoherent_ccu_depth)
2600 src_flags |= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE;
2601
2602 tu_flush_for_access(cache, src_flags, dst_flags);
2603 }
2604
2605 void
2606 tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
2607 const VkRenderPassBeginInfo *pRenderPassBegin,
2608 VkSubpassContents contents)
2609 {
2610 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2611 TU_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
2612 TU_FROM_HANDLE(tu_framebuffer, fb, pRenderPassBegin->framebuffer);
2613
2614 cmd->state.pass = pass;
2615 cmd->state.subpass = pass->subpasses;
2616 cmd->state.framebuffer = fb;
2617 cmd->state.render_area = pRenderPassBegin->renderArea;
2618
2619 tu_cmd_prepare_tile_store_ib(cmd);
2620
2621 /* Note: because this is external, any flushes will happen before draw_cs
2622 * gets called. However deferred flushes could have to happen later as part
2623 * of the subpass.
2624 */
2625 tu_subpass_barrier(cmd, &pass->subpasses[0].start_barrier, true);
2626 cmd->state.renderpass_cache.pending_flush_bits =
2627 cmd->state.cache.pending_flush_bits;
2628 cmd->state.renderpass_cache.flush_bits = 0;
2629
2630 tu_emit_renderpass_begin(cmd, pRenderPassBegin);
2631
2632 tu6_emit_zs(cmd, cmd->state.subpass, &cmd->draw_cs);
2633 tu6_emit_mrt(cmd, cmd->state.subpass, &cmd->draw_cs);
2634 tu6_emit_msaa(&cmd->draw_cs, cmd->state.subpass->samples);
2635 tu6_emit_render_cntl(cmd, cmd->state.subpass, &cmd->draw_cs, false);
2636
2637 tu_set_input_attachments(cmd, cmd->state.subpass);
2638
2639 for (uint32_t i = 0; i < fb->attachment_count; ++i) {
2640 const struct tu_image_view *iview = fb->attachments[i].attachment;
2641 tu_bo_list_add(&cmd->bo_list, iview->image->bo,
2642 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
2643 }
2644
2645 cmd->state.dirty |= TU_CMD_DIRTY_DRAW_STATE;
2646 }
2647
2648 void
2649 tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
2650 const VkRenderPassBeginInfo *pRenderPassBeginInfo,
2651 const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
2652 {
2653 tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
2654 pSubpassBeginInfo->contents);
2655 }
2656
2657 void
2658 tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
2659 {
2660 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2661 const struct tu_render_pass *pass = cmd->state.pass;
2662 struct tu_cs *cs = &cmd->draw_cs;
2663
2664 const struct tu_subpass *subpass = cmd->state.subpass++;
2665
2666 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);
2667
2668 if (subpass->resolve_attachments) {
2669 tu6_emit_blit_scissor(cmd, cs, true);
2670
2671 for (unsigned i = 0; i < subpass->color_count; i++) {
2672 uint32_t a = subpass->resolve_attachments[i].attachment;
2673 if (a == VK_ATTACHMENT_UNUSED)
2674 continue;
2675
2676 tu_store_gmem_attachment(cmd, cs, a,
2677 subpass->color_attachments[i].attachment);
2678
2679 if (pass->attachments[a].gmem_offset < 0)
2680 continue;
2681
2682 /* TODO:
2683 * check if the resolved attachment is needed by later subpasses;
2684 * if it is, we should do a GMEM->GMEM resolve instead of GMEM->MEM->GMEM
2685 */
2686 tu_finishme("missing GMEM->GMEM resolve path\n");
2687 tu_load_gmem_attachment(cmd, cs, a, true);
2688 }
2689 }
2690
2691 tu_cond_exec_end(cs);
2692
2693 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
2694
2695 tu6_emit_sysmem_resolves(cmd, cs, subpass);
2696
2697 tu_cond_exec_end(cs);
2698
2699 /* Handle dependencies for the next subpass */
2700 tu_subpass_barrier(cmd, &cmd->state.subpass->start_barrier, false);
2701
2702 /* emit mrt/zs/msaa/ubwc state for the subpass that is starting */
2703 tu6_emit_zs(cmd, cmd->state.subpass, cs);
2704 tu6_emit_mrt(cmd, cmd->state.subpass, cs);
2705 tu6_emit_msaa(cs, cmd->state.subpass->samples);
2706 tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, false);
2707
2708 tu_set_input_attachments(cmd, cmd->state.subpass);
2709 }
2710
2711 void
2712 tu_CmdNextSubpass2(VkCommandBuffer commandBuffer,
2713 const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
2714 const VkSubpassEndInfoKHR *pSubpassEndInfo)
2715 {
2716 tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
2717 }
2718
2719 static void
2720 tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
2721 struct tu_descriptor_state *descriptors_state,
2722 gl_shader_stage type,
2723 uint32_t *push_constants)
2724 {
2725 const struct tu_program_descriptor_linkage *link =
2726 &pipeline->program.link[type];
2727 const struct ir3_ubo_analysis_state *state = &link->const_state.ubo_state;
2728
2729 if (link->push_consts.count > 0) {
2730 unsigned num_units = link->push_consts.count;
2731 unsigned offset = link->push_consts.lo;
2732 tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_units * 4);
2733 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
2734 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
2735 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
2736 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
2737 CP_LOAD_STATE6_0_NUM_UNIT(num_units));
2738 tu_cs_emit(cs, 0);
2739 tu_cs_emit(cs, 0);
2740 for (unsigned i = 0; i < num_units * 4; i++)
2741 tu_cs_emit(cs, push_constants[i + offset * 4]);
2742 }
2743
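/* upload the UBO ranges that ir3 decided to lower into the constant
 * file, reading each UBO's address out of the bindless descriptors
 */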
2744 for (uint32_t i = 0; i < state->num_enabled; i++) {
2745 uint32_t size = state->range[i].end - state->range[i].start;
2746 uint32_t offset = state->range[i].start;
2747
2748 /* and even if the start of the const buffer is before
2749 * first_immediate, the end may not be:
2750 */
2751 size = MIN2(size, (16 * link->constlen) - state->range[i].offset);
2752
2753 if (size == 0)
2754 continue;
2755
2756 /* things should be aligned to vec4: */
2757 debug_assert((state->range[i].offset % 16) == 0);
2758 debug_assert((size % 16) == 0);
2759 debug_assert((offset % 16) == 0);
2760
2761 /* Dig out the descriptor from the descriptor state and read the VA from
2762 * it.
2763 */
2764 assert(state->range[i].ubo.bindless);
2765 uint32_t *base = state->range[i].ubo.bindless_base == MAX_SETS ?
2766 descriptors_state->dynamic_descriptors :
2767 descriptors_state->sets[state->range[i].ubo.bindless_base]->mapped_ptr;
2768 unsigned block = state->range[i].ubo.block;
2769 uint32_t *desc = base + block * A6XX_TEX_CONST_DWORDS;
2770 uint64_t va = desc[0] | ((uint64_t)(desc[1] & A6XX_UBO_1_BASE_HI__MASK) << 32);
2771 assert(va);
2772
2773 tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
2774 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
2775 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
2776 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
2777 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
2778 CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
2779 tu_cs_emit_qw(cs, va + offset);
2780 }
2781 }
2782
2783 static struct tu_cs_entry
2784 tu6_emit_consts(struct tu_cmd_buffer *cmd,
2785 const struct tu_pipeline *pipeline,
2786 struct tu_descriptor_state *descriptors_state,
2787 gl_shader_stage type)
2788 {
2789 struct tu_cs cs;
2790 tu_cs_begin_sub_stream(&cmd->sub_cs, 512, &cs); /* TODO: maximum size? */
2791
2792 tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);
2793
2794 return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
2795 }
2796
2797 static struct tu_cs_entry
2798 tu6_emit_vertex_buffers(struct tu_cmd_buffer *cmd,
2799 const struct tu_pipeline *pipeline)
2800 {
2801 struct tu_cs cs;
2802 tu_cs_begin_sub_stream(&cmd->sub_cs, 4 * MAX_VBS, &cs);
2803
2804 int binding;
2805 for_each_bit(binding, pipeline->vi.bindings_used) {
2806 const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
2807 const VkDeviceSize offset = buf->bo_offset +
2808 cmd->state.vb.offsets[binding];
2809
2810 tu_cs_emit_regs(&cs,
2811 A6XX_VFD_FETCH_BASE(binding, .bo = buf->bo, .bo_offset = offset),
2812 A6XX_VFD_FETCH_SIZE(binding, buf->size - offset));
2813
2814 }
2815
2816 cmd->vertex_bindings_set = pipeline->vi.bindings_used;
2817
2818 return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
2819 }
2820
2821 static uint64_t
2822 get_tess_param_bo_size(const struct tu_pipeline *pipeline,
2823 uint32_t draw_count)
2824 {
2825 /* TODO: For indirect draws, we can't compute the BO size ahead of time.
2826 * Still not sure what to do here, so just allocate a reasonably large
2827 * BO and hope for the best for now. */
2828 if (!draw_count)
2829 draw_count = 2048;
2830
2831 /* the tess param BO is pipeline->tess.param_stride bytes per patch,
2832 * which includes both the per-vertex outputs and the per-patch outputs;
2833 * build_primitive_map in ir3 calculates this stride
2834 */
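/* patch primtypes are presumably numbered by their control point count,
 * so subtracting DI_PT_PATCHES0 gives the vertices per patch
 */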
2835 uint32_t verts_per_patch = pipeline->ia.primtype - DI_PT_PATCHES0;
2836 uint32_t num_patches = draw_count / verts_per_patch;
2837 return num_patches * pipeline->tess.param_stride;
2838 }
2839
2840 static uint64_t
2841 get_tess_factor_bo_size(const struct tu_pipeline *pipeline,
2842 uint32_t draw_count)
2843 {
2844 /* TODO: For indirect draws, we can't compute the BO size ahead of time.
2845 * Still not sure what to do here, so just allocate a reasonably large
2846 * BO and hope for the best for now. */
2847 if (!draw_count)
2848 draw_count = 2048;
2849
2850 /* Each distinct patch gets its own tess factor output. */
2851 uint32_t verts_per_patch = pipeline->ia.primtype - DI_PT_PATCHES0;
2852 uint32_t num_patches = draw_count / verts_per_patch;
2853 uint32_t factor_stride;
2854 switch (pipeline->tess.patch_type) {
2855 case IR3_TESS_ISOLINES:
2856 factor_stride = 12;
2857 break;
2858 case IR3_TESS_TRIANGLES:
2859 factor_stride = 20;
2860 break;
2861 case IR3_TESS_QUADS:
2862 factor_stride = 28;
2863 break;
2864 default:
2865 unreachable("bad tessmode");
2866 }
2867 return factor_stride * num_patches;
2868 }
2869
2870 static VkResult
2871 tu6_emit_tess_consts(struct tu_cmd_buffer *cmd,
2872 uint32_t draw_count,
2873 const struct tu_pipeline *pipeline,
2874 struct tu_cs_entry *entry)
2875 {
2876 struct tu_cs cs;
2877 VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 20, &cs);
2878 if (result != VK_SUCCESS)
2879 return result;
2880
2881 uint64_t tess_factor_size = get_tess_factor_bo_size(pipeline, draw_count);
2882 uint64_t tess_param_size = get_tess_param_bo_size(pipeline, draw_count);
2883 uint64_t tess_bo_size = tess_factor_size + tess_param_size;
2884 if (tess_bo_size > 0) {
2885 struct tu_bo *tess_bo;
2886 result = tu_get_scratch_bo(cmd->device, tess_bo_size, &tess_bo);
2887 if (result != VK_SUCCESS)
2888 return result;
2889
2890 tu_bo_list_add(&cmd->bo_list, tess_bo,
2891 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
2892 uint64_t tess_factor_iova = tess_bo->iova;
2893 uint64_t tess_param_iova = tess_factor_iova + tess_factor_size;
2894
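/* hand the tess param and tess factor BO addresses to the HS and DS as
 * a vec4 of consts at hs_bo_regid/ds_bo_regid
 */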
2895 tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
2896 tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(pipeline->tess.hs_bo_regid) |
2897 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
2898 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
2899 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_HS_SHADER) |
2900 CP_LOAD_STATE6_0_NUM_UNIT(1));
2901 tu_cs_emit(&cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
2902 tu_cs_emit(&cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
2903 tu_cs_emit_qw(&cs, tess_param_iova);
2904 tu_cs_emit_qw(&cs, tess_factor_iova);
2905
2906 tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
2907 tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(pipeline->tess.ds_bo_regid) |
2908 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
2909 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
2910 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_DS_SHADER) |
2911 CP_LOAD_STATE6_0_NUM_UNIT(1));
2912 tu_cs_emit(&cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
2913 tu_cs_emit(&cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
2914 tu_cs_emit_qw(&cs, tess_param_iova);
2915 tu_cs_emit_qw(&cs, tess_factor_iova);
2916
2917 tu_cs_emit_pkt4(&cs, REG_A6XX_PC_TESSFACTOR_ADDR_LO, 2);
2918 tu_cs_emit_qw(&cs, tess_factor_iova);
2919
2920 /* TODO: Without this WFI here, the hardware seems unable to read these
2921 * addresses we just emitted. Freedreno emits these consts as part of
2922 * IB1 instead of in a draw state which might make this WFI unnecessary,
2923 * but it requires a bit more indirection (SS6_INDIRECT for consts). */
2924 tu_cs_emit_wfi(&cs);
2925 }
2926 *entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
2927 return VK_SUCCESS;
2928 }
2929
2930 static VkResult
2931 tu6_draw_common(struct tu_cmd_buffer *cmd,
2932 struct tu_cs *cs,
2933 bool indexed,
2934 /* note: draw_count is 0 for indirect */
2935 uint32_t draw_count)
2936 {
2937 const struct tu_pipeline *pipeline = cmd->state.pipeline;
2938 VkResult result;
2939
2940 struct tu_descriptor_state *descriptors_state =
2941 &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];
2942
2943 tu_emit_cache_flush_renderpass(cmd, cs);
2944
2945 /* TODO lrz */
2946
2947 tu_cs_emit_regs(cs, A6XX_PC_PRIMITIVE_CNTL_0(
2948 .primitive_restart =
2949 pipeline->ia.primitive_restart && indexed,
2950 .tess_upper_left_domain_origin =
2951 pipeline->tess.upper_left_domain_origin));
2952
2953 if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
2954 cmd->state.shader_const_ib[MESA_SHADER_VERTEX] =
2955 tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX);
2956 cmd->state.shader_const_ib[MESA_SHADER_TESS_CTRL] =
2957 tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_TESS_CTRL);
2958 cmd->state.shader_const_ib[MESA_SHADER_TESS_EVAL] =
2959 tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_TESS_EVAL);
2960 cmd->state.shader_const_ib[MESA_SHADER_GEOMETRY] =
2961 tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_GEOMETRY);
2962 cmd->state.shader_const_ib[MESA_SHADER_FRAGMENT] =
2963 tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT);
2964 }
2965
2966 if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS)
2967 cmd->state.vertex_buffers_ib = tu6_emit_vertex_buffers(cmd, pipeline);
2968
2969 bool has_tess =
2970 pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
2971 struct tu_cs_entry tess_consts = {};
2972 if (has_tess) {
2973 cmd->has_tess = true;
2974 result = tu6_emit_tess_consts(cmd, draw_count, pipeline, &tess_consts);
2975 if (result != VK_SUCCESS)
2976 return result;
2977 }
2978
2979 /* for the first draw in a renderpass, re-emit all the draw states
2980 *
2981 * and if a draw-state disabling path (CmdClearAttachments 3D fallback) was
2982 * used, then draw states must be re-emitted. note however this only happens
2983 * in the sysmem path, so this could be skipped for the gmem path (TODO)
2984 *
2985 * the two input attachment states are excluded because a secondary command
2986 * buffer doesn't have a state ib to restore them, and not re-emitting them
2987 * is OK since CmdClearAttachments won't disable/overwrite them
2988 */
2989 if (cmd->state.dirty & TU_CMD_DIRTY_DRAW_STATE) {
2990 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (TU_DRAW_STATE_COUNT - 2));
2991
2992 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state_ib);
2993 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state_ib);
2994 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_TESS, tess_consts);
2995 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI, pipeline->vi.state_ib);
2996 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state_ib);
2997 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_RAST, pipeline->rast.state_ib);
2998 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS, pipeline->ds.state_ib);
2999 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_BLEND, pipeline->blend.state_ib);
3000 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const_ib[MESA_SHADER_VERTEX]);
3001 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_HS_CONST, cmd->state.shader_const_ib[MESA_SHADER_TESS_CTRL]);
3002 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS_CONST, cmd->state.shader_const_ib[MESA_SHADER_TESS_EVAL]);
3003 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const_ib[MESA_SHADER_GEOMETRY]);
3004 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const_ib[MESA_SHADER_FRAGMENT]);
3005 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DESC_SETS, cmd->state.desc_sets_ib);
3006 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DESC_SETS_LOAD, pipeline->load_state.state_ib);
3007 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers_ib);
3008 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_PARAMS, cmd->state.vs_params);
3009
3010 for (uint32_t i = 0; i < ARRAY_SIZE(cmd->state.dynamic_state); i++) {
3011 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DYNAMIC + i,
3012 ((pipeline->dynamic_state_mask & BIT(i)) ?
3013 cmd->state.dynamic_state[i] :
3014 pipeline->dynamic_state[i]));
3015 }
3016 } else {
3017
3018 /* emit draw states that were just updated
3019 * note we eventually don't want to have to emit anything here
3020 */
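/* each group in CP_SET_DRAW_STATE takes 3 dwords, so this count has to
 * match exactly the number of groups emitted below
 */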
3021 uint32_t draw_state_count =
3022 has_tess +
3023 ((cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) ? 5 : 0) +
3024 ((cmd->state.dirty & TU_CMD_DIRTY_DESC_SETS_LOAD) ? 1 : 0) +
3025 ((cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) ? 1 : 0) +
3026 1; /* vs_params */
3027
3028 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_count);
3029
3030 /* We may need to re-emit tess consts if the current draw call is
3031 * sufficiently larger than the last draw call. */
3032 if (has_tess)
3033 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_TESS, tess_consts);
3034 if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
3035 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const_ib[MESA_SHADER_VERTEX]);
3036 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_HS_CONST, cmd->state.shader_const_ib[MESA_SHADER_TESS_CTRL]);
3037 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS_CONST, cmd->state.shader_const_ib[MESA_SHADER_TESS_EVAL]);
3038 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const_ib[MESA_SHADER_GEOMETRY]);
3039 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const_ib[MESA_SHADER_FRAGMENT]);
3040 }
3041 if (cmd->state.dirty & TU_CMD_DIRTY_DESC_SETS_LOAD)
3042 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DESC_SETS_LOAD, pipeline->load_state.state_ib);
3043 if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS)
3044 tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers_ib);
3045 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_PARAMS, cmd->state.vs_params);
3046 }
3047
3048 tu_cs_sanity_check(cs);
3049
3050 /* There are too many graphics dirty bits to list here, so just list the
3051 * bits to preserve instead. The only things not emitted here are
3052 * compute-related state.
3053 */
3054 cmd->state.dirty &= (TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS | TU_CMD_DIRTY_COMPUTE_PIPELINE);
3055 return VK_SUCCESS;
3056 }
3057
3058 static uint32_t
3059 tu_draw_initiator(struct tu_cmd_buffer *cmd, enum pc_di_src_sel src_sel)
3060 {
3061 const struct tu_pipeline *pipeline = cmd->state.pipeline;
3062 uint32_t initiator =
3063 CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(pipeline->ia.primtype) |
3064 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(src_sel) |
3065 CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(cmd->state.index_size) |
3066 CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY);
3067
3068 if (pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT)
3069 initiator |= CP_DRAW_INDX_OFFSET_0_GS_ENABLE;
3070
3071 switch (pipeline->tess.patch_type) {
3072 case IR3_TESS_TRIANGLES:
3073 initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_TRIANGLES) |
3074 CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
3075 break;
3076 case IR3_TESS_ISOLINES:
3077 initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_ISOLINES) |
3078 CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
3079 break;
3080 case IR3_TESS_NONE:
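/* tessellation is disabled, so the patch type written here is
 * presumably ignored by the HW
 */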
3081 initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_QUADS);
3082 break;
3083 case IR3_TESS_QUADS:
3084 initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_QUADS) |
3085 CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
3086 break;
3087 }
3088 return initiator;
3089 }
3090
3091
3092 static uint32_t
3093 vs_params_offset(struct tu_cmd_buffer *cmd)
3094 {
3095 const struct tu_program_descriptor_linkage *link =
3096 &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
3097 const struct ir3_const_state *const_state = &link->const_state;
3098
3099 if (const_state->offsets.driver_param >= link->constlen)
3100 return 0;
3101
3102 /* this layout is required by CP_DRAW_INDIRECT_MULTI */
3103 STATIC_ASSERT(IR3_DP_DRAWID == 0);
3104 STATIC_ASSERT(IR3_DP_VTXID_BASE == 1);
3105 STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);
3106
3107 /* 0 means disabled for CP_DRAW_INDIRECT_MULTI */
3108 assert(const_state->offsets.driver_param != 0);
3109
3110 return const_state->offsets.driver_param;
3111 }
3112
3113 static struct tu_draw_state
3114 tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
3115 uint32_t vertex_offset,
3116 uint32_t first_instance)
3117 {
3118 uint32_t offset = vs_params_offset(cmd);
3119
3120 struct tu_cs cs;
3121 VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 3 + (offset ? 8 : 0), &cs);
3122 if (result != VK_SUCCESS) {
3123 cmd->record_result = result;
3124 return (struct tu_draw_state) {};
3125 }
3126
3127 /* TODO: don't make a new draw state when it doesn't change */
3128
3129 tu_cs_emit_regs(&cs,
3130 A6XX_VFD_INDEX_OFFSET(vertex_offset),
3131 A6XX_VFD_INSTANCE_START_OFFSET(first_instance));
3132
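/* emit the vec4 of VS driver params (DRAWID, VTXID_BASE, INSTID_BASE,
 * pad); DRAWID is always 0 for direct draws
 */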
3133 if (offset) {
3134 tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
3135 tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
3136 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
3137 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
3138 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
3139 CP_LOAD_STATE6_0_NUM_UNIT(1));
3140 tu_cs_emit(&cs, 0);
3141 tu_cs_emit(&cs, 0);
3142
3143 tu_cs_emit(&cs, 0);
3144 tu_cs_emit(&cs, vertex_offset);
3145 tu_cs_emit(&cs, first_instance);
3146 tu_cs_emit(&cs, 0);
3147 }
3148
3149 struct tu_cs_entry entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
3150 return (struct tu_draw_state) {entry.bo->iova + entry.offset, entry.size / 4};
3151 }
3152
3153 void
3154 tu_CmdDraw(VkCommandBuffer commandBuffer,
3155 uint32_t vertexCount,
3156 uint32_t instanceCount,
3157 uint32_t firstVertex,
3158 uint32_t firstInstance)
3159 {
3160 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3161 struct tu_cs *cs = &cmd->draw_cs;
3162
3163 cmd->state.vs_params = tu6_emit_vs_params(cmd, firstVertex, firstInstance);
3164
3165 tu6_draw_common(cmd, cs, false, vertexCount);
3166
3167 tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
3168 tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_INDEX));
3169 tu_cs_emit(cs, instanceCount);
3170 tu_cs_emit(cs, vertexCount);
3171 }
3172
3173 void
3174 tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
3175 uint32_t indexCount,
3176 uint32_t instanceCount,
3177 uint32_t firstIndex,
3178 int32_t vertexOffset,
3179 uint32_t firstInstance)
3180 {
3181 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3182 struct tu_cs *cs = &cmd->draw_cs;
3183
3184 cmd->state.vs_params = tu6_emit_vs_params(cmd, vertexOffset, firstInstance);
3185
3186 tu6_draw_common(cmd, cs, true, indexCount);
3187
3188 tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 7);
3189 tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_DMA));
3190 tu_cs_emit(cs, instanceCount);
3191 tu_cs_emit(cs, indexCount);
3192 tu_cs_emit(cs, firstIndex);
3193 tu_cs_emit_qw(cs, cmd->state.index_va);
3194 tu_cs_emit(cs, cmd->state.max_index_count);
3195 }
3196
3197 void
3198 tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
3199 VkBuffer _buffer,
3200 VkDeviceSize offset,
3201 uint32_t drawCount,
3202 uint32_t stride)
3203 {
3204 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3205 TU_FROM_HANDLE(tu_buffer, buf, _buffer);
3206 struct tu_cs *cs = &cmd->draw_cs;
3207
3208 cmd->state.vs_params = (struct tu_draw_state) {};
3209
3210 tu6_draw_common(cmd, cs, false, 0);
3211
3212 /* workaround for a firmware bug with CP_DRAW_INDIRECT_MULTI, where it
3213 * doesn't wait for WFIs to complete, which leads to a GPU fault/hang
3214 * TODO: this could be worked around in a more performant way,
3215 * or newer firmware may exist where this has been fixed
3216 */
3217 if (cmd->device->physical_device->gpu_id != 650)
3218 tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
3219
3220 tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 6);
3221 tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_INDEX));
3222 tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_NORMAL) |
3223 A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
3224 tu_cs_emit(cs, drawCount);
3225 tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
3226 tu_cs_emit(cs, stride);
3227
3228 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
3229 }
3230
3231 void
3232 tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
3233 VkBuffer _buffer,
3234 VkDeviceSize offset,
3235 uint32_t drawCount,
3236 uint32_t stride)
3237 {
3238 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3239 TU_FROM_HANDLE(tu_buffer, buf, _buffer);
3240 struct tu_cs *cs = &cmd->draw_cs;
3241
3242 cmd->state.vs_params = (struct tu_draw_state) {};
3243
3244 tu6_draw_common(cmd, cs, true, 0);
3245
3246 /* workaround for a firmware bug with CP_DRAW_INDIRECT_MULTI, where it
3247 * doesn't wait for WFIs to complete, which leads to a GPU fault/hang
3248 * TODO: this could be worked around in a more performant way,
3249 * or newer firmware may exist where this has been fixed
3250 */
3251 if (cmd->device->physical_device->gpu_id != 650)
3252 tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
3253
3254 tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 9);
3255 tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_DMA));
3256 tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_INDEXED) |
3257 A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
3258 tu_cs_emit(cs, drawCount);
3259 tu_cs_emit_qw(cs, cmd->state.index_va);
3260 tu_cs_emit(cs, cmd->state.max_index_count);
3261 tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
3262 tu_cs_emit(cs, stride);
3263
3264 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
3265 }
3266
3267 void tu_CmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer,
3268 uint32_t instanceCount,
3269 uint32_t firstInstance,
3270 VkBuffer _counterBuffer,
3271 VkDeviceSize counterBufferOffset,
3272 uint32_t counterOffset,
3273 uint32_t vertexStride)
3274 {
3275 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3276 TU_FROM_HANDLE(tu_buffer, buf, _counterBuffer);
3277 struct tu_cs *cs = &cmd->draw_cs;
3278
3279 cmd->state.vs_params = tu6_emit_vs_params(cmd, 0, firstInstance);
3280
3281 tu6_draw_common(cmd, cs, false, 0);
3282
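/* CP_DRAW_AUTO sources the vertex count from the transform feedback counter
 * buffer; per the Vulkan spec the count is
 * (counter value - counterOffset) / vertexStride, and the counter address,
 * offset, and stride are handed to the HW here.
 */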
3283 tu_cs_emit_pkt7(cs, CP_DRAW_AUTO, 6);
3284 tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_XFB));
3285 tu_cs_emit(cs, instanceCount);
3286 tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + counterBufferOffset);
3287 tu_cs_emit(cs, counterOffset);
3288 tu_cs_emit(cs, vertexStride);
3289
3290 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
3291 }
3292
3293 struct tu_dispatch_info
3294 {
3295 /**
3296 * The dimensions of the grid to dispatch, in workgroup (block) units.
3297 */
3298 uint32_t blocks[3];
3299
3300 /**
3301 * A starting offset for the grid, in workgroup units. Even when the
3302 * dispatch is unaligned, this offset must still be aligned.
3303 */
3304 uint32_t offsets[3];
3305 /**
3306 * Whether it's an unaligned compute dispatch.
3307 */
3308 bool unaligned;
3309
3310 /**
3311 * Indirect compute parameters resource.
3312 */
3313 struct tu_buffer *indirect;
3314 uint64_t indirect_offset;
3315 };
3316
3317 static void
3318 tu_emit_compute_driver_params(struct tu_cs *cs, struct tu_pipeline *pipeline,
3319 const struct tu_dispatch_info *info)
3320 {
3321 gl_shader_stage type = MESA_SHADER_COMPUTE;
3322 const struct tu_program_descriptor_linkage *link =
3323 &pipeline->program.link[type];
3324 const struct ir3_const_state *const_state = &link->const_state;
3325 uint32_t offset = const_state->offsets.driver_param;
3326
3327 if (link->constlen <= offset)
3328 return;
3329
3330 if (!info->indirect) {
3331 uint32_t driver_params[IR3_DP_CS_COUNT] = {
3332 [IR3_DP_NUM_WORK_GROUPS_X] = info->blocks[0],
3333 [IR3_DP_NUM_WORK_GROUPS_Y] = info->blocks[1],
3334 [IR3_DP_NUM_WORK_GROUPS_Z] = info->blocks[2],
3335 [IR3_DP_LOCAL_GROUP_SIZE_X] = pipeline->compute.local_size[0],
3336 [IR3_DP_LOCAL_GROUP_SIZE_Y] = pipeline->compute.local_size[1],
3337 [IR3_DP_LOCAL_GROUP_SIZE_Z] = pipeline->compute.local_size[2],
3338 };
3339
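/* Clamp to the space the shader reserves for driver params: constlen and
 * the driver_param offset appear to be in vec4 units, hence the *4 to get
 * the remaining space in dwords.
 */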
3340 uint32_t num_consts = MIN2(const_state->num_driver_params,
3341 (link->constlen - offset) * 4);
3342 /* push constants */
3343 tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_consts);
3344 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
3345 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
3346 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
3347 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
3348 CP_LOAD_STATE6_0_NUM_UNIT(num_consts / 4));
3349 tu_cs_emit(cs, 0);
3350 tu_cs_emit(cs, 0);
3351 uint32_t i;
3352 for (i = 0; i < num_consts; i++)
3353 tu_cs_emit(cs, driver_params[i]);
3354 } else {
3355 tu_finishme("Indirect driver params");
3356 }
3357 }
3358
3359 static void
3360 tu_dispatch(struct tu_cmd_buffer *cmd,
3361 const struct tu_dispatch_info *info)
3362 {
3363 struct tu_cs *cs = &cmd->cs;
3364 struct tu_pipeline *pipeline = cmd->state.compute_pipeline;
3365 struct tu_descriptor_state *descriptors_state =
3366 &cmd->descriptors[VK_PIPELINE_BIND_POINT_COMPUTE];
3367
3368 /* TODO: We could probably flush less if we add a compute_flush_bits
3369 * bitfield.
3370 */
3371 tu_emit_cache_flush(cmd, cs);
3372
3373 if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_PIPELINE)
3374 tu_cs_emit_ib(cs, &pipeline->program.state_ib);
3375
3376 struct tu_cs_entry ib;
3377
3378 ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE);
3379 if (ib.size)
3380 tu_cs_emit_ib(cs, &ib);
3381
3382 tu_emit_compute_driver_params(cs, pipeline, info);
3383
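/* Re-emit the pipeline's pre-built descriptor load state IB when the bound
 * descriptor sets changed; emitting the pipeline-owned IB directly avoids
 * copying it into every command buffer.
 */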
3384 if ((cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS) &&
3385 pipeline->load_state.state_ib.size > 0) {
3386 tu_cs_emit_ib(cs, &pipeline->load_state.state_ib);
3387 }
3388
3389 cmd->state.dirty &=
3390 ~(TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS | TU_CMD_DIRTY_COMPUTE_PIPELINE);
3391
3392 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
3393 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));
3394
3395 const uint32_t *local_size = pipeline->compute.local_size;
3396 const uint32_t *num_groups = info->blocks;
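/* Program the compute NDRange: local workgroup size (encoded as size - 1)
 * and the total global size in invocations (local size * workgroup count),
 * with the global offsets left at zero.
 */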
3397 tu_cs_emit_regs(cs,
3398 A6XX_HLSQ_CS_NDRANGE_0(.kerneldim = 3,
3399 .localsizex = local_size[0] - 1,
3400 .localsizey = local_size[1] - 1,
3401 .localsizez = local_size[2] - 1),
3402 A6XX_HLSQ_CS_NDRANGE_1(.globalsize_x = local_size[0] * num_groups[0]),
3403 A6XX_HLSQ_CS_NDRANGE_2(.globaloff_x = 0),
3404 A6XX_HLSQ_CS_NDRANGE_3(.globalsize_y = local_size[1] * num_groups[1]),
3405 A6XX_HLSQ_CS_NDRANGE_4(.globaloff_y = 0),
3406 A6XX_HLSQ_CS_NDRANGE_5(.globalsize_z = local_size[2] * num_groups[2]),
3407 A6XX_HLSQ_CS_NDRANGE_6(.globaloff_z = 0));
3408
3409 tu_cs_emit_regs(cs,
3410 A6XX_HLSQ_CS_KERNEL_GROUP_X(1),
3411 A6XX_HLSQ_CS_KERNEL_GROUP_Y(1),
3412 A6XX_HLSQ_CS_KERNEL_GROUP_Z(1));
3413
3414 if (info->indirect) {
3415 uint64_t iova = tu_buffer_iova(info->indirect) + info->indirect_offset;
3416
3417 tu_bo_list_add(&cmd->bo_list, info->indirect->bo,
3418 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
3419
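/* Indirect dispatch: the CP reads the three workgroup counts from the
 * indirect buffer at 'iova', while the local size is still encoded in the
 * packet itself.
 */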
3420 tu_cs_emit_pkt7(cs, CP_EXEC_CS_INDIRECT, 4);
3421 tu_cs_emit(cs, 0x00000000);
3422 tu_cs_emit_qw(cs, iova);
3423 tu_cs_emit(cs,
3424 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(local_size[0] - 1) |
3425 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(local_size[1] - 1) |
3426 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(local_size[2] - 1));
3427 } else {
3428 tu_cs_emit_pkt7(cs, CP_EXEC_CS, 4);
3429 tu_cs_emit(cs, 0x00000000);
3430 tu_cs_emit(cs, CP_EXEC_CS_1_NGROUPS_X(info->blocks[0]));
3431 tu_cs_emit(cs, CP_EXEC_CS_2_NGROUPS_Y(info->blocks[1]));
3432 tu_cs_emit(cs, CP_EXEC_CS_3_NGROUPS_Z(info->blocks[2]));
3433 }
3434
3435 tu_cs_emit_wfi(cs);
3436 }
3437
3438 void
3439 tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
3440 uint32_t base_x,
3441 uint32_t base_y,
3442 uint32_t base_z,
3443 uint32_t x,
3444 uint32_t y,
3445 uint32_t z)
3446 {
3447 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3448 struct tu_dispatch_info info = {};
3449
3450 info.blocks[0] = x;
3451 info.blocks[1] = y;
3452 info.blocks[2] = z;
3453
3454 info.offsets[0] = base_x;
3455 info.offsets[1] = base_y;
3456 info.offsets[2] = base_z;
3457 tu_dispatch(cmd_buffer, &info);
3458 }
3459
3460 void
3461 tu_CmdDispatch(VkCommandBuffer commandBuffer,
3462 uint32_t x,
3463 uint32_t y,
3464 uint32_t z)
3465 {
3466 tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
3467 }
3468
3469 void
3470 tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
3471 VkBuffer _buffer,
3472 VkDeviceSize offset)
3473 {
3474 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3475 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
3476 struct tu_dispatch_info info = {};
3477
3478 info.indirect = buffer;
3479 info.indirect_offset = offset;
3480
3481 tu_dispatch(cmd_buffer, &info);
3482 }
3483
3484 void
3485 tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
3486 {
3487 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3488
3489 tu_cs_end(&cmd_buffer->draw_cs);
3490 tu_cs_end(&cmd_buffer->draw_epilogue_cs);
3491
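/* Now that the whole render pass has been recorded, choose between direct
 * (sysmem) rendering and tiled (GMEM) rendering and emit the recorded draw
 * command stream accordingly.
 */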
3492 if (use_sysmem_rendering(cmd_buffer))
3493 tu_cmd_render_sysmem(cmd_buffer);
3494 else
3495 tu_cmd_render_tiles(cmd_buffer);
3496
3497 /* discard draw_cs and draw_epilogue_cs entries now that the tiles are
3498 * rendered */
3499 tu_cs_discard_entries(&cmd_buffer->draw_cs);
3500 tu_cs_begin(&cmd_buffer->draw_cs);
3501 tu_cs_discard_entries(&cmd_buffer->draw_epilogue_cs);
3502 tu_cs_begin(&cmd_buffer->draw_epilogue_cs);
3503
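/* Carry any flushes deferred during the render pass over to the outer
 * command-stream cache state, then apply the render pass's end barrier.
 */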
3504 cmd_buffer->state.cache.pending_flush_bits |=
3505 cmd_buffer->state.renderpass_cache.pending_flush_bits;
3506 tu_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier, true);
3507
3508 cmd_buffer->state.pass = NULL;
3509 cmd_buffer->state.subpass = NULL;
3510 cmd_buffer->state.framebuffer = NULL;
3511 }
3512
3513 void
3514 tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
3515 const VkSubpassEndInfoKHR *pSubpassEndInfo)
3516 {
3517 tu_CmdEndRenderPass(commandBuffer);
3518 }
3519
3520 struct tu_barrier_info
3521 {
3522 uint32_t eventCount;
3523 const VkEvent *pEvents;
3524 VkPipelineStageFlags srcStageMask;
3525 };
3526
3527 static void
3528 tu_barrier(struct tu_cmd_buffer *cmd,
3529 uint32_t memoryBarrierCount,
3530 const VkMemoryBarrier *pMemoryBarriers,
3531 uint32_t bufferMemoryBarrierCount,
3532 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3533 uint32_t imageMemoryBarrierCount,
3534 const VkImageMemoryBarrier *pImageMemoryBarriers,
3535 const struct tu_barrier_info *info)
3536 {
3537 struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;
3538 VkAccessFlags srcAccessMask = 0;
3539 VkAccessFlags dstAccessMask = 0;
3540
3541 for (uint32_t i = 0; i < memoryBarrierCount; i++) {
3542 srcAccessMask |= pMemoryBarriers[i].srcAccessMask;
3543 dstAccessMask |= pMemoryBarriers[i].dstAccessMask;
3544 }
3545
3546 for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
3547 srcAccessMask |= pBufferMemoryBarriers[i].srcAccessMask;
3548 dstAccessMask |= pBufferMemoryBarriers[i].dstAccessMask;
3549 }
3550
3551 enum tu_cmd_access_mask src_flags = 0;
3552 enum tu_cmd_access_mask dst_flags = 0;
3553
3554 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
3555 TU_FROM_HANDLE(tu_image, image, pImageMemoryBarriers[i].image);
3556 VkImageLayout old_layout = pImageMemoryBarriers[i].oldLayout;
3557 /* For non-linear images, PREINITIALIZED is the same as UNDEFINED */
3558 if (old_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
3559 (image->tiling != VK_IMAGE_TILING_LINEAR &&
3560 old_layout == VK_IMAGE_LAYOUT_PREINITIALIZED)) {
3561 /* The underlying memory for this image may have been used earlier
3562 * within the same queue submission for a different image, which
3563 * means that there may be old, stale cache entries which are in the
3564 * "wrong" location, which could cause problems later after writing
3565 * to the image. We don't want these entries being flushed later and
3566 * overwriting the actual image, so we need to flush the CCU.
3567 */
3568 src_flags |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
3569 }
3570 srcAccessMask |= pImageMemoryBarriers[i].srcAccessMask;
3571 dstAccessMask |= pImageMemoryBarriers[i].dstAccessMask;
3572 }
3573
3574 /* Inside a render pass, we don't yet know whether sysmem or GMEM rendering
3575 * will be used, so we have to fall back to the sysmem flushes.
3576 */
3577 bool gmem = cmd->state.ccu_state == TU_CMD_CCU_GMEM &&
3578 !cmd->state.pass;
3579 src_flags |= vk2tu_access(srcAccessMask, gmem);
3580 dst_flags |= vk2tu_access(dstAccessMask, gmem);
3581
3582 struct tu_cache_state *cache =
3583 cmd->state.pass ? &cmd->state.renderpass_cache : &cmd->state.cache;
3584 tu_flush_for_access(cache, src_flags, dst_flags);
3585
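/* For vkCmdWaitEvents: have the CP poll each event BO until it reads 1
 * (i.e. the event has been set), re-checking every DELAY_LOOP_CYCLES cycles.
 */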
3586 for (uint32_t i = 0; i < info->eventCount; i++) {
3587 TU_FROM_HANDLE(tu_event, event, info->pEvents[i]);
3588
3589 tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_READ);
3590
3591 tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
3592 tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
3593 CP_WAIT_REG_MEM_0_POLL_MEMORY);
3594 tu_cs_emit_qw(cs, event->bo.iova); /* POLL_ADDR_LO/HI */
3595 tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(1));
3596 tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0u));
3597 tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(20));
3598 }
3599 }
3600
3601 void
3602 tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
3603 VkPipelineStageFlags srcStageMask,
3604 VkPipelineStageFlags dstStageMask,
3605 VkDependencyFlags dependencyFlags,
3606 uint32_t memoryBarrierCount,
3607 const VkMemoryBarrier *pMemoryBarriers,
3608 uint32_t bufferMemoryBarrierCount,
3609 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3610 uint32_t imageMemoryBarrierCount,
3611 const VkImageMemoryBarrier *pImageMemoryBarriers)
3612 {
3613 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3614 struct tu_barrier_info info;
3615
3616 info.eventCount = 0;
3617 info.pEvents = NULL;
3618 info.srcStageMask = srcStageMask;
3619
3620 tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
3621 bufferMemoryBarrierCount, pBufferMemoryBarriers,
3622 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
3623 }
3624
3625 static void
3626 write_event(struct tu_cmd_buffer *cmd, struct tu_event *event,
3627 VkPipelineStageFlags stageMask, unsigned value)
3628 {
3629 struct tu_cs *cs = &cmd->cs;
3630
3631 /* vkCmdSetEvent/vkCmdResetEvent cannot be called inside a render pass */
3632 assert(!cmd->state.pass);
3633
3634 tu_emit_cache_flush(cmd, cs);
3635
3636 tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_WRITE);
3637
3638 /* Flags that only require a top-of-pipe event. DrawIndirect parameters are
3639 * read by the CP, so the draw indirect stage counts as top-of-pipe too.
3640 */
3641 VkPipelineStageFlags top_of_pipe_flags =
3642 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
3643 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
3644
3645 if (!(stageMask & ~top_of_pipe_flags)) {
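/* Only top-of-pipe work is involved, so the CP can write the value
 * immediately without waiting on earlier GPU work.
 */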
3646 tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
3647 tu_cs_emit_qw(cs, event->bo.iova); /* ADDR_LO/HI */
3648 tu_cs_emit(cs, value);
3649 } else {
3650 /* Use a RB_DONE_TS event to wait for everything to complete. */
3651 tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 4);
3652 tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(RB_DONE_TS));
3653 tu_cs_emit_qw(cs, event->bo.iova);
3654 tu_cs_emit(cs, value);
3655 }
3656 }
3657
3658 void
3659 tu_CmdSetEvent(VkCommandBuffer commandBuffer,
3660 VkEvent _event,
3661 VkPipelineStageFlags stageMask)
3662 {
3663 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3664 TU_FROM_HANDLE(tu_event, event, _event);
3665
3666 write_event(cmd, event, stageMask, 1);
3667 }
3668
3669 void
3670 tu_CmdResetEvent(VkCommandBuffer commandBuffer,
3671 VkEvent _event,
3672 VkPipelineStageFlags stageMask)
3673 {
3674 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3675 TU_FROM_HANDLE(tu_event, event, _event);
3676
3677 write_event(cmd, event, stageMask, 0);
3678 }
3679
3680 void
3681 tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
3682 uint32_t eventCount,
3683 const VkEvent *pEvents,
3684 VkPipelineStageFlags srcStageMask,
3685 VkPipelineStageFlags dstStageMask,
3686 uint32_t memoryBarrierCount,
3687 const VkMemoryBarrier *pMemoryBarriers,
3688 uint32_t bufferMemoryBarrierCount,
3689 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3690 uint32_t imageMemoryBarrierCount,
3691 const VkImageMemoryBarrier *pImageMemoryBarriers)
3692 {
3693 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3694 struct tu_barrier_info info;
3695
3696 info.eventCount = eventCount;
3697 info.pEvents = pEvents;
3698 info.srcStageMask = 0;
3699
3700 tu_barrier(cmd, memoryBarrierCount, pMemoryBarriers,
3701 bufferMemoryBarrierCount, pBufferMemoryBarriers,
3702 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
3703 }
3704
3705 void
3706 tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
3707 {
3708 /* No-op */
3709 }