src/freedreno/vulkan/tu_cmd_buffer.c (mesa.git, commit 6d76f00f9e0c39e479a4ef86174739f0d051ec60)
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include "adreno_pm4.xml.h"
31 #include "adreno_common.xml.h"
32
33 #include "vk_format.h"
34 #include "vk_util.h"
35
36 #include "tu_cs.h"
37
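/* The BO list tracks every GEM buffer referenced by this command buffer so
 * that the handles can be passed to the kernel in the submit ioctl.
 * Duplicate handles are merged and their MSM_SUBMIT_BO_* flags OR'd together.
 */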
38 void
39 tu_bo_list_init(struct tu_bo_list *list)
40 {
41 list->count = list->capacity = 0;
42 list->bo_infos = NULL;
43 }
44
45 void
46 tu_bo_list_destroy(struct tu_bo_list *list)
47 {
48 free(list->bo_infos);
49 }
50
51 void
52 tu_bo_list_reset(struct tu_bo_list *list)
53 {
54 list->count = 0;
55 }
56
57 /**
58 * \a flags consists of MSM_SUBMIT_BO_FLAGS.
59 */
60 static uint32_t
61 tu_bo_list_add_info(struct tu_bo_list *list,
62 const struct drm_msm_gem_submit_bo *bo_info)
63 {
64 assert(bo_info->handle != 0);
65
66 for (uint32_t i = 0; i < list->count; ++i) {
67 if (list->bo_infos[i].handle == bo_info->handle) {
68 assert(list->bo_infos[i].presumed == bo_info->presumed);
69 list->bo_infos[i].flags |= bo_info->flags;
70 return i;
71 }
72 }
73
74 /* grow list->bo_infos if needed */
75 if (list->count == list->capacity) {
76 uint32_t new_capacity = MAX2(2 * list->count, 16);
77 struct drm_msm_gem_submit_bo *new_bo_infos = realloc(
78 list->bo_infos, new_capacity * sizeof(struct drm_msm_gem_submit_bo));
79 if (!new_bo_infos)
80 return TU_BO_LIST_FAILED;
81 list->bo_infos = new_bo_infos;
82 list->capacity = new_capacity;
83 }
84
85 list->bo_infos[list->count] = *bo_info;
86 return list->count++;
87 }
88
89 uint32_t
90 tu_bo_list_add(struct tu_bo_list *list,
91 const struct tu_bo *bo,
92 uint32_t flags)
93 {
94 return tu_bo_list_add_info(list, &(struct drm_msm_gem_submit_bo) {
95 .flags = flags,
96 .handle = bo->gem_handle,
97 .presumed = bo->iova,
98 });
99 }
100
101 VkResult
102 tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other)
103 {
104 for (uint32_t i = 0; i < other->count; i++) {
105 if (tu_bo_list_add_info(list, other->bo_infos + i) == TU_BO_LIST_FAILED)
106 return VK_ERROR_OUT_OF_HOST_MEMORY;
107 }
108
109 return VK_SUCCESS;
110 }
111
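/* Emit a CP_EVENT_WRITE for the given hardware event. Timestamp (_TS) events
 * also write a sequence number to memory; we only need the side effect of
 * that write here, so it goes to a dummy location in the global bo.
 */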
112 void
113 tu6_emit_event_write(struct tu_cmd_buffer *cmd,
114 struct tu_cs *cs,
115 enum vgt_event_type event)
116 {
117 bool need_seqno = false;
118 switch (event) {
119 case CACHE_FLUSH_TS:
120 case WT_DONE_TS:
121 case RB_DONE_TS:
122 case PC_CCU_FLUSH_DEPTH_TS:
123 case PC_CCU_FLUSH_COLOR_TS:
124 case PC_CCU_RESOLVE_TS:
125 need_seqno = true;
126 break;
127 default:
128 break;
129 }
130
131 tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
132 tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
133 if (need_seqno) {
134 tu_cs_emit_qw(cs, global_iova(cmd, seqno_dummy));
135 tu_cs_emit(cs, 0);
136 }
137 }
138
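/* Emit the flushes, invalidates and stalls requested in 'flushes', in an
 * order that respects the CCU flush-before-invalidate requirement described
 * in the comment below.
 */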
139 static void
140 tu6_emit_flushes(struct tu_cmd_buffer *cmd_buffer,
141 struct tu_cs *cs,
142 enum tu_cmd_flush_bits flushes)
143 {
144 /* Experiments show that invalidating CCU while it still has data in it
145 * doesn't work, so make sure to always flush before invalidating in case
146 * any data remains that hasn't yet been made available through a barrier.
147 * However, it does seem to work for UCHE.
148 */
149 if (flushes & (TU_CMD_FLAG_CCU_FLUSH_COLOR |
150 TU_CMD_FLAG_CCU_INVALIDATE_COLOR))
151 tu6_emit_event_write(cmd_buffer, cs, PC_CCU_FLUSH_COLOR_TS);
152 if (flushes & (TU_CMD_FLAG_CCU_FLUSH_DEPTH |
153 TU_CMD_FLAG_CCU_INVALIDATE_DEPTH))
154 tu6_emit_event_write(cmd_buffer, cs, PC_CCU_FLUSH_DEPTH_TS);
155 if (flushes & TU_CMD_FLAG_CCU_INVALIDATE_COLOR)
156 tu6_emit_event_write(cmd_buffer, cs, PC_CCU_INVALIDATE_COLOR);
157 if (flushes & TU_CMD_FLAG_CCU_INVALIDATE_DEPTH)
158 tu6_emit_event_write(cmd_buffer, cs, PC_CCU_INVALIDATE_DEPTH);
159 if (flushes & TU_CMD_FLAG_CACHE_FLUSH)
160 tu6_emit_event_write(cmd_buffer, cs, CACHE_FLUSH_TS);
161 if (flushes & TU_CMD_FLAG_CACHE_INVALIDATE)
162 tu6_emit_event_write(cmd_buffer, cs, CACHE_INVALIDATE);
163 if (flushes & TU_CMD_FLAG_WAIT_MEM_WRITES)
164 tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
165 if (flushes & TU_CMD_FLAG_WAIT_FOR_IDLE)
166 tu_cs_emit_wfi(cs);
167 if (flushes & TU_CMD_FLAG_WAIT_FOR_ME)
168 tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
169 }
170
171 /* "Normal" cache flushes, that don't require any special handling */
172
173 static void
174 tu_emit_cache_flush(struct tu_cmd_buffer *cmd_buffer,
175 struct tu_cs *cs)
176 {
177 tu6_emit_flushes(cmd_buffer, cs, cmd_buffer->state.cache.flush_bits);
178 cmd_buffer->state.cache.flush_bits = 0;
179 }
180
181 /* Renderpass cache flushes */
182
183 void
184 tu_emit_cache_flush_renderpass(struct tu_cmd_buffer *cmd_buffer,
185 struct tu_cs *cs)
186 {
187 tu6_emit_flushes(cmd_buffer, cs, cmd_buffer->state.renderpass_cache.flush_bits);
188 cmd_buffer->state.renderpass_cache.flush_bits = 0;
189 }
190
191 /* Cache flushes for things that use the color/depth read/write path (i.e.
192 * blits and draws). This deals with changing CCU state as well as the usual
193 * cache flushing.
194 */
195
196 void
197 tu_emit_cache_flush_ccu(struct tu_cmd_buffer *cmd_buffer,
198 struct tu_cs *cs,
199 enum tu_cmd_ccu_state ccu_state)
200 {
201 enum tu_cmd_flush_bits flushes = cmd_buffer->state.cache.flush_bits;
202
203 assert(ccu_state != TU_CMD_CCU_UNKNOWN);
204
205 /* Changing CCU state must involve invalidating the CCU. In sysmem mode,
206 * the CCU may also contain data that we haven't flushed out yet, so we
207 * also need to flush. Also, in order to program RB_CCU_CNTL, we need to
208 * emit a WFI as it isn't pipelined.
209 */
210 if (ccu_state != cmd_buffer->state.ccu_state) {
211 if (cmd_buffer->state.ccu_state != TU_CMD_CCU_GMEM) {
212 flushes |=
213 TU_CMD_FLAG_CCU_FLUSH_COLOR |
214 TU_CMD_FLAG_CCU_FLUSH_DEPTH;
215 cmd_buffer->state.cache.pending_flush_bits &= ~(
216 TU_CMD_FLAG_CCU_FLUSH_COLOR |
217 TU_CMD_FLAG_CCU_FLUSH_DEPTH);
218 }
219 flushes |=
220 TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
221 TU_CMD_FLAG_CCU_INVALIDATE_DEPTH |
222 TU_CMD_FLAG_WAIT_FOR_IDLE;
223 cmd_buffer->state.cache.pending_flush_bits &= ~(
224 TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
225 TU_CMD_FLAG_CCU_INVALIDATE_DEPTH |
226 TU_CMD_FLAG_WAIT_FOR_IDLE);
227 }
228
229 tu6_emit_flushes(cmd_buffer, cs, flushes);
230 cmd_buffer->state.cache.flush_bits = 0;
231
232 if (ccu_state != cmd_buffer->state.ccu_state) {
233 struct tu_physical_device *phys_dev = cmd_buffer->device->physical_device;
234 tu_cs_emit_regs(cs,
235 A6XX_RB_CCU_CNTL(.offset =
236 ccu_state == TU_CMD_CCU_GMEM ?
237 phys_dev->ccu_offset_gmem :
238 phys_dev->ccu_offset_bypass,
239 .gmem = ccu_state == TU_CMD_CCU_GMEM));
240 cmd_buffer->state.ccu_state = ccu_state;
241 }
242 }
243
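/* Emit depth/stencil buffer state for the subpass. With no depth/stencil
 * attachment the registers are programmed to DEPTH6_NONE/zero; separate
 * stencil (S8 and D32S8) is programmed through RB_STENCIL_INFO below.
 */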
244 static void
245 tu6_emit_zs(struct tu_cmd_buffer *cmd,
246 const struct tu_subpass *subpass,
247 struct tu_cs *cs)
248 {
249 const struct tu_framebuffer *fb = cmd->state.framebuffer;
250
251 const uint32_t a = subpass->depth_stencil_attachment.attachment;
252 if (a == VK_ATTACHMENT_UNUSED) {
253 tu_cs_emit_regs(cs,
254 A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE),
255 A6XX_RB_DEPTH_BUFFER_PITCH(0),
256 A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(0),
257 A6XX_RB_DEPTH_BUFFER_BASE(0),
258 A6XX_RB_DEPTH_BUFFER_BASE_GMEM(0));
259
260 tu_cs_emit_regs(cs,
261 A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE));
262
263 tu_cs_emit_regs(cs,
264 A6XX_GRAS_LRZ_BUFFER_BASE(0),
265 A6XX_GRAS_LRZ_BUFFER_PITCH(0),
266 A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));
267
268 tu_cs_emit_regs(cs, A6XX_RB_STENCIL_INFO(0));
269
270 return;
271 }
272
273 const struct tu_image_view *iview = fb->attachments[a].attachment;
274 const struct tu_render_pass_attachment *attachment =
275 &cmd->state.pass->attachments[a];
276 enum a6xx_depth_format fmt = tu6_pipe2depth(attachment->format);
277
278 tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
279 tu_cs_emit(cs, A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt).value);
280 tu_cs_image_ref(cs, iview, 0);
281 tu_cs_emit(cs, attachment->gmem_offset);
282
283 tu_cs_emit_regs(cs,
284 A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = fmt));
285
286 tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
287 tu_cs_image_flag_ref(cs, iview, 0);
288
289 tu_cs_emit_regs(cs,
290 A6XX_GRAS_LRZ_BUFFER_BASE(0),
291 A6XX_GRAS_LRZ_BUFFER_PITCH(0),
292 A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));
293
294 if (attachment->format == VK_FORMAT_D32_SFLOAT_S8_UINT ||
295 attachment->format == VK_FORMAT_S8_UINT) {
296
297 tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCIL_INFO, 6);
298 tu_cs_emit(cs, A6XX_RB_STENCIL_INFO(.separate_stencil = true).value);
299 if (attachment->format == VK_FORMAT_D32_SFLOAT_S8_UINT) {
300 tu_cs_image_stencil_ref(cs, iview, 0);
301 tu_cs_emit(cs, attachment->gmem_offset_stencil);
302 } else {
303 tu_cs_image_ref(cs, iview, 0);
304 tu_cs_emit(cs, attachment->gmem_offset);
305 }
306 } else {
307 tu_cs_emit_regs(cs,
308 A6XX_RB_STENCIL_INFO(0));
309 }
310 }
311
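/* Emit color attachment (MRT) state for the subpass: buffer info, UBWC flag
 * buffers, sRGB enables and the maximum layer index.
 */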
312 static void
313 tu6_emit_mrt(struct tu_cmd_buffer *cmd,
314 const struct tu_subpass *subpass,
315 struct tu_cs *cs)
316 {
317 const struct tu_framebuffer *fb = cmd->state.framebuffer;
318
319 for (uint32_t i = 0; i < subpass->color_count; ++i) {
320 uint32_t a = subpass->color_attachments[i].attachment;
321 if (a == VK_ATTACHMENT_UNUSED)
322 continue;
323
324 const struct tu_image_view *iview = fb->attachments[a].attachment;
325
326 tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_BUF_INFO(i), 6);
327 tu_cs_emit(cs, iview->RB_MRT_BUF_INFO);
328 tu_cs_image_ref(cs, iview, 0);
329 tu_cs_emit(cs, cmd->state.pass->attachments[a].gmem_offset);
330
331 tu_cs_emit_regs(cs,
332 A6XX_SP_FS_MRT_REG(i, .dword = iview->SP_FS_MRT_REG));
333
334 tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_LO(i), 3);
335 tu_cs_image_flag_ref(cs, iview, 0);
336 }
337
338 tu_cs_emit_regs(cs,
339 A6XX_RB_SRGB_CNTL(.dword = subpass->srgb_cntl));
340 tu_cs_emit_regs(cs,
341 A6XX_SP_SRGB_CNTL(.dword = subpass->srgb_cntl));
342
343 tu_cs_emit_regs(cs, A6XX_GRAS_MAX_LAYER_INDEX(fb->layers - 1));
344 }
345
346 void
347 tu6_emit_msaa(struct tu_cs *cs, VkSampleCountFlagBits vk_samples)
348 {
349 const enum a3xx_msaa_samples samples = tu_msaa_samples(vk_samples);
350 bool msaa_disable = samples == MSAA_ONE;
351
352 tu_cs_emit_regs(cs,
353 A6XX_SP_TP_RAS_MSAA_CNTL(samples),
354 A6XX_SP_TP_DEST_MSAA_CNTL(.samples = samples,
355 .msaa_disable = msaa_disable));
356
357 tu_cs_emit_regs(cs,
358 A6XX_GRAS_RAS_MSAA_CNTL(samples),
359 A6XX_GRAS_DEST_MSAA_CNTL(.samples = samples,
360 .msaa_disable = msaa_disable));
361
362 tu_cs_emit_regs(cs,
363 A6XX_RB_RAS_MSAA_CNTL(samples),
364 A6XX_RB_DEST_MSAA_CNTL(.samples = samples,
365 .msaa_disable = msaa_disable));
366
367 tu_cs_emit_regs(cs,
368 A6XX_RB_MSAA_CNTL(samples));
369 }
370
371 static void
372 tu6_emit_bin_size(struct tu_cs *cs,
373 uint32_t bin_w, uint32_t bin_h, uint32_t flags)
374 {
375 tu_cs_emit_regs(cs,
376 A6XX_GRAS_BIN_CONTROL(.binw = bin_w,
377 .binh = bin_h,
378 .dword = flags));
379
380 tu_cs_emit_regs(cs,
381 A6XX_RB_BIN_CONTROL(.binw = bin_w,
382 .binh = bin_h,
383 .dword = flags));
384
385 /* no flag for RB_BIN_CONTROL2... */
386 tu_cs_emit_regs(cs,
387 A6XX_RB_BIN_CONTROL2(.binw = bin_w,
388 .binh = bin_h));
389 }
390
391 static void
392 tu6_emit_render_cntl(struct tu_cmd_buffer *cmd,
393 const struct tu_subpass *subpass,
394 struct tu_cs *cs,
395 bool binning)
396 {
397 const struct tu_framebuffer *fb = cmd->state.framebuffer;
398 uint32_t cntl = 0;
399 cntl |= A6XX_RB_RENDER_CNTL_UNK4;
400 if (binning) {
401 cntl |= A6XX_RB_RENDER_CNTL_BINNING;
402 } else {
403 uint32_t mrts_ubwc_enable = 0;
404 for (uint32_t i = 0; i < subpass->color_count; ++i) {
405 uint32_t a = subpass->color_attachments[i].attachment;
406 if (a == VK_ATTACHMENT_UNUSED)
407 continue;
408
409 const struct tu_image_view *iview = fb->attachments[a].attachment;
410 if (iview->ubwc_enabled)
411 mrts_ubwc_enable |= 1 << i;
412 }
413
414 cntl |= A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable);
415
416 const uint32_t a = subpass->depth_stencil_attachment.attachment;
417 if (a != VK_ATTACHMENT_UNUSED) {
418 const struct tu_image_view *iview = fb->attachments[a].attachment;
419 if (iview->ubwc_enabled)
420 cntl |= A6XX_RB_RENDER_CNTL_FLAG_DEPTH;
421 }
422
423 /* In the !binning case, we need to set RB_RENDER_CNTL in the draw_cs
424 * in order to set it correctly for the different subpasses. However,
425 * that means the packets we're emitting also happen during binning. So
426 * we need to guard the write on !BINNING at CP execution time.
427 */
428 tu_cs_reserve(cs, 3 + 4);
429 tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
430 tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
431 CP_COND_REG_EXEC_0_GMEM | CP_COND_REG_EXEC_0_SYSMEM);
432 tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(4));
433 }
434
435 tu_cs_emit_pkt7(cs, CP_REG_WRITE, 3);
436 tu_cs_emit(cs, CP_REG_WRITE_0_TRACKER(TRACK_RENDER_CNTL));
437 tu_cs_emit(cs, REG_A6XX_RB_RENDER_CNTL);
438 tu_cs_emit(cs, cntl);
439 }
440
441 static void
442 tu6_emit_blit_scissor(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool align)
443 {
444
445 const VkRect2D *render_area = &cmd->state.render_area;
446
447 /* Avoid assertion failures with an empty render area at (0, 0) where the
448 * subtraction below wraps around. Empty render areas should be forced to
449 * the sysmem path by use_sysmem_rendering(). It's not even clear whether
450 * an empty scissor here works, and the blob seems to force sysmem too as
451 * it sets something wrong (non-empty) for the scissor.
452 */
453 if (render_area->extent.width == 0 ||
454 render_area->extent.height == 0)
455 return;
456
457 uint32_t x1 = render_area->offset.x;
458 uint32_t y1 = render_area->offset.y;
459 uint32_t x2 = x1 + render_area->extent.width - 1;
460 uint32_t y2 = y1 + render_area->extent.height - 1;
461
462 if (align) {
463 x1 = x1 & ~(GMEM_ALIGN_W - 1);
464 y1 = y1 & ~(GMEM_ALIGN_H - 1);
465 x2 = ALIGN_POT(x2 + 1, GMEM_ALIGN_W) - 1;
466 y2 = ALIGN_POT(y2 + 1, GMEM_ALIGN_H) - 1;
467 }
468
469 tu_cs_emit_regs(cs,
470 A6XX_RB_BLIT_SCISSOR_TL(.x = x1, .y = y1),
471 A6XX_RB_BLIT_SCISSOR_BR(.x = x2, .y = y2));
472 }
473
474 void
475 tu6_emit_window_scissor(struct tu_cs *cs,
476 uint32_t x1,
477 uint32_t y1,
478 uint32_t x2,
479 uint32_t y2)
480 {
481 tu_cs_emit_regs(cs,
482 A6XX_GRAS_SC_WINDOW_SCISSOR_TL(.x = x1, .y = y1),
483 A6XX_GRAS_SC_WINDOW_SCISSOR_BR(.x = x2, .y = y2));
484
485 tu_cs_emit_regs(cs,
486 A6XX_GRAS_2D_RESOLVE_CNTL_1(.x = x1, .y = y1),
487 A6XX_GRAS_2D_RESOLVE_CNTL_2(.x = x2, .y = y2));
488 }
489
490 void
491 tu6_emit_window_offset(struct tu_cs *cs, uint32_t x1, uint32_t y1)
492 {
493 tu_cs_emit_regs(cs,
494 A6XX_RB_WINDOW_OFFSET(.x = x1, .y = y1));
495
496 tu_cs_emit_regs(cs,
497 A6XX_RB_WINDOW_OFFSET2(.x = x1, .y = y1));
498
499 tu_cs_emit_regs(cs,
500 A6XX_SP_WINDOW_OFFSET(.x = x1, .y = y1));
501
502 tu_cs_emit_regs(cs,
503 A6XX_SP_TP_WINDOW_OFFSET(.x = x1, .y = y1));
504 }
505
506 static void
507 tu_cs_emit_draw_state(struct tu_cs *cs, uint32_t id, struct tu_draw_state state)
508 {
509 uint32_t enable_mask;
510 switch (id) {
511 case TU_DRAW_STATE_PROGRAM:
512 case TU_DRAW_STATE_VI:
513 case TU_DRAW_STATE_FS_CONST:
514 /* The blob seems to not enable this (DESC_SETS_LOAD) for binning, even
515 * when resources would actually be used in the binning shader.
516 * Presumably the overhead of prefetching the resources isn't
517 * worth it.
518 */
519 case TU_DRAW_STATE_DESC_SETS_LOAD:
520 enable_mask = CP_SET_DRAW_STATE__0_GMEM |
521 CP_SET_DRAW_STATE__0_SYSMEM;
522 break;
523 case TU_DRAW_STATE_PROGRAM_BINNING:
524 case TU_DRAW_STATE_VI_BINNING:
525 enable_mask = CP_SET_DRAW_STATE__0_BINNING;
526 break;
527 case TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM:
528 enable_mask = CP_SET_DRAW_STATE__0_GMEM;
529 break;
530 case TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM:
531 enable_mask = CP_SET_DRAW_STATE__0_SYSMEM;
532 break;
533 default:
534 enable_mask = CP_SET_DRAW_STATE__0_GMEM |
535 CP_SET_DRAW_STATE__0_SYSMEM |
536 CP_SET_DRAW_STATE__0_BINNING;
537 break;
538 }
539
540 /* We need to reload the descriptors every time the descriptor sets
541 * change. However, the commands we send only depend on the pipeline
542 * because the whole point is to cache descriptors which are used by the
543 * pipeline. There's a problem here, in that the firmware has an
544 * "optimization" which skips executing groups that are set to the same
545 * value as the last draw. This means that if the descriptor sets change
546 * but not the pipeline, we'd try to re-execute the same buffer which
547 * the firmware would ignore and we wouldn't pre-load the new
548 * descriptors. Set the DIRTY bit to avoid this optimization.
549 */
550 if (id == TU_DRAW_STATE_DESC_SETS_LOAD)
551 enable_mask |= CP_SET_DRAW_STATE__0_DIRTY;
552
553 tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(state.size) |
554 enable_mask |
555 CP_SET_DRAW_STATE__0_GROUP_ID(id) |
556 COND(!state.size, CP_SET_DRAW_STATE__0_DISABLE));
557 tu_cs_emit_qw(cs, state.iova);
558 }
559
560 static bool
561 use_hw_binning(struct tu_cmd_buffer *cmd)
562 {
563 const struct tu_framebuffer *fb = cmd->state.framebuffer;
564
565 /* XFB commands are emitted for BINNING || SYSMEM, which makes XFB incompatible
566 * with non-hw binning GMEM rendering. This is required because some of the
567 * XFB commands need to be executed only once.
568 */
569 if (cmd->state.xfb_used)
570 return true;
571
572 /* Some devices have a newer a630_sqe.fw in which, only in CP_DRAW_INDX and
573 * CP_DRAW_INDX_OFFSET, visibility-based skipping happens *before*
574 * predication-based skipping. It seems this breaks predication, because
575 * draws skipped by predication will not be executed in the binning phase,
576 * and therefore won't have an entry in the draw stream, but the
577 * visibility-based skipping will expect it to have an entry. The result is
578 * a GPU hang when actually executing the first non-predicated draw.
579 * However, it seems that things still work if the whole renderpass is
580 * predicated. Affected tests are
581 * dEQP-VK.conditional_rendering.draw_clear.draw.case_2 as well as a few
582 * other case_N.
583 *
584 * Broken FW version: 016ee181
585 * linux-firmware (working) FW version: 016ee176
586 *
587 * All known a650_sqe.fw versions don't have this bug.
588 *
589 * TODO: we should do version detection of the FW so that devices using the
590 * linux-firmware version of a630_sqe.fw don't need this workaround.
591 */
592 if (cmd->state.has_subpass_predication && cmd->device->physical_device->gpu_id != 650)
593 return false;
594
595 if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_NOBIN))
596 return false;
597
598 if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN))
599 return true;
600
601 return (fb->tile_count.width * fb->tile_count.height) > 2;
602 }
603
604 static bool
605 use_sysmem_rendering(struct tu_cmd_buffer *cmd)
606 {
607 if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_SYSMEM))
608 return true;
609
610 /* If hw binning is required because of XFB but doesn't work because of the
611 * conditional rendering bug, fall back to sysmem.
612 */
613 if (cmd->state.xfb_used && cmd->state.has_subpass_predication &&
614 cmd->device->physical_device->gpu_id != 650)
615 return true;
616
617 /* can't fit attachments into gmem */
618 if (!cmd->state.pass->gmem_pixels)
619 return true;
620
621 if (cmd->state.framebuffer->layers > 1)
622 return true;
623
624 /* Use sysmem for empty render areas */
625 if (cmd->state.render_area.extent.width == 0 ||
626 cmd->state.render_area.extent.height == 0)
627 return true;
628
629 if (cmd->state.has_tess)
630 return true;
631
632 return false;
633 }
634
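/* Select the tile to render in the GMEM pass: program the window scissor and
 * offset for the tile, and with hw binning point the CP at this tile's slot
 * in the visibility stream via CP_SET_BIN_DATA5_OFFSET.
 */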
635 static void
636 tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
637 struct tu_cs *cs,
638 uint32_t tx, uint32_t ty, uint32_t pipe, uint32_t slot)
639 {
640 const struct tu_framebuffer *fb = cmd->state.framebuffer;
641
642 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
643 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_YIELD));
644
645 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
646 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));
647
648 const uint32_t x1 = fb->tile0.width * tx;
649 const uint32_t y1 = fb->tile0.height * ty;
650 const uint32_t x2 = x1 + fb->tile0.width - 1;
651 const uint32_t y2 = y1 + fb->tile0.height - 1;
652 tu6_emit_window_scissor(cs, x1, y1, x2, y2);
653 tu6_emit_window_offset(cs, x1, y1);
654
655 tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));
656
657 if (use_hw_binning(cmd)) {
658 tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
659
660 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
661 tu_cs_emit(cs, 0x0);
662
663 tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5_OFFSET, 4);
664 tu_cs_emit(cs, fb->pipe_sizes[pipe] |
665 CP_SET_BIN_DATA5_0_VSC_N(slot));
666 tu_cs_emit(cs, pipe * cmd->vsc_draw_strm_pitch);
667 tu_cs_emit(cs, pipe * 4);
668 tu_cs_emit(cs, pipe * cmd->vsc_prim_strm_pitch);
669
670 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
671 tu_cs_emit(cs, 0x0);
672
673 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
674 tu_cs_emit(cs, 0x0);
675 } else {
676 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
677 tu_cs_emit(cs, 0x1);
678
679 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
680 tu_cs_emit(cs, 0x0);
681 }
682 }
683
684 static void
685 tu6_emit_sysmem_resolve(struct tu_cmd_buffer *cmd,
686 struct tu_cs *cs,
687 uint32_t a,
688 uint32_t gmem_a)
689 {
690 const struct tu_framebuffer *fb = cmd->state.framebuffer;
691 struct tu_image_view *dst = fb->attachments[a].attachment;
692 struct tu_image_view *src = fb->attachments[gmem_a].attachment;
693
694 tu_resolve_sysmem(cmd, cs, src, dst, fb->layers, &cmd->state.render_area);
695 }
696
697 static void
698 tu6_emit_sysmem_resolves(struct tu_cmd_buffer *cmd,
699 struct tu_cs *cs,
700 const struct tu_subpass *subpass)
701 {
702 if (subpass->resolve_attachments) {
703 /* From the documentation for vkCmdNextSubpass, section 7.4 "Render Pass
704 * Commands":
705 *
706 * End-of-subpass multisample resolves are treated as color
707 * attachment writes for the purposes of synchronization. That is,
708 * they are considered to execute in the
709 * VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT pipeline stage and
710 * their writes are synchronized with
711 * VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT. Synchronization between
712 * rendering within a subpass and any resolve operations at the end
713 * of the subpass occurs automatically, without need for explicit
714 * dependencies or pipeline barriers. However, if the resolve
715 * attachment is also used in a different subpass, an explicit
716 * dependency is needed.
717 *
718 * We use the CP_BLIT path for sysmem resolves, which is really a
719 * transfer command, so we have to manually flush similar to the gmem
720 * resolve case. However, a flush afterwards isn't needed because of the
721 * last sentence and the fact that we're in sysmem mode.
722 */
723 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
724 tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
725
726 /* Wait for the flushes to land before using the 2D engine */
727 tu_cs_emit_wfi(cs);
728
729 for (unsigned i = 0; i < subpass->color_count; i++) {
730 uint32_t a = subpass->resolve_attachments[i].attachment;
731 if (a == VK_ATTACHMENT_UNUSED)
732 continue;
733
734 tu6_emit_sysmem_resolve(cmd, cs, a,
735 subpass->color_attachments[i].attachment);
736 }
737 }
738 }
739
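/* Emit the resolve (RM6_RESOLVE) commands that store the GMEM contents back
 * to system memory at the end of each tile: every attachment with a valid
 * gmem_offset, plus any resolve attachments of the last subpass.
 */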
740 static void
741 tu6_emit_tile_store(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
742 {
743 const struct tu_render_pass *pass = cmd->state.pass;
744 const struct tu_subpass *subpass = &pass->subpasses[pass->subpass_count-1];
745
746 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
747 tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
748 CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
749 CP_SET_DRAW_STATE__0_GROUP_ID(0));
750 tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
751 tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));
752
753 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
754 tu_cs_emit(cs, 0x0);
755
756 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
757 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE));
758
759 tu6_emit_blit_scissor(cmd, cs, true);
760
761 for (uint32_t a = 0; a < pass->attachment_count; ++a) {
762 if (pass->attachments[a].gmem_offset >= 0)
763 tu_store_gmem_attachment(cmd, cs, a, a);
764 }
765
766 if (subpass->resolve_attachments) {
767 for (unsigned i = 0; i < subpass->color_count; i++) {
768 uint32_t a = subpass->resolve_attachments[i].attachment;
769 if (a != VK_ATTACHMENT_UNUSED)
770 tu_store_gmem_attachment(cmd, cs, a,
771 subpass->color_attachments[i].attachment);
772 }
773 }
774 }
775
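/* One-time setup emitted at the start of every primary command buffer:
 * cache invalidation, CCU configuration for sysmem rendering, a pile of
 * static/unknown registers, border color and VSC buffer addresses.
 */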
776 static void
777 tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
778 {
779 struct tu_device *dev = cmd->device;
780 const struct tu_physical_device *phys_dev = dev->physical_device;
781
782 tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
783
784 tu_cs_emit_regs(cs, A6XX_HLSQ_INVALIDATE_CMD(
785 .vs_state = true,
786 .hs_state = true,
787 .ds_state = true,
788 .gs_state = true,
789 .fs_state = true,
790 .cs_state = true,
791 .gfx_ibo = true,
792 .cs_ibo = true,
793 .gfx_shared_const = true,
794 .cs_shared_const = true,
795 .gfx_bindless = 0x1f,
796 .cs_bindless = 0x1f));
797
798 tu_cs_emit_wfi(cs);
799
800 cmd->state.cache.pending_flush_bits &=
801 ~(TU_CMD_FLAG_WAIT_FOR_IDLE | TU_CMD_FLAG_CACHE_INVALIDATE);
802
803 tu_cs_emit_regs(cs,
804 A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_bypass));
805 cmd->state.ccu_state = TU_CMD_CCU_SYSMEM;
806 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
807 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
808 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
809 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE0F, 0x3f);
810 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B605, 0x44);
811 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B600, 0x100000);
812 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE00, 0x80);
813 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE01, 0);
814
815 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9600, 0);
816 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8600, 0x880);
817 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE04, 0);
818 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE03, 0x00000410);
819 tu_cs_emit_write_reg(cs, REG_A6XX_SP_IBO_COUNT, 0);
820 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B182, 0);
821 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_SHARED_CONSTS, 0);
822 tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
823 tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
824 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
825 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A982, 0);
826 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A9A8, 0);
827 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AB00, 0x5);
828
829 /* TODO: set A6XX_VFD_ADD_OFFSET_INSTANCE and fix ir3 to avoid adding base instance */
830 tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
831 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
832 tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x1f);
833
834 tu_cs_emit_write_reg(cs, REG_A6XX_RB_SRGB_CNTL, 0);
835
836 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8110, 0);
837
838 tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL0, 0x401);
839 tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL1, 0);
840 tu_cs_emit_write_reg(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 0);
841 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8818, 0);
842 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8819, 0);
843 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881A, 0);
844 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881B, 0);
845 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881C, 0);
846 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881D, 0);
847 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881E, 0);
848 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_88F0, 0);
849
850 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9107, 0);
851
852 tu_cs_emit_regs(cs, A6XX_VPC_POINT_COORD_INVERT(false));
853 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);
854
855 tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(true));
856
857 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9980, 0);
858
859 tu_cs_emit_write_reg(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 0);
860 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9B07, 0);
861
862 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A81B, 0);
863
864 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B183, 0);
865
866 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8099, 0);
867 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A0, 2);
868 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80AF, 0);
869 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
870 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9211, 0);
871 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9602, 0);
872 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9E72, 0);
873 tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B309, 0x000000a2);
874 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);
875
876 tu_cs_emit_write_reg(cs, REG_A6XX_VFD_MODE_CNTL, 0x00000000);
877
878 tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A008, 0);
879
880 tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x0000001f);
881
882 /* we don't use this yet.. probably best to disable.. */
883 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
884 tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
885 CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
886 CP_SET_DRAW_STATE__0_GROUP_ID(0));
887 tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
888 tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));
889
890 tu_cs_emit_regs(cs,
891 A6XX_SP_HS_CTRL_REG0(0));
892
893 tu_cs_emit_regs(cs,
894 A6XX_SP_GS_CTRL_REG0(0));
895
896 tu_cs_emit_regs(cs,
897 A6XX_GRAS_LRZ_CNTL(0));
898
899 tu_cs_emit_regs(cs,
900 A6XX_RB_LRZ_CNTL(0));
901
902 tu_cs_emit_regs(cs,
903 A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(.bo = &dev->global_bo,
904 .bo_offset = gb_offset(border_color)));
905 tu_cs_emit_regs(cs,
906 A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(.bo = &dev->global_bo,
907 .bo_offset = gb_offset(border_color)));
908
909 /* VSC buffers:
910 * use the vsc pitches from the largest values used so far with this device;
911 * if there hasn't been an overflow, there will already be a scratch bo
912 * allocated for these sizes
913 *
914 * if overflow is detected, the stream pitch is doubled
915 */
916 mtx_lock(&dev->vsc_pitch_mtx);
917
918 struct tu6_global *global = dev->global_bo.map;
919
920 uint32_t vsc_draw_overflow = global->vsc_draw_overflow;
921 uint32_t vsc_prim_overflow = global->vsc_prim_overflow;
922
923 if (vsc_draw_overflow >= dev->vsc_draw_strm_pitch)
924 dev->vsc_draw_strm_pitch = (dev->vsc_draw_strm_pitch - VSC_PAD) * 2 + VSC_PAD;
925
926 if (vsc_prim_overflow >= dev->vsc_prim_strm_pitch)
927 dev->vsc_prim_strm_pitch = (dev->vsc_prim_strm_pitch - VSC_PAD) * 2 + VSC_PAD;
928
929 cmd->vsc_prim_strm_pitch = dev->vsc_prim_strm_pitch;
930 cmd->vsc_draw_strm_pitch = dev->vsc_draw_strm_pitch;
931
932 mtx_unlock(&dev->vsc_pitch_mtx);
933
934 struct tu_bo *vsc_bo;
935 uint32_t size0 = cmd->vsc_prim_strm_pitch * MAX_VSC_PIPES +
936 cmd->vsc_draw_strm_pitch * MAX_VSC_PIPES;
937
938 tu_get_scratch_bo(dev, size0 + MAX_VSC_PIPES * 4, &vsc_bo);
939
940 tu_cs_emit_regs(cs,
941 A6XX_VSC_DRAW_STRM_SIZE_ADDRESS(.bo = vsc_bo, .bo_offset = size0));
942 tu_cs_emit_regs(cs,
943 A6XX_VSC_PRIM_STRM_ADDRESS(.bo = vsc_bo));
944 tu_cs_emit_regs(cs,
945 A6XX_VSC_DRAW_STRM_ADDRESS(.bo = vsc_bo,
946 .bo_offset = cmd->vsc_prim_strm_pitch * MAX_VSC_PIPES));
947
948 tu_bo_list_add(&cmd->bo_list, vsc_bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
949
950 tu_cs_sanity_check(cs);
951 }
952
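/* Program the visibility stream (VSC) bin size, bin count, pipe configuration
 * and the draw/primitive stream pitches used during the binning pass.
 */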
953 static void
954 update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
955 {
956 const struct tu_framebuffer *fb = cmd->state.framebuffer;
957
958 tu_cs_emit_regs(cs,
959 A6XX_VSC_BIN_SIZE(.width = fb->tile0.width,
960 .height = fb->tile0.height));
961
962 tu_cs_emit_regs(cs,
963 A6XX_VSC_BIN_COUNT(.nx = fb->tile_count.width,
964 .ny = fb->tile_count.height));
965
966 tu_cs_emit_pkt4(cs, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
967 tu_cs_emit_array(cs, fb->pipe_config, 32);
968
969 tu_cs_emit_regs(cs,
970 A6XX_VSC_PRIM_STRM_PITCH(cmd->vsc_prim_strm_pitch),
971 A6XX_VSC_PRIM_STRM_LIMIT(cmd->vsc_prim_strm_pitch - VSC_PAD));
972
973 tu_cs_emit_regs(cs,
974 A6XX_VSC_DRAW_STRM_PITCH(cmd->vsc_draw_strm_pitch),
975 A6XX_VSC_DRAW_STRM_LIMIT(cmd->vsc_draw_strm_pitch - VSC_PAD));
976 }
977
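/* Use CP_COND_WRITE5 to compare each pipe's draw/primitive stream size
 * against the stream pitch and record any overflow into the global bo, which
 * tu6_init_hw reads back to grow the pitches for later command buffers.
 */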
978 static void
979 emit_vsc_overflow_test(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
980 {
981 const struct tu_framebuffer *fb = cmd->state.framebuffer;
982 const uint32_t used_pipe_count =
983 fb->pipe_count.width * fb->pipe_count.height;
984
985 for (int i = 0; i < used_pipe_count; i++) {
986 tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
987 tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
988 CP_COND_WRITE5_0_WRITE_MEMORY);
989 tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_DRAW_STRM_SIZE_REG(i)));
990 tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
991 tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_draw_strm_pitch - VSC_PAD));
992 tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
993 tu_cs_emit_qw(cs, global_iova(cmd, vsc_draw_overflow));
994 tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(cmd->vsc_draw_strm_pitch));
995
996 tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
997 tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
998 CP_COND_WRITE5_0_WRITE_MEMORY);
999 tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_PRIM_STRM_SIZE_REG(i)));
1000 tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
1001 tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_prim_strm_pitch - VSC_PAD));
1002 tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
1003 tu_cs_emit_qw(cs, global_iova(cmd, vsc_prim_overflow));
1004 tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(cmd->vsc_prim_strm_pitch));
1005 }
1006
1007 tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
1008 }
1009
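/* Run the draw_cs once in binning mode (RM6_BINNING) to generate the
 * visibility streams, then flush and wait so the CP can read the VSC results
 * before the per-tile rendering passes.
 */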
1010 static void
1011 tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1012 {
1013 struct tu_physical_device *phys_dev = cmd->device->physical_device;
1014 const struct tu_framebuffer *fb = cmd->state.framebuffer;
1015
1016 tu6_emit_window_scissor(cs, 0, 0, fb->width - 1, fb->height - 1);
1017
1018 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
1019 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
1020
1021 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
1022 tu_cs_emit(cs, 0x1);
1023
1024 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
1025 tu_cs_emit(cs, 0x1);
1026
1027 tu_cs_emit_wfi(cs);
1028
1029 tu_cs_emit_regs(cs,
1030 A6XX_VFD_MODE_CNTL(.binning_pass = true));
1031
1032 update_vsc_pipe(cmd, cs);
1033
1034 tu_cs_emit_regs(cs,
1035 A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));
1036
1037 tu_cs_emit_regs(cs,
1038 A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));
1039
1040 tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
1041 tu_cs_emit(cs, UNK_2C);
1042
1043 tu_cs_emit_regs(cs,
1044 A6XX_RB_WINDOW_OFFSET(.x = 0, .y = 0));
1045
1046 tu_cs_emit_regs(cs,
1047 A6XX_SP_TP_WINDOW_OFFSET(.x = 0, .y = 0));
1048
1049 /* emit IB to binning drawcmds: */
1050 tu_cs_emit_call(cs, &cmd->draw_cs);
1051
1052 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
1053 tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
1054 CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
1055 CP_SET_DRAW_STATE__0_GROUP_ID(0));
1056 tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
1057 tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));
1058
1059 tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
1060 tu_cs_emit(cs, UNK_2D);
1061
1062 /* This flush is probably required because the VSC, which produces the
1063 * visibility stream, is a client of UCHE, whereas the CP needs to read the
1064 * visibility stream (without caching) to do draw skipping. The
1065 * WFI+WAIT_FOR_ME combination guarantees that the binning commands
1066 * submitted are finished before reading the VSC regs (in
1067 * emit_vsc_overflow_test) or the VSC_DATA buffer directly (implicitly as
1068 * part of draws).
1069 */
1070 tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS);
1071
1072 tu_cs_emit_wfi(cs);
1073
1074 tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
1075
1076 emit_vsc_overflow_test(cmd, cs);
1077
1078 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
1079 tu_cs_emit(cs, 0x0);
1080
1081 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
1082 tu_cs_emit(cs, 0x0);
1083 }
1084
1085 static struct tu_draw_state
1086 tu_emit_input_attachments(struct tu_cmd_buffer *cmd,
1087 const struct tu_subpass *subpass,
1088 bool gmem)
1089 {
1090 /* note: we can probably emit input attachments just once for the whole
1091 * renderpass; this would avoid emitting both sysmem/gmem versions.
1092 *
1093 * We emit two texture descriptors for each input, as a workaround for
1094 * d24s8/d32s8, which can be sampled as both float (depth) and integer (stencil).
1095 * tu_shader lowers uint input attachment loads to use the 2nd descriptor
1096 * in the pair.
1097 * TODO: a smarter workaround
1098 */
1099
1100 if (!subpass->input_count)
1101 return (struct tu_draw_state) {};
1102
1103 struct tu_cs_memory texture;
1104 VkResult result = tu_cs_alloc(&cmd->sub_cs, subpass->input_count * 2,
1105 A6XX_TEX_CONST_DWORDS, &texture);
1106 assert(result == VK_SUCCESS);
1107
1108 for (unsigned i = 0; i < subpass->input_count * 2; i++) {
1109 uint32_t a = subpass->input_attachments[i / 2].attachment;
1110 if (a == VK_ATTACHMENT_UNUSED)
1111 continue;
1112
1113 struct tu_image_view *iview =
1114 cmd->state.framebuffer->attachments[a].attachment;
1115 const struct tu_render_pass_attachment *att =
1116 &cmd->state.pass->attachments[a];
1117 uint32_t *dst = &texture.map[A6XX_TEX_CONST_DWORDS * i];
1118 uint32_t gmem_offset = att->gmem_offset;
1119 uint32_t cpp = att->cpp;
1120
1121 memcpy(dst, iview->descriptor, A6XX_TEX_CONST_DWORDS * 4);
1122
1123 if (i % 2 == 1 && att->format == VK_FORMAT_D24_UNORM_S8_UINT) {
1124 /* note this works because spec says fb and input attachments
1125 * must use identity swizzle
1126 */
1127 dst[0] &= ~(A6XX_TEX_CONST_0_FMT__MASK |
1128 A6XX_TEX_CONST_0_SWIZ_X__MASK | A6XX_TEX_CONST_0_SWIZ_Y__MASK |
1129 A6XX_TEX_CONST_0_SWIZ_Z__MASK | A6XX_TEX_CONST_0_SWIZ_W__MASK);
1130 if (cmd->device->physical_device->limited_z24s8) {
1131 dst[0] |= A6XX_TEX_CONST_0_FMT(FMT6_8_8_8_8_UINT) |
1132 A6XX_TEX_CONST_0_SWIZ_X(A6XX_TEX_W) |
1133 A6XX_TEX_CONST_0_SWIZ_Y(A6XX_TEX_ZERO) |
1134 A6XX_TEX_CONST_0_SWIZ_Z(A6XX_TEX_ZERO) |
1135 A6XX_TEX_CONST_0_SWIZ_W(A6XX_TEX_ONE);
1136 } else {
1137 dst[0] |= A6XX_TEX_CONST_0_FMT(FMT6_Z24_UINT_S8_UINT) |
1138 A6XX_TEX_CONST_0_SWIZ_X(A6XX_TEX_Y) |
1139 A6XX_TEX_CONST_0_SWIZ_Y(A6XX_TEX_ZERO) |
1140 A6XX_TEX_CONST_0_SWIZ_Z(A6XX_TEX_ZERO) |
1141 A6XX_TEX_CONST_0_SWIZ_W(A6XX_TEX_ONE);
1142 }
1143 }
1144
1145 if (i % 2 == 1 && att->format == VK_FORMAT_D32_SFLOAT_S8_UINT) {
1146 dst[0] &= ~A6XX_TEX_CONST_0_FMT__MASK;
1147 dst[0] |= A6XX_TEX_CONST_0_FMT(FMT6_8_UINT);
1148 dst[2] &= ~(A6XX_TEX_CONST_2_PITCHALIGN__MASK | A6XX_TEX_CONST_2_PITCH__MASK);
1149 dst[2] |= A6XX_TEX_CONST_2_PITCH(iview->stencil_PITCH << 6);
1150 dst[3] = 0;
1151 dst[4] = iview->stencil_base_addr;
1152 dst[5] = (dst[5] & 0xffff) | iview->stencil_base_addr >> 32;
1153
1154 cpp = att->samples;
1155 gmem_offset = att->gmem_offset_stencil;
1156 }
1157
1158 if (!gmem)
1159 continue;
1160
1161 /* patched for gmem */
1162 dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
1163 dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
1164 dst[2] =
1165 A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
1166 A6XX_TEX_CONST_2_PITCH(cmd->state.framebuffer->tile0.width * cpp);
1167 dst[3] = 0;
1168 dst[4] = cmd->device->physical_device->gmem_base + gmem_offset;
1169 dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
1170 for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
1171 dst[i] = 0;
1172 }
1173
1174 struct tu_cs cs;
1175 struct tu_draw_state ds = tu_cs_draw_state(&cmd->sub_cs, &cs, 9);
1176
1177 tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_FRAG, 3);
1178 tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
1179 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
1180 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
1181 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_TEX) |
1182 CP_LOAD_STATE6_0_NUM_UNIT(subpass->input_count * 2));
1183 tu_cs_emit_qw(&cs, texture.iova);
1184
1185 tu_cs_emit_pkt4(&cs, REG_A6XX_SP_FS_TEX_CONST_LO, 2);
1186 tu_cs_emit_qw(&cs, texture.iova);
1187
1188 tu_cs_emit_regs(&cs, A6XX_SP_FS_TEX_COUNT(subpass->input_count * 2));
1189
1190 assert(cs.cur == cs.end); /* validate draw state size */
1191
1192 return ds;
1193 }
1194
1195 static void
1196 tu_set_input_attachments(struct tu_cmd_buffer *cmd, const struct tu_subpass *subpass)
1197 {
1198 struct tu_cs *cs = &cmd->draw_cs;
1199
1200 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 6);
1201 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM,
1202 tu_emit_input_attachments(cmd, subpass, true));
1203 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM,
1204 tu_emit_input_attachments(cmd, subpass, false));
1205 }
1206
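/* Emit the GMEM loads/clears and sysmem clears for all attachments into
 * draw_cs, each guarded by CP_COND_EXEC so the same command stream works for
 * both the gmem and sysmem paths.
 */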
1207 static void
1208 tu_emit_renderpass_begin(struct tu_cmd_buffer *cmd,
1209 const VkRenderPassBeginInfo *info)
1210 {
1211 struct tu_cs *cs = &cmd->draw_cs;
1212
1213 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);
1214
1215 tu6_emit_blit_scissor(cmd, cs, true);
1216
1217 for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
1218 tu_load_gmem_attachment(cmd, cs, i, false);
1219
1220 tu6_emit_blit_scissor(cmd, cs, false);
1221
1222 for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
1223 tu_clear_gmem_attachment(cmd, cs, i, info);
1224
1225 tu_cond_exec_end(cs);
1226
1227 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
1228
1229 for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
1230 tu_clear_sysmem_attachment(cmd, cs, i, info);
1231
1232 tu_cond_exec_end(cs);
1233 }
1234
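/* Sysmem (bypass) path setup: full-framebuffer scissor, BYPASS marker and
 * bin size, CCU in sysmem mode, and stream-out enabled.
 */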
1235 static void
1236 tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1237 {
1238 const struct tu_framebuffer *fb = cmd->state.framebuffer;
1239
1240 assert(fb->width > 0 && fb->height > 0);
1241 tu6_emit_window_scissor(cs, 0, 0, fb->width - 1, fb->height - 1);
1242 tu6_emit_window_offset(cs, 0, 0);
1243
1244 tu6_emit_bin_size(cs, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */
1245
1246 tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
1247
1248 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
1249 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
1250
1251 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1252 tu_cs_emit(cs, 0x0);
1253
1254 tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_SYSMEM);
1255
1256 /* enable stream-out; with sysmem there is only one pass: */
1257 tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));
1258
1259 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
1260 tu_cs_emit(cs, 0x1);
1261
1262 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
1263 tu_cs_emit(cs, 0x0);
1264
1265 tu_cs_sanity_check(cs);
1266 }
1267
1268 static void
1269 tu6_sysmem_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1270 {
1271 /* Do any resolves of the last subpass. These are handled in the
1272 * tile_store_ib in the gmem path.
1273 */
1274 tu6_emit_sysmem_resolves(cmd, cs, cmd->state.subpass);
1275
1276 tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);
1277
1278 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1279 tu_cs_emit(cs, 0x0);
1280
1281 tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
1282
1283 tu_cs_sanity_check(cs);
1284 }
1285
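/* Per-renderpass setup for the GMEM path: CCU in gmem mode, then either the
 * hw binning pass or plain bin size setup when binning is not used.
 */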
1286 static void
1287 tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1288 {
1289 struct tu_physical_device *phys_dev = cmd->device->physical_device;
1290
1291 tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
1292
1293 /* lrz clear? */
1294
1295 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1296 tu_cs_emit(cs, 0x0);
1297
1298 tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_GMEM);
1299
1300 const struct tu_framebuffer *fb = cmd->state.framebuffer;
1301 if (use_hw_binning(cmd)) {
1302 /* enable stream-out during binning pass: */
1303 tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));
1304
1305 tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height,
1306 A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
1307
1308 tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, true);
1309
1310 tu6_emit_binning_pass(cmd, cs);
1311
1312 /* and disable stream-out for draw pass: */
1313 tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(true));
1314
1315 tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height,
1316 A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);
1317
1318 tu_cs_emit_regs(cs,
1319 A6XX_VFD_MODE_CNTL(0));
1320
1321 tu_cs_emit_regs(cs, A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));
1322
1323 tu_cs_emit_regs(cs, A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));
1324
1325 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1326 tu_cs_emit(cs, 0x1);
1327 } else {
1328 /* no binning pass, so enable stream-out for draw pass: */
1329 tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));
1330
1331 tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height, 0x6000000);
1332 }
1333
1334 tu_cs_sanity_check(cs);
1335 }
1336
1337 static void
1338 tu6_render_tile(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1339 {
1340 tu_cs_emit_call(cs, &cmd->draw_cs);
1341
1342 if (use_hw_binning(cmd)) {
1343 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
1344 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
1345 }
1346
1347 tu_cs_emit_ib(cs, &cmd->state.tile_store_ib);
1348
1349 tu_cs_sanity_check(cs);
1350 }
1351
1352 static void
1353 tu6_tile_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1354 {
1355 tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);
1356
1357 tu_cs_emit_regs(cs,
1358 A6XX_GRAS_LRZ_CNTL(0));
1359
1360 tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
1361
1362 tu6_emit_event_write(cmd, cs, PC_CCU_RESOLVE_TS);
1363
1364 tu_cs_sanity_check(cs);
1365 }
1366
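/* GMEM path: walk the visibility pipes and the tiles within each pipe,
 * emitting a tile select, the draw IB and the tile store for every tile.
 */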
1367 static void
1368 tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
1369 {
1370 const struct tu_framebuffer *fb = cmd->state.framebuffer;
1371
1372 tu6_tile_render_begin(cmd, &cmd->cs);
1373
1374 uint32_t pipe = 0;
1375 for (uint32_t py = 0; py < fb->pipe_count.height; py++) {
1376 for (uint32_t px = 0; px < fb->pipe_count.width; px++, pipe++) {
1377 uint32_t tx1 = px * fb->pipe0.width;
1378 uint32_t ty1 = py * fb->pipe0.height;
1379 uint32_t tx2 = MIN2(tx1 + fb->pipe0.width, fb->tile_count.width);
1380 uint32_t ty2 = MIN2(ty1 + fb->pipe0.height, fb->tile_count.height);
1381 uint32_t slot = 0;
1382 for (uint32_t ty = ty1; ty < ty2; ty++) {
1383 for (uint32_t tx = tx1; tx < tx2; tx++, slot++) {
1384 tu6_emit_tile_select(cmd, &cmd->cs, tx, ty, pipe, slot);
1385 tu6_render_tile(cmd, &cmd->cs);
1386 }
1387 }
1388 }
1389 }
1390
1391 tu6_tile_render_end(cmd, &cmd->cs);
1392 }
1393
1394 static void
1395 tu_cmd_render_sysmem(struct tu_cmd_buffer *cmd)
1396 {
1397 tu6_sysmem_render_begin(cmd, &cmd->cs);
1398
1399 tu_cs_emit_call(&cmd->cs, &cmd->draw_cs);
1400
1401 tu6_sysmem_render_end(cmd, &cmd->cs);
1402 }
1403
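/* Record the tile store commands into a sub-stream once so the resulting IB
 * can be replayed for every tile.
 */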
1404 static void
1405 tu_cmd_prepare_tile_store_ib(struct tu_cmd_buffer *cmd)
1406 {
1407 const uint32_t tile_store_space = 11 + (35 * 2) * cmd->state.pass->attachment_count;
1408 struct tu_cs sub_cs;
1409
1410 VkResult result =
1411 tu_cs_begin_sub_stream(&cmd->sub_cs, tile_store_space, &sub_cs);
1412 if (result != VK_SUCCESS) {
1413 cmd->record_result = result;
1414 return;
1415 }
1416
1417 /* emit to tile-store sub_cs */
1418 tu6_emit_tile_store(cmd, &sub_cs);
1419
1420 cmd->state.tile_store_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
1421 }
1422
1423 static VkResult
1424 tu_create_cmd_buffer(struct tu_device *device,
1425 struct tu_cmd_pool *pool,
1426 VkCommandBufferLevel level,
1427 VkCommandBuffer *pCommandBuffer)
1428 {
1429 struct tu_cmd_buffer *cmd_buffer;
1430
1431 cmd_buffer = vk_object_zalloc(&device->vk, NULL, sizeof(*cmd_buffer),
1432 VK_OBJECT_TYPE_COMMAND_BUFFER);
1433 if (cmd_buffer == NULL)
1434 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1435
1436 cmd_buffer->device = device;
1437 cmd_buffer->pool = pool;
1438 cmd_buffer->level = level;
1439
1440 if (pool) {
1441 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
1442 cmd_buffer->queue_family_index = pool->queue_family_index;
1443
1444 } else {
1445 /* Init the pool_link so we can safely call list_del when we destroy
1446 * the command buffer
1447 */
1448 list_inithead(&cmd_buffer->pool_link);
1449 cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
1450 }
1451
1452 tu_bo_list_init(&cmd_buffer->bo_list);
1453 tu_cs_init(&cmd_buffer->cs, device, TU_CS_MODE_GROW, 4096);
1454 tu_cs_init(&cmd_buffer->draw_cs, device, TU_CS_MODE_GROW, 4096);
1455 tu_cs_init(&cmd_buffer->draw_epilogue_cs, device, TU_CS_MODE_GROW, 4096);
1456 tu_cs_init(&cmd_buffer->sub_cs, device, TU_CS_MODE_SUB_STREAM, 2048);
1457
1458 *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);
1459
1460 list_inithead(&cmd_buffer->upload.list);
1461
1462 return VK_SUCCESS;
1463 }
1464
1465 static void
1466 tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
1467 {
1468 list_del(&cmd_buffer->pool_link);
1469
1470 tu_cs_finish(&cmd_buffer->cs);
1471 tu_cs_finish(&cmd_buffer->draw_cs);
1472 tu_cs_finish(&cmd_buffer->draw_epilogue_cs);
1473 tu_cs_finish(&cmd_buffer->sub_cs);
1474
1475 tu_bo_list_destroy(&cmd_buffer->bo_list);
1476 vk_object_free(&cmd_buffer->device->vk, &cmd_buffer->pool->alloc, cmd_buffer);
1477 }
1478
1479 static VkResult
1480 tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
1481 {
1482 cmd_buffer->record_result = VK_SUCCESS;
1483
1484 tu_bo_list_reset(&cmd_buffer->bo_list);
1485 tu_cs_reset(&cmd_buffer->cs);
1486 tu_cs_reset(&cmd_buffer->draw_cs);
1487 tu_cs_reset(&cmd_buffer->draw_epilogue_cs);
1488 tu_cs_reset(&cmd_buffer->sub_cs);
1489
1490 for (unsigned i = 0; i < MAX_BIND_POINTS; i++)
1491 memset(&cmd_buffer->descriptors[i].sets, 0, sizeof(cmd_buffer->descriptors[i].sets));
1492
1493 cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;
1494
1495 return cmd_buffer->record_result;
1496 }
1497
1498 VkResult
1499 tu_AllocateCommandBuffers(VkDevice _device,
1500 const VkCommandBufferAllocateInfo *pAllocateInfo,
1501 VkCommandBuffer *pCommandBuffers)
1502 {
1503 TU_FROM_HANDLE(tu_device, device, _device);
1504 TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);
1505
1506 VkResult result = VK_SUCCESS;
1507 uint32_t i;
1508
1509 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
1510
1511 if (!list_is_empty(&pool->free_cmd_buffers)) {
1512 struct tu_cmd_buffer *cmd_buffer = list_first_entry(
1513 &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);
1514
1515 list_del(&cmd_buffer->pool_link);
1516 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
1517
1518 result = tu_reset_cmd_buffer(cmd_buffer);
1519 cmd_buffer->level = pAllocateInfo->level;
1520
1521 pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
1522 } else {
1523 result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
1524 &pCommandBuffers[i]);
1525 }
1526 if (result != VK_SUCCESS)
1527 break;
1528 }
1529
1530 if (result != VK_SUCCESS) {
1531 tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
1532 pCommandBuffers);
1533
1534 /* From the Vulkan 1.0.66 spec:
1535 *
1536 * "vkAllocateCommandBuffers can be used to create multiple
1537 * command buffers. If the creation of any of those command
1538 * buffers fails, the implementation must destroy all
1539 * successfully created command buffer objects from this
1540 * command, set all entries of the pCommandBuffers array to
1541 * NULL and return the error."
1542 */
1543 memset(pCommandBuffers, 0,
1544 sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
1545 }
1546
1547 return result;
1548 }
1549
1550 void
1551 tu_FreeCommandBuffers(VkDevice device,
1552 VkCommandPool commandPool,
1553 uint32_t commandBufferCount,
1554 const VkCommandBuffer *pCommandBuffers)
1555 {
1556 for (uint32_t i = 0; i < commandBufferCount; i++) {
1557 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
1558
1559 if (cmd_buffer) {
1560 if (cmd_buffer->pool) {
1561 list_del(&cmd_buffer->pool_link);
1562 list_addtail(&cmd_buffer->pool_link,
1563 &cmd_buffer->pool->free_cmd_buffers);
1564 } else
1565 tu_cmd_buffer_destroy(cmd_buffer);
1566 }
1567 }
1568 }
1569
1570 VkResult
1571 tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
1572 VkCommandBufferResetFlags flags)
1573 {
1574 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
1575 return tu_reset_cmd_buffer(cmd_buffer);
1576 }
1577
1578 /* Initialize the cache, assuming all necessary flushes have happened but *not*
1579 * invalidations.
1580 */
1581 static void
1582 tu_cache_init(struct tu_cache_state *cache)
1583 {
1584 cache->flush_bits = 0;
1585 cache->pending_flush_bits = TU_CMD_FLAG_ALL_INVALIDATE;
1586 }
1587
1588 VkResult
1589 tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
1590 const VkCommandBufferBeginInfo *pBeginInfo)
1591 {
1592 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
1593 VkResult result = VK_SUCCESS;
1594
1595 if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
1596 /* If the command buffer has already been reset with
1597 * vkResetCommandBuffer, no need to do it again.
1598 */
1599 result = tu_reset_cmd_buffer(cmd_buffer);
1600 if (result != VK_SUCCESS)
1601 return result;
1602 }
1603
1604 memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
1605 cmd_buffer->state.index_size = 0xff; /* dirty restart index */
1606
1607 tu_cache_init(&cmd_buffer->state.cache);
1608 tu_cache_init(&cmd_buffer->state.renderpass_cache);
1609 cmd_buffer->usage_flags = pBeginInfo->flags;
1610
1611 tu_cs_begin(&cmd_buffer->cs);
1612 tu_cs_begin(&cmd_buffer->draw_cs);
1613 tu_cs_begin(&cmd_buffer->draw_epilogue_cs);
1614
1615 /* set up the initial configuration for the command buffer */
1616 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
1617 switch (cmd_buffer->queue_family_index) {
1618 case TU_QUEUE_GENERAL:
1619 tu6_init_hw(cmd_buffer, &cmd_buffer->cs);
1620 break;
1621 default:
1622 break;
1623 }
1624 } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
1625 assert(pBeginInfo->pInheritanceInfo);
1626
1627 vk_foreach_struct(ext, pBeginInfo->pInheritanceInfo) {
1628 switch (ext->sType) {
1629 case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT: {
1630 const VkCommandBufferInheritanceConditionalRenderingInfoEXT *cond_rend = (void *) ext;
1631 cmd_buffer->state.predication_active = cond_rend->conditionalRenderingEnable;
1632 break;
1633 default:
1634 break;
1635 }
1636 }
1637 }
1638
1639 if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
1640 cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
1641 cmd_buffer->state.subpass =
1642 &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
1643 } else {
1644 /* When executing in the middle of another command buffer, the CCU
1645 * state is unknown.
1646 */
1647 cmd_buffer->state.ccu_state = TU_CMD_CCU_UNKNOWN;
1648 }
1649 }
1650
1651 cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;
1652
1653 return VK_SUCCESS;
1654 }
1655
1656 /* Sets vertex buffers to HW binding points. We emit VBs in SDS (so that bin
1657 * rendering can skip over unused state), so we need to collect all the
1658 * bindings together into a single state emit at draw time.
1659 */
1660 void
1661 tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
1662 uint32_t firstBinding,
1663 uint32_t bindingCount,
1664 const VkBuffer *pBuffers,
1665 const VkDeviceSize *pOffsets)
1666 {
1667 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1668
1669 assert(firstBinding + bindingCount <= MAX_VBS);
1670
1671 for (uint32_t i = 0; i < bindingCount; i++) {
1672 struct tu_buffer *buf = tu_buffer_from_handle(pBuffers[i]);
1673
1674 cmd->state.vb.buffers[firstBinding + i] = buf;
1675 cmd->state.vb.offsets[firstBinding + i] = pOffsets[i];
1676
1677 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
1678 }
1679
1680 cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
1681 }
1682
1683 void
1684 tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
1685 VkBuffer buffer,
1686 VkDeviceSize offset,
1687 VkIndexType indexType)
1688 {
1689 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1690 TU_FROM_HANDLE(tu_buffer, buf, buffer);
1691
1692
1693
1694 uint32_t index_size, index_shift, restart_index;
1695
1696 switch (indexType) {
1697 case VK_INDEX_TYPE_UINT16:
1698 index_size = INDEX4_SIZE_16_BIT;
1699 index_shift = 1;
1700 restart_index = 0xffff;
1701 break;
1702 case VK_INDEX_TYPE_UINT32:
1703 index_size = INDEX4_SIZE_32_BIT;
1704 index_shift = 2;
1705 restart_index = 0xffffffff;
1706 break;
1707 case VK_INDEX_TYPE_UINT8_EXT:
1708 index_size = INDEX4_SIZE_8_BIT;
1709 index_shift = 0;
1710 restart_index = 0xff;
1711 break;
1712 default:
1713 unreachable("invalid VkIndexType");
1714 }
1715
1716 /* initialize/update the restart index */
1717 if (cmd->state.index_size != index_size)
1718 tu_cs_emit_regs(&cmd->draw_cs, A6XX_PC_RESTART_INDEX(restart_index));
1719
1720 assert(buf->size >= offset);
1721
1722 cmd->state.index_va = buf->bo->iova + buf->bo_offset + offset;
1723 cmd->state.max_index_count = (buf->size - offset) >> index_shift;
1724 cmd->state.index_size = index_size;
1725
1726 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
1727 }
1728
1729 void
1730 tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
1731 VkPipelineBindPoint pipelineBindPoint,
1732 VkPipelineLayout _layout,
1733 uint32_t firstSet,
1734 uint32_t descriptorSetCount,
1735 const VkDescriptorSet *pDescriptorSets,
1736 uint32_t dynamicOffsetCount,
1737 const uint32_t *pDynamicOffsets)
1738 {
1739 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1740 TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
1741 unsigned dyn_idx = 0;
1742
1743 struct tu_descriptor_state *descriptors_state =
1744 tu_get_descriptors_state(cmd, pipelineBindPoint);
1745
1746 for (unsigned i = 0; i < descriptorSetCount; ++i) {
1747 unsigned idx = i + firstSet;
1748 TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);
1749
1750 descriptors_state->sets[idx] = set;
1751
1752 for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
1753 /* update the contents of the dynamic descriptor set */
1754 unsigned src_idx = j;
1755 unsigned dst_idx = j + layout->set[idx].dynamic_offset_start;
1756 assert(dyn_idx < dynamicOffsetCount);
1757
1758 uint32_t *dst =
1759 &descriptors_state->dynamic_descriptors[dst_idx * A6XX_TEX_CONST_DWORDS];
1760 uint32_t *src =
1761 &set->dynamic_descriptors[src_idx * A6XX_TEX_CONST_DWORDS];
1762 uint32_t offset = pDynamicOffsets[dyn_idx];
1763
1764 /* Patch the storage/uniform descriptors right away. */
1765 if (layout->set[idx].layout->dynamic_ubo & (1 << j)) {
1766 /* Note: we can assume here that the addition won't roll over and
1767 * change the SIZE field.
1768 */
1769 uint64_t va = src[0] | ((uint64_t)src[1] << 32);
1770 va += offset;
1771 dst[0] = va;
1772 dst[1] = va >> 32;
1773 } else {
1774 memcpy(dst, src, A6XX_TEX_CONST_DWORDS * 4);
1775 /* Note: A6XX_IBO_5_DEPTH is always 0 */
1776 uint64_t va = dst[4] | ((uint64_t)dst[5] << 32);
1777 va += offset;
1778 dst[4] = va;
1779 dst[5] = va >> 32;
1780 }
1781 }
1782
1783 for (unsigned j = 0; j < set->layout->buffer_count; ++j) {
1784 if (set->buffers[j]) {
1785 tu_bo_list_add(&cmd->bo_list, set->buffers[j],
1786 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
1787 }
1788 }
1789
1790 if (set->size > 0) {
1791 tu_bo_list_add(&cmd->bo_list, &set->pool->bo,
1792 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
1793 }
1794 }
1795 assert(dyn_idx == dynamicOffsetCount);
1796
1797 uint32_t sp_bindless_base_reg, hlsq_bindless_base_reg, hlsq_invalidate_value;
1798 uint64_t addr[MAX_SETS + 1] = {};
1799 struct tu_cs *cs, state_cs;
1800
1801 for (uint32_t i = 0; i < MAX_SETS; i++) {
1802 struct tu_descriptor_set *set = descriptors_state->sets[i];
1803 if (set)
1804 addr[i] = set->va | 3;
1805 }
1806
1807 if (layout->dynamic_offset_count) {
1808 /* allocate and fill out dynamic descriptor set */
1809 struct tu_cs_memory dynamic_desc_set;
1810 VkResult result = tu_cs_alloc(&cmd->sub_cs, layout->dynamic_offset_count,
1811 A6XX_TEX_CONST_DWORDS, &dynamic_desc_set);
1812 assert(result == VK_SUCCESS);
1813
1814 memcpy(dynamic_desc_set.map, descriptors_state->dynamic_descriptors,
1815 layout->dynamic_offset_count * A6XX_TEX_CONST_DWORDS * 4);
1816 addr[MAX_SETS] = dynamic_desc_set.iova | 3;
1817 }
1818
1819 if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
1820 sp_bindless_base_reg = REG_A6XX_SP_BINDLESS_BASE(0);
1821 hlsq_bindless_base_reg = REG_A6XX_HLSQ_BINDLESS_BASE(0);
1822 hlsq_invalidate_value = A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS(0x1f);
1823
1824 cmd->state.desc_sets = tu_cs_draw_state(&cmd->sub_cs, &state_cs, 24);
1825 cmd->state.dirty |= TU_CMD_DIRTY_DESC_SETS_LOAD | TU_CMD_DIRTY_SHADER_CONSTS;
1826 cs = &state_cs;
1827 } else {
1828 assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE);
1829
1830 sp_bindless_base_reg = REG_A6XX_SP_CS_BINDLESS_BASE(0);
1831 hlsq_bindless_base_reg = REG_A6XX_HLSQ_CS_BINDLESS_BASE(0);
1832 hlsq_invalidate_value = A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS(0x1f);
1833
1834 cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD;
1835 cs = &cmd->cs;
1836 }
1837
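/* Emit the bindless base pointers for all descriptor sets (plus the internal
 * dynamic-offset descriptor set) to both the SP and HLSQ copies of the
 * registers, then invalidate the bindless state so the new bases take effect.
 */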
1838 tu_cs_emit_pkt4(cs, sp_bindless_base_reg, 10);
1839 tu_cs_emit_array(cs, (const uint32_t*) addr, 10);
1840 tu_cs_emit_pkt4(cs, hlsq_bindless_base_reg, 10);
1841 tu_cs_emit_array(cs, (const uint32_t*) addr, 10);
1842 tu_cs_emit_regs(cs, A6XX_HLSQ_INVALIDATE_CMD(.dword = hlsq_invalidate_value));
1843
1844 if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
1845 assert(cs->cur == cs->end); /* validate draw state size */
1846 tu_cs_emit_pkt7(&cmd->draw_cs, CP_SET_DRAW_STATE, 3);
1847 tu_cs_emit_draw_state(&cmd->draw_cs, TU_DRAW_STATE_DESC_SETS, cmd->state.desc_sets);
1848 }
1849 }
1850
1851 void tu_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer,
1852 uint32_t firstBinding,
1853 uint32_t bindingCount,
1854 const VkBuffer *pBuffers,
1855 const VkDeviceSize *pOffsets,
1856 const VkDeviceSize *pSizes)
1857 {
1858 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1859 struct tu_cs *cs = &cmd->draw_cs;
1860
1861 /* Using COND_REG_EXEC for xfb commands matches the blob behavior;
1862 * presumably there isn't any benefit to using a draw state when the
1863 * condition is (SYSMEM | BINNING).
1864 */
1865 tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
1866 CP_COND_REG_EXEC_0_SYSMEM |
1867 CP_COND_REG_EXEC_0_BINNING);
1868
1869 for (uint32_t i = 0; i < bindingCount; i++) {
1870 TU_FROM_HANDLE(tu_buffer, buf, pBuffers[i]);
1871 uint64_t iova = buf->bo->iova + pOffsets[i];
1872 uint32_t size = buf->bo->size - pOffsets[i];
1873 uint32_t idx = i + firstBinding;
1874
1875 if (pSizes && pSizes[i] != VK_WHOLE_SIZE)
1876 size = pSizes[i];
1877
1878 /* BUFFER_BASE is 32-byte aligned, add remaining offset to BUFFER_OFFSET */
1879 uint32_t offset = iova & 0x1f;
1880 iova &= ~(uint64_t) 0x1f;
1881
1882 tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_BASE(idx), 3);
1883 tu_cs_emit_qw(cs, iova);
1884 tu_cs_emit(cs, size + offset);
1885
1886 cmd->state.streamout_offset[idx] = offset;
1887
1888 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_WRITE);
1889 }
1890
1891 tu_cond_exec_end(cs);
1892 }
1893
1894 void
1895 tu_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer,
1896 uint32_t firstCounterBuffer,
1897 uint32_t counterBufferCount,
1898 const VkBuffer *pCounterBuffers,
1899 const VkDeviceSize *pCounterBufferOffsets)
1900 {
1901 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1902 struct tu_cs *cs = &cmd->draw_cs;
1903
1904 tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
1905 CP_COND_REG_EXEC_0_SYSMEM |
1906 CP_COND_REG_EXEC_0_BINNING);
1907
1908 /* TODO: only update offset for active buffers */
1909 for (uint32_t i = 0; i < IR3_MAX_SO_BUFFERS; i++)
1910 tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_OFFSET(i, cmd->state.streamout_offset[i]));
1911
1912 for (uint32_t i = 0; i < counterBufferCount; i++) {
1913 uint32_t idx = firstCounterBuffer + i;
1914 uint32_t offset = cmd->state.streamout_offset[idx];
1915
1916 if (!pCounterBuffers[i])
1917 continue;
1918
1919 TU_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]);
1920
1921 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
1922
1923 tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
1924 tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(idx)) |
1925 CP_MEM_TO_REG_0_UNK31 |
1926 CP_MEM_TO_REG_0_CNT(1));
1927 tu_cs_emit_qw(cs, buf->bo->iova + pCounterBufferOffsets[i]);
1928
1929 if (offset) {
1930 tu_cs_emit_pkt7(cs, CP_REG_RMW, 3);
1931 tu_cs_emit(cs, CP_REG_RMW_0_DST_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(idx)) |
1932 CP_REG_RMW_0_SRC1_ADD);
1933 tu_cs_emit_qw(cs, 0xffffffff);
1934 tu_cs_emit_qw(cs, offset);
1935 }
1936 }
1937
1938 tu_cond_exec_end(cs);
1939 }
1940
1941 void tu_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer,
1942 uint32_t firstCounterBuffer,
1943 uint32_t counterBufferCount,
1944 const VkBuffer *pCounterBuffers,
1945 const VkDeviceSize *pCounterBufferOffsets)
1946 {
1947 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1948 struct tu_cs *cs = &cmd->draw_cs;
1949
1950 tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
1951 CP_COND_REG_EXEC_0_SYSMEM |
1952 CP_COND_REG_EXEC_0_BINNING);
1953
1954 /* TODO: only flush buffers that need to be flushed */
1955 for (uint32_t i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
1956 /* note: FLUSH_BASE is always the same, so it could go in init_hw()? */
1957 tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_FLUSH_BASE(i), 2);
1958 tu_cs_emit_qw(cs, global_iova(cmd, flush_base[i]));
1959 tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i);
1960 }
1961
1962 for (uint32_t i = 0; i < counterBufferCount; i++) {
1963 uint32_t idx = firstCounterBuffer + i;
1964 uint32_t offset = cmd->state.streamout_offset[idx];
1965
1966 if (!pCounterBuffers[i])
1967 continue;
1968
1969 TU_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]);
1970
1971 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_WRITE);
1972
1973 /* the value at VPC_SO_FLUSH_BASE is a dword count, but the counter buffer should be in bytes (hence SHIFT_BY_2) */
1974 tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
1975 tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
1976 CP_MEM_TO_REG_0_SHIFT_BY_2 |
1977 0x40000 | /* ??? */
1978 CP_MEM_TO_REG_0_UNK31 |
1979 CP_MEM_TO_REG_0_CNT(1));
1980 tu_cs_emit_qw(cs, global_iova(cmd, flush_base[idx]));
1981
1982 if (offset) {
1983 tu_cs_emit_pkt7(cs, CP_REG_RMW, 3);
1984 tu_cs_emit(cs, CP_REG_RMW_0_DST_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
1985 CP_REG_RMW_0_SRC1_ADD);
1986 tu_cs_emit_qw(cs, 0xffffffff);
1987 tu_cs_emit_qw(cs, -offset);
1988 }
1989
1990 tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
1991 tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
1992 CP_REG_TO_MEM_0_CNT(1));
1993 tu_cs_emit_qw(cs, buf->bo->iova + pCounterBufferOffsets[i]);
1994 }
1995
1996 tu_cond_exec_end(cs);
1997
1998 cmd->state.xfb_used = true;
1999 }
2000
2001 void
2002 tu_CmdPushConstants(VkCommandBuffer commandBuffer,
2003 VkPipelineLayout layout,
2004 VkShaderStageFlags stageFlags,
2005 uint32_t offset,
2006 uint32_t size,
2007 const void *pValues)
2008 {
2009 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2010 memcpy((void*) cmd->push_constants + offset, pValues, size);
2011 cmd->state.dirty |= TU_CMD_DIRTY_SHADER_CONSTS;
2012 }
2013
2014 /* Flush everything which has been made available but we haven't actually
2015 * flushed yet.
2016 */
2017 static void
2018 tu_flush_all_pending(struct tu_cache_state *cache)
2019 {
2020 cache->flush_bits |= cache->pending_flush_bits & TU_CMD_FLAG_ALL_FLUSH;
2021 cache->pending_flush_bits &= ~TU_CMD_FLAG_ALL_FLUSH;
2022 }
2023
2024 VkResult
2025 tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
2026 {
2027 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
2028
2029 /* We currently flush CCU at the end of the command buffer, like
2030 * what the blob does. There's implicit synchronization around every
2031 * vkQueueSubmit, but the kernel only flushes the UCHE, and we don't
2032 * know yet if this command buffer will be the last in the submit so we
2033 * have to defensively flush everything else.
2034 *
2035 * TODO: We could definitely do better than this, since these flushes
2036 * aren't required by Vulkan, but we'd need kernel support to do that.
2037 * Ideally, we'd like the kernel to flush everything afterwards, so that we
2038 * wouldn't have to do any flushes here, and when submitting multiple
2039 * command buffers there wouldn't be any unnecessary flushes in between.
2040 */
2041 if (cmd_buffer->state.pass) {
2042 tu_flush_all_pending(&cmd_buffer->state.renderpass_cache);
2043 tu_emit_cache_flush_renderpass(cmd_buffer, &cmd_buffer->draw_cs);
2044 } else {
2045 tu_flush_all_pending(&cmd_buffer->state.cache);
2046 cmd_buffer->state.cache.flush_bits |=
2047 TU_CMD_FLAG_CCU_FLUSH_COLOR |
2048 TU_CMD_FLAG_CCU_FLUSH_DEPTH;
2049 tu_emit_cache_flush(cmd_buffer, &cmd_buffer->cs);
2050 }
2051
2052 tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->device->global_bo,
2053 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
2054
2055 for (uint32_t i = 0; i < cmd_buffer->draw_cs.bo_count; i++) {
2056 tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_cs.bos[i],
2057 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2058 }
2059
2060 for (uint32_t i = 0; i < cmd_buffer->draw_epilogue_cs.bo_count; i++) {
2061 tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_epilogue_cs.bos[i],
2062 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2063 }
2064
2065 for (uint32_t i = 0; i < cmd_buffer->sub_cs.bo_count; i++) {
2066 tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->sub_cs.bos[i],
2067 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2068 }
2069
2070 tu_cs_end(&cmd_buffer->cs);
2071 tu_cs_end(&cmd_buffer->draw_cs);
2072 tu_cs_end(&cmd_buffer->draw_epilogue_cs);
2073
2074 cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;
2075
2076 return cmd_buffer->record_result;
2077 }
2078
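/* Allocate a draw state of `size` dwords for dynamic state `id` from sub_cs,
 * record it in cmd->state.dynamic_state[] and point the corresponding dynamic
 * draw state group at it, then return the CS for the caller to fill in.
 */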
2079 static struct tu_cs
2080 tu_cmd_dynamic_state(struct tu_cmd_buffer *cmd, uint32_t id, uint32_t size)
2081 {
2082 struct tu_cs cs;
2083
2084 assert(id < ARRAY_SIZE(cmd->state.dynamic_state));
2085 cmd->state.dynamic_state[id] = tu_cs_draw_state(&cmd->sub_cs, &cs, size);
2086
2087 tu_cs_emit_pkt7(&cmd->draw_cs, CP_SET_DRAW_STATE, 3);
2088 tu_cs_emit_draw_state(&cmd->draw_cs, TU_DRAW_STATE_DYNAMIC + id, cmd->state.dynamic_state[id]);
2089
2090 return cs;
2091 }
2092
2093 void
2094 tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
2095 VkPipelineBindPoint pipelineBindPoint,
2096 VkPipeline _pipeline)
2097 {
2098 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2099 TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);
2100
2101 for (uint32_t i = 0; i < pipeline->cs.bo_count; i++) {
2102 tu_bo_list_add(&cmd->bo_list, pipeline->cs.bos[i],
2103 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2104 }
2105
2106 if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) {
2107 cmd->state.compute_pipeline = pipeline;
2108 tu_cs_emit_state_ib(&cmd->cs, pipeline->program.state);
2109 return;
2110 }
2111
2112 assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS);
2113
2114 cmd->state.pipeline = pipeline;
2115 cmd->state.dirty |= TU_CMD_DIRTY_DESC_SETS_LOAD | TU_CMD_DIRTY_SHADER_CONSTS;
2116
2117 struct tu_cs *cs = &cmd->draw_cs;
2118 uint32_t mask = ~pipeline->dynamic_state_mask & BITFIELD_MASK(TU_DYNAMIC_STATE_COUNT);
2119 uint32_t i;
2120
2121 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (7 + util_bitcount(mask)));
2122 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state);
2123 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state);
2124 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI, pipeline->vi.state);
2125 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state);
2126 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_RAST, pipeline->rast_state);
2127 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS, pipeline->ds_state);
2128 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_BLEND, pipeline->blend_state);
2129 for_each_bit(i, mask)
2130 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DYNAMIC + i, pipeline->dynamic_state[i]);
2131
2132 /* If the new pipeline requires more VBs than we had previously set up, we
2133 * need to re-emit them in SDS. If it requires the same set or fewer, we
2134 * can just re-use the old SDS.
2135 */
2136 if (pipeline->vi.bindings_used & ~cmd->vertex_bindings_set)
2137 cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
2138
2139 /* The dynamic linewidth state depends on the pipeline's gras_su_cntl,
2140 * so the dynamic state IB must be updated when the pipeline changes.
2141 */
2142 if (pipeline->dynamic_state_mask & BIT(VK_DYNAMIC_STATE_LINE_WIDTH)) {
2143 struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_LINE_WIDTH, 2);
2144
2145 cmd->state.dynamic_gras_su_cntl &= A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
2146 cmd->state.dynamic_gras_su_cntl |= pipeline->gras_su_cntl;
2147
2148 tu_cs_emit_regs(&cs, A6XX_GRAS_SU_CNTL(.dword = cmd->state.dynamic_gras_su_cntl));
2149 }
2150 }
2151
2152 void
2153 tu_CmdSetViewport(VkCommandBuffer commandBuffer,
2154 uint32_t firstViewport,
2155 uint32_t viewportCount,
2156 const VkViewport *pViewports)
2157 {
2158 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2159 struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_VIEWPORT, 18);
2160
2161 assert(firstViewport == 0 && viewportCount == 1);
2162
2163 tu6_emit_viewport(&cs, pViewports);
2164 }
2165
2166 void
2167 tu_CmdSetScissor(VkCommandBuffer commandBuffer,
2168 uint32_t firstScissor,
2169 uint32_t scissorCount,
2170 const VkRect2D *pScissors)
2171 {
2172 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2173 struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_SCISSOR, 3);
2174
2175 assert(firstScissor == 0 && scissorCount == 1);
2176
2177 tu6_emit_scissor(&cs, pScissors);
2178 }
2179
2180 void
2181 tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
2182 {
2183 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2184 struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_LINE_WIDTH, 2);
2185
2186 cmd->state.dynamic_gras_su_cntl &= ~A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
2187 cmd->state.dynamic_gras_su_cntl |= A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(lineWidth / 2.0f);
2188
2189 tu_cs_emit_regs(&cs, A6XX_GRAS_SU_CNTL(.dword = cmd->state.dynamic_gras_su_cntl));
2190 }
2191
2192 void
2193 tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
2194 float depthBiasConstantFactor,
2195 float depthBiasClamp,
2196 float depthBiasSlopeFactor)
2197 {
2198 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2199 struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_DEPTH_BIAS, 4);
2200
2201 tu6_emit_depth_bias(&cs, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
2202 }
2203
2204 void
2205 tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
2206 const float blendConstants[4])
2207 {
2208 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2209 struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_BLEND_CONSTANTS, 5);
2210
2211 tu_cs_emit_pkt4(&cs, REG_A6XX_RB_BLEND_RED_F32, 4);
2212 tu_cs_emit_array(&cs, (const uint32_t *) blendConstants, 4);
2213 }
2214
2215 void
2216 tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
2217 float minDepthBounds,
2218 float maxDepthBounds)
2219 {
2220 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2221 struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_DEPTH_BOUNDS, 3);
2222
2223 tu_cs_emit_regs(&cs,
2224 A6XX_RB_Z_BOUNDS_MIN(minDepthBounds),
2225 A6XX_RB_Z_BOUNDS_MAX(maxDepthBounds));
2226 }
2227
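/* Update the front-face (low byte) and/or back-face (high byte) field of a
 * packed stencil value, depending on the requested face mask.
 */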
2228 static void
2229 update_stencil_mask(uint32_t *value, VkStencilFaceFlags face, uint32_t mask)
2230 {
2231 if (face & VK_STENCIL_FACE_FRONT_BIT)
2232 *value = (*value & 0xff00) | (mask & 0xff);
2233 if (face & VK_STENCIL_FACE_BACK_BIT)
2234 *value = (*value & 0xff) | (mask & 0xff) << 8;
2235 }
2236
2237 void
2238 tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
2239 VkStencilFaceFlags faceMask,
2240 uint32_t compareMask)
2241 {
2242 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2243 struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK, 2);
2244
2245 update_stencil_mask(&cmd->state.dynamic_stencil_mask, faceMask, compareMask);
2246
2247 tu_cs_emit_regs(&cs, A6XX_RB_STENCILMASK(.dword = cmd->state.dynamic_stencil_mask));
2248 }
2249
2250 void
2251 tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
2252 VkStencilFaceFlags faceMask,
2253 uint32_t writeMask)
2254 {
2255 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2256 struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, 2);
2257
2258 update_stencil_mask(&cmd->state.dynamic_stencil_wrmask, faceMask, writeMask);
2259
2260 tu_cs_emit_regs(&cs, A6XX_RB_STENCILWRMASK(.dword = cmd->state.dynamic_stencil_wrmask));
2261 }
2262
2263 void
2264 tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
2265 VkStencilFaceFlags faceMask,
2266 uint32_t reference)
2267 {
2268 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2269 struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_REFERENCE, 2);
2270
2271 update_stencil_mask(&cmd->state.dynamic_stencil_ref, faceMask, reference);
2272
2273 tu_cs_emit_regs(&cs, A6XX_RB_STENCILREF(.dword = cmd->state.dynamic_stencil_ref));
2274 }
2275
2276 void
2277 tu_CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
2278 const VkSampleLocationsInfoEXT* pSampleLocationsInfo)
2279 {
2280 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2281 struct tu_cs cs = tu_cmd_dynamic_state(cmd, TU_DYNAMIC_STATE_SAMPLE_LOCATIONS, 9);
2282
2283 assert(pSampleLocationsInfo);
2284
2285 tu6_emit_sample_locations(&cs, pSampleLocationsInfo);
2286 }
2287
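/* Given the accesses that previous work performed (src_mask) and the accesses
 * that upcoming work will perform (dst_mask), figure out which cache flushes
 * and invalidates are needed now. Flushes that aren't needed yet are deferred
 * via pending_flush_bits so they can be skipped if nothing ever reads the data.
 */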
2288 static void
2289 tu_flush_for_access(struct tu_cache_state *cache,
2290 enum tu_cmd_access_mask src_mask,
2291 enum tu_cmd_access_mask dst_mask)
2292 {
2293 enum tu_cmd_flush_bits flush_bits = 0;
2294
2295 if (src_mask & TU_ACCESS_HOST_WRITE) {
2296 /* Host writes are always visible to CP, so only invalidate GPU caches */
2297 cache->pending_flush_bits |= TU_CMD_FLAG_GPU_INVALIDATE;
2298 }
2299
2300 if (src_mask & TU_ACCESS_SYSMEM_WRITE) {
2301 /* Invalidate CP and 2D engine (make it do WFI + WFM if necessary) as
2302 * well.
2303 */
2304 cache->pending_flush_bits |= TU_CMD_FLAG_ALL_INVALIDATE;
2305 }
2306
2307 if (src_mask & TU_ACCESS_CP_WRITE) {
2308 /* Flush the CP write queue. However a WFI shouldn't be necessary as
2309 * WAIT_MEM_WRITES should cover it.
2310 */
2311 cache->pending_flush_bits |=
2312 TU_CMD_FLAG_WAIT_MEM_WRITES |
2313 TU_CMD_FLAG_GPU_INVALIDATE |
2314 TU_CMD_FLAG_WAIT_FOR_ME;
2315 }
2316
2317 #define SRC_FLUSH(domain, flush, invalidate) \
2318 if (src_mask & TU_ACCESS_##domain##_WRITE) { \
2319 cache->pending_flush_bits |= TU_CMD_FLAG_##flush | \
2320 (TU_CMD_FLAG_ALL_INVALIDATE & ~TU_CMD_FLAG_##invalidate); \
2321 }
2322
2323 SRC_FLUSH(UCHE, CACHE_FLUSH, CACHE_INVALIDATE)
2324 SRC_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
2325 SRC_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
2326
2327 #undef SRC_FLUSH
2328
2329 #define SRC_INCOHERENT_FLUSH(domain, flush, invalidate) \
2330 if (src_mask & TU_ACCESS_##domain##_INCOHERENT_WRITE) { \
2331 flush_bits |= TU_CMD_FLAG_##flush; \
2332 cache->pending_flush_bits |= \
2333 (TU_CMD_FLAG_ALL_INVALIDATE & ~TU_CMD_FLAG_##invalidate); \
2334 }
2335
2336 SRC_INCOHERENT_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
2337 SRC_INCOHERENT_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
2338
2339 #undef SRC_INCOHERENT_FLUSH
2340
2341 /* Treat host & sysmem write accesses the same, since the kernel implicitly
2342 * drains the queue before signalling completion to the host.
2343 */
2344 if (dst_mask & (TU_ACCESS_SYSMEM_READ | TU_ACCESS_SYSMEM_WRITE |
2345 TU_ACCESS_HOST_READ | TU_ACCESS_HOST_WRITE)) {
2346 flush_bits |= cache->pending_flush_bits & TU_CMD_FLAG_ALL_FLUSH;
2347 }
2348
2349 #define DST_FLUSH(domain, flush, invalidate) \
2350 if (dst_mask & (TU_ACCESS_##domain##_READ | \
2351 TU_ACCESS_##domain##_WRITE)) { \
2352 flush_bits |= cache->pending_flush_bits & \
2353 (TU_CMD_FLAG_##invalidate | \
2354 (TU_CMD_FLAG_ALL_FLUSH & ~TU_CMD_FLAG_##flush)); \
2355 }
2356
2357 DST_FLUSH(UCHE, CACHE_FLUSH, CACHE_INVALIDATE)
2358 DST_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
2359 DST_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
2360
2361 #undef DST_FLUSH
2362
2363 #define DST_INCOHERENT_FLUSH(domain, flush, invalidate) \
2364 if (dst_mask & (TU_ACCESS_##domain##_INCOHERENT_READ | \
2365 TU_ACCESS_##domain##_INCOHERENT_WRITE)) { \
2366 flush_bits |= TU_CMD_FLAG_##invalidate | \
2367 (cache->pending_flush_bits & \
2368 (TU_CMD_FLAG_ALL_FLUSH & ~TU_CMD_FLAG_##flush)); \
2369 }
2370
2371 DST_INCOHERENT_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
2372 DST_INCOHERENT_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
2373
2374 #undef DST_INCOHERENT_FLUSH
2375
2376 if (dst_mask & TU_ACCESS_WFI_READ) {
2377 flush_bits |= cache->pending_flush_bits &
2378 (TU_CMD_FLAG_ALL_FLUSH | TU_CMD_FLAG_WAIT_FOR_IDLE);
2379 }
2380
2381 if (dst_mask & TU_ACCESS_WFM_READ) {
2382 flush_bits |= cache->pending_flush_bits &
2383 (TU_CMD_FLAG_ALL_FLUSH | TU_CMD_FLAG_WAIT_FOR_ME);
2384 }
2385
2386 cache->flush_bits |= flush_bits;
2387 cache->pending_flush_bits &= ~flush_bits;
2388 }
2389
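/* Translate a Vulkan access mask into the driver's internal access mask. The
 * gmem flag indicates that attachment accesses go through GMEM rather than
 * the CCU, in which case CCU flushes/invalidates can be skipped.
 */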
2390 static enum tu_cmd_access_mask
2391 vk2tu_access(VkAccessFlags flags, bool gmem)
2392 {
2393 enum tu_cmd_access_mask mask = 0;
2394
2395 /* If the GPU writes a buffer that is then read by an indirect draw
2396 * command, we theoretically need to emit a WFI to wait for any cache
2397 * flushes, and then a WAIT_FOR_ME to wait on the CP for the WFI to
2398 * complete. Waiting for the WFI to complete is performed as part of the
2399 * draw by the firmware, so we just need to execute the WFI.
2400 *
2401 * Transform feedback counters are read via CP_MEM_TO_REG, which implicitly
2402 * does CP_WAIT_FOR_ME, but we still need a WFI if the GPU writes it.
2403 *
2404 * Currently we read the draw predicate using CP_MEM_TO_MEM, which
2405 * also implicitly does CP_WAIT_FOR_ME. However CP_DRAW_PRED_SET does *not*
2406 * implicitly do CP_WAIT_FOR_ME, it seems to only wait for counters to
2407 * complete since it's written for DX11 where you can only predicate on the
2408 * result of a query object. So if we implement 64-bit comparisons in the
2409 * future, or if CP_DRAW_PRED_SET grows the capability to do 32-bit
2410 * comparisons, then this will have to be dealt with.
2411 */
2412 if (flags &
2413 (VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
2414 VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT |
2415 VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT |
2416 VK_ACCESS_MEMORY_READ_BIT)) {
2417 mask |= TU_ACCESS_WFI_READ;
2418 }
2419
2420 if (flags &
2421 (VK_ACCESS_INDIRECT_COMMAND_READ_BIT | /* Read performed by CP */
2422 VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT | /* Read performed by CP */
2423 VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT | /* Read performed by CP */
2424 VK_ACCESS_MEMORY_READ_BIT)) {
2425 mask |= TU_ACCESS_SYSMEM_READ;
2426 }
2427
2428 if (flags &
2429 (VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT |
2430 VK_ACCESS_MEMORY_WRITE_BIT)) {
2431 mask |= TU_ACCESS_CP_WRITE;
2432 }
2433
2434 if (flags &
2435 (VK_ACCESS_HOST_READ_BIT |
2436 VK_ACCESS_MEMORY_WRITE_BIT)) {
2437 mask |= TU_ACCESS_HOST_READ;
2438 }
2439
2440 if (flags &
2441 (VK_ACCESS_HOST_WRITE_BIT |
2442 VK_ACCESS_MEMORY_WRITE_BIT)) {
2443 mask |= TU_ACCESS_HOST_WRITE;
2444 }
2445
2446 if (flags &
2447 (VK_ACCESS_INDEX_READ_BIT | /* Read performed by PC, I think */
2448 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | /* Read performed by VFD */
2449 VK_ACCESS_UNIFORM_READ_BIT | /* Read performed by SP */
2450 /* TODO: Is there a no-cache bit for textures so that we can ignore
2451 * these?
2452 */
2453 VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | /* Read performed by TP */
2454 VK_ACCESS_SHADER_READ_BIT | /* Read performed by SP/TP */
2455 VK_ACCESS_MEMORY_READ_BIT)) {
2456 mask |= TU_ACCESS_UCHE_READ;
2457 }
2458
2459 if (flags &
2460 (VK_ACCESS_SHADER_WRITE_BIT | /* Write performed by SP */
2461 VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT | /* Write performed by VPC */
2462 VK_ACCESS_MEMORY_WRITE_BIT)) {
2463 mask |= TU_ACCESS_UCHE_WRITE;
2464 }
2465
2466 /* When using GMEM, the CCU is always flushed automatically to GMEM, and
2467 * then GMEM is flushed to sysmem. Furthermore, we already had to flush any
2468 * previous writes in sysmem mode when transitioning to GMEM. Therefore we
2469 * can ignore CCU and pretend that color attachments and transfers use
2470 * sysmem directly.
2471 */
2472
2473 if (flags &
2474 (VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
2475 VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
2476 VK_ACCESS_MEMORY_READ_BIT)) {
2477 if (gmem)
2478 mask |= TU_ACCESS_SYSMEM_READ;
2479 else
2480 mask |= TU_ACCESS_CCU_COLOR_INCOHERENT_READ;
2481 }
2482
2483 if (flags &
2484 (VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
2485 VK_ACCESS_MEMORY_READ_BIT)) {
2486 if (gmem)
2487 mask |= TU_ACCESS_SYSMEM_READ;
2488 else
2489 mask |= TU_ACCESS_CCU_DEPTH_INCOHERENT_READ;
2490 }
2491
2492 if (flags &
2493 (VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
2494 VK_ACCESS_MEMORY_WRITE_BIT)) {
2495 if (gmem) {
2496 mask |= TU_ACCESS_SYSMEM_WRITE;
2497 } else {
2498 mask |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
2499 }
2500 }
2501
2502 if (flags &
2503 (VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
2504 VK_ACCESS_MEMORY_WRITE_BIT)) {
2505 if (gmem) {
2506 mask |= TU_ACCESS_SYSMEM_WRITE;
2507 } else {
2508 mask |= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE;
2509 }
2510 }
2511
2512 /* When the dst access is a transfer read/write, it seems we sometimes need
2513 * to insert a WFI after any flushes, to guarantee that the flushes finish
2514 * before the 2D engine starts. However the opposite (i.e. a WFI after
2515 * CP_BLIT and before any subsequent flush) does not seem to be needed, and
2516 * the blob doesn't emit such a WFI.
2517 */
2518
2519 if (flags &
2520 (VK_ACCESS_TRANSFER_WRITE_BIT |
2521 VK_ACCESS_MEMORY_WRITE_BIT)) {
2522 if (gmem) {
2523 mask |= TU_ACCESS_SYSMEM_WRITE;
2524 } else {
2525 mask |= TU_ACCESS_CCU_COLOR_WRITE;
2526 }
2527 mask |= TU_ACCESS_WFI_READ;
2528 }
2529
2530 if (flags &
2531 (VK_ACCESS_TRANSFER_READ_BIT | /* Access performed by TP */
2532 VK_ACCESS_MEMORY_READ_BIT)) {
2533 mask |= TU_ACCESS_UCHE_READ | TU_ACCESS_WFI_READ;
2534 }
2535
2536 return mask;
2537 }
2538
2539
2540 void
2541 tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
2542 uint32_t commandBufferCount,
2543 const VkCommandBuffer *pCmdBuffers)
2544 {
2545 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2546 VkResult result;
2547
2548 assert(commandBufferCount > 0);
2549
2550 /* Emit any pending flushes. */
2551 if (cmd->state.pass) {
2552 tu_flush_all_pending(&cmd->state.renderpass_cache);
2553 tu_emit_cache_flush_renderpass(cmd, &cmd->draw_cs);
2554 } else {
2555 tu_flush_all_pending(&cmd->state.cache);
2556 tu_emit_cache_flush(cmd, &cmd->cs);
2557 }
2558
2559 for (uint32_t i = 0; i < commandBufferCount; i++) {
2560 TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);
2561
2562 result = tu_bo_list_merge(&cmd->bo_list, &secondary->bo_list);
2563 if (result != VK_SUCCESS) {
2564 cmd->record_result = result;
2565 break;
2566 }
2567
2568 if (secondary->usage_flags &
2569 VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
2570 assert(tu_cs_is_empty(&secondary->cs));
2571
2572 result = tu_cs_add_entries(&cmd->draw_cs, &secondary->draw_cs);
2573 if (result != VK_SUCCESS) {
2574 cmd->record_result = result;
2575 break;
2576 }
2577
2578 result = tu_cs_add_entries(&cmd->draw_epilogue_cs,
2579 &secondary->draw_epilogue_cs);
2580 if (result != VK_SUCCESS) {
2581 cmd->record_result = result;
2582 break;
2583 }
2584
2585 if (secondary->state.has_tess)
2586 cmd->state.has_tess = true;
2587 if (secondary->state.has_subpass_predication)
2588 cmd->state.has_subpass_predication = true;
2589 } else {
2590 assert(tu_cs_is_empty(&secondary->draw_cs));
2591 assert(tu_cs_is_empty(&secondary->draw_epilogue_cs));
2592
2593 for (uint32_t j = 0; j < secondary->cs.bo_count; j++) {
2594 tu_bo_list_add(&cmd->bo_list, secondary->cs.bos[j],
2595 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2596 }
2597
2598 tu_cs_add_entries(&cmd->cs, &secondary->cs);
2599 }
2600
2601 cmd->state.index_size = secondary->state.index_size; /* for restart index update */
2602 }
2603 cmd->state.dirty = ~0u; /* TODO: set dirty only what needs to be */
2604
2605 /* After executing secondary command buffers, there may have been arbitrary
2606 * flushes executed, so when we encounter a pipeline barrier with a
2607 * srcMask, we have to assume that we need to invalidate. Therefore we need
2608 * to re-initialize the cache with all pending invalidate bits set.
2609 */
2610 if (cmd->state.pass) {
2611 tu_cache_init(&cmd->state.renderpass_cache);
2612 } else {
2613 tu_cache_init(&cmd->state.cache);
2614 }
2615 }
2616
2617 VkResult
2618 tu_CreateCommandPool(VkDevice _device,
2619 const VkCommandPoolCreateInfo *pCreateInfo,
2620 const VkAllocationCallbacks *pAllocator,
2621 VkCommandPool *pCmdPool)
2622 {
2623 TU_FROM_HANDLE(tu_device, device, _device);
2624 struct tu_cmd_pool *pool;
2625
2626 pool = vk_object_alloc(&device->vk, pAllocator, sizeof(*pool),
2627 VK_OBJECT_TYPE_COMMAND_POOL);
2628 if (pool == NULL)
2629 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2630
2631 if (pAllocator)
2632 pool->alloc = *pAllocator;
2633 else
2634 pool->alloc = device->vk.alloc;
2635
2636 list_inithead(&pool->cmd_buffers);
2637 list_inithead(&pool->free_cmd_buffers);
2638
2639 pool->queue_family_index = pCreateInfo->queueFamilyIndex;
2640
2641 *pCmdPool = tu_cmd_pool_to_handle(pool);
2642
2643 return VK_SUCCESS;
2644 }
2645
2646 void
2647 tu_DestroyCommandPool(VkDevice _device,
2648 VkCommandPool commandPool,
2649 const VkAllocationCallbacks *pAllocator)
2650 {
2651 TU_FROM_HANDLE(tu_device, device, _device);
2652 TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
2653
2654 if (!pool)
2655 return;
2656
2657 list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
2658 &pool->cmd_buffers, pool_link)
2659 {
2660 tu_cmd_buffer_destroy(cmd_buffer);
2661 }
2662
2663 list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
2664 &pool->free_cmd_buffers, pool_link)
2665 {
2666 tu_cmd_buffer_destroy(cmd_buffer);
2667 }
2668
2669 vk_object_free(&device->vk, pAllocator, pool);
2670 }
2671
2672 VkResult
2673 tu_ResetCommandPool(VkDevice device,
2674 VkCommandPool commandPool,
2675 VkCommandPoolResetFlags flags)
2676 {
2677 TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
2678 VkResult result;
2679
2680 list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
2681 pool_link)
2682 {
2683 result = tu_reset_cmd_buffer(cmd_buffer);
2684 if (result != VK_SUCCESS)
2685 return result;
2686 }
2687
2688 return VK_SUCCESS;
2689 }
2690
2691 void
2692 tu_TrimCommandPool(VkDevice device,
2693 VkCommandPool commandPool,
2694 VkCommandPoolTrimFlags flags)
2695 {
2696 TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
2697
2698 if (!pool)
2699 return;
2700
2701 list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
2702 &pool->free_cmd_buffers, pool_link)
2703 {
2704 tu_cmd_buffer_destroy(cmd_buffer);
2705 }
2706 }
2707
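/* Apply a subpass dependency: translate the barrier's access masks and feed
 * them to tu_flush_for_access, using the external cache state for barriers
 * that execute outside the render pass and the renderpass cache otherwise.
 */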
2708 static void
2709 tu_subpass_barrier(struct tu_cmd_buffer *cmd_buffer,
2710 const struct tu_subpass_barrier *barrier,
2711 bool external)
2712 {
2713 /* Note: we don't know until the end of the subpass whether we'll use
2714 * sysmem, so assume sysmem here to be safe.
2715 */
2716 struct tu_cache_state *cache =
2717 external ? &cmd_buffer->state.cache : &cmd_buffer->state.renderpass_cache;
2718 enum tu_cmd_access_mask src_flags =
2719 vk2tu_access(barrier->src_access_mask, false);
2720 enum tu_cmd_access_mask dst_flags =
2721 vk2tu_access(barrier->dst_access_mask, false);
2722
2723 if (barrier->incoherent_ccu_color)
2724 src_flags |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
2725 if (barrier->incoherent_ccu_depth)
2726 src_flags |= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE;
2727
2728 tu_flush_for_access(cache, src_flags, dst_flags);
2729 }
2730
2731 void
2732 tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
2733 const VkRenderPassBeginInfo *pRenderPassBegin,
2734 VkSubpassContents contents)
2735 {
2736 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2737 TU_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
2738 TU_FROM_HANDLE(tu_framebuffer, fb, pRenderPassBegin->framebuffer);
2739
2740 cmd->state.pass = pass;
2741 cmd->state.subpass = pass->subpasses;
2742 cmd->state.framebuffer = fb;
2743 cmd->state.render_area = pRenderPassBegin->renderArea;
2744
2745 tu_cmd_prepare_tile_store_ib(cmd);
2746
2747 /* Note: because this is external, any flushes will happen before draw_cs
2748 * gets called. However deferred flushes could have to happen later as part
2749 * of the subpass.
2750 */
2751 tu_subpass_barrier(cmd, &pass->subpasses[0].start_barrier, true);
2752 cmd->state.renderpass_cache.pending_flush_bits =
2753 cmd->state.cache.pending_flush_bits;
2754 cmd->state.renderpass_cache.flush_bits = 0;
2755
2756 tu_emit_renderpass_begin(cmd, pRenderPassBegin);
2757
2758 tu6_emit_zs(cmd, cmd->state.subpass, &cmd->draw_cs);
2759 tu6_emit_mrt(cmd, cmd->state.subpass, &cmd->draw_cs);
2760 tu6_emit_msaa(&cmd->draw_cs, cmd->state.subpass->samples);
2761 tu6_emit_render_cntl(cmd, cmd->state.subpass, &cmd->draw_cs, false);
2762
2763 tu_set_input_attachments(cmd, cmd->state.subpass);
2764
2765 for (uint32_t i = 0; i < fb->attachment_count; ++i) {
2766 const struct tu_image_view *iview = fb->attachments[i].attachment;
2767 tu_bo_list_add(&cmd->bo_list, iview->image->bo,
2768 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
2769 }
2770
2771 cmd->state.dirty |= TU_CMD_DIRTY_DRAW_STATE;
2772 }
2773
2774 void
2775 tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
2776 const VkRenderPassBeginInfo *pRenderPassBeginInfo,
2777 const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
2778 {
2779 tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
2780 pSubpassBeginInfo->contents);
2781 }
2782
2783 void
2784 tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
2785 {
2786 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2787 const struct tu_render_pass *pass = cmd->state.pass;
2788 struct tu_cs *cs = &cmd->draw_cs;
2789
2790 const struct tu_subpass *subpass = cmd->state.subpass++;
2791
2792 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);
2793
2794 if (subpass->resolve_attachments) {
2795 tu6_emit_blit_scissor(cmd, cs, true);
2796
2797 for (unsigned i = 0; i < subpass->color_count; i++) {
2798 uint32_t a = subpass->resolve_attachments[i].attachment;
2799 if (a == VK_ATTACHMENT_UNUSED)
2800 continue;
2801
2802 tu_store_gmem_attachment(cmd, cs, a,
2803 subpass->color_attachments[i].attachment);
2804
2805 if (pass->attachments[a].gmem_offset < 0)
2806 continue;
2807
2808 /* TODO:
2809 * check if the resolved attachment is needed by later subpasses;
2810 * if it is, we should be doing a GMEM->GMEM resolve instead of GMEM->MEM->GMEM.
2811 */
2812 tu_finishme("missing GMEM->GMEM resolve path\n");
2813 tu_load_gmem_attachment(cmd, cs, a, true);
2814 }
2815 }
2816
2817 tu_cond_exec_end(cs);
2818
2819 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
2820
2821 tu6_emit_sysmem_resolves(cmd, cs, subpass);
2822
2823 tu_cond_exec_end(cs);
2824
2825 /* Handle dependencies for the next subpass */
2826 tu_subpass_barrier(cmd, &cmd->state.subpass->start_barrier, false);
2827
2828 /* emit mrt/zs/msaa/ubwc state for the subpass that is starting */
2829 tu6_emit_zs(cmd, cmd->state.subpass, cs);
2830 tu6_emit_mrt(cmd, cmd->state.subpass, cs);
2831 tu6_emit_msaa(cs, cmd->state.subpass->samples);
2832 tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, false);
2833
2834 tu_set_input_attachments(cmd, cmd->state.subpass);
2835 }
2836
2837 void
2838 tu_CmdNextSubpass2(VkCommandBuffer commandBuffer,
2839 const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
2840 const VkSubpassEndInfoKHR *pSubpassEndInfo)
2841 {
2842 tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
2843 }
2844
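/* Upload user constants for one shader stage: push constants are loaded
 * directly, while UBO ranges that ir3 promoted to the const file are loaded
 * indirectly from the address stored in the bindless descriptor.
 */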
2845 static void
2846 tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
2847 struct tu_descriptor_state *descriptors_state,
2848 gl_shader_stage type,
2849 uint32_t *push_constants)
2850 {
2851 const struct tu_program_descriptor_linkage *link =
2852 &pipeline->program.link[type];
2853 const struct ir3_ubo_analysis_state *state = &link->const_state.ubo_state;
2854
2855 if (link->push_consts.count > 0) {
2856 unsigned num_units = link->push_consts.count;
2857 unsigned offset = link->push_consts.lo;
2858 tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_units * 4);
2859 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
2860 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
2861 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
2862 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
2863 CP_LOAD_STATE6_0_NUM_UNIT(num_units));
2864 tu_cs_emit(cs, 0);
2865 tu_cs_emit(cs, 0);
2866 for (unsigned i = 0; i < num_units * 4; i++)
2867 tu_cs_emit(cs, push_constants[i + offset * 4]);
2868 }
2869
2870 for (uint32_t i = 0; i < state->num_enabled; i++) {
2871 uint32_t size = state->range[i].end - state->range[i].start;
2872 uint32_t offset = state->range[i].start;
2873
2874 /* Even if the start of the const buffer range is within the shader's
2875 * constlen, the end may not be, so clamp the size:
2876 */
2877 size = MIN2(size, (16 * link->constlen) - state->range[i].offset);
2878
2879 if (size == 0)
2880 continue;
2881
2882 /* things should be aligned to vec4: */
2883 debug_assert((state->range[i].offset % 16) == 0);
2884 debug_assert((size % 16) == 0);
2885 debug_assert((offset % 16) == 0);
2886
2887 /* Dig out the descriptor from the descriptor state and read the VA from
2888 * it.
2889 */
2890 assert(state->range[i].ubo.bindless);
2891 uint32_t *base = state->range[i].ubo.bindless_base == MAX_SETS ?
2892 descriptors_state->dynamic_descriptors :
2893 descriptors_state->sets[state->range[i].ubo.bindless_base]->mapped_ptr;
2894 unsigned block = state->range[i].ubo.block;
2895 uint32_t *desc = base + block * A6XX_TEX_CONST_DWORDS;
2896 uint64_t va = desc[0] | ((uint64_t)(desc[1] & A6XX_UBO_1_BASE_HI__MASK) << 32);
2897 assert(va);
2898
2899 tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
2900 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
2901 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
2902 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
2903 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
2904 CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
2905 tu_cs_emit_qw(cs, va + offset);
2906 }
2907 }
2908
2909 static struct tu_draw_state
2910 tu6_emit_consts(struct tu_cmd_buffer *cmd,
2911 const struct tu_pipeline *pipeline,
2912 struct tu_descriptor_state *descriptors_state,
2913 gl_shader_stage type)
2914 {
2915 struct tu_cs cs;
2916 tu_cs_begin_sub_stream(&cmd->sub_cs, 512, &cs); /* TODO: maximum size? */
2917
2918 tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);
2919
2920 return tu_cs_end_draw_state(&cmd->sub_cs, &cs);
2921 }
2922
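/* Build a draw state with VFD_FETCH_BASE/SIZE for every binding the pipeline
 * uses, from the buffers bound with vkCmdBindVertexBuffers.
 */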
2923 static struct tu_draw_state
2924 tu6_emit_vertex_buffers(struct tu_cmd_buffer *cmd,
2925 const struct tu_pipeline *pipeline)
2926 {
2927 struct tu_cs cs;
2928 tu_cs_begin_sub_stream(&cmd->sub_cs, 4 * MAX_VBS, &cs);
2929
2930 int binding;
2931 for_each_bit(binding, pipeline->vi.bindings_used) {
2932 const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
2933 const VkDeviceSize offset = buf->bo_offset +
2934 cmd->state.vb.offsets[binding];
2935
2936 tu_cs_emit_regs(&cs,
2937 A6XX_VFD_FETCH_BASE(binding, .bo = buf->bo, .bo_offset = offset),
2938 A6XX_VFD_FETCH_SIZE(binding, buf->size - offset));
2939
2940 }
2941
2942 cmd->vertex_bindings_set = pipeline->vi.bindings_used;
2943
2944 return tu_cs_end_draw_state(&cmd->sub_cs, &cs);
2945 }
2946
2947 static uint64_t
2948 get_tess_param_bo_size(const struct tu_pipeline *pipeline,
2949 uint32_t draw_count)
2950 {
2951 /* TODO: For indirect draws, we can't compute the BO size ahead of time.
2952 * Still not sure what to do here, so just allocate a reasonably large
2953 * BO and hope for the best for now. */
2954 if (!draw_count)
2955 draw_count = 2048;
2956
2957 /* the tess param BO is pipeline->tess.param_stride bytes per patch,
2958 * which includes both the per-vertex outputs and per-patch outputs;
2959 * build_primitive_map in ir3 calculates this stride.
2960 */
2961 uint32_t verts_per_patch = pipeline->ia.primtype - DI_PT_PATCHES0;
2962 uint32_t num_patches = draw_count / verts_per_patch;
2963 return num_patches * pipeline->tess.param_stride;
2964 }
2965
2966 static uint64_t
2967 get_tess_factor_bo_size(const struct tu_pipeline *pipeline,
2968 uint32_t draw_count)
2969 {
2970 /* TODO: For indirect draws, we can't compute the BO size ahead of time.
2971 * Still not sure what to do here, so just allocate a reasonably large
2972 * BO and hope for the best for now. */
2973 if (!draw_count)
2974 draw_count = 2048;
2975
2976 /* Each distinct patch gets its own tess factor output. */
2977 uint32_t verts_per_patch = pipeline->ia.primtype - DI_PT_PATCHES0;
2978 uint32_t num_patches = draw_count / verts_per_patch;
2979 uint32_t factor_stride;
2980 switch (pipeline->tess.patch_type) {
2981 case IR3_TESS_ISOLINES:
2982 factor_stride = 12;
2983 break;
2984 case IR3_TESS_TRIANGLES:
2985 factor_stride = 20;
2986 break;
2987 case IR3_TESS_QUADS:
2988 factor_stride = 28;
2989 break;
2990 default:
2991 unreachable("bad tessmode");
2992 }
2993 return factor_stride * num_patches;
2994 }
2995
2996 static VkResult
2997 tu6_emit_tess_consts(struct tu_cmd_buffer *cmd,
2998 uint32_t draw_count,
2999 const struct tu_pipeline *pipeline,
3000 struct tu_draw_state *state,
3001 uint64_t *factor_iova)
3002 {
3003 struct tu_cs cs;
3004 VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 16, &cs);
3005 if (result != VK_SUCCESS)
3006 return result;
3007
3008 uint64_t tess_factor_size = get_tess_factor_bo_size(pipeline, draw_count);
3009 uint64_t tess_param_size = get_tess_param_bo_size(pipeline, draw_count);
3010 uint64_t tess_bo_size = tess_factor_size + tess_param_size;
3011 if (tess_bo_size > 0) {
3012 struct tu_bo *tess_bo;
3013 result = tu_get_scratch_bo(cmd->device, tess_bo_size, &tess_bo);
3014 if (result != VK_SUCCESS)
3015 return result;
3016
3017 tu_bo_list_add(&cmd->bo_list, tess_bo,
3018 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
3019 uint64_t tess_factor_iova = tess_bo->iova;
3020 uint64_t tess_param_iova = tess_factor_iova + tess_factor_size;
3021
3022 tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
3023 tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(pipeline->tess.hs_bo_regid) |
3024 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
3025 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
3026 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_HS_SHADER) |
3027 CP_LOAD_STATE6_0_NUM_UNIT(1));
3028 tu_cs_emit(&cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
3029 tu_cs_emit(&cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
3030 tu_cs_emit_qw(&cs, tess_param_iova);
3031 tu_cs_emit_qw(&cs, tess_factor_iova);
3032
3033 tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
3034 tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(pipeline->tess.ds_bo_regid) |
3035 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
3036 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
3037 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_DS_SHADER) |
3038 CP_LOAD_STATE6_0_NUM_UNIT(1));
3039 tu_cs_emit(&cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
3040 tu_cs_emit(&cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
3041 tu_cs_emit_qw(&cs, tess_param_iova);
3042 tu_cs_emit_qw(&cs, tess_factor_iova);
3043
3044 *factor_iova = tess_factor_iova;
3045 }
3046 *state = tu_cs_end_draw_state(&cmd->sub_cs, &cs);
3047 return VK_SUCCESS;
3048 }
3049
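/* State emission common to all draws: flush the renderpass caches, refresh
 * any dirty constant/vertex-buffer/tess state, and (re-)emit the draw state
 * groups that changed (or all of them for the first draw of a render pass).
 */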
3050 static VkResult
3051 tu6_draw_common(struct tu_cmd_buffer *cmd,
3052 struct tu_cs *cs,
3053 bool indexed,
3054 /* note: draw_count is 0 for indirect */
3055 uint32_t draw_count)
3056 {
3057 const struct tu_pipeline *pipeline = cmd->state.pipeline;
3058 VkResult result;
3059
3060 struct tu_descriptor_state *descriptors_state =
3061 &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];
3062
3063 tu_emit_cache_flush_renderpass(cmd, cs);
3064
3065 /* TODO lrz */
3066
3067 tu_cs_emit_regs(cs, A6XX_PC_PRIMITIVE_CNTL_0(
3068 .primitive_restart =
3069 pipeline->ia.primitive_restart && indexed,
3070 .tess_upper_left_domain_origin =
3071 pipeline->tess.upper_left_domain_origin));
3072
3073 if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
3074 cmd->state.shader_const[MESA_SHADER_VERTEX] =
3075 tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX);
3076 cmd->state.shader_const[MESA_SHADER_TESS_CTRL] =
3077 tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_TESS_CTRL);
3078 cmd->state.shader_const[MESA_SHADER_TESS_EVAL] =
3079 tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_TESS_EVAL);
3080 cmd->state.shader_const[MESA_SHADER_GEOMETRY] =
3081 tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_GEOMETRY);
3082 cmd->state.shader_const[MESA_SHADER_FRAGMENT] =
3083 tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT);
3084 }
3085
3086 if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS)
3087 cmd->state.vertex_buffers = tu6_emit_vertex_buffers(cmd, pipeline);
3088
3089 bool has_tess =
3090 pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
3091 struct tu_draw_state tess_consts = {};
3092 if (has_tess) {
3093 uint64_t tess_factor_iova = 0;
3094
3095 cmd->state.has_tess = true;
3096 result = tu6_emit_tess_consts(cmd, draw_count, pipeline, &tess_consts, &tess_factor_iova);
3097 if (result != VK_SUCCESS)
3098 return result;
3099
3100 /* This sequence matches what the blob does before every tess draw;
3101 * PC_TESSFACTOR_ADDR_LO is a non-context register and needs a WFI
3102 * before writing to it.
3103 */
3104 tu_cs_emit_wfi(cs);
3105
3106 tu_cs_emit_pkt4(cs, REG_A6XX_PC_TESSFACTOR_ADDR_LO, 2);
3107 tu_cs_emit_qw(cs, tess_factor_iova);
3108
3109 tu_cs_emit_pkt7(cs, CP_SET_SUBDRAW_SIZE, 1);
3110 tu_cs_emit(cs, draw_count);
3111 }
3112
3113 /* for the first draw in a renderpass, re-emit all the draw states
3114 *
3115 * and if a draw-state disabling path (CmdClearAttachments 3D fallback) was
3116 * used, then draw states must be re-emitted. Note however this only happens
3117 * in the sysmem path, so this can be skipped for the gmem path (TODO).
3118 *
3119 * the two input attachment states are excluded because secondary command
3120 * buffers don't have a state IB to restore them, and not re-emitting them
3121 * is OK since CmdClearAttachments won't disable/overwrite them
3122 */
3123 if (cmd->state.dirty & TU_CMD_DIRTY_DRAW_STATE) {
3124 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (TU_DRAW_STATE_COUNT - 2));
3125
3126 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state);
3127 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state);
3128 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_TESS, tess_consts);
3129 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI, pipeline->vi.state);
3130 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state);
3131 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_RAST, pipeline->rast_state);
3132 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS, pipeline->ds_state);
3133 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_BLEND, pipeline->blend_state);
3134 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const[MESA_SHADER_VERTEX]);
3135 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_HS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_CTRL]);
3136 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_EVAL]);
3137 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const[MESA_SHADER_GEOMETRY]);
3138 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const[MESA_SHADER_FRAGMENT]);
3139 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DESC_SETS, cmd->state.desc_sets);
3140 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DESC_SETS_LOAD, pipeline->load_state);
3141 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers);
3142 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_PARAMS, cmd->state.vs_params);
3143
3144 for (uint32_t i = 0; i < ARRAY_SIZE(cmd->state.dynamic_state); i++) {
3145 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DYNAMIC + i,
3146 ((pipeline->dynamic_state_mask & BIT(i)) ?
3147 cmd->state.dynamic_state[i] :
3148 pipeline->dynamic_state[i]));
3149 }
3150 } else {
3151
3152 /* emit only the draw states that were just updated;
3153 * note we eventually don't want to have to emit anything here
3154 */
3155 uint32_t draw_state_count =
3156 has_tess +
3157 ((cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) ? 5 : 0) +
3158 ((cmd->state.dirty & TU_CMD_DIRTY_DESC_SETS_LOAD) ? 1 : 0) +
3159 ((cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) ? 1 : 0) +
3160 1; /* vs_params */
3161
3162 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_count);
3163
3164 /* We may need to re-emit tess consts if the current draw call is
3165 * sufficiently larger than the last draw call. */
3166 if (has_tess)
3167 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_TESS, tess_consts);
3168 if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
3169 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const[MESA_SHADER_VERTEX]);
3170 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_HS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_CTRL]);
3171 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_EVAL]);
3172 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const[MESA_SHADER_GEOMETRY]);
3173 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const[MESA_SHADER_FRAGMENT]);
3174 }
3175 if (cmd->state.dirty & TU_CMD_DIRTY_DESC_SETS_LOAD)
3176 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DESC_SETS_LOAD, pipeline->load_state);
3177 if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS)
3178 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers);
3179 tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_PARAMS, cmd->state.vs_params);
3180 }
3181
3182 tu_cs_sanity_check(cs);
3183
3184 /* There are too many graphics dirty bits to list here, so just list the
3185 * bits to preserve instead. The only things not emitted here are
3186 * compute-related state.
3187 */
3188 cmd->state.dirty &= TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD;
3189 return VK_SUCCESS;
3190 }
3191
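/* Build the initiator dword (dword 0) of CP_DRAW_INDX_OFFSET for the current
 * pipeline: primitive type, index size, visibility culling, and GS/tess
 * enables, combined with the given index source select.
 */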
3192 static uint32_t
3193 tu_draw_initiator(struct tu_cmd_buffer *cmd, enum pc_di_src_sel src_sel)
3194 {
3195 const struct tu_pipeline *pipeline = cmd->state.pipeline;
3196 uint32_t initiator =
3197 CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(pipeline->ia.primtype) |
3198 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(src_sel) |
3199 CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(cmd->state.index_size) |
3200 CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY);
3201
3202 if (pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT)
3203 initiator |= CP_DRAW_INDX_OFFSET_0_GS_ENABLE;
3204
3205 switch (pipeline->tess.patch_type) {
3206 case IR3_TESS_TRIANGLES:
3207 initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_TRIANGLES) |
3208 CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
3209 break;
3210 case IR3_TESS_ISOLINES:
3211 initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_ISOLINES) |
3212 CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
3213 break;
3214 case IR3_TESS_NONE:
3215 initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_QUADS);
3216 break;
3217 case IR3_TESS_QUADS:
3218 initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_QUADS) |
3219 CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
3220 break;
3221 }
3222 return initiator;
3223 }
3224
3225
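/* Return the const file offset (in vec4 units) where the VS driver params
 * live, or 0 if the shader doesn't use them; 0 also means "disabled" for
 * CP_DRAW_INDIRECT_MULTI.
 */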
3226 static uint32_t
3227 vs_params_offset(struct tu_cmd_buffer *cmd)
3228 {
3229 const struct tu_program_descriptor_linkage *link =
3230 &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
3231 const struct ir3_const_state *const_state = &link->const_state;
3232
3233 if (const_state->offsets.driver_param >= link->constlen)
3234 return 0;
3235
3236 /* this layout is required by CP_DRAW_INDIRECT_MULTI */
3237 STATIC_ASSERT(IR3_DP_DRAWID == 0);
3238 STATIC_ASSERT(IR3_DP_VTXID_BASE == 1);
3239 STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);
3240
3241 /* 0 means disabled for CP_DRAW_INDIRECT_MULTI */
3242 assert(const_state->offsets.driver_param != 0);
3243
3244 return const_state->offsets.driver_param;
3245 }
3246
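/* Emit the per-draw VS parameters: the vertex/instance start offsets and, if
 * the shader uses driver params, a constant block in the layout expected by
 * CP_DRAW_INDIRECT_MULTI (draw id, base vertex, base instance).
 */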
3247 static struct tu_draw_state
3248 tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
3249 uint32_t vertex_offset,
3250 uint32_t first_instance)
3251 {
3252 uint32_t offset = vs_params_offset(cmd);
3253
3254 struct tu_cs cs;
3255 VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 3 + (offset ? 8 : 0), &cs);
3256 if (result != VK_SUCCESS) {
3257 cmd->record_result = result;
3258 return (struct tu_draw_state) {};
3259 }
3260
3261 /* TODO: don't make a new draw state when it doesn't change */
3262
3263 tu_cs_emit_regs(&cs,
3264 A6XX_VFD_INDEX_OFFSET(vertex_offset),
3265 A6XX_VFD_INSTANCE_START_OFFSET(first_instance));
3266
3267 if (offset) {
3268 tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
3269 tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
3270 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
3271 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
3272 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
3273 CP_LOAD_STATE6_0_NUM_UNIT(1));
3274 tu_cs_emit(&cs, 0);
3275 tu_cs_emit(&cs, 0);
3276
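/* One vec4 of driver params, in the layout asserted in vs_params_offset():
 * IR3_DP_DRAWID, IR3_DP_VTXID_BASE, IR3_DP_INSTID_BASE, plus one dword of
 * padding.
 */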
3277 tu_cs_emit(&cs, 0);
3278 tu_cs_emit(&cs, vertex_offset);
3279 tu_cs_emit(&cs, first_instance);
3280 tu_cs_emit(&cs, 0);
3281 }
3282
3283 struct tu_cs_entry entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
3284 return (struct tu_draw_state) {entry.bo->iova + entry.offset, entry.size / 4};
3285 }
3286
3287 void
3288 tu_CmdDraw(VkCommandBuffer commandBuffer,
3289 uint32_t vertexCount,
3290 uint32_t instanceCount,
3291 uint32_t firstVertex,
3292 uint32_t firstInstance)
3293 {
3294 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3295 struct tu_cs *cs = &cmd->draw_cs;
3296
3297 cmd->state.vs_params = tu6_emit_vs_params(cmd, firstVertex, firstInstance);
3298
3299 tu6_draw_common(cmd, cs, false, vertexCount);
3300
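/* Non-indexed draw: with DI_SRC_SEL_AUTO_INDEX the indices are generated
 * automatically rather than fetched, so only the initiator, instance count
 * and vertex count are needed.
 */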
3301 tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
3302 tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_INDEX));
3303 tu_cs_emit(cs, instanceCount);
3304 tu_cs_emit(cs, vertexCount);
3305 }
3306
3307 void
3308 tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
3309 uint32_t indexCount,
3310 uint32_t instanceCount,
3311 uint32_t firstIndex,
3312 int32_t vertexOffset,
3313 uint32_t firstInstance)
3314 {
3315 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3316 struct tu_cs *cs = &cmd->draw_cs;
3317
3318 cmd->state.vs_params = tu6_emit_vs_params(cmd, vertexOffset, firstInstance);
3319
3320 tu6_draw_common(cmd, cs, true, indexCount);
3321
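/* Indexed draw, fetching from the bound index buffer: initiator, instance
 * count, index count, first index, index buffer iova (two dwords) and the
 * max index count, presumably used for bounds checking the index fetch.
 */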
3322 tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 7);
3323 tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_DMA));
3324 tu_cs_emit(cs, instanceCount);
3325 tu_cs_emit(cs, indexCount);
3326 tu_cs_emit(cs, firstIndex);
3327 tu_cs_emit_qw(cs, cmd->state.index_va);
3328 tu_cs_emit(cs, cmd->state.max_index_count);
3329 }
3330
3331 /* Various firmware bugs/inconsistencies mean that some indirect draw opcodes
3332 * do not wait for pending WFIs to complete before executing. Emit a
3333 * WAIT_FOR_ME before these opcodes if one is pending. This may result in a
3334 * few extra WAIT_FOR_MEs with these opcodes, but the alternative would add
3335 * unnecessary WAIT_FOR_MEs before draw opcodes that don't need them.
3336 */
3337 static void
3338 draw_wfm(struct tu_cmd_buffer *cmd)
3339 {
3340 cmd->state.renderpass_cache.flush_bits |=
3341 cmd->state.renderpass_cache.pending_flush_bits & TU_CMD_FLAG_WAIT_FOR_ME;
3342 cmd->state.renderpass_cache.pending_flush_bits &= ~TU_CMD_FLAG_WAIT_FOR_ME;
3343 }
3344
3345 void
3346 tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
3347 VkBuffer _buffer,
3348 VkDeviceSize offset,
3349 uint32_t drawCount,
3350 uint32_t stride)
3351 {
3352 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3353 TU_FROM_HANDLE(tu_buffer, buf, _buffer);
3354 struct tu_cs *cs = &cmd->draw_cs;
3355
3356 cmd->state.vs_params = (struct tu_draw_state) {};
3357
3358 /* The latest known a630_sqe.fw fails to wait for WFI before reading the
3359 * indirect buffer when using CP_DRAW_INDIRECT_MULTI, so we have to fall
3360 * back to CP_WAIT_FOR_ME except for a650 which has a fixed firmware.
3361 *
3362 * TODO: A newer a630_sqe.fw may be released in the future that fixes this;
3363 * if so, we should detect it and avoid this workaround.
3364 */
3365 if (cmd->device->physical_device->gpu_id != 650)
3366 draw_wfm(cmd);
3367
3368 tu6_draw_common(cmd, cs, false, 0);
3369
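/* DST_OFF points CP_DRAW_INDIRECT_MULTI at the VS driver-param consts so
 * that the firmware can write the per-draw id/vertex/instance values
 * itself; a value of 0 disables this (see vs_params_offset()).
 */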
3370 tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 6);
3371 tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_INDEX));
3372 tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_NORMAL) |
3373 A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
3374 tu_cs_emit(cs, drawCount);
3375 tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
3376 tu_cs_emit(cs, stride);
3377
3378 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
3379 }
3380
3381 void
3382 tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
3383 VkBuffer _buffer,
3384 VkDeviceSize offset,
3385 uint32_t drawCount,
3386 uint32_t stride)
3387 {
3388 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3389 TU_FROM_HANDLE(tu_buffer, buf, _buffer);
3390 struct tu_cs *cs = &cmd->draw_cs;
3391
3392 cmd->state.vs_params = (struct tu_draw_state) {};
3393
3394 if (cmd->device->physical_device->gpu_id != 650)
3395 draw_wfm(cmd);
3396
3397 tu6_draw_common(cmd, cs, true, 0);
3398
3399 tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 9);
3400 tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_DMA));
3401 tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_INDEXED) |
3402 A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
3403 tu_cs_emit(cs, drawCount);
3404 tu_cs_emit_qw(cs, cmd->state.index_va);
3405 tu_cs_emit(cs, cmd->state.max_index_count);
3406 tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
3407 tu_cs_emit(cs, stride);
3408
3409 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
3410 }
3411
3412 void
3413 tu_CmdDrawIndirectCount(VkCommandBuffer commandBuffer,
3414 VkBuffer _buffer,
3415 VkDeviceSize offset,
3416 VkBuffer countBuffer,
3417 VkDeviceSize countBufferOffset,
3418 uint32_t drawCount,
3419 uint32_t stride)
3420 {
3421 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3422 TU_FROM_HANDLE(tu_buffer, buf, _buffer);
3423 TU_FROM_HANDLE(tu_buffer, count_buf, countBuffer);
3424 struct tu_cs *cs = &cmd->draw_cs;
3425
3426 cmd->state.vs_params = (struct tu_draw_state) {};
3427
3428 /* It turns out that the firmware we have for a650 only partially fixed the
3429 * problem with CP_DRAW_INDIRECT_MULTI not waiting for WFI's to complete
3430 * before reading indirect parameters. It waits for WFIs before reading
3431 * the draw parameters, but only after reading the indirect count :(.
3432 */
3433 draw_wfm(cmd);
3434
3435 tu6_draw_common(cmd, cs, false, 0);
3436
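/* With INDIRECT_OP_INDIRECT_COUNT, drawCount is only the maximum draw
 * count; the actual count is read from the count buffer emitted below
 * (clamped to drawCount, per the Vulkan spec).
 */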
3437 tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 8);
3438 tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_INDEX));
3439 tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_INDIRECT_COUNT) |
3440 A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
3441 tu_cs_emit(cs, drawCount);
3442 tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
3443 tu_cs_emit_qw(cs, count_buf->bo->iova + count_buf->bo_offset + countBufferOffset);
3444 tu_cs_emit(cs, stride);
3445
3446 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
3447 tu_bo_list_add(&cmd->bo_list, count_buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
3448 }
3449
3450 void
3451 tu_CmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer,
3452 VkBuffer _buffer,
3453 VkDeviceSize offset,
3454 VkBuffer countBuffer,
3455 VkDeviceSize countBufferOffset,
3456 uint32_t drawCount,
3457 uint32_t stride)
3458 {
3459 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3460 TU_FROM_HANDLE(tu_buffer, buf, _buffer);
3461 TU_FROM_HANDLE(tu_buffer, count_buf, countBuffer);
3462 struct tu_cs *cs = &cmd->draw_cs;
3463
3464 cmd->state.vs_params = (struct tu_draw_state) {};
3465
3466 draw_wfm(cmd);
3467
3468 tu6_draw_common(cmd, cs, true, 0);
3469
3470 tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 11);
3471 tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_DMA));
3472 tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_INDIRECT_COUNT_INDEXED) |
3473 A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
3474 tu_cs_emit(cs, drawCount);
3475 tu_cs_emit_qw(cs, cmd->state.index_va);
3476 tu_cs_emit(cs, cmd->state.max_index_count);
3477 tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
3478 tu_cs_emit_qw(cs, count_buf->bo->iova + count_buf->bo_offset + countBufferOffset);
3479 tu_cs_emit(cs, stride);
3480
3481 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
3482 tu_bo_list_add(&cmd->bo_list, count_buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
3483 }
3484
3485 void tu_CmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer,
3486 uint32_t instanceCount,
3487 uint32_t firstInstance,
3488 VkBuffer _counterBuffer,
3489 VkDeviceSize counterBufferOffset,
3490 uint32_t counterOffset,
3491 uint32_t vertexStride)
3492 {
3493 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3494 TU_FROM_HANDLE(tu_buffer, buf, _counterBuffer);
3495 struct tu_cs *cs = &cmd->draw_cs;
3496
3497 /* No known firmware version waits for WFIs with CP_DRAW_AUTO. Plus, for
3498 * the common case where the counter buffer is written by
3499 * vkCmdEndTransformFeedback, we need to wait for the CP_WAIT_MEM_WRITES to
3500 * complete, which means we need a WAIT_FOR_ME anyway.
3501 */
3502 draw_wfm(cmd);
3503
3504 cmd->state.vs_params = tu6_emit_vs_params(cmd, 0, firstInstance);
3505
3506 tu6_draw_common(cmd, cs, false, 0);
3507
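/* CP_DRAW_AUTO presumably derives the vertex count from the byte count in
 * the transform feedback counter buffer, minus counterOffset, divided by
 * vertexStride, matching vkCmdDrawIndirectByteCountEXT semantics.
 */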
3508 tu_cs_emit_pkt7(cs, CP_DRAW_AUTO, 6);
3509 tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_XFB));
3510 tu_cs_emit(cs, instanceCount);
3511 tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + counterBufferOffset);
3512 tu_cs_emit(cs, counterOffset);
3513 tu_cs_emit(cs, vertexStride);
3514
3515 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
3516 }
3517
3518 struct tu_dispatch_info
3519 {
3520 /**
3521 * Determine the layout of the grid (in block units) to be used.
3522 */
3523 uint32_t blocks[3];
3524
3525 /**
3526 * A starting offset for the grid. If unaligned is set, the offset
3527 * must still be aligned.
3528 */
3529 uint32_t offsets[3];
3530 /**
3531 * Whether it's an unaligned compute dispatch.
3532 */
3533 bool unaligned;
3534
3535 /**
3536 * Indirect compute parameters resource.
3537 */
3538 struct tu_buffer *indirect;
3539 uint64_t indirect_offset;
3540 };
3541
3542 static void
3543 tu_emit_compute_driver_params(struct tu_cs *cs, struct tu_pipeline *pipeline,
3544 const struct tu_dispatch_info *info)
3545 {
3546 gl_shader_stage type = MESA_SHADER_COMPUTE;
3547 const struct tu_program_descriptor_linkage *link =
3548 &pipeline->program.link[type];
3549 const struct ir3_const_state *const_state = &link->const_state;
3550 uint32_t offset = const_state->offsets.driver_param;
3551
3552 if (link->constlen <= offset)
3553 return;
3554
3555 if (!info->indirect) {
3556 uint32_t driver_params[IR3_DP_CS_COUNT] = {
3557 [IR3_DP_NUM_WORK_GROUPS_X] = info->blocks[0],
3558 [IR3_DP_NUM_WORK_GROUPS_Y] = info->blocks[1],
3559 [IR3_DP_NUM_WORK_GROUPS_Z] = info->blocks[2],
3560 [IR3_DP_LOCAL_GROUP_SIZE_X] = pipeline->compute.local_size[0],
3561 [IR3_DP_LOCAL_GROUP_SIZE_Y] = pipeline->compute.local_size[1],
3562 [IR3_DP_LOCAL_GROUP_SIZE_Z] = pipeline->compute.local_size[2],
3563 };
3564
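/* constlen and the driver-param offset are counted in vec4s, so clamp the
 * number of driver-param dwords to the space left in the const file.
 */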
3565 uint32_t num_consts = MIN2(const_state->num_driver_params,
3566 (link->constlen - offset) * 4);
3567 /* push constants */
3568 tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_consts);
3569 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
3570 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
3571 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
3572 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
3573 CP_LOAD_STATE6_0_NUM_UNIT(num_consts / 4));
3574 tu_cs_emit(cs, 0);
3575 tu_cs_emit(cs, 0);
3576 uint32_t i;
3577 for (i = 0; i < num_consts; i++)
3578 tu_cs_emit(cs, driver_params[i]);
3579 } else {
3580 tu_finishme("Indirect driver params");
3581 }
3582 }
3583
3584 static void
3585 tu_dispatch(struct tu_cmd_buffer *cmd,
3586 const struct tu_dispatch_info *info)
3587 {
3588 struct tu_cs *cs = &cmd->cs;
3589 struct tu_pipeline *pipeline = cmd->state.compute_pipeline;
3590 struct tu_descriptor_state *descriptors_state =
3591 &cmd->descriptors[VK_PIPELINE_BIND_POINT_COMPUTE];
3592
3593 /* TODO: We could probably flush less if we add a compute_flush_bits
3594 * bitfield.
3595 */
3596 tu_emit_cache_flush(cmd, cs);
3597
3598 /* note: no reason to have this in a separate IB */
3599 tu_cs_emit_state_ib(cs,
3600 tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE));
3601
3602 tu_emit_compute_driver_params(cs, pipeline, info);
3603
3604 if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD)
3605 tu_cs_emit_state_ib(cs, pipeline->load_state);
3606
3607 cmd->state.dirty &= ~TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD;
3608
3609 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
3610 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));
3611
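/* The global sizes below are in invocations (local size times workgroup
 * count); the global offsets are left at zero.
 */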
3612 const uint32_t *local_size = pipeline->compute.local_size;
3613 const uint32_t *num_groups = info->blocks;
3614 tu_cs_emit_regs(cs,
3615 A6XX_HLSQ_CS_NDRANGE_0(.kerneldim = 3,
3616 .localsizex = local_size[0] - 1,
3617 .localsizey = local_size[1] - 1,
3618 .localsizez = local_size[2] - 1),
3619 A6XX_HLSQ_CS_NDRANGE_1(.globalsize_x = local_size[0] * num_groups[0]),
3620 A6XX_HLSQ_CS_NDRANGE_2(.globaloff_x = 0),
3621 A6XX_HLSQ_CS_NDRANGE_3(.globalsize_y = local_size[1] * num_groups[1]),
3622 A6XX_HLSQ_CS_NDRANGE_4(.globaloff_y = 0),
3623 A6XX_HLSQ_CS_NDRANGE_5(.globalsize_z = local_size[2] * num_groups[2]),
3624 A6XX_HLSQ_CS_NDRANGE_6(.globaloff_z = 0));
3625
3626 tu_cs_emit_regs(cs,
3627 A6XX_HLSQ_CS_KERNEL_GROUP_X(1),
3628 A6XX_HLSQ_CS_KERNEL_GROUP_Y(1),
3629 A6XX_HLSQ_CS_KERNEL_GROUP_Z(1));
3630
3631 if (info->indirect) {
3632 uint64_t iova = tu_buffer_iova(info->indirect) + info->indirect_offset;
3633
3634 tu_bo_list_add(&cmd->bo_list, info->indirect->bo,
3635 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
3636
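/* For indirect dispatches the CP reads the three workgroup counts from the
 * indirect buffer at iova; only the local size is programmed here.
 */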
3637 tu_cs_emit_pkt7(cs, CP_EXEC_CS_INDIRECT, 4);
3638 tu_cs_emit(cs, 0x00000000);
3639 tu_cs_emit_qw(cs, iova);
3640 tu_cs_emit(cs,
3641 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(local_size[0] - 1) |
3642 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(local_size[1] - 1) |
3643 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(local_size[2] - 1));
3644 } else {
3645 tu_cs_emit_pkt7(cs, CP_EXEC_CS, 4);
3646 tu_cs_emit(cs, 0x00000000);
3647 tu_cs_emit(cs, CP_EXEC_CS_1_NGROUPS_X(info->blocks[0]));
3648 tu_cs_emit(cs, CP_EXEC_CS_2_NGROUPS_Y(info->blocks[1]));
3649 tu_cs_emit(cs, CP_EXEC_CS_3_NGROUPS_Z(info->blocks[2]));
3650 }
3651
3652 tu_cs_emit_wfi(cs);
3653 }
3654
3655 void
3656 tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
3657 uint32_t base_x,
3658 uint32_t base_y,
3659 uint32_t base_z,
3660 uint32_t x,
3661 uint32_t y,
3662 uint32_t z)
3663 {
3664 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3665 struct tu_dispatch_info info = {};
3666
3667 info.blocks[0] = x;
3668 info.blocks[1] = y;
3669 info.blocks[2] = z;
3670
3671 info.offsets[0] = base_x;
3672 info.offsets[1] = base_y;
3673 info.offsets[2] = base_z;
3674 tu_dispatch(cmd_buffer, &info);
3675 }
3676
3677 void
3678 tu_CmdDispatch(VkCommandBuffer commandBuffer,
3679 uint32_t x,
3680 uint32_t y,
3681 uint32_t z)
3682 {
3683 tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
3684 }
3685
3686 void
3687 tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
3688 VkBuffer _buffer,
3689 VkDeviceSize offset)
3690 {
3691 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3692 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
3693 struct tu_dispatch_info info = {};
3694
3695 info.indirect = buffer;
3696 info.indirect_offset = offset;
3697
3698 tu_dispatch(cmd_buffer, &info);
3699 }
3700
3701 void
3702 tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
3703 {
3704 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3705
3706 tu_cs_end(&cmd_buffer->draw_cs);
3707 tu_cs_end(&cmd_buffer->draw_epilogue_cs);
3708
3709 if (use_sysmem_rendering(cmd_buffer))
3710 tu_cmd_render_sysmem(cmd_buffer);
3711 else
3712 tu_cmd_render_tiles(cmd_buffer);
3713
3714 /* discard draw_cs and draw_epilogue_cs entries now that the tiles are
3715 * rendered */
3716 tu_cs_discard_entries(&cmd_buffer->draw_cs);
3717 tu_cs_begin(&cmd_buffer->draw_cs);
3718 tu_cs_discard_entries(&cmd_buffer->draw_epilogue_cs);
3719 tu_cs_begin(&cmd_buffer->draw_epilogue_cs);
3720
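/* Flushes still pending from the render pass are handed off to the
 * command-buffer-level cache state now that the pass has been emitted.
 */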
3721 cmd_buffer->state.cache.pending_flush_bits |=
3722 cmd_buffer->state.renderpass_cache.pending_flush_bits;
3723 tu_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier, true);
3724
3725 cmd_buffer->state.pass = NULL;
3726 cmd_buffer->state.subpass = NULL;
3727 cmd_buffer->state.framebuffer = NULL;
3728 cmd_buffer->state.has_tess = false;
3729 cmd_buffer->state.has_subpass_predication = false;
3730 }
3731
3732 void
3733 tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
3734 const VkSubpassEndInfoKHR *pSubpassEndInfo)
3735 {
3736 tu_CmdEndRenderPass(commandBuffer);
3737 }
3738
3739 struct tu_barrier_info
3740 {
3741 uint32_t eventCount;
3742 const VkEvent *pEvents;
3743 VkPipelineStageFlags srcStageMask;
3744 };
3745
3746 static void
3747 tu_barrier(struct tu_cmd_buffer *cmd,
3748 uint32_t memoryBarrierCount,
3749 const VkMemoryBarrier *pMemoryBarriers,
3750 uint32_t bufferMemoryBarrierCount,
3751 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3752 uint32_t imageMemoryBarrierCount,
3753 const VkImageMemoryBarrier *pImageMemoryBarriers,
3754 const struct tu_barrier_info *info)
3755 {
3756 struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;
3757 VkAccessFlags srcAccessMask = 0;
3758 VkAccessFlags dstAccessMask = 0;
3759
3760 for (uint32_t i = 0; i < memoryBarrierCount; i++) {
3761 srcAccessMask |= pMemoryBarriers[i].srcAccessMask;
3762 dstAccessMask |= pMemoryBarriers[i].dstAccessMask;
3763 }
3764
3765 for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
3766 srcAccessMask |= pBufferMemoryBarriers[i].srcAccessMask;
3767 dstAccessMask |= pBufferMemoryBarriers[i].dstAccessMask;
3768 }
3769
3770 enum tu_cmd_access_mask src_flags = 0;
3771 enum tu_cmd_access_mask dst_flags = 0;
3772
3773 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
3774 TU_FROM_HANDLE(tu_image, image, pImageMemoryBarriers[i].image);
3775 VkImageLayout old_layout = pImageMemoryBarriers[i].oldLayout;
3776 /* For non-linear images, PREINITIALIZED is the same as UNDEFINED */
3777 if (old_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
3778 (image->tiling != VK_IMAGE_TILING_LINEAR &&
3779 old_layout == VK_IMAGE_LAYOUT_PREINITIALIZED)) {
3780 /* The underlying memory for this image may have been used earlier
3781 * within the same queue submission for a different image, which
3782 * means that there may be old, stale cache entries which are in the
3783 * "wrong" location, which could cause problems later after writing
3784 * to the image. We don't want these entries being flushed later and
3785 * overwriting the actual image, so we need to flush the CCU.
3786 */
3787 src_flags |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
3788 }
3789 srcAccessMask |= pImageMemoryBarriers[i].srcAccessMask;
3790 dstAccessMask |= pImageMemoryBarriers[i].dstAccessMask;
3791 }
3792
3793 /* Inside a renderpass, we don't know yet whether we'll be using sysmem,
3794 * so we have to use the sysmem flushes.
3795 */
3796 bool gmem = cmd->state.ccu_state == TU_CMD_CCU_GMEM &&
3797 !cmd->state.pass;
3798 src_flags |= vk2tu_access(srcAccessMask, gmem);
3799 dst_flags |= vk2tu_access(dstAccessMask, gmem);
3800
3801 struct tu_cache_state *cache =
3802 cmd->state.pass ? &cmd->state.renderpass_cache : &cmd->state.cache;
3803 tu_flush_for_access(cache, src_flags, dst_flags);
3804
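/* For vkCmdWaitEvents, poll each event BO until it reads back 1, i.e. until
 * the corresponding vkCmdSetEvent has landed (see write_event()).
 */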
3805 for (uint32_t i = 0; i < info->eventCount; i++) {
3806 TU_FROM_HANDLE(tu_event, event, info->pEvents[i]);
3807
3808 tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_READ);
3809
3810 tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
3811 tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
3812 CP_WAIT_REG_MEM_0_POLL_MEMORY);
3813 tu_cs_emit_qw(cs, event->bo.iova); /* POLL_ADDR_LO/HI */
3814 tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(1));
3815 tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0u));
3816 tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(20));
3817 }
3818 }
3819
3820 void
3821 tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
3822 VkPipelineStageFlags srcStageMask,
3823 VkPipelineStageFlags dstStageMask,
3824 VkDependencyFlags dependencyFlags,
3825 uint32_t memoryBarrierCount,
3826 const VkMemoryBarrier *pMemoryBarriers,
3827 uint32_t bufferMemoryBarrierCount,
3828 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3829 uint32_t imageMemoryBarrierCount,
3830 const VkImageMemoryBarrier *pImageMemoryBarriers)
3831 {
3832 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3833 struct tu_barrier_info info;
3834
3835 info.eventCount = 0;
3836 info.pEvents = NULL;
3837 info.srcStageMask = srcStageMask;
3838
3839 tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
3840 bufferMemoryBarrierCount, pBufferMemoryBarriers,
3841 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
3842 }
3843
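/* Write `value` to the event BO once the work for the given stages has
 * completed: 1 for vkCmdSetEvent, 0 for vkCmdResetEvent.
 */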
3844 static void
3845 write_event(struct tu_cmd_buffer *cmd, struct tu_event *event,
3846 VkPipelineStageFlags stageMask, unsigned value)
3847 {
3848 struct tu_cs *cs = &cmd->cs;
3849
3850 /* vkCmdSetEvent/vkCmdResetEvent cannot be called inside a render pass */
3851 assert(!cmd->state.pass);
3852
3853 tu_emit_cache_flush(cmd, cs);
3854
3855 tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_WRITE);
3856
3857 /* Flags that only require a top-of-pipe event. DrawIndirect parameters are
3858 * read by the CP, so the draw indirect stage counts as top-of-pipe too.
3859 */
3860 VkPipelineStageFlags top_of_pipe_flags =
3861 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
3862 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
3863
3864 if (!(stageMask & ~top_of_pipe_flags)) {
3865 tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
3866 tu_cs_emit_qw(cs, event->bo.iova); /* ADDR_LO/HI */
3867 tu_cs_emit(cs, value);
3868 } else {
3869 /* Use a RB_DONE_TS event to wait for everything to complete. */
3870 tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 4);
3871 tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(RB_DONE_TS));
3872 tu_cs_emit_qw(cs, event->bo.iova);
3873 tu_cs_emit(cs, value);
3874 }
3875 }
3876
3877 void
3878 tu_CmdSetEvent(VkCommandBuffer commandBuffer,
3879 VkEvent _event,
3880 VkPipelineStageFlags stageMask)
3881 {
3882 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3883 TU_FROM_HANDLE(tu_event, event, _event);
3884
3885 write_event(cmd, event, stageMask, 1);
3886 }
3887
3888 void
3889 tu_CmdResetEvent(VkCommandBuffer commandBuffer,
3890 VkEvent _event,
3891 VkPipelineStageFlags stageMask)
3892 {
3893 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3894 TU_FROM_HANDLE(tu_event, event, _event);
3895
3896 write_event(cmd, event, stageMask, 0);
3897 }
3898
3899 void
3900 tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
3901 uint32_t eventCount,
3902 const VkEvent *pEvents,
3903 VkPipelineStageFlags srcStageMask,
3904 VkPipelineStageFlags dstStageMask,
3905 uint32_t memoryBarrierCount,
3906 const VkMemoryBarrier *pMemoryBarriers,
3907 uint32_t bufferMemoryBarrierCount,
3908 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3909 uint32_t imageMemoryBarrierCount,
3910 const VkImageMemoryBarrier *pImageMemoryBarriers)
3911 {
3912 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3913 struct tu_barrier_info info;
3914
3915 info.eventCount = eventCount;
3916 info.pEvents = pEvents;
3917 info.srcStageMask = 0;
3918
3919 tu_barrier(cmd, memoryBarrierCount, pMemoryBarriers,
3920 bufferMemoryBarrierCount, pBufferMemoryBarriers,
3921 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
3922 }
3923
3924 void
3925 tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
3926 {
3927 /* No-op */
3928 }
3929
3930
3931 void
3932 tu_CmdBeginConditionalRenderingEXT(VkCommandBuffer commandBuffer,
3933 const VkConditionalRenderingBeginInfoEXT *pConditionalRenderingBegin)
3934 {
3935 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3936
3937 cmd->state.predication_active = true;
3938 if (cmd->state.pass)
3939 cmd->state.has_subpass_predication = true;
3940
3941 struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;
3942
3943 tu_cs_emit_pkt7(cs, CP_DRAW_PRED_ENABLE_GLOBAL, 1);
3944 tu_cs_emit(cs, 1);
3945
3946 /* Wait for any writes to the predicate to land */
3947 if (cmd->state.pass)
3948 tu_emit_cache_flush_renderpass(cmd, cs);
3949 else
3950 tu_emit_cache_flush(cmd, cs);
3951
3952 TU_FROM_HANDLE(tu_buffer, buf, pConditionalRenderingBegin->buffer);
3953 uint64_t iova = tu_buffer_iova(buf) + pConditionalRenderingBegin->offset;
3954
3955 /* qcom doesn't support 32-bit reference values, only 64-bit, but Vulkan
3956 * mandates 32-bit comparisons. Our workaround is to copy the reference
3957 * value to the low 32 bits of a location where the high 32 bits are known
3958 * to be 0 and then compare that.
3959 */
3960 tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 5);
3961 tu_cs_emit(cs, 0);
3962 tu_cs_emit_qw(cs, global_iova(cmd, predicate));
3963 tu_cs_emit_qw(cs, iova);
3964
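/* Make sure the copy above has landed in memory before CP_DRAW_PRED_SET
 * reads it.
 */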
3965 tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
3966 tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
3967
3968 bool inv = pConditionalRenderingBegin->flags & VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT;
3969 tu_cs_emit_pkt7(cs, CP_DRAW_PRED_SET, 3);
3970 tu_cs_emit(cs, CP_DRAW_PRED_SET_0_SRC(PRED_SRC_MEM) |
3971 CP_DRAW_PRED_SET_0_TEST(inv ? EQ_0_PASS : NE_0_PASS));
3972 tu_cs_emit_qw(cs, global_iova(cmd, predicate));
3973
3974 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
3975 }
3976
3977 void
3978 tu_CmdEndConditionalRenderingEXT(VkCommandBuffer commandBuffer)
3979 {
3980 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3981
3982 cmd->state.predication_active = false;
3983
3984 struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;
3985
3986 tu_cs_emit_pkt7(cs, CP_DRAW_PRED_ENABLE_GLOBAL, 1);
3987 tu_cs_emit(cs, 0);
3988 }
3989