[mesa.git] / src / freedreno / vulkan / tu_cmd_buffer.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include "registers/adreno_pm4.xml.h"
31 #include "registers/adreno_common.xml.h"
32
33 #include "vk_format.h"
34
35 #include "tu_cs.h"
36 #include "tu_blit.h"
37
38 #define OVERFLOW_FLAG_REG REG_A6XX_CP_SCRATCH_REG(0)
39
40 void
41 tu_bo_list_init(struct tu_bo_list *list)
42 {
43 list->count = list->capacity = 0;
44 list->bo_infos = NULL;
45 }
46
47 void
48 tu_bo_list_destroy(struct tu_bo_list *list)
49 {
50 free(list->bo_infos);
51 }
52
53 void
54 tu_bo_list_reset(struct tu_bo_list *list)
55 {
56 list->count = 0;
57 }
58
59 /**
60 * \a flags consists of MSM_SUBMIT_BO_FLAGS.
61 */
62 static uint32_t
63 tu_bo_list_add_info(struct tu_bo_list *list,
64 const struct drm_msm_gem_submit_bo *bo_info)
65 {
66 assert(bo_info->handle != 0);
67
68 for (uint32_t i = 0; i < list->count; ++i) {
69 if (list->bo_infos[i].handle == bo_info->handle) {
70 assert(list->bo_infos[i].presumed == bo_info->presumed);
71 list->bo_infos[i].flags |= bo_info->flags;
72 return i;
73 }
74 }
75
76 /* grow list->bo_infos if needed */
77 if (list->count == list->capacity) {
78 uint32_t new_capacity = MAX2(2 * list->count, 16);
79 struct drm_msm_gem_submit_bo *new_bo_infos = realloc(
80 list->bo_infos, new_capacity * sizeof(struct drm_msm_gem_submit_bo));
81 if (!new_bo_infos)
82 return TU_BO_LIST_FAILED;
83 list->bo_infos = new_bo_infos;
84 list->capacity = new_capacity;
85 }
86
87 list->bo_infos[list->count] = *bo_info;
88 return list->count++;
89 }
90
91 uint32_t
92 tu_bo_list_add(struct tu_bo_list *list,
93 const struct tu_bo *bo,
94 uint32_t flags)
95 {
96 return tu_bo_list_add_info(list, &(struct drm_msm_gem_submit_bo) {
97 .flags = flags,
98 .handle = bo->gem_handle,
99 .presumed = bo->iova,
100 });
101 }
102
103 VkResult
104 tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other)
105 {
106 for (uint32_t i = 0; i < other->count; i++) {
107 if (tu_bo_list_add_info(list, other->bo_infos + i) == TU_BO_LIST_FAILED)
108 return VK_ERROR_OUT_OF_HOST_MEMORY;
109 }
110
111 return VK_SUCCESS;
112 }
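/* A minimal usage sketch (hypothetical bo/flags; MSM_SUBMIT_BO_READ and
 * MSM_SUBMIT_BO_WRITE come from the msm UAPI header).  Re-adding a BO that
 * is already in the list just ORs in the new flags and returns the same
 * index:
 *
 *    struct tu_bo_list list;
 *    tu_bo_list_init(&list);
 *    uint32_t idx = tu_bo_list_add(&list, bo, MSM_SUBMIT_BO_READ);
 *    assert(tu_bo_list_add(&list, bo, MSM_SUBMIT_BO_WRITE) == idx);
 *    tu_bo_list_destroy(&list);
 */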
113
114 static bool
115 is_linear_mipmapped(const struct tu_image_view *iview)
116 {
117 return iview->image->layout.tile_mode == TILE6_LINEAR &&
118 iview->base_mip != iview->image->level_count - 1;
119 }
120
121 static bool
122 force_sysmem(const struct tu_cmd_buffer *cmd,
123 const struct VkRect2D *render_area)
124 {
125 const struct tu_framebuffer *fb = cmd->state.framebuffer;
126 const struct tu_physical_device *device = cmd->device->physical_device;
127 bool has_linear_mipmapped_store = false;
128 const struct tu_render_pass *pass = cmd->state.pass;
129
130 /* Iterate over all the places we call tu6_emit_store_attachment() */
131 for (unsigned i = 0; i < pass->subpass_count; i++) {
132 const struct tu_subpass *subpass = &pass->subpasses[i];
133 if (subpass->resolve_attachments) {
134 for (unsigned j = 0; j < subpass->color_count; j++) {
135 uint32_t a = subpass->resolve_attachments[j].attachment;
136 if (a != VK_ATTACHMENT_UNUSED &&
137 cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
138 const struct tu_image_view *iview = fb->attachments[a].attachment;
139 if (is_linear_mipmapped(iview)) {
140 has_linear_mipmapped_store = true;
141 break;
142 }
143 }
144 }
145 }
146 }
147
148 for (unsigned i = 0; i < pass->attachment_count; i++) {
149 if (pass->attachments[i].gmem_offset >= 0 &&
150 cmd->state.pass->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
151 const struct tu_image_view *iview = fb->attachments[i].attachment;
152 if (is_linear_mipmapped(iview)) {
153 has_linear_mipmapped_store = true;
154 break;
155 }
156 }
157 }
158
159 /* Linear textures cannot have any padding between mipmap levels and their
160 * height isn't padded, while at the same time the GMEM->MEM resolve does
161 * not have per-pixel granularity, so if the image height isn't aligned to
162 * the resolve granularity and the render area is tall enough, we may wind
163 * up writing past the bottom of the image into the next miplevel or even
164 * past the end of the image. For the last miplevel, the layout code should
165 * insert enough padding so that the overdraw writes to the padding. To
166 * work around this, we force-enable sysmem rendering.
167 */
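/* Worked example (hypothetical numbers): with tile_align_h = 16 and a
 * 1080-pixel-tall linear attachment, a render area whose bottom edge is at
 * y2 = 1080 aligns up to 1088, so the resolve could write 8 rows past the
 * end of the miplevel; that is the case the check below catches.
 */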
168 const uint32_t y2 = render_area->offset.y + render_area->extent.height;
169 const uint32_t aligned_y2 = ALIGN_POT(y2, device->tile_align_h);
170
171 return has_linear_mipmapped_store && aligned_y2 > fb->height;
172 }
173
174 static void
175 tu_tiling_config_update_tile_layout(struct tu_tiling_config *tiling,
176 const struct tu_device *dev,
177 uint32_t pixels)
178 {
179 const uint32_t tile_align_w = dev->physical_device->tile_align_w;
180 const uint32_t tile_align_h = dev->physical_device->tile_align_h;
181 const uint32_t max_tile_width = 1024; /* A6xx */
182
183 /* note: don't offset the tiling config by render_area.offset,
184 * because the binning pass can't deal with it.
185 * This means we might end up with more tiles than necessary,
186 * but load/store/etc. are still scissored to the render_area.
187 */
188 tiling->tile0.offset = (VkOffset2D) {};
189
190 const uint32_t ra_width =
191 tiling->render_area.extent.width +
192 (tiling->render_area.offset.x - tiling->tile0.offset.x);
193 const uint32_t ra_height =
194 tiling->render_area.extent.height +
195 (tiling->render_area.offset.y - tiling->tile0.offset.y);
196
197 /* start from 1 tile */
198 tiling->tile_count = (VkExtent2D) {
199 .width = 1,
200 .height = 1,
201 };
202 tiling->tile0.extent = (VkExtent2D) {
203 .width = align(ra_width, tile_align_w),
204 .height = align(ra_height, tile_align_h),
205 };
206
207 if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN)) {
208 /* start with 2x2 tiles */
209 tiling->tile_count.width = 2;
210 tiling->tile_count.height = 2;
211 tiling->tile0.extent.width = align(DIV_ROUND_UP(ra_width, 2), tile_align_w);
212 tiling->tile0.extent.height = align(DIV_ROUND_UP(ra_height, 2), tile_align_h);
213 }
214
215 /* do not exceed max tile width */
216 while (tiling->tile0.extent.width > max_tile_width) {
217 tiling->tile_count.width++;
218 tiling->tile0.extent.width =
219 align(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
220 }
221
222 /* will force to sysmem, don't bother trying to have a valid tile config
223 * TODO: just skip all GMEM stuff when sysmem is forced?
224 */
225 if (!pixels)
226 return;
227
228 /* do not exceed gmem size */
229 while (tiling->tile0.extent.width * tiling->tile0.extent.height > pixels) {
230 if (tiling->tile0.extent.width > MAX2(tile_align_w, tiling->tile0.extent.height)) {
231 tiling->tile_count.width++;
232 tiling->tile0.extent.width =
233 align(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
234 } else {
235 /* if this assert fails then layout is impossible.. */
236 assert(tiling->tile0.extent.height > tile_align_h);
237 tiling->tile_count.height++;
238 tiling->tile0.extent.height =
239 align(DIV_ROUND_UP(ra_height, tiling->tile_count.height), tile_align_h);
240 }
241 }
242 }
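/* Worked example for the function above (hypothetical numbers): a 1920x1080
 * render area with tile_align_w = tile_align_h = 32 starts as a single
 * 1920x1088 tile.  1920 exceeds max_tile_width, so the width is split into
 * 2 tiles of align(1920/2, 32) = 960.  With a GMEM budget of 512*1024
 * pixels, 960*1088 is still too large, and since 960 < 1088 the height is
 * split next, giving 2x2 tiles of 960x544 = 522240 pixels, which fits.
 */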
243
244 static void
245 tu_tiling_config_update_pipe_layout(struct tu_tiling_config *tiling,
246 const struct tu_device *dev)
247 {
248 const uint32_t max_pipe_count = 32; /* A6xx */
249
250 /* start from 1 tile per pipe */
251 tiling->pipe0 = (VkExtent2D) {
252 .width = 1,
253 .height = 1,
254 };
255 tiling->pipe_count = tiling->tile_count;
256
257 while (tiling->pipe_count.width * tiling->pipe_count.height > max_pipe_count) {
258 if (tiling->pipe0.width < tiling->pipe0.height) {
259 tiling->pipe0.width += 1;
260 tiling->pipe_count.width =
261 DIV_ROUND_UP(tiling->tile_count.width, tiling->pipe0.width);
262 } else {
263 tiling->pipe0.height += 1;
264 tiling->pipe_count.height =
265 DIV_ROUND_UP(tiling->tile_count.height, tiling->pipe0.height);
266 }
267 }
268 }
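/* Worked example (hypothetical numbers): 12x6 = 72 tiles exceed the 32
 * available pipes.  The loop first grows pipe0 to 1x2 (12x3 = 36 pipes,
 * still too many), then to 2x2 (6x3 = 18 pipes), so each pipe ends up
 * covering a 2x2 block of tiles.
 */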
269
270 static void
271 tu_tiling_config_update_pipes(struct tu_tiling_config *tiling,
272 const struct tu_device *dev)
273 {
274 const uint32_t max_pipe_count = 32; /* A6xx */
275 const uint32_t used_pipe_count =
276 tiling->pipe_count.width * tiling->pipe_count.height;
277 const VkExtent2D last_pipe = {
278 .width = (tiling->tile_count.width - 1) % tiling->pipe0.width + 1,
279 .height = (tiling->tile_count.height - 1) % tiling->pipe0.height + 1,
280 };
281
282 assert(used_pipe_count <= max_pipe_count);
283 assert(max_pipe_count <= ARRAY_SIZE(tiling->pipe_config));
284
285 for (uint32_t y = 0; y < tiling->pipe_count.height; y++) {
286 for (uint32_t x = 0; x < tiling->pipe_count.width; x++) {
287 const uint32_t pipe_x = tiling->pipe0.width * x;
288 const uint32_t pipe_y = tiling->pipe0.height * y;
289 const uint32_t pipe_w = (x == tiling->pipe_count.width - 1)
290 ? last_pipe.width
291 : tiling->pipe0.width;
292 const uint32_t pipe_h = (y == tiling->pipe_count.height - 1)
293 ? last_pipe.height
294 : tiling->pipe0.height;
295 const uint32_t n = tiling->pipe_count.width * y + x;
296
297 tiling->pipe_config[n] = A6XX_VSC_PIPE_CONFIG_REG_X(pipe_x) |
298 A6XX_VSC_PIPE_CONFIG_REG_Y(pipe_y) |
299 A6XX_VSC_PIPE_CONFIG_REG_W(pipe_w) |
300 A6XX_VSC_PIPE_CONFIG_REG_H(pipe_h);
301 tiling->pipe_sizes[n] = CP_SET_BIN_DATA5_0_VSC_SIZE(pipe_w * pipe_h);
302 }
303 }
304
305 memset(tiling->pipe_config + used_pipe_count, 0,
306 sizeof(uint32_t) * (max_pipe_count - used_pipe_count));
307 }
308
309 static void
310 tu_tiling_config_get_tile(const struct tu_tiling_config *tiling,
311 const struct tu_device *dev,
312 uint32_t tx,
313 uint32_t ty,
314 struct tu_tile *tile)
315 {
316 /* find the pipe and the slot for tile (tx, ty) */
317 const uint32_t px = tx / tiling->pipe0.width;
318 const uint32_t py = ty / tiling->pipe0.height;
319 const uint32_t sx = tx - tiling->pipe0.width * px;
320 const uint32_t sy = ty - tiling->pipe0.height * py;
321 /* last pipe has different width */
322 const uint32_t pipe_width =
323 MIN2(tiling->pipe0.width,
324 tiling->tile_count.width - px * tiling->pipe0.width);
325
326 assert(tx < tiling->tile_count.width && ty < tiling->tile_count.height);
327 assert(px < tiling->pipe_count.width && py < tiling->pipe_count.height);
328 assert(sx < tiling->pipe0.width && sy < tiling->pipe0.height);
329
330 /* convert to 1D indices */
331 tile->pipe = tiling->pipe_count.width * py + px;
332 tile->slot = pipe_width * sy + sx;
333
334 /* get the blit area for the tile */
335 tile->begin = (VkOffset2D) {
336 .x = tiling->tile0.offset.x + tiling->tile0.extent.width * tx,
337 .y = tiling->tile0.offset.y + tiling->tile0.extent.height * ty,
338 };
339 tile->end.x =
340 (tx == tiling->tile_count.width - 1)
341 ? tiling->render_area.offset.x + tiling->render_area.extent.width
342 : tile->begin.x + tiling->tile0.extent.width;
343 tile->end.y =
344 (ty == tiling->tile_count.height - 1)
345 ? tiling->render_area.offset.y + tiling->render_area.extent.height
346 : tile->begin.y + tiling->tile0.extent.height;
347 }
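/* Worked example (hypothetical numbers, continuing the 12x6-tile / 2x2-pipe
 * case above): tile (tx, ty) = (5, 4) lands in pipe (px, py) = (2, 2), so
 * tile->pipe = 6*2 + 2 = 14, and within that pipe (sx, sy) = (1, 0); the
 * pipe is full-width (pipe_width = 2), so tile->slot = 2*0 + 1 = 1.
 */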
348
349 enum a3xx_msaa_samples
350 tu_msaa_samples(uint32_t samples)
351 {
352 switch (samples) {
353 case 1:
354 return MSAA_ONE;
355 case 2:
356 return MSAA_TWO;
357 case 4:
358 return MSAA_FOUR;
359 case 8:
360 return MSAA_EIGHT;
361 default:
362 assert(!"invalid sample count");
363 return MSAA_ONE;
364 }
365 }
366
367 static enum a4xx_index_size
368 tu6_index_size(VkIndexType type)
369 {
370 switch (type) {
371 case VK_INDEX_TYPE_UINT16:
372 return INDEX4_SIZE_16_BIT;
373 case VK_INDEX_TYPE_UINT32:
374 return INDEX4_SIZE_32_BIT;
375 default:
376 unreachable("invalid VkIndexType");
377 return INDEX4_SIZE_8_BIT;
378 }
379 }
380
381 unsigned
382 tu6_emit_event_write(struct tu_cmd_buffer *cmd,
383 struct tu_cs *cs,
384 enum vgt_event_type event,
385 bool need_seqno)
386 {
387 unsigned seqno = 0;
388
389 tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
390 tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
391 if (need_seqno) {
392 tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
393 seqno = ++cmd->scratch_seqno;
394 tu_cs_emit(cs, seqno);
395 }
396
397 return seqno;
398 }
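/* The seqno returned above is what tu6_cache_flush() below waits on: the
 * CP writes it to scratch_bo when the event completes, so polling the
 * scratch BO with CP_WAIT_REG_MEM / CP_WAIT_MEM_GTE serializes against the
 * event.
 */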
399
400 static void
401 tu6_emit_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
402 {
403 tu6_emit_event_write(cmd, cs, 0x31, false);
404 }
405
406 static void
407 tu6_emit_lrz_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
408 {
409 tu6_emit_event_write(cmd, cs, LRZ_FLUSH, false);
410 }
411
412 static void
413 tu6_emit_wfi(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
414 {
415 if (cmd->wait_for_idle) {
416 tu_cs_emit_wfi(cs);
417 cmd->wait_for_idle = false;
418 }
419 }
420
421 #define tu_image_view_ubwc_pitches(iview) \
422 .pitch = tu_image_ubwc_pitch(iview->image, iview->base_mip), \
423 .array_pitch = tu_image_ubwc_size(iview->image, iview->base_mip) >> 2
424
425 static void
426 tu6_emit_zs(struct tu_cmd_buffer *cmd,
427 const struct tu_subpass *subpass,
428 struct tu_cs *cs)
429 {
430 const struct tu_framebuffer *fb = cmd->state.framebuffer;
431
432 const uint32_t a = subpass->depth_stencil_attachment.attachment;
433 if (a == VK_ATTACHMENT_UNUSED) {
434 tu_cs_emit_regs(cs,
435 A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE),
436 A6XX_RB_DEPTH_BUFFER_PITCH(0),
437 A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(0),
438 A6XX_RB_DEPTH_BUFFER_BASE(0),
439 A6XX_RB_DEPTH_BUFFER_BASE_GMEM(0));
440
441 tu_cs_emit_regs(cs,
442 A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE));
443
444 tu_cs_emit_regs(cs,
445 A6XX_GRAS_LRZ_BUFFER_BASE(0),
446 A6XX_GRAS_LRZ_BUFFER_PITCH(0),
447 A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));
448
449 tu_cs_emit_regs(cs, A6XX_RB_STENCIL_INFO(0));
450
451 return;
452 }
453
454 const struct tu_image_view *iview = fb->attachments[a].attachment;
455 enum a6xx_depth_format fmt = tu6_pipe2depth(iview->vk_format);
456
457 tu_cs_emit_regs(cs,
458 A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt),
459 A6XX_RB_DEPTH_BUFFER_PITCH(tu_image_stride(iview->image, iview->base_mip)),
460 A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(iview->image->layout.layer_size),
461 A6XX_RB_DEPTH_BUFFER_BASE(tu_image_view_base_ref(iview)),
462 A6XX_RB_DEPTH_BUFFER_BASE_GMEM(cmd->state.pass->attachments[a].gmem_offset));
463
464 tu_cs_emit_regs(cs,
465 A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = fmt));
466
467 tu_cs_emit_regs(cs,
468 A6XX_RB_DEPTH_FLAG_BUFFER_BASE(tu_image_view_ubwc_base_ref(iview)),
469 A6XX_RB_DEPTH_FLAG_BUFFER_PITCH(tu_image_view_ubwc_pitches(iview)));
470
471 tu_cs_emit_regs(cs,
472 A6XX_GRAS_LRZ_BUFFER_BASE(0),
473 A6XX_GRAS_LRZ_BUFFER_PITCH(0),
474 A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));
475
476 tu_cs_emit_regs(cs,
477 A6XX_RB_STENCIL_INFO(0));
478
479 /* enable zs? */
480 }
481
482 static void
483 tu6_emit_mrt(struct tu_cmd_buffer *cmd,
484 const struct tu_subpass *subpass,
485 struct tu_cs *cs)
486 {
487 const struct tu_framebuffer *fb = cmd->state.framebuffer;
488 unsigned char mrt_comp[MAX_RTS] = { 0 };
489 unsigned srgb_cntl = 0;
490
491 for (uint32_t i = 0; i < subpass->color_count; ++i) {
492 uint32_t a = subpass->color_attachments[i].attachment;
493 if (a == VK_ATTACHMENT_UNUSED)
494 continue;
495
496 const struct tu_image_view *iview = fb->attachments[a].attachment;
497 const enum a6xx_tile_mode tile_mode =
498 tu6_get_image_tile_mode(iview->image, iview->base_mip);
499
500 mrt_comp[i] = 0xf;
501
502 if (vk_format_is_srgb(iview->vk_format))
503 srgb_cntl |= (1 << i);
504
505 const struct tu_native_format format =
506 tu6_format_color(iview->vk_format, iview->image->layout.tile_mode);
507
508 tu_cs_emit_regs(cs,
509 A6XX_RB_MRT_BUF_INFO(i,
510 .color_tile_mode = tile_mode,
511 .color_format = format.fmt,
512 .color_swap = format.swap),
513 A6XX_RB_MRT_PITCH(i, tu_image_stride(iview->image, iview->base_mip)),
514 A6XX_RB_MRT_ARRAY_PITCH(i, iview->image->layout.layer_size),
515 A6XX_RB_MRT_BASE(i, tu_image_view_base_ref(iview)),
516 A6XX_RB_MRT_BASE_GMEM(i, cmd->state.pass->attachments[a].gmem_offset));
517
518 tu_cs_emit_regs(cs,
519 A6XX_SP_FS_MRT_REG(i,
520 .color_format = format.fmt,
521 .color_sint = vk_format_is_sint(iview->vk_format),
522 .color_uint = vk_format_is_uint(iview->vk_format)));
523
524 tu_cs_emit_regs(cs,
525 A6XX_RB_MRT_FLAG_BUFFER_ADDR(i, tu_image_view_ubwc_base_ref(iview)),
526 A6XX_RB_MRT_FLAG_BUFFER_PITCH(i, tu_image_view_ubwc_pitches(iview)));
527 }
528
529 tu_cs_emit_regs(cs,
530 A6XX_RB_SRGB_CNTL(.dword = srgb_cntl));
531
532 tu_cs_emit_regs(cs,
533 A6XX_SP_SRGB_CNTL(.dword = srgb_cntl));
534
535 tu_cs_emit_regs(cs,
536 A6XX_RB_RENDER_COMPONENTS(
537 .rt0 = mrt_comp[0],
538 .rt1 = mrt_comp[1],
539 .rt2 = mrt_comp[2],
540 .rt3 = mrt_comp[3],
541 .rt4 = mrt_comp[4],
542 .rt5 = mrt_comp[5],
543 .rt6 = mrt_comp[6],
544 .rt7 = mrt_comp[7]));
545
546 tu_cs_emit_regs(cs,
547 A6XX_SP_FS_RENDER_COMPONENTS(
548 .rt0 = mrt_comp[0],
549 .rt1 = mrt_comp[1],
550 .rt2 = mrt_comp[2],
551 .rt3 = mrt_comp[3],
552 .rt4 = mrt_comp[4],
553 .rt5 = mrt_comp[5],
554 .rt6 = mrt_comp[6],
555 .rt7 = mrt_comp[7]));
556 }
557
558 static void
559 tu6_emit_msaa(struct tu_cmd_buffer *cmd,
560 const struct tu_subpass *subpass,
561 struct tu_cs *cs)
562 {
563 const enum a3xx_msaa_samples samples = tu_msaa_samples(subpass->samples);
564 bool msaa_disable = samples == MSAA_ONE;
565
566 tu_cs_emit_regs(cs,
567 A6XX_SP_TP_RAS_MSAA_CNTL(samples),
568 A6XX_SP_TP_DEST_MSAA_CNTL(.samples = samples,
569 .msaa_disable = msaa_disable));
570
571 tu_cs_emit_regs(cs,
572 A6XX_GRAS_RAS_MSAA_CNTL(samples),
573 A6XX_GRAS_DEST_MSAA_CNTL(.samples = samples,
574 .msaa_disable = msaa_disable));
575
576 tu_cs_emit_regs(cs,
577 A6XX_RB_RAS_MSAA_CNTL(samples),
578 A6XX_RB_DEST_MSAA_CNTL(.samples = samples,
579 .msaa_disable = msaa_disable));
580
581 tu_cs_emit_regs(cs,
582 A6XX_RB_MSAA_CNTL(samples));
583 }
584
585 static void
586 tu6_emit_bin_size(struct tu_cs *cs,
587 uint32_t bin_w, uint32_t bin_h, uint32_t flags)
588 {
589 tu_cs_emit_regs(cs,
590 A6XX_GRAS_BIN_CONTROL(.binw = bin_w,
591 .binh = bin_h,
592 .dword = flags));
593
594 tu_cs_emit_regs(cs,
595 A6XX_RB_BIN_CONTROL(.binw = bin_w,
596 .binh = bin_h,
597 .dword = flags));
598
599 /* no flag for RB_BIN_CONTROL2... */
600 tu_cs_emit_regs(cs,
601 A6XX_RB_BIN_CONTROL2(.binw = bin_w,
602 .binh = bin_h));
603 }
604
605 static void
606 tu6_emit_render_cntl(struct tu_cmd_buffer *cmd,
607 const struct tu_subpass *subpass,
608 struct tu_cs *cs,
609 bool binning)
610 {
611 const struct tu_framebuffer *fb = cmd->state.framebuffer;
612 uint32_t cntl = 0;
613 cntl |= A6XX_RB_RENDER_CNTL_UNK4;
614 if (binning) {
615 cntl |= A6XX_RB_RENDER_CNTL_BINNING;
616 } else {
617 uint32_t mrts_ubwc_enable = 0;
618 for (uint32_t i = 0; i < subpass->color_count; ++i) {
619 uint32_t a = subpass->color_attachments[i].attachment;
620 if (a == VK_ATTACHMENT_UNUSED)
621 continue;
622
623 const struct tu_image_view *iview = fb->attachments[a].attachment;
624 if (iview->image->layout.ubwc_layer_size != 0)
625 mrts_ubwc_enable |= 1 << i;
626 }
627
628 cntl |= A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable);
629
630 const uint32_t a = subpass->depth_stencil_attachment.attachment;
631 if (a != VK_ATTACHMENT_UNUSED) {
632 const struct tu_image_view *iview = fb->attachments[a].attachment;
633 if (iview->image->layout.ubwc_layer_size != 0)
634 cntl |= A6XX_RB_RENDER_CNTL_FLAG_DEPTH;
635 }
636
637 /* In the !binning case, we need to set RB_RENDER_CNTL in the draw_cs
638 * in order to set it correctly for the different subpasses. However,
639 * that means the packets we're emitting also happen during binning. So
640 * we need to guard the write on !BINNING at CP execution time.
641 */
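/* In rough pseudocode, the condition emitted below is:
 *
 *    if (render mode is GMEM-rendering or SYSMEM)   // i.e. not binning
 *       RB_RENDER_CNTL = cntl;                      // the CP_REG_WRITE below
 */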
642 tu_cs_reserve(cs, 3 + 4);
643 tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
644 tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
645 CP_COND_REG_EXEC_0_GMEM | CP_COND_REG_EXEC_0_SYSMEM);
646 tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(4));
647 }
648
649 tu_cs_emit_pkt7(cs, CP_REG_WRITE, 3);
650 tu_cs_emit(cs, CP_REG_WRITE_0_TRACKER(TRACK_RENDER_CNTL));
651 tu_cs_emit(cs, REG_A6XX_RB_RENDER_CNTL);
652 tu_cs_emit(cs, cntl);
653 }
654
655 static void
656 tu6_emit_blit_scissor(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool align)
657 {
658 const VkRect2D *render_area = &cmd->state.tiling_config.render_area;
659 uint32_t x1 = render_area->offset.x;
660 uint32_t y1 = render_area->offset.y;
661 uint32_t x2 = x1 + render_area->extent.width - 1;
662 uint32_t y2 = y1 + render_area->extent.height - 1;
663
664 /* TODO: alignment requirement seems to be less than tile_align_w/h */
665 if (align) {
666 x1 = x1 & ~(cmd->device->physical_device->tile_align_w - 1);
667 y1 = y1 & ~(cmd->device->physical_device->tile_align_h - 1);
668 x2 = ALIGN_POT(x2 + 1, cmd->device->physical_device->tile_align_w) - 1;
669 y2 = ALIGN_POT(y2 + 1, cmd->device->physical_device->tile_align_h) - 1;
670 }
671
672 tu_cs_emit_regs(cs,
673 A6XX_RB_BLIT_SCISSOR_TL(.x = x1, .y = y1),
674 A6XX_RB_BLIT_SCISSOR_BR(.x = x2, .y = y2));
675 }
676
677 static void
678 tu6_emit_blit_info(struct tu_cmd_buffer *cmd,
679 struct tu_cs *cs,
680 const struct tu_image_view *iview,
681 uint32_t gmem_offset,
682 bool resolve)
683 {
684 tu_cs_emit_regs(cs,
685 A6XX_RB_BLIT_INFO(.unk0 = !resolve, .gmem = !resolve));
686
687 const struct tu_native_format format =
688 tu6_format_color(iview->vk_format, iview->image->layout.tile_mode);
689
690 enum a6xx_tile_mode tile_mode =
691 tu6_get_image_tile_mode(iview->image, iview->base_mip);
692 tu_cs_emit_regs(cs,
693 A6XX_RB_BLIT_DST_INFO(
694 .tile_mode = tile_mode,
695 .samples = tu_msaa_samples(iview->image->samples),
696 .color_format = format.fmt,
697 .color_swap = format.swap,
698 .flags = iview->image->layout.ubwc_layer_size != 0),
699 A6XX_RB_BLIT_DST(tu_image_view_base_ref(iview)),
700 A6XX_RB_BLIT_DST_PITCH(tu_image_stride(iview->image, iview->base_mip)),
701 A6XX_RB_BLIT_DST_ARRAY_PITCH(iview->image->layout.layer_size));
702
703 if (iview->image->layout.ubwc_layer_size) {
704 tu_cs_emit_regs(cs,
705 A6XX_RB_BLIT_FLAG_DST(tu_image_view_ubwc_base_ref(iview)),
706 A6XX_RB_BLIT_FLAG_DST_PITCH(tu_image_view_ubwc_pitches(iview)));
707 }
708
709 tu_cs_emit_regs(cs,
710 A6XX_RB_BLIT_BASE_GMEM(gmem_offset));
711 }
712
713 static void
714 tu6_emit_blit(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
715 {
716 tu6_emit_event_write(cmd, cs, BLIT, false);
717 }
718
719 static void
720 tu6_emit_window_scissor(struct tu_cmd_buffer *cmd,
721 struct tu_cs *cs,
722 uint32_t x1,
723 uint32_t y1,
724 uint32_t x2,
725 uint32_t y2)
726 {
727 tu_cs_emit_regs(cs,
728 A6XX_GRAS_SC_WINDOW_SCISSOR_TL(.x = x1, .y = y1),
729 A6XX_GRAS_SC_WINDOW_SCISSOR_BR(.x = x2, .y = y2));
730
731 tu_cs_emit_regs(cs,
732 A6XX_GRAS_RESOLVE_CNTL_1(.x = x1, .y = y1),
733 A6XX_GRAS_RESOLVE_CNTL_2(.x = x2, .y = y2));
734 }
735
736 static void
737 tu6_emit_window_offset(struct tu_cmd_buffer *cmd,
738 struct tu_cs *cs,
739 uint32_t x1,
740 uint32_t y1)
741 {
742 tu_cs_emit_regs(cs,
743 A6XX_RB_WINDOW_OFFSET(.x = x1, .y = y1));
744
745 tu_cs_emit_regs(cs,
746 A6XX_RB_WINDOW_OFFSET2(.x = x1, .y = y1));
747
748 tu_cs_emit_regs(cs,
749 A6XX_SP_WINDOW_OFFSET(.x = x1, .y = y1));
750
751 tu_cs_emit_regs(cs,
752 A6XX_SP_TP_WINDOW_OFFSET(.x = x1, .y = y1));
753 }
754
755 static bool
756 use_hw_binning(struct tu_cmd_buffer *cmd)
757 {
758 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
759
760 if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_NOBIN))
761 return false;
762
763 if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN))
764 return true;
765
766 return (tiling->tile_count.width * tiling->tile_count.height) > 2;
767 }
768
769 static bool
770 use_sysmem_rendering(struct tu_cmd_buffer *cmd)
771 {
772 if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_SYSMEM))
773 return true;
774
775 /* can't fit attachments into gmem */
776 if (!cmd->state.pass->gmem_pixels)
777 return true;
778
779 return cmd->state.tiling_config.force_sysmem;
780 }
781
782 static void
783 tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
784 struct tu_cs *cs,
785 const struct tu_tile *tile)
786 {
787 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
788 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_YIELD));
789
790 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
791 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));
792
793 const uint32_t x1 = tile->begin.x;
794 const uint32_t y1 = tile->begin.y;
795 const uint32_t x2 = tile->end.x - 1;
796 const uint32_t y2 = tile->end.y - 1;
797 tu6_emit_window_scissor(cmd, cs, x1, y1, x2, y2);
798 tu6_emit_window_offset(cmd, cs, x1, y1);
799
800 tu_cs_emit_regs(cs,
801 A6XX_VPC_SO_OVERRIDE(.so_disable = true));
802
803 if (use_hw_binning(cmd)) {
804 tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
805
806 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
807 tu_cs_emit(cs, 0x0);
808
809 tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
810 tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
811 A6XX_CP_REG_TEST_0_BIT(0) |
812 A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
813
814 tu_cs_reserve(cs, 3 + 11);
815 tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
816 tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
817 tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(11));
818
819 /* if (no overflow) */ {
820 tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5, 7);
821 tu_cs_emit(cs, cmd->state.tiling_config.pipe_sizes[tile->pipe] |
822 CP_SET_BIN_DATA5_0_VSC_N(tile->slot));
823 tu_cs_emit_qw(cs, cmd->vsc_data.iova + tile->pipe * cmd->vsc_data_pitch);
824 tu_cs_emit_qw(cs, cmd->vsc_data.iova + (tile->pipe * 4) + (32 * cmd->vsc_data_pitch));
825 tu_cs_emit_qw(cs, cmd->vsc_data2.iova + (tile->pipe * cmd->vsc_data2_pitch));
826
827 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
828 tu_cs_emit(cs, 0x0);
829
830 /* use a NOP packet to skip over the 'else' side: */
831 tu_cs_emit_pkt7(cs, CP_NOP, 2);
832 } /* else */ {
833 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
834 tu_cs_emit(cs, 0x1);
835 }
836
837 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
838 tu_cs_emit(cs, 0x0);
839
840 tu_cs_emit_regs(cs,
841 A6XX_RB_UNKNOWN_8804(0));
842
843 tu_cs_emit_regs(cs,
844 A6XX_SP_TP_UNKNOWN_B304(0));
845
846 tu_cs_emit_regs(cs,
847 A6XX_GRAS_UNKNOWN_80A4(0));
848 } else {
849 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
850 tu_cs_emit(cs, 0x1);
851
852 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
853 tu_cs_emit(cs, 0x0);
854 }
855 }
856
857 static void
858 tu6_emit_load_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs, uint32_t a)
859 {
860 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
861 const struct tu_framebuffer *fb = cmd->state.framebuffer;
862 const struct tu_image_view *iview = fb->attachments[a].attachment;
863 const struct tu_render_pass_attachment *attachment =
864 &cmd->state.pass->attachments[a];
865
866 if (attachment->gmem_offset < 0)
867 return;
868
869 const uint32_t x1 = tiling->render_area.offset.x;
870 const uint32_t y1 = tiling->render_area.offset.y;
871 const uint32_t x2 = x1 + tiling->render_area.extent.width;
872 const uint32_t y2 = y1 + tiling->render_area.extent.height;
873 const uint32_t tile_x2 =
874 tiling->tile0.offset.x + tiling->tile0.extent.width * tiling->tile_count.width;
875 const uint32_t tile_y2 =
876 tiling->tile0.offset.y + tiling->tile0.extent.height * tiling->tile_count.height;
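/* The tile grid is aligned to the tile size and so can extend past the
 * render area; edge tiles then contain pixels outside the render area that
 * must be loaded from sysmem first, or the (aligned) tile store would
 * write garbage over them.
 */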
877 bool need_load =
878 x1 != tiling->tile0.offset.x || x2 != MIN2(fb->width, tile_x2) ||
879 y1 != tiling->tile0.offset.y || y2 != MIN2(fb->height, tile_y2);
880
881 if (need_load)
882 tu_finishme("improve handling of unaligned render area");
883
884 if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_LOAD)
885 need_load = true;
886
887 if (vk_format_has_stencil(iview->vk_format) &&
888 attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_LOAD)
889 need_load = true;
890
891 if (need_load) {
892 tu6_emit_blit_info(cmd, cs, iview, attachment->gmem_offset, false);
893 tu6_emit_blit(cmd, cs);
894 }
895 }
896
897 static void
898 tu6_emit_clear_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
899 uint32_t a,
900 const VkRenderPassBeginInfo *info)
901 {
902 const struct tu_framebuffer *fb = cmd->state.framebuffer;
903 const struct tu_image_view *iview = fb->attachments[a].attachment;
904 const struct tu_render_pass_attachment *attachment =
905 &cmd->state.pass->attachments[a];
906 unsigned clear_mask = 0;
907
908 /* note: this means it isn't used by any subpass and shouldn't be cleared anyway */
909 if (attachment->gmem_offset < 0)
910 return;
911
912 if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
913 clear_mask = 0xf;
914
915 if (vk_format_has_stencil(iview->vk_format)) {
916 clear_mask &= 0x1;
917 if (attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
918 clear_mask |= 0x2;
919 }
920 if (!clear_mask)
921 return;
922
923 tu_clear_gmem_attachment(cmd, cs, a, clear_mask,
924 &info->pClearValues[a]);
925 }
926
927 static void
928 tu6_emit_predicated_blit(struct tu_cmd_buffer *cmd,
929 struct tu_cs *cs,
930 uint32_t a,
931 uint32_t gmem_a,
932 bool resolve)
933 {
934 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);
935
936 tu6_emit_blit_info(cmd, cs,
937 cmd->state.framebuffer->attachments[a].attachment,
938 cmd->state.pass->attachments[gmem_a].gmem_offset, resolve);
939 tu6_emit_blit(cmd, cs);
940
941 tu_cond_exec_end(cs);
942 }
943
944 static void
945 tu6_emit_sysmem_resolve(struct tu_cmd_buffer *cmd,
946 struct tu_cs *cs,
947 uint32_t a,
948 uint32_t gmem_a)
949 {
950 const struct tu_framebuffer *fb = cmd->state.framebuffer;
951 const struct tu_image_view *dst = fb->attachments[a].attachment;
952 const struct tu_image_view *src = fb->attachments[gmem_a].attachment;
953
954 tu_blit(cmd, cs, &(struct tu_blit) {
955 .dst = sysmem_attachment_surf(dst, dst->base_layer,
956 &cmd->state.tiling_config.render_area),
957 .src = sysmem_attachment_surf(src, src->base_layer,
958 &cmd->state.tiling_config.render_area),
959 .layers = fb->layers,
960 });
961 }
962
963
964 /* Emit a MSAA resolve operation, with both gmem and sysmem paths. */
965 static void tu6_emit_resolve(struct tu_cmd_buffer *cmd,
966 struct tu_cs *cs,
967 uint32_t a,
968 uint32_t gmem_a)
969 {
970 if (cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE)
971 return;
972
973 tu6_emit_predicated_blit(cmd, cs, a, gmem_a, true);
974
975 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
976 tu6_emit_sysmem_resolve(cmd, cs, a, gmem_a);
977 tu_cond_exec_end(cs);
978 }
979
980 static void
981 tu6_emit_store_attachment(struct tu_cmd_buffer *cmd,
982 struct tu_cs *cs,
983 uint32_t a,
984 uint32_t gmem_a)
985 {
986 if (cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE)
987 return;
988
989 tu6_emit_blit_info(cmd, cs,
990 cmd->state.framebuffer->attachments[a].attachment,
991 cmd->state.pass->attachments[gmem_a].gmem_offset, true);
992 tu6_emit_blit(cmd, cs);
993 }
994
995 static void
996 tu6_emit_tile_store(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
997 {
998 const struct tu_render_pass *pass = cmd->state.pass;
999 const struct tu_subpass *subpass = &pass->subpasses[pass->subpass_count-1];
1000
1001 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
1002 tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
1003 CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
1004 CP_SET_DRAW_STATE__0_GROUP_ID(0));
1005 tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
1006 tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));
1007
1008 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1009 tu_cs_emit(cs, 0x0);
1010
1011 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
1012 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE));
1013
1014 tu6_emit_blit_scissor(cmd, cs, true);
1015
1016 for (uint32_t a = 0; a < pass->attachment_count; ++a) {
1017 if (pass->attachments[a].gmem_offset >= 0)
1018 tu6_emit_store_attachment(cmd, cs, a, a);
1019 }
1020
1021 if (subpass->resolve_attachments) {
1022 for (unsigned i = 0; i < subpass->color_count; i++) {
1023 uint32_t a = subpass->resolve_attachments[i].attachment;
1024 if (a != VK_ATTACHMENT_UNUSED)
1025 tu6_emit_store_attachment(cmd, cs, a,
1026 subpass->color_attachments[i].attachment);
1027 }
1028 }
1029 }
1030
1031 static void
1032 tu6_emit_restart_index(struct tu_cs *cs, uint32_t restart_index)
1033 {
1034 tu_cs_emit_regs(cs,
1035 A6XX_PC_RESTART_INDEX(restart_index));
1036 }
1037
1038 static void
1039 tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1040 {
1041 tu6_emit_cache_flush(cmd, cs);
1042
1043 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 0xfffff);
1044
1045 tu_cs_emit_write_reg(cs, REG_A6XX_RB_CCU_CNTL, 0x10000000);
1046 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
1047 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
1048 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
1049 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE0F, 0x3f);
1050 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B605, 0x44);
1051 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B600, 0x100000);
1052 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE00, 0x80);
1053 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE01, 0);
1054
1055 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9600, 0);
1056 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8600, 0x880);
1057 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE04, 0);
1058 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE03, 0x00000410);
1059 tu_cs_emit_write_reg(cs, REG_A6XX_SP_IBO_COUNT, 0);
1060 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B182, 0);
1061 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BB11, 0);
1062 tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
1063 tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
1064 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
1065 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A982, 0);
1066 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A9A8, 0);
1067 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AB00, 0x5);
1068 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_GS_SIV_CNTL, 0x0000ffff);
1069
1070 tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
1071 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
1072 tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x1f);
1073
1074 tu_cs_emit_write_reg(cs, REG_A6XX_RB_SRGB_CNTL, 0);
1075
1076 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8110, 0);
1077
1078 tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL0, 0x401);
1079 tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL1, 0);
1080 tu_cs_emit_write_reg(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 0);
1081 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8818, 0);
1082 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8819, 0);
1083 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881A, 0);
1084 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881B, 0);
1085 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881C, 0);
1086 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881D, 0);
1087 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881E, 0);
1088 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_88F0, 0);
1089
1090 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9101, 0xffff00);
1091 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9107, 0);
1092
1093 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9236, 1);
1094 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);
1095
1096 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_SO_OVERRIDE,
1097 A6XX_VPC_SO_OVERRIDE_SO_DISABLE);
1098
1099 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9801, 0);
1100 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9806, 0);
1101 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9980, 0);
1102 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9990, 0);
1103
1104 tu_cs_emit_write_reg(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 0);
1105 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9B07, 0);
1106
1107 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A81B, 0);
1108
1109 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B183, 0);
1110
1111 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8099, 0);
1112 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_809B, 0);
1113 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A0, 2);
1114 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80AF, 0);
1115 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
1116 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9211, 0);
1117 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9602, 0);
1118 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9981, 0x3);
1119 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9E72, 0);
1120 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9108, 0x3);
1121 tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B304, 0);
1122 tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B309, 0x000000a2);
1123 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8804, 0);
1124 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A4, 0);
1125 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A5, 0);
1126 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A6, 0);
1127 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8805, 0);
1128 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8806, 0);
1129 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8878, 0);
1130 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8879, 0);
1131 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);
1132
1133 tu_cs_emit_write_reg(cs, REG_A6XX_VFD_MODE_CNTL, 0x00000000);
1134
1135 tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A008, 0);
1136
1137 tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x0000001f);
1138
1139 /* we don't use this yet.. probably best to disable.. */
1140 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
1141 tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
1142 CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
1143 CP_SET_DRAW_STATE__0_GROUP_ID(0));
1144 tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
1145 tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));
1146
1147 tu_cs_emit_regs(cs,
1148 A6XX_VPC_SO_BUFFER_BASE(0),
1149 A6XX_VPC_SO_BUFFER_SIZE(0));
1150
1151 tu_cs_emit_regs(cs,
1152 A6XX_VPC_SO_FLUSH_BASE(0));
1153
1154 tu_cs_emit_regs(cs,
1155 A6XX_VPC_SO_BUF_CNTL(0));
1156
1157 tu_cs_emit_regs(cs,
1158 A6XX_VPC_SO_BUFFER_OFFSET(0, 0));
1159
1160 tu_cs_emit_regs(cs,
1161 A6XX_VPC_SO_BUFFER_BASE(1, 0),
1162 A6XX_VPC_SO_BUFFER_SIZE(1, 0));
1163
1164 tu_cs_emit_regs(cs,
1165 A6XX_VPC_SO_BUFFER_OFFSET(1, 0),
1166 A6XX_VPC_SO_FLUSH_BASE(1, 0),
1167 A6XX_VPC_SO_BUFFER_BASE(2, 0),
1168 A6XX_VPC_SO_BUFFER_SIZE(2, 0));
1169
1170 tu_cs_emit_regs(cs,
1171 A6XX_VPC_SO_BUFFER_OFFSET(2, 0),
1172 A6XX_VPC_SO_FLUSH_BASE(2, 0),
1173 A6XX_VPC_SO_BUFFER_BASE(3, 0),
1174 A6XX_VPC_SO_BUFFER_SIZE(3, 0));
1175
1176 tu_cs_emit_regs(cs,
1177 A6XX_VPC_SO_BUFFER_OFFSET(3, 0),
1178 A6XX_VPC_SO_FLUSH_BASE(3, 0));
1179
1180 tu_cs_emit_regs(cs,
1181 A6XX_SP_HS_CTRL_REG0(0));
1182
1183 tu_cs_emit_regs(cs,
1184 A6XX_SP_GS_CTRL_REG0(0));
1185
1186 tu_cs_emit_regs(cs,
1187 A6XX_GRAS_LRZ_CNTL(0));
1188
1189 tu_cs_emit_regs(cs,
1190 A6XX_RB_LRZ_CNTL(0));
1191
1192 tu_cs_sanity_check(cs);
1193 }
1194
1195 static void
1196 tu6_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1197 {
1198 unsigned seqno;
1199
1200 seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_AND_INV_EVENT, true);
1201
1202 tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
1203 tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
1204 CP_WAIT_REG_MEM_0_POLL_MEMORY);
1205 tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
1206 tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(seqno));
1207 tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
1208 tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));
1209
1210 seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);
1211
1212 tu_cs_emit_pkt7(cs, CP_WAIT_MEM_GTE, 4);
1213 tu_cs_emit(cs, CP_WAIT_MEM_GTE_0_RESERVED(0));
1214 tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
1215 tu_cs_emit(cs, CP_WAIT_MEM_GTE_3_REF(seqno));
1216 }
1217
1218 static void
1219 update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1220 {
1221 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
1222
1223 tu_cs_emit_regs(cs,
1224 A6XX_VSC_BIN_SIZE(.width = tiling->tile0.extent.width,
1225 .height = tiling->tile0.extent.height),
1226 A6XX_VSC_SIZE_ADDRESS(.bo = &cmd->vsc_data,
1227 .bo_offset = 32 * cmd->vsc_data_pitch));
1228
1229 tu_cs_emit_regs(cs,
1230 A6XX_VSC_BIN_COUNT(.nx = tiling->tile_count.width,
1231 .ny = tiling->tile_count.height));
1232
1233 tu_cs_emit_pkt4(cs, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
1234 for (unsigned i = 0; i < 32; i++)
1235 tu_cs_emit(cs, tiling->pipe_config[i]);
1236
1237 tu_cs_emit_regs(cs,
1238 A6XX_VSC_PIPE_DATA2_ADDRESS(.bo = &cmd->vsc_data2),
1239 A6XX_VSC_PIPE_DATA2_PITCH(cmd->vsc_data2_pitch),
1240 A6XX_VSC_PIPE_DATA2_ARRAY_PITCH(cmd->vsc_data2.size));
1241
1242 tu_cs_emit_regs(cs,
1243 A6XX_VSC_PIPE_DATA_ADDRESS(.bo = &cmd->vsc_data),
1244 A6XX_VSC_PIPE_DATA_PITCH(cmd->vsc_data_pitch),
1245 A6XX_VSC_PIPE_DATA_ARRAY_PITCH(cmd->vsc_data.size));
1246 }
1247
1248 static void
1249 emit_vsc_overflow_test(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1250 {
1251 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
1252 const uint32_t used_pipe_count =
1253 tiling->pipe_count.width * tiling->pipe_count.height;
1254
1255 /* Clear vsc_scratch: */
1256 tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
1257 tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
1258 tu_cs_emit(cs, 0x0);
1259
1260 /* Check for overflow, write vsc_scratch if detected: */
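/* The value written below tags which stream overflowed in its low bits
 * (0x1 = VSC_DATA, 0x3 = VSC_DATA2, assuming the pitches are multiples of
 * 4) and carries the pitch in use in the upper bits, so the CPU side can
 * tell the two apart and resize the right buffer.
 */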
1261 for (int i = 0; i < used_pipe_count; i++) {
1262 tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
1263 tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
1264 CP_COND_WRITE5_0_WRITE_MEMORY);
1265 tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE_REG(i)));
1266 tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
1267 tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data_pitch));
1268 tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
1269 tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
1270 tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(1 + cmd->vsc_data_pitch));
1271
1272 tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
1273 tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
1274 CP_COND_WRITE5_0_WRITE_MEMORY);
1275 tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE2_REG(i)));
1276 tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
1277 tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data2_pitch));
1278 tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
1279 tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
1280 tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(3 + cmd->vsc_data2_pitch));
1281 }
1282
1283 tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
1284
1285 tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
1286
1287 tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
1288 tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(OVERFLOW_FLAG_REG) |
1289 CP_MEM_TO_REG_0_CNT(1 - 1));
1290 tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
1291
1292 /*
1293 * This is a bit awkward, we really want a way to invert the
1294 * CP_REG_TEST/CP_COND_REG_EXEC logic, so that we can conditionally
1295 * execute cmds to use hwbinning when a bit is *not* set. This
1296 * dance is to invert OVERFLOW_FLAG_REG
1297 *
1298 * A CP_NOP packet is used to skip executing the 'else' clause
1299 * if (b0 set)..
1300 */
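/* Roughly, the packets below amount to:
 *
 *    OVERFLOW_FLAG_REG = *vsc_scratch;         // CP_MEM_TO_REG
 *    if (OVERFLOW_FLAG_REG & 1) {              // CP_REG_TEST + COND_REG_EXEC
 *       *vsc_overflow = OVERFLOW_FLAG_REG;     // CP_REG_TO_MEM
 *       OVERFLOW_FLAG_REG = 0;
 *    } else {                                  // skipped via CP_NOP when 'if' runs
 *       OVERFLOW_FLAG_REG = 1;
 *    }
 *
 * leaving bit 0 of OVERFLOW_FLAG_REG *clear* on overflow, so the later
 * CP_REG_TEST/CP_COND_REG_EXEC pairs run the hw-binning path only when
 * there was no overflow.
 */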
1301
1302 /* b0 will be set if VSC_DATA or VSC_DATA2 overflow: */
1303 tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
1304 tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
1305 A6XX_CP_REG_TEST_0_BIT(0) |
1306 A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
1307
1308 tu_cs_reserve(cs, 3 + 7);
1309 tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
1310 tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
1311 tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(7));
1312
1313 /* if (b0 set) */ {
1314 /*
1315 * On overflow, mirror the value to control->vsc_overflow
1316 * which the CPU checks to detect overflow (see
1317 * check_vsc_overflow())
1318 */
1319 tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
1320 tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(OVERFLOW_FLAG_REG) |
1321 CP_REG_TO_MEM_0_CNT(0));
1322 tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_OVERFLOW);
1323
1324 tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
1325 tu_cs_emit(cs, 0x0);
1326
1327 tu_cs_emit_pkt7(cs, CP_NOP, 2); /* skip 'else' when 'if' is taken */
1328 } /* else */ {
1329 tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
1330 tu_cs_emit(cs, 0x1);
1331 }
1332 }
1333
1334 static void
1335 tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1336 {
1337 struct tu_physical_device *phys_dev = cmd->device->physical_device;
1338 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
1339
1340 uint32_t x1 = tiling->tile0.offset.x;
1341 uint32_t y1 = tiling->tile0.offset.y;
1342 uint32_t x2 = tiling->render_area.offset.x + tiling->render_area.extent.width - 1;
1343 uint32_t y2 = tiling->render_area.offset.y + tiling->render_area.extent.height - 1;
1344
1345 tu6_emit_window_scissor(cmd, cs, x1, y1, x2, y2);
1346
1347 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
1348 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
1349
1350 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
1351 tu_cs_emit(cs, 0x1);
1352
1353 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
1354 tu_cs_emit(cs, 0x1);
1355
1356 tu_cs_emit_wfi(cs);
1357
1358 tu_cs_emit_regs(cs,
1359 A6XX_VFD_MODE_CNTL(.binning_pass = true));
1360
1361 update_vsc_pipe(cmd, cs);
1362
1363 tu_cs_emit_regs(cs,
1364 A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));
1365
1366 tu_cs_emit_regs(cs,
1367 A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));
1368
1369 tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
1370 tu_cs_emit(cs, UNK_2C);
1371
1372 tu_cs_emit_regs(cs,
1373 A6XX_RB_WINDOW_OFFSET(.x = 0, .y = 0));
1374
1375 tu_cs_emit_regs(cs,
1376 A6XX_SP_TP_WINDOW_OFFSET(.x = 0, .y = 0));
1377
1378 /* emit IB to binning drawcmds: */
1379 tu_cs_emit_call(cs, &cmd->draw_cs);
1380
1381 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
1382 tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
1383 CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
1384 CP_SET_DRAW_STATE__0_GROUP_ID(0));
1385 tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
1386 tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));
1387
1388 tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
1389 tu_cs_emit(cs, UNK_2D);
1390
1391 tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
1392 tu6_cache_flush(cmd, cs);
1393
1394 tu_cs_emit_wfi(cs);
1395
1396 tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
1397
1398 emit_vsc_overflow_test(cmd, cs);
1399
1400 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
1401 tu_cs_emit(cs, 0x0);
1402
1403 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
1404 tu_cs_emit(cs, 0x0);
1405
1406 tu_cs_emit_wfi(cs);
1407
1408 tu_cs_emit_regs(cs,
1409 A6XX_RB_CCU_CNTL(.unknown = phys_dev->magic.RB_CCU_CNTL_gmem));
1410
1411 cmd->wait_for_idle = false;
1412 }
1413
1414 static void
1415 tu_emit_sysmem_clear_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
1416 uint32_t a,
1417 const VkRenderPassBeginInfo *info)
1418 {
1419 const struct tu_framebuffer *fb = cmd->state.framebuffer;
1420 const struct tu_image_view *iview = fb->attachments[a].attachment;
1421 const struct tu_render_pass_attachment *attachment =
1422 &cmd->state.pass->attachments[a];
1423 unsigned clear_mask = 0;
1424
1425 /* note: this means it isn't used by any subpass and shouldn't be cleared anyway */
1426 if (attachment->gmem_offset < 0)
1427 return;
1428
1429 if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
1430 clear_mask = 0xf;
1431 }
1432
1433 if (vk_format_has_stencil(iview->vk_format)) {
1434 clear_mask &= 0x1;
1435 if (attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
1436 clear_mask |= 0x2;
1437 if (clear_mask != 0x3)
1438 tu_finishme("depth/stencil only load op");
1439 }
1440
1441 if (!clear_mask)
1442 return;
1443
1444 tu_clear_sysmem_attachment(cmd, cs, a,
1445 &info->pClearValues[a], &(struct VkClearRect) {
1446 .rect = info->renderArea,
1447 .baseArrayLayer = iview->base_layer,
1448 .layerCount = iview->layer_count,
1449 });
1450 }
1451
1452 static void
1453 tu_emit_load_clear(struct tu_cmd_buffer *cmd,
1454 const VkRenderPassBeginInfo *info)
1455 {
1456 struct tu_cs *cs = &cmd->draw_cs;
1457
1458 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);
1459
1460 tu6_emit_blit_scissor(cmd, cs, true);
1461
1462 for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
1463 tu6_emit_load_attachment(cmd, cs, i);
1464
1465 tu6_emit_blit_scissor(cmd, cs, false);
1466
1467 for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
1468 tu6_emit_clear_attachment(cmd, cs, i, info);
1469
1470 tu_cond_exec_end(cs);
1471
1472 /* invalidate because reading input attachments will cache GMEM and
1473 * the cache isn't updated when GMEM is written
1474 * TODO: is there a no-cache bit for textures?
1475 */
1476 if (cmd->state.subpass->input_count)
1477 tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
1478
1479 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
1480
1481 for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
1482 tu_emit_sysmem_clear_attachment(cmd, cs, i, info);
1483
1484 tu_cond_exec_end(cs);
1485 }
1486
1487 static void
1488 tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
1489 const struct VkRect2D *renderArea)
1490 {
1491 const struct tu_framebuffer *fb = cmd->state.framebuffer;
1492
1493 assert(fb->width > 0 && fb->height > 0);
1494 tu6_emit_window_scissor(cmd, cs, 0, 0, fb->width - 1, fb->height - 1);
1495 tu6_emit_window_offset(cmd, cs, 0, 0);
1496
1497 tu6_emit_bin_size(cs, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */
1498
1499 tu6_emit_lrz_flush(cmd, cs);
1500
1501 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
1502 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
1503
1504 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1505 tu_cs_emit(cs, 0x0);
1506
1507 tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
1508 tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);
1509 tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
1510
1511 tu6_emit_wfi(cmd, cs);
1512 tu_cs_emit_regs(cs,
1513 A6XX_RB_CCU_CNTL(0x10000000));
1514
1515 /* enable stream-out, with sysmem there is only one pass: */
1516 tu_cs_emit_regs(cs,
1517 A6XX_VPC_SO_OVERRIDE(.so_disable = false));
1518
1519 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
1520 tu_cs_emit(cs, 0x1);
1521
1522 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
1523 tu_cs_emit(cs, 0x0);
1524
1525 tu_cs_sanity_check(cs);
1526 }
1527
1528 static void
1529 tu6_sysmem_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1530 {
1531 /* Do any resolves of the last subpass. These are handled in the
1532 * tile_store_ib in the gmem path.
1533 */
1534
1535 const struct tu_subpass *subpass = cmd->state.subpass;
1536 if (subpass->resolve_attachments) {
1537 for (unsigned i = 0; i < subpass->color_count; i++) {
1538 uint32_t a = subpass->resolve_attachments[i].attachment;
1539 if (a != VK_ATTACHMENT_UNUSED)
1540 tu6_emit_sysmem_resolve(cmd, cs, a,
1541 subpass->color_attachments[i].attachment);
1542 }
1543 }
1544
1545 tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);
1546
1547 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1548 tu_cs_emit(cs, 0x0);
1549
1550 tu6_emit_lrz_flush(cmd, cs);
1551
1552 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
1553 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
1554
1555 tu_cs_sanity_check(cs);
1556 }
1557
1558
1559 static void
1560 tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1561 {
1562 struct tu_physical_device *phys_dev = cmd->device->physical_device;
1563
1564 tu6_emit_lrz_flush(cmd, cs);
1565
1566 /* lrz clear? */
1567
1568 tu6_emit_cache_flush(cmd, cs);
1569
1570 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1571 tu_cs_emit(cs, 0x0);
1572
1573 /* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
1574 tu6_emit_wfi(cmd, cs);
1575 tu_cs_emit_regs(cs,
1576 A6XX_RB_CCU_CNTL(phys_dev->magic.RB_CCU_CNTL_gmem));
1577
1578 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
1579 if (use_hw_binning(cmd)) {
1580 tu6_emit_bin_size(cs,
1581 tiling->tile0.extent.width,
1582 tiling->tile0.extent.height,
1583 A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
1584
1585 tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, true);
1586
1587 tu6_emit_binning_pass(cmd, cs);
1588
1589 tu6_emit_bin_size(cs,
1590 tiling->tile0.extent.width,
1591 tiling->tile0.extent.height,
1592 A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);
1593
1594 tu_cs_emit_regs(cs,
1595 A6XX_VFD_MODE_CNTL(0));
1596
1597 tu_cs_emit_regs(cs, A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));
1598
1599 tu_cs_emit_regs(cs, A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));
1600
1601 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1602 tu_cs_emit(cs, 0x1);
1603 } else {
1604 tu6_emit_bin_size(cs,
1605 tiling->tile0.extent.width,
1606 tiling->tile0.extent.height,
1607 0x6000000);
1608 }
1609
1610 tu_cs_sanity_check(cs);
1611 }
1612
1613 static void
1614 tu6_render_tile(struct tu_cmd_buffer *cmd,
1615 struct tu_cs *cs,
1616 const struct tu_tile *tile)
1617 {
1618 tu6_emit_tile_select(cmd, cs, tile);
1619
1620 tu_cs_emit_call(cs, &cmd->draw_cs);
1621 cmd->wait_for_idle = true;
1622
1623 if (use_hw_binning(cmd)) {
1624 tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
1625 tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
1626 A6XX_CP_REG_TEST_0_BIT(0) |
1627 A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
1628
1629 tu_cs_reserve(cs, 3 + 2);
1630 tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
1631 tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
1632 tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(2));
1633
1634 /* if (no overflow) */ {
1635 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
1636 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
1637 }
1638 }
1639
1640 tu_cs_emit_ib(cs, &cmd->state.tile_store_ib);
1641
1642 tu_cs_sanity_check(cs);
1643 }
1644
1645 static void
1646 tu6_tile_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1647 {
1648 tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);
1649
1650 tu_cs_emit_regs(cs,
1651 A6XX_GRAS_LRZ_CNTL(0));
1652
1653 tu6_emit_lrz_flush(cmd, cs);
1654
1655 tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);
1656
1657 tu_cs_sanity_check(cs);
1658 }
1659
1660 static void
1661 tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
1662 {
1663 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
1664
1665 tu6_tile_render_begin(cmd, &cmd->cs);
1666
1667 for (uint32_t y = 0; y < tiling->tile_count.height; y++) {
1668 for (uint32_t x = 0; x < tiling->tile_count.width; x++) {
1669 struct tu_tile tile;
1670 tu_tiling_config_get_tile(tiling, cmd->device, x, y, &tile);
1671 tu6_render_tile(cmd, &cmd->cs, &tile);
1672 }
1673 }
1674
1675 tu6_tile_render_end(cmd, &cmd->cs);
1676 }
1677
1678 static void
1679 tu_cmd_render_sysmem(struct tu_cmd_buffer *cmd)
1680 {
1681 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
1682
1683 tu6_sysmem_render_begin(cmd, &cmd->cs, &tiling->render_area);
1684
1685 tu_cs_emit_call(&cmd->cs, &cmd->draw_cs);
1686 cmd->wait_for_idle = true;
1687
1688 tu6_sysmem_render_end(cmd, &cmd->cs);
1689 }
1690
1691 static void
1692 tu_cmd_prepare_tile_store_ib(struct tu_cmd_buffer *cmd)
1693 {
1694 const uint32_t tile_store_space = 32 + 23 * cmd->state.pass->attachment_count;
1695 struct tu_cs sub_cs;
1696
1697 VkResult result =
1698 tu_cs_begin_sub_stream(&cmd->sub_cs, tile_store_space, &sub_cs);
1699 if (result != VK_SUCCESS) {
1700 cmd->record_result = result;
1701 return;
1702 }
1703
1704 /* emit to tile-store sub_cs */
1705 tu6_emit_tile_store(cmd, &sub_cs);
1706
1707 cmd->state.tile_store_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
1708 }
1709
1710 static void
1711 tu_cmd_update_tiling_config(struct tu_cmd_buffer *cmd,
1712 const VkRect2D *render_area)
1713 {
1714 const struct tu_device *dev = cmd->device;
1715 struct tu_tiling_config *tiling = &cmd->state.tiling_config;
1716
1717 tiling->render_area = *render_area;
1718 tiling->force_sysmem = force_sysmem(cmd, render_area);
1719
1720 tu_tiling_config_update_tile_layout(tiling, dev, cmd->state.pass->gmem_pixels);
1721 tu_tiling_config_update_pipe_layout(tiling, dev);
1722 tu_tiling_config_update_pipes(tiling, dev);
1723 }
1724
1725 const struct tu_dynamic_state default_dynamic_state = {
1726 .viewport =
1727 {
1728 .count = 0,
1729 },
1730 .scissor =
1731 {
1732 .count = 0,
1733 },
1734 .line_width = 1.0f,
1735 .depth_bias =
1736 {
1737 .bias = 0.0f,
1738 .clamp = 0.0f,
1739 .slope = 0.0f,
1740 },
1741 .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
1742 .depth_bounds =
1743 {
1744 .min = 0.0f,
1745 .max = 1.0f,
1746 },
1747 .stencil_compare_mask =
1748 {
1749 .front = ~0u,
1750 .back = ~0u,
1751 },
1752 .stencil_write_mask =
1753 {
1754 .front = ~0u,
1755 .back = ~0u,
1756 },
1757 .stencil_reference =
1758 {
1759 .front = 0u,
1760 .back = 0u,
1761 },
1762 };
1763
1764 static void UNUSED /* FINISHME */
1765 tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer,
1766 const struct tu_dynamic_state *src)
1767 {
1768 struct tu_dynamic_state *dest = &cmd_buffer->state.dynamic;
1769 uint32_t copy_mask = src->mask;
1770 uint32_t dest_mask = 0;
1771
1772 tu_use_args(cmd_buffer); /* FINISHME */
1773
1774 /* Make sure to copy the number of viewports/scissors because they can
1775 * only be specified at pipeline creation time.
1776 */
1777 dest->viewport.count = src->viewport.count;
1778 dest->scissor.count = src->scissor.count;
1779 dest->discard_rectangle.count = src->discard_rectangle.count;
1780
1781 if (copy_mask & TU_DYNAMIC_VIEWPORT) {
1782 if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
1783 src->viewport.count * sizeof(VkViewport))) {
1784 typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
1785 src->viewport.count);
1786 dest_mask |= TU_DYNAMIC_VIEWPORT;
1787 }
1788 }
1789
1790 if (copy_mask & TU_DYNAMIC_SCISSOR) {
1791 if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
1792 src->scissor.count * sizeof(VkRect2D))) {
1793 typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
1794 src->scissor.count);
1795 dest_mask |= TU_DYNAMIC_SCISSOR;
1796 }
1797 }
1798
1799 if (copy_mask & TU_DYNAMIC_LINE_WIDTH) {
1800 if (dest->line_width != src->line_width) {
1801 dest->line_width = src->line_width;
1802 dest_mask |= TU_DYNAMIC_LINE_WIDTH;
1803 }
1804 }
1805
1806 if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) {
1807 if (memcmp(&dest->depth_bias, &src->depth_bias,
1808 sizeof(src->depth_bias))) {
1809 dest->depth_bias = src->depth_bias;
1810 dest_mask |= TU_DYNAMIC_DEPTH_BIAS;
1811 }
1812 }
1813
1814 if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) {
1815 if (memcmp(&dest->blend_constants, &src->blend_constants,
1816 sizeof(src->blend_constants))) {
1817 typed_memcpy(dest->blend_constants, src->blend_constants, 4);
1818 dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS;
1819 }
1820 }
1821
1822 if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) {
1823 if (memcmp(&dest->depth_bounds, &src->depth_bounds,
1824 sizeof(src->depth_bounds))) {
1825 dest->depth_bounds = src->depth_bounds;
1826 dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS;
1827 }
1828 }
1829
1830 if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) {
1831 if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
1832 sizeof(src->stencil_compare_mask))) {
1833 dest->stencil_compare_mask = src->stencil_compare_mask;
1834 dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK;
1835 }
1836 }
1837
1838 if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) {
1839 if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
1840 sizeof(src->stencil_write_mask))) {
1841 dest->stencil_write_mask = src->stencil_write_mask;
1842 dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK;
1843 }
1844 }
1845
1846 if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) {
1847 if (memcmp(&dest->stencil_reference, &src->stencil_reference,
1848 sizeof(src->stencil_reference))) {
1849 dest->stencil_reference = src->stencil_reference;
1850 dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE;
1851 }
1852 }
1853
1854 if (copy_mask & TU_DYNAMIC_DISCARD_RECTANGLE) {
1855 if (memcmp(&dest->discard_rectangle.rectangles,
1856 &src->discard_rectangle.rectangles,
1857 src->discard_rectangle.count * sizeof(VkRect2D))) {
1858 typed_memcpy(dest->discard_rectangle.rectangles,
1859 src->discard_rectangle.rectangles,
1860 src->discard_rectangle.count);
1861 dest_mask |= TU_DYNAMIC_DISCARD_RECTANGLE;
1862 }
1863 }
1864 }
1865
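/* A command buffer owns four command streams: cs (the top-level stream),
 * draw_cs and draw_epilogue_cs (render-pass contents, replayed per tile or
 * once for sysmem rendering), and sub_cs (a sub-allocator used for draw
 * states, tile stores and other indirect buffers).
 */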
1866 static VkResult
1867 tu_create_cmd_buffer(struct tu_device *device,
1868 struct tu_cmd_pool *pool,
1869 VkCommandBufferLevel level,
1870 VkCommandBuffer *pCommandBuffer)
1871 {
1872 struct tu_cmd_buffer *cmd_buffer;
1873 cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
1874 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1875 if (cmd_buffer == NULL)
1876 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1877
1878 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1879 cmd_buffer->device = device;
1880 cmd_buffer->pool = pool;
1881 cmd_buffer->level = level;
1882
1883 if (pool) {
1884 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
1885 cmd_buffer->queue_family_index = pool->queue_family_index;
1886
1887 } else {
1888 /* Init the pool_link so we can safely call list_del when we destroy
1889 * the command buffer
1890 */
1891 list_inithead(&cmd_buffer->pool_link);
1892 cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
1893 }
1894
1895 tu_bo_list_init(&cmd_buffer->bo_list);
1896 tu_cs_init(&cmd_buffer->cs, device, TU_CS_MODE_GROW, 4096);
1897 tu_cs_init(&cmd_buffer->draw_cs, device, TU_CS_MODE_GROW, 4096);
1898 tu_cs_init(&cmd_buffer->draw_epilogue_cs, device, TU_CS_MODE_GROW, 4096);
1899 tu_cs_init(&cmd_buffer->sub_cs, device, TU_CS_MODE_SUB_STREAM, 2048);
1900
1901 *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);
1902
1903 list_inithead(&cmd_buffer->upload.list);
1904
1905 VkResult result = tu_bo_init_new(device, &cmd_buffer->scratch_bo, 0x1000);
1906 if (result != VK_SUCCESS)
1907 goto fail_scratch_bo;
1908
1909 /* TODO: resize on overflow */
1910 cmd_buffer->vsc_data_pitch = device->vsc_data_pitch;
1911 cmd_buffer->vsc_data2_pitch = device->vsc_data2_pitch;
1912 cmd_buffer->vsc_data = device->vsc_data;
1913 cmd_buffer->vsc_data2 = device->vsc_data2;
1914
1915 return VK_SUCCESS;
1916
1917 fail_scratch_bo:
1918 list_del(&cmd_buffer->pool_link);
1919 return result;
1920 }
1921
1922 static void
1923 tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
1924 {
1925 tu_bo_finish(cmd_buffer->device, &cmd_buffer->scratch_bo);
1926
1927 list_del(&cmd_buffer->pool_link);
1928
1929 for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
1930 free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
1931
1932 tu_cs_finish(&cmd_buffer->cs);
1933 tu_cs_finish(&cmd_buffer->draw_cs);
1934 tu_cs_finish(&cmd_buffer->draw_epilogue_cs);
1935 tu_cs_finish(&cmd_buffer->sub_cs);
1936
1937 tu_bo_list_destroy(&cmd_buffer->bo_list);
1938 vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
1939 }
1940
1941 static VkResult
1942 tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
1943 {
1944 cmd_buffer->wait_for_idle = true;
1945
1946 cmd_buffer->record_result = VK_SUCCESS;
1947
1948 tu_bo_list_reset(&cmd_buffer->bo_list);
1949 tu_cs_reset(&cmd_buffer->cs);
1950 tu_cs_reset(&cmd_buffer->draw_cs);
1951 tu_cs_reset(&cmd_buffer->draw_epilogue_cs);
1952 tu_cs_reset(&cmd_buffer->sub_cs);
1953
1954 for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
1955 cmd_buffer->descriptors[i].valid = 0;
1956 cmd_buffer->descriptors[i].push_dirty = false;
1957 }
1958
1959 cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;
1960
1961 return cmd_buffer->record_result;
1962 }
1963
1964 VkResult
1965 tu_AllocateCommandBuffers(VkDevice _device,
1966 const VkCommandBufferAllocateInfo *pAllocateInfo,
1967 VkCommandBuffer *pCommandBuffers)
1968 {
1969 TU_FROM_HANDLE(tu_device, device, _device);
1970 TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);
1971
1972 VkResult result = VK_SUCCESS;
1973 uint32_t i;
1974
1975 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
1976
1977 if (!list_is_empty(&pool->free_cmd_buffers)) {
1978 struct tu_cmd_buffer *cmd_buffer = list_first_entry(
1979 &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);
1980
1981 list_del(&cmd_buffer->pool_link);
1982 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
1983
1984 result = tu_reset_cmd_buffer(cmd_buffer);
1985 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1986 cmd_buffer->level = pAllocateInfo->level;
1987
1988 pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
1989 } else {
1990 result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
1991 &pCommandBuffers[i]);
1992 }
1993 if (result != VK_SUCCESS)
1994 break;
1995 }
1996
1997 if (result != VK_SUCCESS) {
1998 tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
1999 pCommandBuffers);
2000
2001 /* From the Vulkan 1.0.66 spec:
2002 *
2003 * "vkAllocateCommandBuffers can be used to create multiple
2004 * command buffers. If the creation of any of those command
2005 * buffers fails, the implementation must destroy all
2006 * successfully created command buffer objects from this
2007 * command, set all entries of the pCommandBuffers array to
2008 * NULL and return the error."
2009 */
2010 memset(pCommandBuffers, 0,
2011 sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
2012 }
2013
2014 return result;
2015 }
2016
2017 void
2018 tu_FreeCommandBuffers(VkDevice device,
2019 VkCommandPool commandPool,
2020 uint32_t commandBufferCount,
2021 const VkCommandBuffer *pCommandBuffers)
2022 {
2023 for (uint32_t i = 0; i < commandBufferCount; i++) {
2024 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
2025
2026 if (cmd_buffer) {
2027 if (cmd_buffer->pool) {
2028 list_del(&cmd_buffer->pool_link);
2029 list_addtail(&cmd_buffer->pool_link,
2030 &cmd_buffer->pool->free_cmd_buffers);
2031 } else
2032 tu_cmd_buffer_destroy(cmd_buffer);
2033 }
2034 }
2035 }
2036
2037 VkResult
2038 tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
2039 VkCommandBufferResetFlags flags)
2040 {
2041 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
2042 return tu_reset_cmd_buffer(cmd_buffer);
2043 }
2044
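/* Secondary command buffers recorded with RENDER_PASS_CONTINUE inherit the
 * render pass and subpass from pInheritanceInfo, so that state emitted into
 * their draw_cs is consistent with the primary's render pass.
 */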
2045 VkResult
2046 tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
2047 const VkCommandBufferBeginInfo *pBeginInfo)
2048 {
2049 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
2050 VkResult result = VK_SUCCESS;
2051
2052 if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
2053 /* If the command buffer has already been reset with
2054 * vkResetCommandBuffer, there is no need to do it again.
2055 */
2056 result = tu_reset_cmd_buffer(cmd_buffer);
2057 if (result != VK_SUCCESS)
2058 return result;
2059 }
2060
2061 memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
2062 cmd_buffer->usage_flags = pBeginInfo->flags;
2063
2064 tu_cs_begin(&cmd_buffer->cs);
2065 tu_cs_begin(&cmd_buffer->draw_cs);
2066 tu_cs_begin(&cmd_buffer->draw_epilogue_cs);
2067
2068 cmd_buffer->scratch_seqno = 0;
2069
2070 /* set up the initial hardware state in the command buffer */
2071 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
2072 switch (cmd_buffer->queue_family_index) {
2073 case TU_QUEUE_GENERAL:
2074 tu6_init_hw(cmd_buffer, &cmd_buffer->cs);
2075 break;
2076 default:
2077 break;
2078 }
2079 } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
2080 (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
2081 assert(pBeginInfo->pInheritanceInfo);
2082 cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
2083 cmd_buffer->state.subpass = &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
2084 }
2085
2086 cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;
2087
2088 return VK_SUCCESS;
2089 }
2090
2091 void
2092 tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
2093 uint32_t firstBinding,
2094 uint32_t bindingCount,
2095 const VkBuffer *pBuffers,
2096 const VkDeviceSize *pOffsets)
2097 {
2098 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2099
2100 assert(firstBinding + bindingCount <= MAX_VBS);
2101
2102 for (uint32_t i = 0; i < bindingCount; i++) {
2103 cmd->state.vb.buffers[firstBinding + i] =
2104 tu_buffer_from_handle(pBuffers[i]);
2105 cmd->state.vb.offsets[firstBinding + i] = pOffsets[i];
2106 }
2107
2108 /* VB states depend on VkPipelineVertexInputStateCreateInfo */
2109 cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
2110 }
2111
2112 void
2113 tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
2114 VkBuffer buffer,
2115 VkDeviceSize offset,
2116 VkIndexType indexType)
2117 {
2118 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2119 TU_FROM_HANDLE(tu_buffer, buf, buffer);
2120
2121 /* initialize/update the restart index */
2122 if (!cmd->state.index_buffer || cmd->state.index_type != indexType) {
2123 struct tu_cs *draw_cs = &cmd->draw_cs;
2124
2125 tu6_emit_restart_index(
2126 draw_cs, indexType == VK_INDEX_TYPE_UINT32 ? 0xffffffff : 0xffff);
2127
2128 tu_cs_sanity_check(draw_cs);
2129 }
2130
2131 /* track the BO */
2132 if (cmd->state.index_buffer != buf)
2133 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
2134
2135 cmd->state.index_buffer = buf;
2136 cmd->state.index_offset = offset;
2137 cmd->state.index_type = indexType;
2138 }
2139
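/* Dynamic uniform/storage buffer descriptors are flattened into
 * dynamic_buffers[]: for each bound set, the descriptor's base VA plus the
 * matching pDynamicOffsets entry is stored starting at that set's
 * dynamic_offset_start within the pipeline layout.
 */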
2140 void
2141 tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
2142 VkPipelineBindPoint pipelineBindPoint,
2143 VkPipelineLayout _layout,
2144 uint32_t firstSet,
2145 uint32_t descriptorSetCount,
2146 const VkDescriptorSet *pDescriptorSets,
2147 uint32_t dynamicOffsetCount,
2148 const uint32_t *pDynamicOffsets)
2149 {
2150 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
2151 TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
2152 unsigned dyn_idx = 0;
2153
2154 struct tu_descriptor_state *descriptors_state =
2155 tu_get_descriptors_state(cmd_buffer, pipelineBindPoint);
2156
2157 for (unsigned i = 0; i < descriptorSetCount; ++i) {
2158 unsigned idx = i + firstSet;
2159 TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);
2160
2161 descriptors_state->sets[idx] = set;
2162 descriptors_state->valid |= (1u << idx);
2163
2164 for (unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
2165 unsigned dst_idx = j + layout->set[idx].dynamic_offset_start;
2166 assert(dyn_idx < dynamicOffsetCount);
2167
2168 descriptors_state->dynamic_buffers[dst_idx] =
2169 set->dynamic_descriptors[j].va + pDynamicOffsets[dyn_idx];
2170 }
2171 }
2172
2173 cmd_buffer->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS;
2174 }
2175
2176 void
2177 tu_CmdPushConstants(VkCommandBuffer commandBuffer,
2178 VkPipelineLayout layout,
2179 VkShaderStageFlags stageFlags,
2180 uint32_t offset,
2181 uint32_t size,
2182 const void *pValues)
2183 {
2184 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2185 memcpy((void*) cmd->push_constants + offset, pValues, size);
2186 cmd->state.dirty |= TU_CMD_DIRTY_PUSH_CONSTANTS;
2187 }
2188
2189 VkResult
2190 tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
2191 {
2192 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
2193
2194 if (cmd_buffer->scratch_seqno) {
2195 tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
2196 MSM_SUBMIT_BO_WRITE);
2197 }
2198
2199 if (cmd_buffer->use_vsc_data) {
2200 tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data,
2201 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
2202 tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data2,
2203 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
2204 }
2205
2206 for (uint32_t i = 0; i < cmd_buffer->draw_cs.bo_count; i++) {
2207 tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_cs.bos[i],
2208 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2209 }
2210
2211 for (uint32_t i = 0; i < cmd_buffer->draw_epilogue_cs.bo_count; i++) {
2212 tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_epilogue_cs.bos[i],
2213 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2214 }
2215
2216 for (uint32_t i = 0; i < cmd_buffer->sub_cs.bo_count; i++) {
2217 tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->sub_cs.bos[i],
2218 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2219 }
2220
2221 tu_cs_end(&cmd_buffer->cs);
2222 tu_cs_end(&cmd_buffer->draw_cs);
2223 tu_cs_end(&cmd_buffer->draw_epilogue_cs);
2224
2225 cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;
2226
2227 return cmd_buffer->record_result;
2228 }
2229
2230 void
2231 tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
2232 VkPipelineBindPoint pipelineBindPoint,
2233 VkPipeline _pipeline)
2234 {
2235 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2236 TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);
2237
2238 switch (pipelineBindPoint) {
2239 case VK_PIPELINE_BIND_POINT_GRAPHICS:
2240 cmd->state.pipeline = pipeline;
2241 cmd->state.dirty |= TU_CMD_DIRTY_PIPELINE;
2242 break;
2243 case VK_PIPELINE_BIND_POINT_COMPUTE:
2244 cmd->state.compute_pipeline = pipeline;
2245 cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_PIPELINE;
2246 break;
2247 default:
2248 unreachable("unrecognized pipeline bind point");
2249 break;
2250 }
2251
2252 tu_bo_list_add(&cmd->bo_list, &pipeline->program.binary_bo,
2253 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2254 for (uint32_t i = 0; i < pipeline->cs.bo_count; i++) {
2255 tu_bo_list_add(&cmd->bo_list, pipeline->cs.bos[i],
2256 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2257 }
2258 }
2259
2260 void
2261 tu_CmdSetViewport(VkCommandBuffer commandBuffer,
2262 uint32_t firstViewport,
2263 uint32_t viewportCount,
2264 const VkViewport *pViewports)
2265 {
2266 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2267 struct tu_cs *draw_cs = &cmd->draw_cs;
2268
2269 assert(firstViewport == 0 && viewportCount == 1);
2270 tu6_emit_viewport(draw_cs, pViewports);
2271
2272 tu_cs_sanity_check(draw_cs);
2273 }
2274
2275 void
2276 tu_CmdSetScissor(VkCommandBuffer commandBuffer,
2277 uint32_t firstScissor,
2278 uint32_t scissorCount,
2279 const VkRect2D *pScissors)
2280 {
2281 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2282 struct tu_cs *draw_cs = &cmd->draw_cs;
2283
2284 assert(firstScissor == 0 && scissorCount == 1);
2285 tu6_emit_scissor(draw_cs, pScissors);
2286
2287 tu_cs_sanity_check(draw_cs);
2288 }
2289
2290 void
2291 tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
2292 {
2293 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2294
2295 cmd->state.dynamic.line_width = lineWidth;
2296
2297 /* line width depends on VkPipelineRasterizationStateCreateInfo */
2298 cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
2299 }
2300
2301 void
2302 tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
2303 float depthBiasConstantFactor,
2304 float depthBiasClamp,
2305 float depthBiasSlopeFactor)
2306 {
2307 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2308 struct tu_cs *draw_cs = &cmd->draw_cs;
2309
2310 tu6_emit_depth_bias(draw_cs, depthBiasConstantFactor, depthBiasClamp,
2311 depthBiasSlopeFactor);
2312
2313 tu_cs_sanity_check(draw_cs);
2314 }
2315
2316 void
2317 tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
2318 const float blendConstants[4])
2319 {
2320 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2321 struct tu_cs *draw_cs = &cmd->draw_cs;
2322
2323 tu6_emit_blend_constants(draw_cs, blendConstants);
2324
2325 tu_cs_sanity_check(draw_cs);
2326 }
2327
2328 void
2329 tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
2330 float minDepthBounds,
2331 float maxDepthBounds)
2332 {
2333 }
2334
2335 void
2336 tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
2337 VkStencilFaceFlags faceMask,
2338 uint32_t compareMask)
2339 {
2340 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2341
2342 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2343 cmd->state.dynamic.stencil_compare_mask.front = compareMask;
2344 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2345 cmd->state.dynamic.stencil_compare_mask.back = compareMask;
2346
2347 /* the front/back compare masks must be updated together */
2348 cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
2349 }
2350
2351 void
2352 tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
2353 VkStencilFaceFlags faceMask,
2354 uint32_t writeMask)
2355 {
2356 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2357
2358 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2359 cmd->state.dynamic.stencil_write_mask.front = writeMask;
2360 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2361 cmd->state.dynamic.stencil_write_mask.back = writeMask;
2362
2363 /* the front/back write masks must be updated together */
2364 cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
2365 }
2366
2367 void
2368 tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
2369 VkStencilFaceFlags faceMask,
2370 uint32_t reference)
2371 {
2372 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2373
2374 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2375 cmd->state.dynamic.stencil_reference.front = reference;
2376 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2377 cmd->state.dynamic.stencil_reference.back = reference;
2378
2379 /* the front/back references must be updated together */
2380 cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
2381 }
2382
2383 void
2384 tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
2385 uint32_t commandBufferCount,
2386 const VkCommandBuffer *pCmdBuffers)
2387 {
2388 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2389 VkResult result;
2390
2391 assert(commandBufferCount > 0);
2392
2393 for (uint32_t i = 0; i < commandBufferCount; i++) {
2394 TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);
2395
2396 result = tu_bo_list_merge(&cmd->bo_list, &secondary->bo_list);
2397 if (result != VK_SUCCESS) {
2398 cmd->record_result = result;
2399 break;
2400 }
2401
2402 if (secondary->usage_flags &
2403 VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
2404 assert(tu_cs_is_empty(&secondary->cs));
2405
2406 result = tu_cs_add_entries(&cmd->draw_cs, &secondary->draw_cs);
2407 if (result != VK_SUCCESS) {
2408 cmd->record_result = result;
2409 break;
2410 }
2411
2412 result = tu_cs_add_entries(&cmd->draw_epilogue_cs,
2413 &secondary->draw_epilogue_cs);
2414 if (result != VK_SUCCESS) {
2415 cmd->record_result = result;
2416 break;
2417 }
2418 } else {
2419 assert(tu_cs_is_empty(&secondary->draw_cs));
2420 assert(tu_cs_is_empty(&secondary->draw_epilogue_cs));
2421
2422 for (uint32_t j = 0; j < secondary->cs.bo_count; j++) {
2423 tu_bo_list_add(&cmd->bo_list, secondary->cs.bos[j],
2424 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2425 }
2426
2427 tu_cs_emit_call(&cmd->cs, &secondary->cs);
2428 }
2429 }
2430 cmd->state.dirty = ~0u; /* TODO: set only the dirty bits that actually need to be set */
2431 }
2432
2433 VkResult
2434 tu_CreateCommandPool(VkDevice _device,
2435 const VkCommandPoolCreateInfo *pCreateInfo,
2436 const VkAllocationCallbacks *pAllocator,
2437 VkCommandPool *pCmdPool)
2438 {
2439 TU_FROM_HANDLE(tu_device, device, _device);
2440 struct tu_cmd_pool *pool;
2441
2442 pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
2443 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2444 if (pool == NULL)
2445 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2446
2447 if (pAllocator)
2448 pool->alloc = *pAllocator;
2449 else
2450 pool->alloc = device->alloc;
2451
2452 list_inithead(&pool->cmd_buffers);
2453 list_inithead(&pool->free_cmd_buffers);
2454
2455 pool->queue_family_index = pCreateInfo->queueFamilyIndex;
2456
2457 *pCmdPool = tu_cmd_pool_to_handle(pool);
2458
2459 return VK_SUCCESS;
2460 }
2461
2462 void
2463 tu_DestroyCommandPool(VkDevice _device,
2464 VkCommandPool commandPool,
2465 const VkAllocationCallbacks *pAllocator)
2466 {
2467 TU_FROM_HANDLE(tu_device, device, _device);
2468 TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
2469
2470 if (!pool)
2471 return;
2472
2473 list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
2474 &pool->cmd_buffers, pool_link)
2475 {
2476 tu_cmd_buffer_destroy(cmd_buffer);
2477 }
2478
2479 list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
2480 &pool->free_cmd_buffers, pool_link)
2481 {
2482 tu_cmd_buffer_destroy(cmd_buffer);
2483 }
2484
2485 vk_free2(&device->alloc, pAllocator, pool);
2486 }
2487
2488 VkResult
2489 tu_ResetCommandPool(VkDevice device,
2490 VkCommandPool commandPool,
2491 VkCommandPoolResetFlags flags)
2492 {
2493 TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
2494 VkResult result;
2495
2496 list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
2497 pool_link)
2498 {
2499 result = tu_reset_cmd_buffer(cmd_buffer);
2500 if (result != VK_SUCCESS)
2501 return result;
2502 }
2503
2504 return VK_SUCCESS;
2505 }
2506
2507 void
2508 tu_TrimCommandPool(VkDevice device,
2509 VkCommandPool commandPool,
2510 VkCommandPoolTrimFlags flags)
2511 {
2512 TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
2513
2514 if (!pool)
2515 return;
2516
2517 list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
2518 &pool->free_cmd_buffers, pool_link)
2519 {
2520 tu_cmd_buffer_destroy(cmd_buffer);
2521 }
2522 }
2523
2524 void
2525 tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
2526 const VkRenderPassBeginInfo *pRenderPassBegin,
2527 VkSubpassContents contents)
2528 {
2529 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2530 TU_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
2531 TU_FROM_HANDLE(tu_framebuffer, fb, pRenderPassBegin->framebuffer);
2532
2533 cmd->state.pass = pass;
2534 cmd->state.subpass = pass->subpasses;
2535 cmd->state.framebuffer = fb;
2536
2537 tu_cmd_update_tiling_config(cmd, &pRenderPassBegin->renderArea);
2538 tu_cmd_prepare_tile_store_ib(cmd);
2539
2540 tu_emit_load_clear(cmd, pRenderPassBegin);
2541
2542 tu6_emit_zs(cmd, cmd->state.subpass, &cmd->draw_cs);
2543 tu6_emit_mrt(cmd, cmd->state.subpass, &cmd->draw_cs);
2544 tu6_emit_msaa(cmd, cmd->state.subpass, &cmd->draw_cs);
2545 tu6_emit_render_cntl(cmd, cmd->state.subpass, &cmd->draw_cs, false);
2546
2547 /* note: use_hw_binning only checks tiling config */
2548 if (use_hw_binning(cmd))
2549 cmd->use_vsc_data = true;
2550
2551 for (uint32_t i = 0; i < fb->attachment_count; ++i) {
2552 const struct tu_image_view *iview = fb->attachments[i].attachment;
2553 tu_bo_list_add(&cmd->bo_list, iview->image->bo,
2554 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
2555 }
2556 }
2557
2558 void
2559 tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
2560 const VkRenderPassBeginInfo *pRenderPassBeginInfo,
2561 const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
2562 {
2563 tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
2564 pSubpassBeginInfo->contents);
2565 }
2566
2567 void
2568 tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
2569 {
2570 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2571 const struct tu_render_pass *pass = cmd->state.pass;
2572 struct tu_cs *cs = &cmd->draw_cs;
2573
2574 const struct tu_subpass *subpass = cmd->state.subpass++;
2575 /* TODO:
2576 * if msaa samples change between subpasses,
2577 * attachment store is broken for some attachments
2578 */
2579 if (subpass->resolve_attachments) {
2580 tu6_emit_blit_scissor(cmd, cs, true);
2581 for (unsigned i = 0; i < subpass->color_count; i++) {
2582 uint32_t a = subpass->resolve_attachments[i].attachment;
2583 if (a != VK_ATTACHMENT_UNUSED) {
2584 tu6_emit_resolve(cmd, cs, a,
2585 subpass->color_attachments[i].attachment);
2586 }
2587 }
2588 }
2589
2590 /* invalidate because reading input attachments fills the texture cache
2591 * from GMEM, and the cache isn't updated when GMEM is written
2592 * TODO: is there a no-cache bit for textures?
2593 */
2594 if (cmd->state.subpass->input_count)
2595 tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
2596
2597 /* emit mrt/zs/msaa/ubwc state for the subpass that is starting */
2598 tu6_emit_zs(cmd, cmd->state.subpass, cs);
2599 tu6_emit_mrt(cmd, cmd->state.subpass, cs);
2600 tu6_emit_msaa(cmd, cmd->state.subpass, cs);
2601 tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, false);
2602
2603 /* Emit flushes so that input attachments will read the correct value. This
2604 * is for sysmem only, although it shouldn't do much harm on gmem.
2605 */
2606 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
2607 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
2608
2609 /* TODO:
2610 * since we don't know how to do a GMEM->GMEM resolve,
2611 * resolve attachments are resolved to system memory and then loaded back into GMEM if needed
2612 */
2613 if (subpass->resolve_attachments) {
2614 for (unsigned i = 0; i < subpass->color_count; i++) {
2615 uint32_t a = subpass->resolve_attachments[i].attachment;
2616 if (a != VK_ATTACHMENT_UNUSED && pass->attachments[a].gmem_offset >= 0) {
2617 tu_finishme("missing GMEM->GMEM resolve, performance will suffer\n");
2618 tu6_emit_predicated_blit(cmd, cs, a, a, false);
2619 }
2620 }
2621 }
2622 }
2623
2624 void
2625 tu_CmdNextSubpass2(VkCommandBuffer commandBuffer,
2626 const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
2627 const VkSubpassEndInfoKHR *pSubpassEndInfo)
2628 {
2629 tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
2630 }
2631
2632 struct tu_draw_info
2633 {
2634 /**
2635 * Number of vertices.
2636 */
2637 uint32_t count;
2638
2639 /**
2640 * Index of the first vertex.
2641 */
2642 int32_t vertex_offset;
2643
2644 /**
2645 * First instance id.
2646 */
2647 uint32_t first_instance;
2648
2649 /**
2650 * Number of instances.
2651 */
2652 uint32_t instance_count;
2653
2654 /**
2655 * First index (indexed draws only).
2656 */
2657 uint32_t first_index;
2658
2659 /**
2660 * Whether it's an indexed draw.
2661 */
2662 bool indexed;
2663
2664 /**
2665 * Indirect draw parameters resource.
2666 */
2667 struct tu_buffer *indirect;
2668 uint64_t indirect_offset;
2669 uint32_t stride;
2670
2671 /**
2672 * Draw count parameters resource.
2673 */
2674 struct tu_buffer *count_buffer;
2675 uint64_t count_buffer_offset;
2676 };
2677
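/* Enable masks for CP_SET_DRAW_STATE groups: ENABLE_ALL states execute in
 * the binning pass as well as during GMEM and sysmem rendering, while
 * ENABLE_DRAW states are skipped while binning, where only
 * position-relevant state should matter.
 */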
2678 #define ENABLE_ALL (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
2679 #define ENABLE_DRAW (CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
2680
2681 enum tu_draw_state_group_id
2682 {
2683 TU_DRAW_STATE_PROGRAM,
2684 TU_DRAW_STATE_PROGRAM_BINNING,
2685 TU_DRAW_STATE_VI,
2686 TU_DRAW_STATE_VI_BINNING,
2687 TU_DRAW_STATE_VP,
2688 TU_DRAW_STATE_RAST,
2689 TU_DRAW_STATE_DS,
2690 TU_DRAW_STATE_BLEND,
2691 TU_DRAW_STATE_VS_CONST,
2692 TU_DRAW_STATE_FS_CONST,
2693 TU_DRAW_STATE_VS_TEX,
2694 TU_DRAW_STATE_FS_TEX_SYSMEM,
2695 TU_DRAW_STATE_FS_TEX_GMEM,
2696 TU_DRAW_STATE_FS_IBO,
2697 TU_DRAW_STATE_VS_PARAMS,
2698
2699 TU_DRAW_STATE_COUNT,
2700 };
2701
2702 struct tu_draw_state_group
2703 {
2704 enum tu_draw_state_group_id id;
2705 uint32_t enable_mask;
2706 struct tu_cs_entry ib;
2707 };
2708
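/* Return a pointer to the sampler words for element array_index of binding i
 * in the given descriptor map.  For combined image+sampler descriptors the
 * sampler words follow the A6XX_TEX_CONST_DWORDS of texture state within
 * each array element; immutable samplers live in the set layout instead.
 */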
2709 static const struct tu_sampler *
2710 sampler_ptr(struct tu_descriptor_state *descriptors_state,
2711 const struct tu_descriptor_map *map, unsigned i,
2712 unsigned array_index)
2713 {
2714 assert(descriptors_state->valid & (1 << map->set[i]));
2715
2716 struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
2717 assert(map->binding[i] < set->layout->binding_count);
2718
2719 const struct tu_descriptor_set_binding_layout *layout =
2720 &set->layout->binding[map->binding[i]];
2721
2722 if (layout->immutable_samplers_offset) {
2723 const struct tu_sampler *immutable_samplers =
2724 tu_immutable_samplers(set->layout, layout);
2725
2726 return &immutable_samplers[array_index];
2727 }
2728
2729 switch (layout->type) {
2730 case VK_DESCRIPTOR_TYPE_SAMPLER:
2731 return (struct tu_sampler*) &set->mapped_ptr[layout->offset / 4];
2732 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2733 return (struct tu_sampler*) &set->mapped_ptr[layout->offset / 4 + A6XX_TEX_CONST_DWORDS +
2734 array_index *
2735 (A6XX_TEX_CONST_DWORDS +
2736 sizeof(struct tu_sampler) / 4)];
2737 default:
2738 unreachable("unimplemented descriptor type");
2739 break;
2740 }
2741 }
2742
2743 static void
2744 write_tex_const(struct tu_cmd_buffer *cmd,
2745 uint32_t *dst,
2746 struct tu_descriptor_state *descriptors_state,
2747 const struct tu_descriptor_map *map,
2748 unsigned i, unsigned array_index, bool is_sysmem)
2749 {
2750 assert(descriptors_state->valid & (1 << map->set[i]));
2751
2752 struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
2753 assert(map->binding[i] < set->layout->binding_count);
2754
2755 const struct tu_descriptor_set_binding_layout *layout =
2756 &set->layout->binding[map->binding[i]];
2757
2758 switch (layout->type) {
2759 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
2760 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2761 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2762 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
2763 memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
2764 array_index * A6XX_TEX_CONST_DWORDS],
2765 A6XX_TEX_CONST_DWORDS * 4);
2766 break;
2767 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2768 memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
2769 array_index *
2770 (A6XX_TEX_CONST_DWORDS +
2771 sizeof(struct tu_sampler) / 4)],
2772 A6XX_TEX_CONST_DWORDS * 4);
2773 break;
2774 default:
2775 unreachable("unimplemented descriptor type");
2776 break;
2777 }
2778
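   /* For GMEM rendering, input attachments must be read from the tile buffer
    * rather than from system memory: patch the descriptor into a
    * TILE6_2-tiled single-layer 2D view at gmem_base + gmem_offset, with the
    * pitch of a tile row.
    */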
2779 if (layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT && !is_sysmem) {
2780 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
2781 uint32_t a = cmd->state.subpass->input_attachments[map->value[i] +
2782 array_index].attachment;
2783 const struct tu_render_pass_attachment *att = &cmd->state.pass->attachments[a];
2784
2785 assert(att->gmem_offset >= 0);
2786
2787 dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
2788 dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
2789 dst[2] &= ~(A6XX_TEX_CONST_2_TYPE__MASK | A6XX_TEX_CONST_2_PITCH__MASK);
2790 dst[2] |=
2791 A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
2792 A6XX_TEX_CONST_2_PITCH(tiling->tile0.extent.width * att->cpp);
2793 dst[3] = 0;
2794 dst[4] = cmd->device->physical_device->gmem_base + att->gmem_offset;
2795 dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
2796 for (unsigned k = 6; k < A6XX_TEX_CONST_DWORDS; k++)
2797 dst[k] = 0;
2798
2799 if (cmd->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
2800 tu_finishme("patch input attachment pitch for secondary cmd buffer");
2801 }
2802 }
2803
2804 static void
2805 write_image_ibo(struct tu_cmd_buffer *cmd,
2806 uint32_t *dst,
2807 struct tu_descriptor_state *descriptors_state,
2808 const struct tu_descriptor_map *map,
2809 unsigned i, unsigned array_index)
2810 {
2811 assert(descriptors_state->valid & (1 << map->set[i]));
2812
2813 struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
2814 assert(map->binding[i] < set->layout->binding_count);
2815
2816 const struct tu_descriptor_set_binding_layout *layout =
2817 &set->layout->binding[map->binding[i]];
2818
2819 assert(layout->type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
2820
2821 memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
2822 (array_index * 2 + 1) * A6XX_TEX_CONST_DWORDS],
2823 A6XX_TEX_CONST_DWORDS * 4);
2824 }
2825
2826 static uint64_t
2827 buffer_ptr(struct tu_descriptor_state *descriptors_state,
2828 const struct tu_descriptor_map *map,
2829 unsigned i, unsigned array_index)
2830 {
2831 assert(descriptors_state->valid & (1 << map->set[i]));
2832
2833 struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
2834 assert(map->binding[i] < set->layout->binding_count);
2835
2836 const struct tu_descriptor_set_binding_layout *layout =
2837 &set->layout->binding[map->binding[i]];
2838
2839 switch (layout->type) {
2840 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2841 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2842 return descriptors_state->dynamic_buffers[layout->dynamic_offset_offset +
2843 array_index];
2844 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2845 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2846 return (uint64_t) set->mapped_ptr[layout->offset / 4 + array_index * 2 + 1] << 32 |
2847 set->mapped_ptr[layout->offset / 4 + array_index * 2];
2848 default:
2849 unreachable("unimplemented descriptor type");
2850 break;
2851 }
2852 }
2853
2854 static inline uint32_t
2855 tu6_stage2opcode(gl_shader_stage type)
2856 {
2857 switch (type) {
2858 case MESA_SHADER_VERTEX:
2859 case MESA_SHADER_TESS_CTRL:
2860 case MESA_SHADER_TESS_EVAL:
2861 case MESA_SHADER_GEOMETRY:
2862 return CP_LOAD_STATE6_GEOM;
2863 case MESA_SHADER_FRAGMENT:
2864 case MESA_SHADER_COMPUTE:
2865 case MESA_SHADER_KERNEL:
2866 return CP_LOAD_STATE6_FRAG;
2867 default:
2868 unreachable("bad shader type");
2869 }
2870 }
2871
2872 static inline enum a6xx_state_block
2873 tu6_stage2shadersb(gl_shader_stage type)
2874 {
2875 switch (type) {
2876 case MESA_SHADER_VERTEX:
2877 return SB6_VS_SHADER;
2878 case MESA_SHADER_FRAGMENT:
2879 return SB6_FS_SHADER;
2880 case MESA_SHADER_COMPUTE:
2881 case MESA_SHADER_KERNEL:
2882 return SB6_CS_SHADER;
2883 default:
2884 unreachable("bad shader type");
2885 return ~0;
2886 }
2887 }
2888
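/* Emit a stage's constant data.  Range 0 of the UBO analysis state holds the
 * push constants, which are copied inline with SS6_DIRECT; the remaining
 * ranges are UBO sub-ranges that ir3 chose to promote to const registers,
 * loaded indirectly from the UBO's GPU address.
 */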
2889 static void
2890 tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
2891 struct tu_descriptor_state *descriptors_state,
2892 gl_shader_stage type,
2893 uint32_t *push_constants)
2894 {
2895 const struct tu_program_descriptor_linkage *link =
2896 &pipeline->program.link[type];
2897 const struct ir3_ubo_analysis_state *state = &link->ubo_state;
2898
2899 for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
2900 if (state->range[i].start < state->range[i].end) {
2901 uint32_t size = state->range[i].end - state->range[i].start;
2902 uint32_t offset = state->range[i].start;
2903
2904 /* even if the start of the const buffer range is within the shader's
2905 * constlen, the end may not be, so clamp the size:
2906 */
2907 size = MIN2(size, (16 * link->constlen) - state->range[i].offset);
2908
2909 if (size == 0)
2910 continue;
2911
2912 /* things should be aligned to vec4: */
2913 debug_assert((state->range[i].offset % 16) == 0);
2914 debug_assert((size % 16) == 0);
2915 debug_assert((offset % 16) == 0);
2916
2917 if (i == 0) {
2918 /* push constants */
2919 tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (size / 4));
2920 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
2921 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
2922 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
2923 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
2924 CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
2925 tu_cs_emit(cs, 0);
2926 tu_cs_emit(cs, 0);
2927 for (unsigned d = 0; d < size / 4; d++)
2928 tu_cs_emit(cs, push_constants[d + offset / 4]);
2929 continue;
2930 }
2931
2932 /* Look through the UBO map to find our UBO index, and get the VA for
2933 * that UBO.
2934 */
2935 uint64_t va = 0;
2936 uint32_t ubo_idx = i - 1;
2937 uint32_t ubo_map_base = 0;
2938 for (int j = 0; j < link->ubo_map.num; j++) {
2939 if (ubo_idx >= ubo_map_base &&
2940 ubo_idx < ubo_map_base + link->ubo_map.array_size[j]) {
2941 va = buffer_ptr(descriptors_state, &link->ubo_map, j,
2942 ubo_idx - ubo_map_base);
2943 break;
2944 }
2945 ubo_map_base += link->ubo_map.array_size[j];
2946 }
2947 assert(va);
2948
2949 tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
2950 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
2951 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
2952 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
2953 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
2954 CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
2955 tu_cs_emit_qw(cs, va + offset);
2956 }
2957 }
2958 }
2959
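/* Emit the table of UBO GPU addresses into the const file at the offset ir3
 * reserved for it.  Entries are emitted as qword pairs and padded to an even
 * count with 0xffffffff.
 */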
2960 static void
2961 tu6_emit_ubos(struct tu_cs *cs, const struct tu_pipeline *pipeline,
2962 struct tu_descriptor_state *descriptors_state,
2963 gl_shader_stage type)
2964 {
2965 const struct tu_program_descriptor_linkage *link =
2966 &pipeline->program.link[type];
2967
2968 uint32_t num = MIN2(link->ubo_map.num_desc, link->const_state.num_ubos);
2969 uint32_t anum = align(num, 2);
2970
2971 if (!num)
2972 return;
2973
2974 tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (2 * anum));
2975 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(link->const_state.offsets.ubo) |
2976 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
2977 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
2978 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
2979 CP_LOAD_STATE6_0_NUM_UNIT(anum/2));
2980 tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
2981 tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
2982
2983 unsigned emitted = 0;
2984 for (unsigned i = 0; emitted < num && i < link->ubo_map.num; i++) {
2985 for (unsigned j = 0; emitted < num && j < link->ubo_map.array_size[i]; j++) {
2986 tu_cs_emit_qw(cs, buffer_ptr(descriptors_state, &link->ubo_map, i, j));
2987 emitted++;
2988 }
2989 }
2990
2991 for (; emitted < anum; emitted++) {
2992 tu_cs_emit(cs, 0xffffffff);
2993 tu_cs_emit(cs, 0xffffffff);
2994 }
2995 }
2996
2997 static struct tu_cs_entry
2998 tu6_emit_consts(struct tu_cmd_buffer *cmd,
2999 const struct tu_pipeline *pipeline,
3000 struct tu_descriptor_state *descriptors_state,
3001 gl_shader_stage type)
3002 {
3003 struct tu_cs cs;
3004 tu_cs_begin_sub_stream(&cmd->sub_cs, 512, &cs); /* TODO: maximum size? */
3005
3006 tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);
3007 tu6_emit_ubos(&cs, pipeline, descriptors_state, type);
3008
3009 return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
3010 }
3011
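/* Emit the VS driver params.  Only the base instance (IR3_DP_INSTID_BASE,
 * i.e. the third dword of the driver-param vec4) is filled in so far; the
 * rest of the vec4 is zeroed.  Nothing is emitted when constlen shows the
 * driver params were optimized out of the shader.
 */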
3012 static VkResult
3013 tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
3014 const struct tu_draw_info *draw,
3015 struct tu_cs_entry *entry)
3016 {
3017 /* TODO: fill out more than just base instance */
3018 const struct tu_program_descriptor_linkage *link =
3019 &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
3020 const struct ir3_const_state *const_state = &link->const_state;
3021 struct tu_cs cs;
3022
3023 if (const_state->offsets.driver_param >= link->constlen) {
3024 *entry = (struct tu_cs_entry) {};
3025 return VK_SUCCESS;
3026 }
3027
3028 VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 8, &cs);
3029 if (result != VK_SUCCESS)
3030 return result;
3031
3032 tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
3033 tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(const_state->offsets.driver_param) |
3034 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
3035 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
3036 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
3037 CP_LOAD_STATE6_0_NUM_UNIT(1));
3038 tu_cs_emit(&cs, 0);
3039 tu_cs_emit(&cs, 0);
3040
3041 STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);
3042
3043 tu_cs_emit(&cs, 0);
3044 tu_cs_emit(&cs, 0);
3045 tu_cs_emit(&cs, draw->first_instance);
3046 tu_cs_emit(&cs, 0);
3047
3048 *entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
3049 return VK_SUCCESS;
3050 }
3051
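/* Build the texture and sampler descriptor arrays for one stage in
 * sub-stream memory, then emit the packets that point the stage's
 * TEX_CONST/TEX_SAMP base registers and CP_LOAD_STATE6 at them.  is_sysmem
 * selects whether input-attachment descriptors are patched to read from
 * GMEM or left pointing at system memory.
 */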
3052 static VkResult
3053 tu6_emit_textures(struct tu_cmd_buffer *cmd,
3054 const struct tu_pipeline *pipeline,
3055 struct tu_descriptor_state *descriptors_state,
3056 gl_shader_stage type,
3057 struct tu_cs_entry *entry,
3058 bool *needs_border,
3059 bool is_sysmem)
3060 {
3061 struct tu_cs *draw_state = &cmd->sub_cs;
3062 const struct tu_program_descriptor_linkage *link =
3063 &pipeline->program.link[type];
3064 VkResult result;
3065
3066 if (link->texture_map.num_desc == 0 && link->sampler_map.num_desc == 0) {
3067 *entry = (struct tu_cs_entry) {};
3068 return VK_SUCCESS;
3069 }
3070
3071 /* allocate and fill texture state */
3072 struct ts_cs_memory tex_const;
3073 result = tu_cs_alloc(draw_state, link->texture_map.num_desc,
3074 A6XX_TEX_CONST_DWORDS, &tex_const);
3075 if (result != VK_SUCCESS)
3076 return result;
3077
3078 int tex_index = 0;
3079 for (unsigned i = 0; i < link->texture_map.num; i++) {
3080 for (int j = 0; j < link->texture_map.array_size[i]; j++) {
3081 write_tex_const(cmd,
3082 &tex_const.map[A6XX_TEX_CONST_DWORDS * tex_index++],
3083 descriptors_state, &link->texture_map, i, j,
3084 is_sysmem);
3085 }
3086 }
3087
3088 /* allocate and fill sampler state */
3089 struct ts_cs_memory tex_samp = { 0 };
3090 if (link->sampler_map.num_desc) {
3091 result = tu_cs_alloc(draw_state, link->sampler_map.num_desc,
3092 A6XX_TEX_SAMP_DWORDS, &tex_samp);
3093 if (result != VK_SUCCESS)
3094 return result;
3095
3096 int sampler_index = 0;
3097 for (unsigned i = 0; i < link->sampler_map.num; i++) {
3098 for (int j = 0; j < link->sampler_map.array_size[i]; j++) {
3099 const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
3100 &link->sampler_map,
3101 i, j);
3102 memcpy(&tex_samp.map[A6XX_TEX_SAMP_DWORDS * sampler_index++],
3103 sampler->state, sizeof(sampler->state));
3104 *needs_border |= sampler->needs_border;
3105 }
3106 }
3107 }
3108
3109 unsigned tex_samp_reg, tex_const_reg, tex_count_reg;
3110 enum a6xx_state_block sb;
3111
3112 switch (type) {
3113 case MESA_SHADER_VERTEX:
3114 sb = SB6_VS_TEX;
3115 tex_samp_reg = REG_A6XX_SP_VS_TEX_SAMP_LO;
3116 tex_const_reg = REG_A6XX_SP_VS_TEX_CONST_LO;
3117 tex_count_reg = REG_A6XX_SP_VS_TEX_COUNT;
3118 break;
3119 case MESA_SHADER_FRAGMENT:
3120 sb = SB6_FS_TEX;
3121 tex_samp_reg = REG_A6XX_SP_FS_TEX_SAMP_LO;
3122 tex_const_reg = REG_A6XX_SP_FS_TEX_CONST_LO;
3123 tex_count_reg = REG_A6XX_SP_FS_TEX_COUNT;
3124 break;
3125 case MESA_SHADER_COMPUTE:
3126 sb = SB6_CS_TEX;
3127 tex_samp_reg = REG_A6XX_SP_CS_TEX_SAMP_LO;
3128 tex_const_reg = REG_A6XX_SP_CS_TEX_CONST_LO;
3129 tex_count_reg = REG_A6XX_SP_CS_TEX_COUNT;
3130 break;
3131 default:
3132 unreachable("bad state block");
3133 }
3134
3135 struct tu_cs cs;
3136 result = tu_cs_begin_sub_stream(draw_state, 16, &cs);
3137 if (result != VK_SUCCESS)
3138 return result;
3139
3140 if (link->sampler_map.num_desc) {
3141 /* output sampler state: */
3142 tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
3143 tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
3144 CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
3145 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
3146 CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
3147 CP_LOAD_STATE6_0_NUM_UNIT(link->sampler_map.num_desc));
3148 tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
3149
3150 tu_cs_emit_pkt4(&cs, tex_samp_reg, 2);
3151 tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
3152 }
3153
3154 /* emit texture state: */
3155 tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
3156 tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
3157 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
3158 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
3159 CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
3160 CP_LOAD_STATE6_0_NUM_UNIT(link->texture_map.num_desc));
3161 tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */
3162
3163 tu_cs_emit_pkt4(&cs, tex_const_reg, 2);
3164 tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */
3165
3166 tu_cs_emit_pkt4(&cs, tex_count_reg, 1);
3167 tu_cs_emit(&cs, link->texture_map.num_desc);
3168
3169 *entry = tu_cs_end_sub_stream(draw_state, &cs);
3170 return VK_SUCCESS;
3171 }
3172
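/* Build the IBO (SSBO and storage image) descriptor array for a stage.
 * SSBOs are described as large 1D FMT6_32_UINT buffers, with the size left
 * at the maximum since robustBufferAccess is not exposed; storage images
 * reuse the second of the two descriptors the descriptor set stores per
 * image.
 */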
3173 static VkResult
3174 tu6_emit_ibo(struct tu_cmd_buffer *cmd,
3175 const struct tu_pipeline *pipeline,
3176 struct tu_descriptor_state *descriptors_state,
3177 gl_shader_stage type,
3178 struct tu_cs_entry *entry)
3179 {
3180 struct tu_cs *draw_state = &cmd->sub_cs;
3181 const struct tu_program_descriptor_linkage *link =
3182 &pipeline->program.link[type];
3183 VkResult result;
3184
3185 unsigned num_desc = link->ssbo_map.num_desc + link->image_map.num_desc;
3186
3187 if (num_desc == 0) {
3188 *entry = (struct tu_cs_entry) {};
3189 return VK_SUCCESS;
3190 }
3191
3192 struct ts_cs_memory ibo_const;
3193 result = tu_cs_alloc(draw_state, num_desc,
3194 A6XX_TEX_CONST_DWORDS, &ibo_const);
3195 if (result != VK_SUCCESS)
3196 return result;
3197
3198 int ssbo_index = 0;
3199 for (unsigned i = 0; i < link->ssbo_map.num; i++) {
3200 for (int j = 0; j < link->ssbo_map.array_size[i]; j++) {
3201 uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];
3202
3203 uint64_t va = buffer_ptr(descriptors_state, &link->ssbo_map, i, j);
3204 /* We don't expose robustBufferAccess, so leave the size unlimited. */
3205 uint32_t sz = MAX_STORAGE_BUFFER_RANGE / 4;
3206
3207 dst[0] = A6XX_IBO_0_FMT(FMT6_32_UINT);
3208 dst[1] = A6XX_IBO_1_WIDTH(sz & MASK(15)) |
3209 A6XX_IBO_1_HEIGHT(sz >> 15);
3210 dst[2] = A6XX_IBO_2_UNK4 |
3211 A6XX_IBO_2_UNK31 |
3212 A6XX_IBO_2_TYPE(A6XX_TEX_1D);
3213 dst[3] = 0;
3214 dst[4] = va;
3215 dst[5] = va >> 32;
3216 for (int k = 6; k < A6XX_TEX_CONST_DWORDS; k++)
3217 dst[k] = 0;
3218
3219 ssbo_index++;
3220 }
3221 }
3222
3223 for (unsigned i = 0; i < link->image_map.num; i++) {
3224 for (int j = 0; j < link->image_map.array_size[i]; j++) {
3225 uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];
3226
3227 write_image_ibo(cmd, dst,
3228 descriptors_state, &link->image_map, i, j);
3229
3230 ssbo_index++;
3231 }
3232 }
3233
3234 assert(ssbo_index == num_desc);
3235
3236 struct tu_cs cs;
3237 result = tu_cs_begin_sub_stream(draw_state, 7, &cs);
3238 if (result != VK_SUCCESS)
3239 return result;
3240
3241 uint32_t opcode, ibo_addr_reg;
3242 enum a6xx_state_block sb;
3243 enum a6xx_state_type st;
3244
3245 switch (type) {
3246 case MESA_SHADER_FRAGMENT:
3247 opcode = CP_LOAD_STATE6;
3248 st = ST6_SHADER;
3249 sb = SB6_IBO;
3250 ibo_addr_reg = REG_A6XX_SP_IBO_LO;
3251 break;
3252 case MESA_SHADER_COMPUTE:
3253 opcode = CP_LOAD_STATE6_FRAG;
3254 st = ST6_IBO;
3255 sb = SB6_CS_SHADER;
3256 ibo_addr_reg = REG_A6XX_SP_CS_IBO_LO;
3257 break;
3258 default:
3259 unreachable("unsupported stage for ibos");
3260 }
3261
3262 /* emit texture state: */
3263 tu_cs_emit_pkt7(&cs, opcode, 3);
3264 tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
3265 CP_LOAD_STATE6_0_STATE_TYPE(st) |
3266 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
3267 CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
3268 CP_LOAD_STATE6_0_NUM_UNIT(num_desc));
3269 tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */
3270
3271 tu_cs_emit_pkt4(&cs, ibo_addr_reg, 2);
3272 tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */
3273
3274 *entry = tu_cs_end_sub_stream(draw_state, &cs);
3275 return VK_SUCCESS;
3276 }
3277
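/* Border color values pre-packed in every format the hardware may sample:
 * one 128-byte entry per VkBorderColor, copied into the border color buffer
 * for each sampler that needs it.
 */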
3278 struct PACKED bcolor_entry {
3279 uint32_t fp32[4];
3280 uint16_t ui16[4];
3281 int16_t si16[4];
3282 uint16_t fp16[4];
3283 uint16_t rgb565;
3284 uint16_t rgb5a1;
3285 uint16_t rgba4;
3286 uint8_t __pad0[2];
3287 uint8_t ui8[4];
3288 int8_t si8[4];
3289 uint32_t rgb10a2;
3290 uint32_t z24; /* also s8? */
3291 uint16_t srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
3292 uint8_t __pad1[56];
3293 } border_color[] = {
3294 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
3295 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
3296 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
3297 .fp32[3] = 0x3f800000,
3298 .ui16[3] = 0xffff,
3299 .si16[3] = 0x7fff,
3300 .fp16[3] = 0x3c00,
3301 .rgb5a1 = 0x8000,
3302 .rgba4 = 0xf000,
3303 .ui8[3] = 0xff,
3304 .si8[3] = 0x7f,
3305 .rgb10a2 = 0xc0000000,
3306 .srgb[3] = 0x3c00,
3307 },
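/* For integer formats the border color is presumably fetched from the
 * fp32/fp16 slots as raw integer bits, so the integer opaque colors store
 * 1 rather than 1.0f.
 */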
3308 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
3309 .fp32[3] = 1,
3310 .fp16[3] = 1,
3311 },
3312 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
3313 .fp32[0 ... 3] = 0x3f800000,
3314 .ui16[0 ... 3] = 0xffff,
3315 .si16[0 ... 3] = 0x7fff,
3316 .fp16[0 ... 3] = 0x3c00,
3317 .rgb565 = 0xffff,
3318 .rgb5a1 = 0xffff,
3319 .rgba4 = 0xffff,
3320 .ui8[0 ... 3] = 0xff,
3321 .si8[0 ... 3] = 0x7f,
3322 .rgb10a2 = 0xffffffff,
3323 .z24 = 0xffffff,
3324 .srgb[0 ... 3] = 0x3c00,
3325 },
3326 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
3327 .fp32[0 ... 3] = 1,
3328 .fp16[0 ... 3] = 1,
3329 },
3330 };
3331
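/* Upload one 128-byte border color entry per VS and FS sampler, in sampler
 * order, and point SP_TP_BORDER_COLOR_BASE_ADDR at the array.  This assumes
 * the hardware indexes border colors by sampler slot.
 */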
3332 static VkResult
3333 tu6_emit_border_color(struct tu_cmd_buffer *cmd,
3334 struct tu_cs *cs)
3335 {
3336 STATIC_ASSERT(sizeof(struct bcolor_entry) == 128);
3337
3338 const struct tu_pipeline *pipeline = cmd->state.pipeline;
3339 struct tu_descriptor_state *descriptors_state =
3340 &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];
3341 const struct tu_descriptor_map *vs_sampler =
3342 &pipeline->program.link[MESA_SHADER_VERTEX].sampler_map;
3343 const struct tu_descriptor_map *fs_sampler =
3344 &pipeline->program.link[MESA_SHADER_FRAGMENT].sampler_map;
3345 struct ts_cs_memory ptr;
3346
3347 VkResult result = tu_cs_alloc(&cmd->sub_cs,
3348 vs_sampler->num_desc + fs_sampler->num_desc,
3349 128 / 4,
3350 &ptr);
3351 if (result != VK_SUCCESS)
3352 return result;
3353
3354 for (unsigned i = 0; i < vs_sampler->num; i++) {
3355 for (unsigned j = 0; j < vs_sampler->array_size[i]; j++) {
3356 const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
3357 vs_sampler, i, j);
3358 memcpy(ptr.map, &border_color[sampler->border], 128);
3359 ptr.map += 128 / 4;
3360 }
3361 }
3362
3363 for (unsigned i = 0; i < fs_sampler->num; i++) {
3364 for (unsigned j = 0; j < fs_sampler->array_size[i]; j++) {
3365 const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
3366 fs_sampler, i, j);
3367 memcpy(ptr.map, &border_color[sampler->border], 128);
3368 ptr.map += 128 / 4;
3369 }
3370 }
3371
3372 tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_LO, 2);
3373 tu_cs_emit_qw(cs, ptr.iova);
3374 return VK_SUCCESS;
3375 }
3376
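/* Re-emit whatever draw state is dirty as CP_SET_DRAW_STATE groups.  Each
 * group is an IB of state packets plus an enable mask selecting which
 * passes (binning / GMEM / sysmem) execute it; groups are tracked by id, so
 * re-emitting a group replaces the previous version of that state.
 */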
3377 static VkResult
3378 tu6_bind_draw_states(struct tu_cmd_buffer *cmd,
3379 struct tu_cs *cs,
3380 const struct tu_draw_info *draw)
3381 {
3382 const struct tu_pipeline *pipeline = cmd->state.pipeline;
3383 const struct tu_dynamic_state *dynamic = &cmd->state.dynamic;
3384 struct tu_draw_state_group draw_state_groups[TU_DRAW_STATE_COUNT];
3385 uint32_t draw_state_group_count = 0;
3386 VkResult result;
3387
3388 struct tu_descriptor_state *descriptors_state =
3389 &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];
3390
3391 /* TODO lrz */
3392
3393 tu_cs_emit_regs(cs,
3394 A6XX_PC_PRIMITIVE_CNTL_0(.primitive_restart =
3395 pipeline->ia.primitive_restart && draw->indexed));
3396
3397 if (cmd->state.dirty &
3398 (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH) &&
3399 (pipeline->dynamic_state.mask & TU_DYNAMIC_LINE_WIDTH)) {
3400 tu6_emit_gras_su_cntl(cs, pipeline->rast.gras_su_cntl,
3401 dynamic->line_width);
3402 }
3403
3404 if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK) &&
3405 (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_COMPARE_MASK)) {
3406 tu6_emit_stencil_compare_mask(cs, dynamic->stencil_compare_mask.front,
3407 dynamic->stencil_compare_mask.back);
3408 }
3409
3410 if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK) &&
3411 (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_WRITE_MASK)) {
3412 tu6_emit_stencil_write_mask(cs, dynamic->stencil_write_mask.front,
3413 dynamic->stencil_write_mask.back);
3414 }
3415
3416 if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE) &&
3417 (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_REFERENCE)) {
3418 tu6_emit_stencil_reference(cs, dynamic->stencil_reference.front,
3419 dynamic->stencil_reference.back);
3420 }
3421
3422 if (cmd->state.dirty &
3423 (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_VERTEX_BUFFERS)) {
3424 for (uint32_t i = 0; i < pipeline->vi.count; i++) {
3425 const uint32_t binding = pipeline->vi.bindings[i];
3426 const uint32_t stride = pipeline->vi.strides[i];
3427 const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
3428 const VkDeviceSize offset = buf->bo_offset +
3429 cmd->state.vb.offsets[binding] +
3430 pipeline->vi.offsets[i];
3431 const VkDeviceSize size =
3432 offset < buf->bo->size ? buf->bo->size - offset : 0;
3433
3434 tu_cs_emit_regs(cs,
3435 A6XX_VFD_FETCH_BASE(i, .bo = buf->bo, .bo_offset = offset),
3436 A6XX_VFD_FETCH_SIZE(i, size),
3437 A6XX_VFD_FETCH_STRIDE(i, stride));
3438 }
3439 }
3440
3441 if (cmd->state.dirty & TU_CMD_DIRTY_PIPELINE) {
3442 draw_state_groups[draw_state_group_count++] =
3443 (struct tu_draw_state_group) {
3444 .id = TU_DRAW_STATE_PROGRAM,
3445 .enable_mask = ENABLE_DRAW,
3446 .ib = pipeline->program.state_ib,
3447 };
3448 draw_state_groups[draw_state_group_count++] =
3449 (struct tu_draw_state_group) {
3450 .id = TU_DRAW_STATE_PROGRAM_BINNING,
3451 .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
3452 .ib = pipeline->program.binning_state_ib,
3453 };
3454 draw_state_groups[draw_state_group_count++] =
3455 (struct tu_draw_state_group) {
3456 .id = TU_DRAW_STATE_VI,
3457 .enable_mask = ENABLE_DRAW,
3458 .ib = pipeline->vi.state_ib,
3459 };
3460 draw_state_groups[draw_state_group_count++] =
3461 (struct tu_draw_state_group) {
3462 .id = TU_DRAW_STATE_VI_BINNING,
3463 .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
3464 .ib = pipeline->vi.binning_state_ib,
3465 };
3466 draw_state_groups[draw_state_group_count++] =
3467 (struct tu_draw_state_group) {
3468 .id = TU_DRAW_STATE_VP,
3469 .enable_mask = ENABLE_ALL,
3470 .ib = pipeline->vp.state_ib,
3471 };
3472 draw_state_groups[draw_state_group_count++] =
3473 (struct tu_draw_state_group) {
3474 .id = TU_DRAW_STATE_RAST,
3475 .enable_mask = ENABLE_ALL,
3476 .ib = pipeline->rast.state_ib,
3477 };
3478 draw_state_groups[draw_state_group_count++] =
3479 (struct tu_draw_state_group) {
3480 .id = TU_DRAW_STATE_DS,
3481 .enable_mask = ENABLE_ALL,
3482 .ib = pipeline->ds.state_ib,
3483 };
3484 draw_state_groups[draw_state_group_count++] =
3485 (struct tu_draw_state_group) {
3486 .id = TU_DRAW_STATE_BLEND,
3487 .enable_mask = ENABLE_ALL,
3488 .ib = pipeline->blend.state_ib,
3489 };
3490 }
3491
3492 if (cmd->state.dirty &
3493 (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS | TU_CMD_DIRTY_PUSH_CONSTANTS)) {
3494 draw_state_groups[draw_state_group_count++] =
3495 (struct tu_draw_state_group) {
3496 .id = TU_DRAW_STATE_VS_CONST,
3497 .enable_mask = ENABLE_ALL,
3498 .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX)
3499 };
3500 draw_state_groups[draw_state_group_count++] =
3501 (struct tu_draw_state_group) {
3502 .id = TU_DRAW_STATE_FS_CONST,
3503 .enable_mask = ENABLE_DRAW,
3504 .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT)
3505 };
3506 }
3507
3508 if (cmd->state.dirty &
3509 (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS)) {
3510 bool needs_border = false;
3511 struct tu_cs_entry vs_tex, fs_tex_sysmem, fs_tex_gmem, fs_ibo;
3512
3513 result = tu6_emit_textures(cmd, pipeline, descriptors_state,
3514 MESA_SHADER_VERTEX, &vs_tex, &needs_border,
3515 false);
3516 if (result != VK_SUCCESS)
3517 return result;
3518
3519 /* TODO: we could emit just one texture descriptor draw state when there
3520 * are no input attachments, which is the most common case. We could
3521 * also split out the sampler state, which doesn't change even for input
3522 * attachments.
3523 */
3524 result = tu6_emit_textures(cmd, pipeline, descriptors_state,
3525 MESA_SHADER_FRAGMENT, &fs_tex_sysmem,
3526 &needs_border, true);
3527 if (result != VK_SUCCESS)
3528 return result;
3529
3530 result = tu6_emit_textures(cmd, pipeline, descriptors_state,
3531 MESA_SHADER_FRAGMENT, &fs_tex_gmem,
3532 &needs_border, false);
3533 if (result != VK_SUCCESS)
3534 return result;
3535
3536 result = tu6_emit_ibo(cmd, pipeline, descriptors_state,
3537 MESA_SHADER_FRAGMENT, &fs_ibo);
3538 if (result != VK_SUCCESS)
3539 return result;
3540
3541 draw_state_groups[draw_state_group_count++] =
3542 (struct tu_draw_state_group) {
3543 .id = TU_DRAW_STATE_VS_TEX,
3544 .enable_mask = ENABLE_ALL,
3545 .ib = vs_tex,
3546 };
3547 draw_state_groups[draw_state_group_count++] =
3548 (struct tu_draw_state_group) {
3549 .id = TU_DRAW_STATE_FS_TEX_GMEM,
3550 .enable_mask = CP_SET_DRAW_STATE__0_GMEM,
3551 .ib = fs_tex_gmem,
3552 };
3553 draw_state_groups[draw_state_group_count++] =
3554 (struct tu_draw_state_group) {
3555 .id = TU_DRAW_STATE_FS_TEX_SYSMEM,
3556 .enable_mask = CP_SET_DRAW_STATE__0_SYSMEM,
3557 .ib = fs_tex_sysmem,
3558 };
3559 draw_state_groups[draw_state_group_count++] =
3560 (struct tu_draw_state_group) {
3561 .id = TU_DRAW_STATE_FS_IBO,
3562 .enable_mask = ENABLE_DRAW,
3563 .ib = fs_ibo,
3564 };
3565
3566 if (needs_border) {
3567 result = tu6_emit_border_color(cmd, cs);
3568 if (result != VK_SUCCESS)
3569 return result;
3570 }
3571 }
3572
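   /* VS params are re-emitted for every draw, since they depend on the
    * parameters of the individual draw rather than on bound state.
    */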
3573 struct tu_cs_entry vs_params;
3574 result = tu6_emit_vs_params(cmd, draw, &vs_params);
3575 if (result != VK_SUCCESS)
3576 return result;
3577
3578 draw_state_groups[draw_state_group_count++] =
3579 (struct tu_draw_state_group) {
3580 .id = TU_DRAW_STATE_VS_PARAMS,
3581 .enable_mask = ENABLE_ALL,
3582 .ib = vs_params,
3583 };
3584
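   /* Emit one CP_SET_DRAW_STATE entry per group: a dword holding the IB
    * size, enable mask and group id, followed by the 64-bit IB address.
    * Groups with an empty IB get the DISABLE bit instead, clearing any
    * state previously bound under that group id.
    */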
3585 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_group_count);
3586 for (uint32_t i = 0; i < draw_state_group_count; i++) {
3587 const struct tu_draw_state_group *group = &draw_state_groups[i];
3588 debug_assert((group->enable_mask & ~ENABLE_ALL) == 0);
3589 uint32_t cp_set_draw_state =
3590 CP_SET_DRAW_STATE__0_COUNT(group->ib.size / 4) |
3591 group->enable_mask |
3592 CP_SET_DRAW_STATE__0_GROUP_ID(group->id);
3593 uint64_t iova;
3594 if (group->ib.size) {
3595 iova = group->ib.bo->iova + group->ib.offset;
3596 } else {
3597 cp_set_draw_state |= CP_SET_DRAW_STATE__0_DISABLE;
3598 iova = 0;
3599 }
3600
3601 tu_cs_emit(cs, cp_set_draw_state);
3602 tu_cs_emit_qw(cs, iova);
3603 }
3604
3605 tu_cs_sanity_check(cs);
3606
3607 /* track BOs */
3608 if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) {
3609 for (uint32_t i = 0; i < MAX_VBS; i++) {
3610 const struct tu_buffer *buf = cmd->state.vb.buffers[i];
3611 if (buf)
3612 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
3613 }
3614 }
3615 if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
3616 unsigned i;
3617 for_each_bit(i, descriptors_state->valid) {
3618 struct tu_descriptor_set *set = descriptors_state->sets[i];
3619 for (unsigned j = 0; j < set->layout->buffer_count; ++j)
3620 if (set->descriptors[j]) {
3621 tu_bo_list_add(&cmd->bo_list, set->descriptors[j],
3622 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
3623 }
3624 }
3625 }
3626
3627 /* Fragment shader state overwrites compute shader state, so flag the
3628 * compute pipeline for re-emit.
3629 */
3630 cmd->state.dirty = TU_CMD_DIRTY_COMPUTE_PIPELINE;
3631 return VK_SUCCESS;
3632 }
3633
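/* Emit an indirect draw.  The vertex/instance start offsets are programmed
 * directly, while the draw parameters themselves are read by the CP from
 * the indirect buffer: CP_DRAW_INDX_INDIRECT for indexed draws,
 * CP_DRAW_INDIRECT otherwise.
 */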
3634 static void
3635 tu6_emit_draw_indirect(struct tu_cmd_buffer *cmd,
3636 struct tu_cs *cs,
3637 const struct tu_draw_info *draw)
3638 {
3639 const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;
3640
3641 tu_cs_emit_regs(cs,
3642 A6XX_VFD_INDEX_OFFSET(draw->vertex_offset),
3643 A6XX_VFD_INSTANCE_START_OFFSET(draw->first_instance));
3644
3645 if (draw->indexed) {
3646 const enum a4xx_index_size index_size =
3647 tu6_index_size(cmd->state.index_type);
3648 const uint32_t index_bytes =
3649 (cmd->state.index_type == VK_INDEX_TYPE_UINT32) ? 4 : 2;
3650 const struct tu_buffer *index_buf = cmd->state.index_buffer;
3651       unsigned max_indices =
3652 (index_buf->size - cmd->state.index_offset) / index_bytes;
3653
3654 const uint32_t cp_draw_indx =
3655 CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
3656 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
3657 CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
3658 CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) | 0x2000;
3659
3660 tu_cs_emit_pkt7(cs, CP_DRAW_INDX_INDIRECT, 6);
3661 tu_cs_emit(cs, cp_draw_indx);
3662 tu_cs_emit_qw(cs, index_buf->bo->iova + cmd->state.index_offset);
3663       tu_cs_emit(cs, A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(max_indices));
3664 tu_cs_emit_qw(cs, draw->indirect->bo->iova + draw->indirect_offset);
3665 } else {
3666 const uint32_t cp_draw_indx =
3667 CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
3668 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
3669 CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) | 0x2000;
3670
3671 tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT, 3);
3672 tu_cs_emit(cs, cp_draw_indx);
3673 tu_cs_emit_qw(cs, draw->indirect->bo->iova + draw->indirect_offset);
3674 }
3675
3676 tu_bo_list_add(&cmd->bo_list, draw->indirect->bo, MSM_SUBMIT_BO_READ);
3677 }
3678
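/* Emit a direct draw, with all draw parameters baked into the command
 * stream (CP_DRAW_INDX_OFFSET for both the indexed and non-indexed case).
 */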
3679 static void
3680 tu6_emit_draw_direct(struct tu_cmd_buffer *cmd,
3681 struct tu_cs *cs,
3682 const struct tu_draw_info *draw)
3683 {
3685 const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;
3686
3687 tu_cs_emit_regs(cs,
3688 A6XX_VFD_INDEX_OFFSET(draw->vertex_offset),
3689 A6XX_VFD_INSTANCE_START_OFFSET(draw->first_instance));
3690
3691 /* TODO hw binning */
3692 if (draw->indexed) {
3693 const enum a4xx_index_size index_size =
3694 tu6_index_size(cmd->state.index_type);
3695 const uint32_t index_bytes =
3696 (cmd->state.index_type == VK_INDEX_TYPE_UINT32) ? 4 : 2;
3697 const struct tu_buffer *buf = cmd->state.index_buffer;
3698 const VkDeviceSize offset = buf->bo_offset + cmd->state.index_offset +
3699 index_bytes * draw->first_index;
3700 const uint32_t size = index_bytes * draw->count;
3701
3702 const uint32_t cp_draw_indx =
3703 CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
3704 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
3705 CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
3706 CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) | 0x2000;
3707
3708 tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 7);
3709 tu_cs_emit(cs, cp_draw_indx);
3710 tu_cs_emit(cs, draw->instance_count);
3711 tu_cs_emit(cs, draw->count);
3712 tu_cs_emit(cs, 0x0); /* XXX */
3713 tu_cs_emit_qw(cs, buf->bo->iova + offset);
3714 tu_cs_emit(cs, size);
3715 } else {
3716 const uint32_t cp_draw_indx =
3717 CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
3718 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
3719 CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) | 0x2000;
3720
3721 tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
3722 tu_cs_emit(cs, cp_draw_indx);
3723 tu_cs_emit(cs, draw->instance_count);
3724 tu_cs_emit(cs, draw->count);
3725 }
3726 }
3727
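/* Common draw path: bind all dirty draw state, then emit either a direct
 * or an indirect draw packet into the draw command stream.
 */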
3728 static void
3729 tu_draw(struct tu_cmd_buffer *cmd, const struct tu_draw_info *draw)
3730 {
3731 struct tu_cs *cs = &cmd->draw_cs;
3732 VkResult result;
3733
3734 result = tu6_bind_draw_states(cmd, cs, draw);
3735 if (result != VK_SUCCESS) {
3736 cmd->record_result = result;
3737 return;
3738 }
3739
3740 if (draw->indirect)
3741 tu6_emit_draw_indirect(cmd, cs, draw);
3742 else
3743 tu6_emit_draw_direct(cmd, cs, draw);
3744
3745 cmd->wait_for_idle = true;
3746
3747 tu_cs_sanity_check(cs);
3748 }
3749
3750 void
3751 tu_CmdDraw(VkCommandBuffer commandBuffer,
3752 uint32_t vertexCount,
3753 uint32_t instanceCount,
3754 uint32_t firstVertex,
3755 uint32_t firstInstance)
3756 {
3757 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3758 struct tu_draw_info info = {};
3759
3760 info.count = vertexCount;
3761 info.instance_count = instanceCount;
3762 info.first_instance = firstInstance;
3763 info.vertex_offset = firstVertex;
3764
3765 tu_draw(cmd_buffer, &info);
3766 }
3767
3768 void
3769 tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
3770 uint32_t indexCount,
3771 uint32_t instanceCount,
3772 uint32_t firstIndex,
3773 int32_t vertexOffset,
3774 uint32_t firstInstance)
3775 {
3776 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3777 struct tu_draw_info info = {};
3778
3779 info.indexed = true;
3780 info.count = indexCount;
3781 info.instance_count = instanceCount;
3782 info.first_index = firstIndex;
3783 info.vertex_offset = vertexOffset;
3784 info.first_instance = firstInstance;
3785
3786 tu_draw(cmd_buffer, &info);
3787 }
3788
3789 void
3790 tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
3791 VkBuffer _buffer,
3792 VkDeviceSize offset,
3793 uint32_t drawCount,
3794 uint32_t stride)
3795 {
3796 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3797 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
3798 struct tu_draw_info info = {};
3799
3800 info.count = drawCount;
3801 info.indirect = buffer;
3802 info.indirect_offset = offset;
3803 info.stride = stride;
3804
3805 tu_draw(cmd_buffer, &info);
3806 }
3807
3808 void
3809 tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
3810 VkBuffer _buffer,
3811 VkDeviceSize offset,
3812 uint32_t drawCount,
3813 uint32_t stride)
3814 {
3815 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3816 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
3817 struct tu_draw_info info = {};
3818
3819 info.indexed = true;
3820 info.count = drawCount;
3821 info.indirect = buffer;
3822 info.indirect_offset = offset;
3823 info.stride = stride;
3824
3825 tu_draw(cmd_buffer, &info);
3826 }
3827
3828 struct tu_dispatch_info
3829 {
3830 /**
3831     * The dimensions of the grid (in block units) to dispatch.
3832 */
3833 uint32_t blocks[3];
3834
3835 /**
3836     * A starting offset for the grid, in block units.  Even when \a
3837     * unaligned is set, the offset must still be aligned.
3838 */
3839 uint32_t offsets[3];
3840 /**
3841 * Whether it's an unaligned compute dispatch.
3842 */
3843 bool unaligned;
3844
3845 /**
3846 * Indirect compute parameters resource.
3847 */
3848 struct tu_buffer *indirect;
3849 uint64_t indirect_offset;
3850 };
3851
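/* Emit the IR3 compute driver params (workgroup counts and local sizes) as
 * immediate constants, provided the shader's constlen reserves space for
 * them.
 */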
3852 static void
3853 tu_emit_compute_driver_params(struct tu_cs *cs, struct tu_pipeline *pipeline,
3854 const struct tu_dispatch_info *info)
3855 {
3856 gl_shader_stage type = MESA_SHADER_COMPUTE;
3857 const struct tu_program_descriptor_linkage *link =
3858 &pipeline->program.link[type];
3859 const struct ir3_const_state *const_state = &link->const_state;
3860 uint32_t offset = const_state->offsets.driver_param;
3861
3862 if (link->constlen <= offset)
3863 return;
3864
3865 if (!info->indirect) {
3866 uint32_t driver_params[IR3_DP_CS_COUNT] = {
3867 [IR3_DP_NUM_WORK_GROUPS_X] = info->blocks[0],
3868 [IR3_DP_NUM_WORK_GROUPS_Y] = info->blocks[1],
3869 [IR3_DP_NUM_WORK_GROUPS_Z] = info->blocks[2],
3870 [IR3_DP_LOCAL_GROUP_SIZE_X] = pipeline->compute.local_size[0],
3871 [IR3_DP_LOCAL_GROUP_SIZE_Y] = pipeline->compute.local_size[1],
3872 [IR3_DP_LOCAL_GROUP_SIZE_Z] = pipeline->compute.local_size[2],
3873 };
3874
3875 uint32_t num_consts = MIN2(const_state->num_driver_params,
3876 (link->constlen - offset) * 4);
3877       /* emit the driver params as immediate constants */
3878 tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_consts);
3879 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
3880 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
3881 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
3882 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
3883 CP_LOAD_STATE6_0_NUM_UNIT(num_consts / 4));
3884 tu_cs_emit(cs, 0);
3885 tu_cs_emit(cs, 0);
3886       for (uint32_t i = 0; i < num_consts; i++)
3887          tu_cs_emit(cs, driver_params[i]);
3889 } else {
3890 tu_finishme("Indirect driver params");
3891 }
3892 }
3893
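/* Common dispatch path: emit compute pipeline state, constants, driver
 * params, textures and IBOs, then the CP_EXEC_CS (or CP_EXEC_CS_INDIRECT)
 * packet itself.
 */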
3894 static void
3895 tu_dispatch(struct tu_cmd_buffer *cmd,
3896 const struct tu_dispatch_info *info)
3897 {
3898 struct tu_cs *cs = &cmd->cs;
3899 struct tu_pipeline *pipeline = cmd->state.compute_pipeline;
3900 struct tu_descriptor_state *descriptors_state =
3901 &cmd->descriptors[VK_PIPELINE_BIND_POINT_COMPUTE];
3902 VkResult result;
3903
3904 if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_PIPELINE)
3905 tu_cs_emit_ib(cs, &pipeline->program.state_ib);
3906
3907 struct tu_cs_entry ib;
3908
3909 ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE);
3910 if (ib.size)
3911 tu_cs_emit_ib(cs, &ib);
3912
3913 tu_emit_compute_driver_params(cs, pipeline, info);
3914
3915    bool needs_border = false;
3916 result = tu6_emit_textures(cmd, pipeline, descriptors_state,
3917 MESA_SHADER_COMPUTE, &ib, &needs_border, false);
3918 if (result != VK_SUCCESS) {
3919 cmd->record_result = result;
3920 return;
3921 }
3922
3923 if (ib.size)
3924 tu_cs_emit_ib(cs, &ib);
3925
3926 if (needs_border)
3927 tu_finishme("compute border color");
3928
3929 result = tu6_emit_ibo(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE, &ib);
3930 if (result != VK_SUCCESS) {
3931 cmd->record_result = result;
3932 return;
3933 }
3934
3935 if (ib.size)
3936 tu_cs_emit_ib(cs, &ib);
3937
3938 /* track BOs */
3939 if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
3940 unsigned i;
3941 for_each_bit(i, descriptors_state->valid) {
3942 struct tu_descriptor_set *set = descriptors_state->sets[i];
3943 for (unsigned j = 0; j < set->layout->buffer_count; ++j)
3944 if (set->descriptors[j]) {
3945 tu_bo_list_add(&cmd->bo_list, set->descriptors[j],
3946 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
3947 }
3948 }
3949 }
3950
3951 /* Compute shader state overwrites fragment shader state, so we flag the
3952 * graphics pipeline for re-emit.
3953 */
3954 cmd->state.dirty = TU_CMD_DIRTY_PIPELINE;
3955
3956 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
3957 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));
3958
3959 const uint32_t *local_size = pipeline->compute.local_size;
3960 const uint32_t *num_groups = info->blocks;
3961 tu_cs_emit_regs(cs,
3962 A6XX_HLSQ_CS_NDRANGE_0(.kerneldim = 3,
3963 .localsizex = local_size[0] - 1,
3964 .localsizey = local_size[1] - 1,
3965 .localsizez = local_size[2] - 1),
3966 A6XX_HLSQ_CS_NDRANGE_1(.globalsize_x = local_size[0] * num_groups[0]),
3967 A6XX_HLSQ_CS_NDRANGE_2(.globaloff_x = 0),
3968 A6XX_HLSQ_CS_NDRANGE_3(.globalsize_y = local_size[1] * num_groups[1]),
3969 A6XX_HLSQ_CS_NDRANGE_4(.globaloff_y = 0),
3970 A6XX_HLSQ_CS_NDRANGE_5(.globalsize_z = local_size[2] * num_groups[2]),
3971 A6XX_HLSQ_CS_NDRANGE_6(.globaloff_z = 0));
3972
3973 tu_cs_emit_regs(cs,
3974 A6XX_HLSQ_CS_KERNEL_GROUP_X(1),
3975 A6XX_HLSQ_CS_KERNEL_GROUP_Y(1),
3976 A6XX_HLSQ_CS_KERNEL_GROUP_Z(1));
3977
3978 if (info->indirect) {
3979 uint64_t iova = tu_buffer_iova(info->indirect) + info->indirect_offset;
3980
3981 tu_bo_list_add(&cmd->bo_list, info->indirect->bo,
3982 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
3983
3984 tu_cs_emit_pkt7(cs, CP_EXEC_CS_INDIRECT, 4);
3985 tu_cs_emit(cs, 0x00000000);
3986 tu_cs_emit_qw(cs, iova);
3987 tu_cs_emit(cs,
3988 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(local_size[0] - 1) |
3989 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(local_size[1] - 1) |
3990 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(local_size[2] - 1));
3991 } else {
3992 tu_cs_emit_pkt7(cs, CP_EXEC_CS, 4);
3993 tu_cs_emit(cs, 0x00000000);
3994 tu_cs_emit(cs, CP_EXEC_CS_1_NGROUPS_X(info->blocks[0]));
3995 tu_cs_emit(cs, CP_EXEC_CS_2_NGROUPS_Y(info->blocks[1]));
3996 tu_cs_emit(cs, CP_EXEC_CS_3_NGROUPS_Z(info->blocks[2]));
3997 }
3998
3999 tu_cs_emit_wfi(cs);
4000
4001 tu6_emit_cache_flush(cmd, cs);
4002 }
4003
4004 void
4005 tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
4006 uint32_t base_x,
4007 uint32_t base_y,
4008 uint32_t base_z,
4009 uint32_t x,
4010 uint32_t y,
4011 uint32_t z)
4012 {
4013 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
4014 struct tu_dispatch_info info = {};
4015
4016 info.blocks[0] = x;
4017 info.blocks[1] = y;
4018 info.blocks[2] = z;
4019
4020 info.offsets[0] = base_x;
4021 info.offsets[1] = base_y;
4022 info.offsets[2] = base_z;
4023 tu_dispatch(cmd_buffer, &info);
4024 }
4025
4026 void
4027 tu_CmdDispatch(VkCommandBuffer commandBuffer,
4028 uint32_t x,
4029 uint32_t y,
4030 uint32_t z)
4031 {
4032 tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
4033 }
4034
4035 void
4036 tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
4037 VkBuffer _buffer,
4038 VkDeviceSize offset)
4039 {
4040 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
4041 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
4042 struct tu_dispatch_info info = {};
4043
4044 info.indirect = buffer;
4045 info.indirect_offset = offset;
4046
4047 tu_dispatch(cmd_buffer, &info);
4048 }
4049
4050 void
4051 tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
4052 {
4053 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
4054
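   /* Flush the accumulated draw commands through either the sysmem
    * (direct) or the tiled GMEM rendering path.
    */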
4055 tu_cs_end(&cmd_buffer->draw_cs);
4056 tu_cs_end(&cmd_buffer->draw_epilogue_cs);
4057
4058 if (use_sysmem_rendering(cmd_buffer))
4059 tu_cmd_render_sysmem(cmd_buffer);
4060 else
4061 tu_cmd_render_tiles(cmd_buffer);
4062
4063 /* discard draw_cs and draw_epilogue_cs entries now that the tiles are
4064 rendered */
4065 tu_cs_discard_entries(&cmd_buffer->draw_cs);
4066 tu_cs_begin(&cmd_buffer->draw_cs);
4067 tu_cs_discard_entries(&cmd_buffer->draw_epilogue_cs);
4068 tu_cs_begin(&cmd_buffer->draw_epilogue_cs);
4069
4070 cmd_buffer->state.pass = NULL;
4071 cmd_buffer->state.subpass = NULL;
4072 cmd_buffer->state.framebuffer = NULL;
4073 }
4074
4075 void
4076 tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
4077 const VkSubpassEndInfoKHR *pSubpassEndInfo)
4078 {
4079 tu_CmdEndRenderPass(commandBuffer);
4080 }
4081
4082 struct tu_barrier_info
4083 {
4084 uint32_t eventCount;
4085 const VkEvent *pEvents;
4086 VkPipelineStageFlags srcStageMask;
4087 };
4088
4089 static void
4090 tu_barrier(struct tu_cmd_buffer *cmd_buffer,
4091 uint32_t memoryBarrierCount,
4092 const VkMemoryBarrier *pMemoryBarriers,
4093 uint32_t bufferMemoryBarrierCount,
4094 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
4095 uint32_t imageMemoryBarrierCount,
4096 const VkImageMemoryBarrier *pImageMemoryBarriers,
4097 const struct tu_barrier_info *info)
4098 {
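   /* TODO: not implemented yet.  Draws currently set cmd->wait_for_idle,
    * which likely hides the missing cache management here.
    */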
4099 }
4100
4101 void
4102 tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
4103 VkPipelineStageFlags srcStageMask,
4104 VkPipelineStageFlags destStageMask,
4105 VkBool32 byRegion,
4106 uint32_t memoryBarrierCount,
4107 const VkMemoryBarrier *pMemoryBarriers,
4108 uint32_t bufferMemoryBarrierCount,
4109 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
4110 uint32_t imageMemoryBarrierCount,
4111 const VkImageMemoryBarrier *pImageMemoryBarriers)
4112 {
4113 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
4114 struct tu_barrier_info info;
4115
4116 info.eventCount = 0;
4117 info.pEvents = NULL;
4118 info.srcStageMask = srcStageMask;
4119
4120 tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
4121 bufferMemoryBarrierCount, pBufferMemoryBarriers,
4122 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
4123 }
4124
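/* Write the given value into the event's BO with CP_MEM_WRITE; used to
 * implement vkCmdSetEvent (value 1) and vkCmdResetEvent (value 0).
 */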
4125 static void
4126 write_event(struct tu_cmd_buffer *cmd, struct tu_event *event, unsigned value)
4127 {
4128 struct tu_cs *cs = &cmd->cs;
4129
4130 tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_WRITE);
4131
4132    /* TODO: any flush required before/after? */
4133
4134 tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
4135 tu_cs_emit_qw(cs, event->bo.iova); /* ADDR_LO/HI */
4136 tu_cs_emit(cs, value);
4137 }
4138
4139 void
4140 tu_CmdSetEvent(VkCommandBuffer commandBuffer,
4141 VkEvent _event,
4142 VkPipelineStageFlags stageMask)
4143 {
4144 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
4145 TU_FROM_HANDLE(tu_event, event, _event);
4146
4147 write_event(cmd, event, 1);
4148 }
4149
4150 void
4151 tu_CmdResetEvent(VkCommandBuffer commandBuffer,
4152 VkEvent _event,
4153 VkPipelineStageFlags stageMask)
4154 {
4155 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
4156 TU_FROM_HANDLE(tu_event, event, _event);
4157
4158 write_event(cmd, event, 0);
4159 }
4160
4161 void
4162 tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
4163 uint32_t eventCount,
4164 const VkEvent *pEvents,
4165 VkPipelineStageFlags srcStageMask,
4166 VkPipelineStageFlags dstStageMask,
4167 uint32_t memoryBarrierCount,
4168 const VkMemoryBarrier *pMemoryBarriers,
4169 uint32_t bufferMemoryBarrierCount,
4170 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
4171 uint32_t imageMemoryBarrierCount,
4172 const VkImageMemoryBarrier *pImageMemoryBarriers)
4173 {
4174 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
4175 struct tu_cs *cs = &cmd->cs;
4176
4177 /* TODO: any flush required before/after? (CP_WAIT_FOR_ME?) */
4178
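   /* For each event, poll its BO until it holds the value written by
    * vkCmdSetEvent (1).
    */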
4179 for (uint32_t i = 0; i < eventCount; i++) {
4180 TU_FROM_HANDLE(tu_event, event, pEvents[i]);
4181
4182 tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_READ);
4183
4184 tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
4185 tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
4186 CP_WAIT_REG_MEM_0_POLL_MEMORY);
4187 tu_cs_emit_qw(cs, event->bo.iova); /* POLL_ADDR_LO/HI */
4188 tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(1));
4189 tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0u));
4190 tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(20));
4191 }
4192 }
4193
4194 void
4195 tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
4196 {
4197 /* No-op */
4198 }