turnip: Implement an empty function vkCmdDrawIndirectByteCountEXT
[mesa.git] / src / freedreno / vulkan / tu_cmd_buffer.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include "registers/adreno_pm4.xml.h"
31 #include "registers/adreno_common.xml.h"
32
33 #include "vk_format.h"
34
35 #include "tu_cs.h"
36 #include "tu_blit.h"
37
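/* CP-visible scratch register used as a flag for visibility-stream (VSC)
 * overflow handling: emit_vsc_overflow_test() leaves it set (bit 0 = 1) when
 * no overflow was detected, and the per-tile command streams test it with
 * CP_REG_TEST/CP_COND_REG_EXEC to decide whether the hw-binning fast path
 * can be used.
 */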
38 #define OVERFLOW_FLAG_REG REG_A6XX_CP_SCRATCH_REG(0)
39
40 void
41 tu_bo_list_init(struct tu_bo_list *list)
42 {
43 list->count = list->capacity = 0;
44 list->bo_infos = NULL;
45 }
46
47 void
48 tu_bo_list_destroy(struct tu_bo_list *list)
49 {
50 free(list->bo_infos);
51 }
52
53 void
54 tu_bo_list_reset(struct tu_bo_list *list)
55 {
56 list->count = 0;
57 }
58
59 /**
60 * \a flags consists of MSM_SUBMIT_BO_FLAGS.
61 */
62 static uint32_t
63 tu_bo_list_add_info(struct tu_bo_list *list,
64 const struct drm_msm_gem_submit_bo *bo_info)
65 {
66 assert(bo_info->handle != 0);
67
68 for (uint32_t i = 0; i < list->count; ++i) {
69 if (list->bo_infos[i].handle == bo_info->handle) {
70 assert(list->bo_infos[i].presumed == bo_info->presumed);
71 list->bo_infos[i].flags |= bo_info->flags;
72 return i;
73 }
74 }
75
76 /* grow list->bo_infos if needed */
77 if (list->count == list->capacity) {
78 uint32_t new_capacity = MAX2(2 * list->count, 16);
79 struct drm_msm_gem_submit_bo *new_bo_infos = realloc(
80 list->bo_infos, new_capacity * sizeof(struct drm_msm_gem_submit_bo));
81 if (!new_bo_infos)
82 return TU_BO_LIST_FAILED;
83 list->bo_infos = new_bo_infos;
84 list->capacity = new_capacity;
85 }
86
87 list->bo_infos[list->count] = *bo_info;
88 return list->count++;
89 }
90
91 uint32_t
92 tu_bo_list_add(struct tu_bo_list *list,
93 const struct tu_bo *bo,
94 uint32_t flags)
95 {
96 return tu_bo_list_add_info(list, &(struct drm_msm_gem_submit_bo) {
97 .flags = flags,
98 .handle = bo->gem_handle,
99 .presumed = bo->iova,
100 });
101 }
102
103 VkResult
104 tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other)
105 {
106 for (uint32_t i = 0; i < other->count; i++) {
107 if (tu_bo_list_add_info(list, other->bo_infos + i) == TU_BO_LIST_FAILED)
108 return VK_ERROR_OUT_OF_HOST_MEMORY;
109 }
110
111 return VK_SUCCESS;
112 }
113
114 static bool
115 is_linear_mipmapped(const struct tu_image_view *iview)
116 {
117 return iview->image->layout.tile_mode == TILE6_LINEAR &&
118 iview->base_mip != iview->image->level_count - 1;
119 }
120
121 static bool
122 force_sysmem(const struct tu_cmd_buffer *cmd,
123 const struct VkRect2D *render_area)
124 {
125 const struct tu_framebuffer *fb = cmd->state.framebuffer;
126 const struct tu_physical_device *device = cmd->device->physical_device;
127 bool has_linear_mipmapped_store = false;
128 const struct tu_render_pass *pass = cmd->state.pass;
129
130 /* Iterate over all the places we call tu6_emit_store_attachment() */
131 for (unsigned i = 0; i < pass->subpass_count; i++) {
132 const struct tu_subpass *subpass = &pass->subpasses[i];
133 if (subpass->resolve_attachments) {
134 for (unsigned i = 0; i < subpass->color_count; i++) {
135 uint32_t a = subpass->resolve_attachments[i].attachment;
136 if (a != VK_ATTACHMENT_UNUSED &&
137 cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
138 const struct tu_image_view *iview = fb->attachments[a].attachment;
139 if (is_linear_mipmapped(iview)) {
140 has_linear_mipmapped_store = true;
141 break;
142 }
143 }
144 }
145 }
146 }
147
148 for (unsigned i = 0; i < pass->attachment_count; i++) {
149 if (pass->attachments[i].gmem_offset >= 0 &&
150 cmd->state.pass->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
151 const struct tu_image_view *iview = fb->attachments[i].attachment;
152 if (is_linear_mipmapped(iview)) {
153 has_linear_mipmapped_store = true;
154 break;
155 }
156 }
157 }
158
159 /* Linear textures cannot have any padding between mipmap levels and their
160 * height isn't padded, while at the same time the GMEM->MEM resolve does
161 * not have per-pixel granularity, so if the image height isn't aligned to
162 * the resolve granularity and the render area is tall enough, we may wind
163 * up writing past the bottom of the image into the next miplevel or even
164 * past the end of the image. The last miplevel is not a concern, since the
165 * layout code should insert enough padding there for the overdraw to land
166 * in the padding. To work around the remaining cases, we force-enable
167 * sysmem rendering. */
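/* Illustrative example (numbers are hypothetical): with tile_align_h = 16 and
 * a 100-pixel-tall framebuffer whose render area reaches the bottom edge,
 * y2 = 100 rounds up to 112, so the resolve could overdraw 12 rows below the
 * image and we fall back to sysmem rendering.
 */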
168 const uint32_t y2 = render_area->offset.y + render_area->extent.height;
169 const uint32_t aligned_y2 = ALIGN_POT(y2, device->tile_align_h);
170
171 return has_linear_mipmapped_store && aligned_y2 > fb->height;
172 }
173
174 static void
175 tu_tiling_config_update_tile_layout(struct tu_tiling_config *tiling,
176 const struct tu_device *dev,
177 uint32_t pixels)
178 {
179 const uint32_t tile_align_w = dev->physical_device->tile_align_w;
180 const uint32_t tile_align_h = dev->physical_device->tile_align_h;
181 const uint32_t max_tile_width = 1024; /* A6xx */
182
183 /* note: don't offset the tiling config by render_area.offset, because the
184 * binning pass can't deal with it. This means we might end up with more
185 * tiles than necessary, but load/store/etc. are still scissored to the
186 * render_area.
187 */
188 tiling->tile0.offset = (VkOffset2D) {};
189
190 const uint32_t ra_width =
191 tiling->render_area.extent.width +
192 (tiling->render_area.offset.x - tiling->tile0.offset.x);
193 const uint32_t ra_height =
194 tiling->render_area.extent.height +
195 (tiling->render_area.offset.y - tiling->tile0.offset.y);
196
197 /* start from 1 tile */
198 tiling->tile_count = (VkExtent2D) {
199 .width = 1,
200 .height = 1,
201 };
202 tiling->tile0.extent = (VkExtent2D) {
203 .width = align(ra_width, tile_align_w),
204 .height = align(ra_height, tile_align_h),
205 };
206
207 if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN)) {
208 /* start with 2x2 tiles */
209 tiling->tile_count.width = 2;
210 tiling->tile_count.height = 2;
211 tiling->tile0.extent.width = align(DIV_ROUND_UP(ra_width, 2), tile_align_w);
212 tiling->tile0.extent.height = align(DIV_ROUND_UP(ra_height, 2), tile_align_h);
213 }
214
215 /* do not exceed max tile width */
216 while (tiling->tile0.extent.width > max_tile_width) {
217 tiling->tile_count.width++;
218 tiling->tile0.extent.width =
219 align(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
220 }
221
222 /* will force to sysmem, don't bother trying to have a valid tile config
223 * TODO: just skip all GMEM stuff when sysmem is forced?
224 */
225 if (!pixels)
226 return;
227
228 /* do not exceed gmem size */
229 while (tiling->tile0.extent.width * tiling->tile0.extent.height > pixels) {
230 if (tiling->tile0.extent.width > MAX2(tile_align_w, tiling->tile0.extent.height)) {
231 tiling->tile_count.width++;
232 tiling->tile0.extent.width =
233 align(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
234 } else {
235 /* if this assert fails then layout is impossible.. */
236 assert(tiling->tile0.extent.height > tile_align_h);
237 tiling->tile_count.height++;
238 tiling->tile0.extent.height =
239 align(DIV_ROUND_UP(ra_height, tiling->tile_count.height), tile_align_h);
240 }
241 }
242 }
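/* Illustrative example: for a 1920x1080 render area with 32x32 tile
 * alignment, the function above starts from a single 1920x1088 "tile",
 * splits it once horizontally to respect the 1024-pixel max tile width
 * (2 tiles of 960x1088), and then keeps splitting (width while it is the
 * larger dimension, height otherwise) until tile width * height fits in the
 * GMEM pixel budget passed in as 'pixels'.
 */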
243
244 static void
245 tu_tiling_config_update_pipe_layout(struct tu_tiling_config *tiling,
246 const struct tu_device *dev)
247 {
248 const uint32_t max_pipe_count = 32; /* A6xx */
249
250 /* start from 1 tile per pipe */
251 tiling->pipe0 = (VkExtent2D) {
252 .width = 1,
253 .height = 1,
254 };
255 tiling->pipe_count = tiling->tile_count;
256
257 while (tiling->pipe_count.width * tiling->pipe_count.height > max_pipe_count) {
258 if (tiling->pipe0.width < tiling->pipe0.height) {
259 tiling->pipe0.width += 1;
260 tiling->pipe_count.width =
261 DIV_ROUND_UP(tiling->tile_count.width, tiling->pipe0.width);
262 } else {
263 tiling->pipe0.height += 1;
264 tiling->pipe_count.height =
265 DIV_ROUND_UP(tiling->tile_count.height, tiling->pipe0.height);
266 }
267 }
268 }
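/* Each visibility-stream "pipe" covers pipe0.width x pipe0.height tiles; the
 * loop above grows that per-pipe footprint (alternating dimensions) until no
 * more than 32 pipes are needed to cover the whole tile grid.
 */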
269
270 static void
271 tu_tiling_config_update_pipes(struct tu_tiling_config *tiling,
272 const struct tu_device *dev)
273 {
274 const uint32_t max_pipe_count = 32; /* A6xx */
275 const uint32_t used_pipe_count =
276 tiling->pipe_count.width * tiling->pipe_count.height;
277 const VkExtent2D last_pipe = {
278 .width = (tiling->tile_count.width - 1) % tiling->pipe0.width + 1,
279 .height = (tiling->tile_count.height - 1) % tiling->pipe0.height + 1,
280 };
281
282 assert(used_pipe_count <= max_pipe_count);
283 assert(max_pipe_count <= ARRAY_SIZE(tiling->pipe_config));
284
285 for (uint32_t y = 0; y < tiling->pipe_count.height; y++) {
286 for (uint32_t x = 0; x < tiling->pipe_count.width; x++) {
287 const uint32_t pipe_x = tiling->pipe0.width * x;
288 const uint32_t pipe_y = tiling->pipe0.height * y;
289 const uint32_t pipe_w = (x == tiling->pipe_count.width - 1)
290 ? last_pipe.width
291 : tiling->pipe0.width;
292 const uint32_t pipe_h = (y == tiling->pipe_count.height - 1)
293 ? last_pipe.height
294 : tiling->pipe0.height;
295 const uint32_t n = tiling->pipe_count.width * y + x;
296
297 tiling->pipe_config[n] = A6XX_VSC_PIPE_CONFIG_REG_X(pipe_x) |
298 A6XX_VSC_PIPE_CONFIG_REG_Y(pipe_y) |
299 A6XX_VSC_PIPE_CONFIG_REG_W(pipe_w) |
300 A6XX_VSC_PIPE_CONFIG_REG_H(pipe_h);
301 tiling->pipe_sizes[n] = CP_SET_BIN_DATA5_0_VSC_SIZE(pipe_w * pipe_h);
302 }
303 }
304
305 memset(tiling->pipe_config + used_pipe_count, 0,
306 sizeof(uint32_t) * (max_pipe_count - used_pipe_count));
307 }
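/* Note: each VSC_PIPE_CONFIG_REG entry packs the pipe's position and size in
 * units of tiles, with the right/bottom edge pipes possibly covering fewer
 * tiles; entries beyond used_pipe_count are zeroed.
 */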
308
309 static void
310 tu_tiling_config_get_tile(const struct tu_tiling_config *tiling,
311 const struct tu_device *dev,
312 uint32_t tx,
313 uint32_t ty,
314 struct tu_tile *tile)
315 {
316 /* find the pipe and the slot for tile (tx, ty) */
317 const uint32_t px = tx / tiling->pipe0.width;
318 const uint32_t py = ty / tiling->pipe0.height;
319 const uint32_t sx = tx - tiling->pipe0.width * px;
320 const uint32_t sy = ty - tiling->pipe0.height * py;
321 /* last pipe has different width */
322 const uint32_t pipe_width =
323 MIN2(tiling->pipe0.width,
324 tiling->tile_count.width - px * tiling->pipe0.width);
325
326 assert(tx < tiling->tile_count.width && ty < tiling->tile_count.height);
327 assert(px < tiling->pipe_count.width && py < tiling->pipe_count.height);
328 assert(sx < tiling->pipe0.width && sy < tiling->pipe0.height);
329
330 /* convert to 1D indices */
331 tile->pipe = tiling->pipe_count.width * py + px;
332 tile->slot = pipe_width * sy + sx;
333
334 /* get the blit area for the tile */
335 tile->begin = (VkOffset2D) {
336 .x = tiling->tile0.offset.x + tiling->tile0.extent.width * tx,
337 .y = tiling->tile0.offset.y + tiling->tile0.extent.height * ty,
338 };
339 tile->end.x =
340 (tx == tiling->tile_count.width - 1)
341 ? tiling->render_area.offset.x + tiling->render_area.extent.width
342 : tile->begin.x + tiling->tile0.extent.width;
343 tile->end.y =
344 (ty == tiling->tile_count.height - 1)
345 ? tiling->render_area.offset.y + tiling->render_area.extent.height
346 : tile->begin.y + tiling->tile0.extent.height;
347 }
348
349 enum a3xx_msaa_samples
350 tu_msaa_samples(uint32_t samples)
351 {
352 switch (samples) {
353 case 1:
354 return MSAA_ONE;
355 case 2:
356 return MSAA_TWO;
357 case 4:
358 return MSAA_FOUR;
359 case 8:
360 return MSAA_EIGHT;
361 default:
362 assert(!"invalid sample count");
363 return MSAA_ONE;
364 }
365 }
366
367 static enum a4xx_index_size
368 tu6_index_size(VkIndexType type)
369 {
370 switch (type) {
371 case VK_INDEX_TYPE_UINT16:
372 return INDEX4_SIZE_16_BIT;
373 case VK_INDEX_TYPE_UINT32:
374 return INDEX4_SIZE_32_BIT;
375 default:
376 unreachable("invalid VkIndexType");
377 return INDEX4_SIZE_8_BIT;
378 }
379 }
380
381 unsigned
382 tu6_emit_event_write(struct tu_cmd_buffer *cmd,
383 struct tu_cs *cs,
384 enum vgt_event_type event,
385 bool need_seqno)
386 {
387 unsigned seqno = 0;
388
389 tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
390 tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
391 if (need_seqno) {
392 tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
393 seqno = ++cmd->scratch_seqno;
394 tu_cs_emit(cs, seqno);
395 }
396
397 return seqno;
398 }
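/* When need_seqno is set, the event additionally writes an incrementing
 * sequence number to the command buffer's scratch BO, which later
 * CP_WAIT_REG_MEM/CP_WAIT_MEM_GTE packets (see tu6_cache_flush()) can poll to
 * know the event has completed.
 */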
399
400 static void
401 tu6_emit_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
402 {
403 tu6_emit_event_write(cmd, cs, 0x31, false);
404 }
405
406 static void
407 tu6_emit_lrz_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
408 {
409 tu6_emit_event_write(cmd, cs, LRZ_FLUSH, false);
410 }
411
412 static void
413 tu6_emit_wfi(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
414 {
415 if (cmd->wait_for_idle) {
416 tu_cs_emit_wfi(cs);
417 cmd->wait_for_idle = false;
418 }
419 }
420
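/* Helper meant to be expanded inside a register-struct initializer: it fills
 * in the .pitch/.array_pitch fields for a view's UBWC flag buffer.
 */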
421 #define tu_image_view_ubwc_pitches(iview) \
422 .pitch = tu_image_ubwc_pitch(iview->image, iview->base_mip), \
423 .array_pitch = tu_image_ubwc_size(iview->image, iview->base_mip) >> 2
424
425 static void
426 tu6_emit_zs(struct tu_cmd_buffer *cmd,
427 const struct tu_subpass *subpass,
428 struct tu_cs *cs)
429 {
430 const struct tu_framebuffer *fb = cmd->state.framebuffer;
431
432 const uint32_t a = subpass->depth_stencil_attachment.attachment;
433 if (a == VK_ATTACHMENT_UNUSED) {
434 tu_cs_emit_regs(cs,
435 A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE),
436 A6XX_RB_DEPTH_BUFFER_PITCH(0),
437 A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(0),
438 A6XX_RB_DEPTH_BUFFER_BASE(0),
439 A6XX_RB_DEPTH_BUFFER_BASE_GMEM(0));
440
441 tu_cs_emit_regs(cs,
442 A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE));
443
444 tu_cs_emit_regs(cs,
445 A6XX_GRAS_LRZ_BUFFER_BASE(0),
446 A6XX_GRAS_LRZ_BUFFER_PITCH(0),
447 A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));
448
449 tu_cs_emit_regs(cs, A6XX_RB_STENCIL_INFO(0));
450
451 return;
452 }
453
454 const struct tu_image_view *iview = fb->attachments[a].attachment;
455 enum a6xx_depth_format fmt = tu6_pipe2depth(iview->vk_format);
456
457 tu_cs_emit_regs(cs,
458 A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt),
459 A6XX_RB_DEPTH_BUFFER_PITCH(tu_image_stride(iview->image, iview->base_mip)),
460 A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(iview->image->layout.layer_size),
461 A6XX_RB_DEPTH_BUFFER_BASE(tu_image_view_base_ref(iview)),
462 A6XX_RB_DEPTH_BUFFER_BASE_GMEM(cmd->state.pass->attachments[a].gmem_offset));
463
464 tu_cs_emit_regs(cs,
465 A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = fmt));
466
467 tu_cs_emit_regs(cs,
468 A6XX_RB_DEPTH_FLAG_BUFFER_BASE(tu_image_view_ubwc_base_ref(iview)),
469 A6XX_RB_DEPTH_FLAG_BUFFER_PITCH(tu_image_view_ubwc_pitches(iview)));
470
471 tu_cs_emit_regs(cs,
472 A6XX_GRAS_LRZ_BUFFER_BASE(0),
473 A6XX_GRAS_LRZ_BUFFER_PITCH(0),
474 A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));
475
476 tu_cs_emit_regs(cs,
477 A6XX_RB_STENCIL_INFO(0));
478
479 /* enable zs? */
480 }
481
482 static void
483 tu6_emit_mrt(struct tu_cmd_buffer *cmd,
484 const struct tu_subpass *subpass,
485 struct tu_cs *cs)
486 {
487 const struct tu_framebuffer *fb = cmd->state.framebuffer;
488 unsigned char mrt_comp[MAX_RTS] = { 0 };
489 unsigned srgb_cntl = 0;
490
491 for (uint32_t i = 0; i < subpass->color_count; ++i) {
492 uint32_t a = subpass->color_attachments[i].attachment;
493 if (a == VK_ATTACHMENT_UNUSED)
494 continue;
495
496 const struct tu_image_view *iview = fb->attachments[a].attachment;
497 const enum a6xx_tile_mode tile_mode =
498 tu6_get_image_tile_mode(iview->image, iview->base_mip);
499
500 mrt_comp[i] = 0xf;
501
502 if (vk_format_is_srgb(iview->vk_format))
503 srgb_cntl |= (1 << i);
504
505 const struct tu_native_format format =
506 tu6_format_color(iview->vk_format, iview->image->layout.tile_mode);
507
508 tu_cs_emit_regs(cs,
509 A6XX_RB_MRT_BUF_INFO(i,
510 .color_tile_mode = tile_mode,
511 .color_format = format.fmt,
512 .color_swap = format.swap),
513 A6XX_RB_MRT_PITCH(i, tu_image_stride(iview->image, iview->base_mip)),
514 A6XX_RB_MRT_ARRAY_PITCH(i, iview->image->layout.layer_size),
515 A6XX_RB_MRT_BASE(i, tu_image_view_base_ref(iview)),
516 A6XX_RB_MRT_BASE_GMEM(i, cmd->state.pass->attachments[a].gmem_offset));
517
518 tu_cs_emit_regs(cs,
519 A6XX_SP_FS_MRT_REG(i,
520 .color_format = format.fmt,
521 .color_sint = vk_format_is_sint(iview->vk_format),
522 .color_uint = vk_format_is_uint(iview->vk_format)));
523
524 tu_cs_emit_regs(cs,
525 A6XX_RB_MRT_FLAG_BUFFER_ADDR(i, tu_image_view_ubwc_base_ref(iview)),
526 A6XX_RB_MRT_FLAG_BUFFER_PITCH(i, tu_image_view_ubwc_pitches(iview)));
527 }
528
529 tu_cs_emit_regs(cs,
530 A6XX_RB_SRGB_CNTL(.dword = srgb_cntl));
531
532 tu_cs_emit_regs(cs,
533 A6XX_SP_SRGB_CNTL(.dword = srgb_cntl));
534
535 tu_cs_emit_regs(cs,
536 A6XX_RB_RENDER_COMPONENTS(
537 .rt0 = mrt_comp[0],
538 .rt1 = mrt_comp[1],
539 .rt2 = mrt_comp[2],
540 .rt3 = mrt_comp[3],
541 .rt4 = mrt_comp[4],
542 .rt5 = mrt_comp[5],
543 .rt6 = mrt_comp[6],
544 .rt7 = mrt_comp[7]));
545
546 tu_cs_emit_regs(cs,
547 A6XX_SP_FS_RENDER_COMPONENTS(
548 .rt0 = mrt_comp[0],
549 .rt1 = mrt_comp[1],
550 .rt2 = mrt_comp[2],
551 .rt3 = mrt_comp[3],
552 .rt4 = mrt_comp[4],
553 .rt5 = mrt_comp[5],
554 .rt6 = mrt_comp[6],
555 .rt7 = mrt_comp[7]));
556 }
557
558 static void
559 tu6_emit_msaa(struct tu_cmd_buffer *cmd,
560 const struct tu_subpass *subpass,
561 struct tu_cs *cs)
562 {
563 const enum a3xx_msaa_samples samples = tu_msaa_samples(subpass->samples);
564 bool msaa_disable = samples == MSAA_ONE;
565
566 tu_cs_emit_regs(cs,
567 A6XX_SP_TP_RAS_MSAA_CNTL(samples),
568 A6XX_SP_TP_DEST_MSAA_CNTL(.samples = samples,
569 .msaa_disable = msaa_disable));
570
571 tu_cs_emit_regs(cs,
572 A6XX_GRAS_RAS_MSAA_CNTL(samples),
573 A6XX_GRAS_DEST_MSAA_CNTL(.samples = samples,
574 .msaa_disable = msaa_disable));
575
576 tu_cs_emit_regs(cs,
577 A6XX_RB_RAS_MSAA_CNTL(samples),
578 A6XX_RB_DEST_MSAA_CNTL(.samples = samples,
579 .msaa_disable = msaa_disable));
580
581 tu_cs_emit_regs(cs,
582 A6XX_RB_MSAA_CNTL(samples));
583 }
584
585 static void
586 tu6_emit_bin_size(struct tu_cs *cs,
587 uint32_t bin_w, uint32_t bin_h, uint32_t flags)
588 {
589 tu_cs_emit_regs(cs,
590 A6XX_GRAS_BIN_CONTROL(.binw = bin_w,
591 .binh = bin_h,
592 .dword = flags));
593
594 tu_cs_emit_regs(cs,
595 A6XX_RB_BIN_CONTROL(.binw = bin_w,
596 .binh = bin_h,
597 .dword = flags));
598
599 /* no flag for RB_BIN_CONTROL2... */
600 tu_cs_emit_regs(cs,
601 A6XX_RB_BIN_CONTROL2(.binw = bin_w,
602 .binh = bin_h));
603 }
604
605 static void
606 tu6_emit_render_cntl(struct tu_cmd_buffer *cmd,
607 const struct tu_subpass *subpass,
608 struct tu_cs *cs,
609 bool binning)
610 {
611 const struct tu_framebuffer *fb = cmd->state.framebuffer;
612 uint32_t cntl = 0;
613 cntl |= A6XX_RB_RENDER_CNTL_UNK4;
614 if (binning) {
615 cntl |= A6XX_RB_RENDER_CNTL_BINNING;
616 } else {
617 uint32_t mrts_ubwc_enable = 0;
618 for (uint32_t i = 0; i < subpass->color_count; ++i) {
619 uint32_t a = subpass->color_attachments[i].attachment;
620 if (a == VK_ATTACHMENT_UNUSED)
621 continue;
622
623 const struct tu_image_view *iview = fb->attachments[a].attachment;
624 if (iview->image->layout.ubwc_layer_size != 0)
625 mrts_ubwc_enable |= 1 << i;
626 }
627
628 cntl |= A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable);
629
630 const uint32_t a = subpass->depth_stencil_attachment.attachment;
631 if (a != VK_ATTACHMENT_UNUSED) {
632 const struct tu_image_view *iview = fb->attachments[a].attachment;
633 if (iview->image->layout.ubwc_layer_size != 0)
634 cntl |= A6XX_RB_RENDER_CNTL_FLAG_DEPTH;
635 }
636
637 /* In the !binning case, we need to set RB_RENDER_CNTL in the draw_cs
638 * in order to set it correctly for the different subpasses. However,
639 * that means the packets we're emitting also happen during binning. So
640 * we need to guard the write on !BINNING at CP execution time.
641 */
642 tu_cs_reserve(cs, 3 + 4);
643 tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
644 tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
645 CP_COND_REG_EXEC_0_GMEM | CP_COND_REG_EXEC_0_SYSMEM);
646 tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(4));
647 }
648
649 tu_cs_emit_pkt7(cs, CP_REG_WRITE, 3);
650 tu_cs_emit(cs, CP_REG_WRITE_0_TRACKER(TRACK_RENDER_CNTL));
651 tu_cs_emit(cs, REG_A6XX_RB_RENDER_CNTL);
652 tu_cs_emit(cs, cntl);
653 }
654
655 static void
656 tu6_emit_blit_scissor(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool align)
657 {
658 const VkRect2D *render_area = &cmd->state.tiling_config.render_area;
659 uint32_t x1 = render_area->offset.x;
660 uint32_t y1 = render_area->offset.y;
661 uint32_t x2 = x1 + render_area->extent.width - 1;
662 uint32_t y2 = y1 + render_area->extent.height - 1;
663
664 /* TODO: alignment requirement seems to be less than tile_align_w/h */
665 if (align) {
666 x1 = x1 & ~(cmd->device->physical_device->tile_align_w - 1);
667 y1 = y1 & ~(cmd->device->physical_device->tile_align_h - 1);
668 x2 = ALIGN_POT(x2 + 1, cmd->device->physical_device->tile_align_w) - 1;
669 y2 = ALIGN_POT(y2 + 1, cmd->device->physical_device->tile_align_h) - 1;
670 }
671
672 tu_cs_emit_regs(cs,
673 A6XX_RB_BLIT_SCISSOR_TL(.x = x1, .y = y1),
674 A6XX_RB_BLIT_SCISSOR_BR(.x = x2, .y = y2));
675 }
676
677 static void
678 tu6_emit_blit_info(struct tu_cmd_buffer *cmd,
679 struct tu_cs *cs,
680 const struct tu_image_view *iview,
681 uint32_t gmem_offset,
682 bool resolve)
683 {
684 tu_cs_emit_regs(cs,
685 A6XX_RB_BLIT_INFO(.unk0 = !resolve, .gmem = !resolve));
686
687 const struct tu_native_format format =
688 tu6_format_color(iview->vk_format, iview->image->layout.tile_mode);
689
690 enum a6xx_tile_mode tile_mode =
691 tu6_get_image_tile_mode(iview->image, iview->base_mip);
692 tu_cs_emit_regs(cs,
693 A6XX_RB_BLIT_DST_INFO(
694 .tile_mode = tile_mode,
695 .samples = tu_msaa_samples(iview->image->samples),
696 .color_format = format.fmt,
697 .color_swap = format.swap,
698 .flags = iview->image->layout.ubwc_layer_size != 0),
699 A6XX_RB_BLIT_DST(tu_image_view_base_ref(iview)),
700 A6XX_RB_BLIT_DST_PITCH(tu_image_stride(iview->image, iview->base_mip)),
701 A6XX_RB_BLIT_DST_ARRAY_PITCH(iview->image->layout.layer_size));
702
703 if (iview->image->layout.ubwc_layer_size) {
704 tu_cs_emit_regs(cs,
705 A6XX_RB_BLIT_FLAG_DST(tu_image_view_ubwc_base_ref(iview)),
706 A6XX_RB_BLIT_FLAG_DST_PITCH(tu_image_view_ubwc_pitches(iview)));
707 }
708
709 tu_cs_emit_regs(cs,
710 A6XX_RB_BLIT_BASE_GMEM(gmem_offset));
711 }
712
713 static void
714 tu6_emit_blit(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
715 {
716 tu6_emit_event_write(cmd, cs, BLIT, false);
717 }
718
719 static void
720 tu6_emit_window_scissor(struct tu_cmd_buffer *cmd,
721 struct tu_cs *cs,
722 uint32_t x1,
723 uint32_t y1,
724 uint32_t x2,
725 uint32_t y2)
726 {
727 tu_cs_emit_regs(cs,
728 A6XX_GRAS_SC_WINDOW_SCISSOR_TL(.x = x1, .y = y1),
729 A6XX_GRAS_SC_WINDOW_SCISSOR_BR(.x = x2, .y = y2));
730
731 tu_cs_emit_regs(cs,
732 A6XX_GRAS_RESOLVE_CNTL_1(.x = x1, .y = y1),
733 A6XX_GRAS_RESOLVE_CNTL_2(.x = x2, .y = y2));
734 }
735
736 static void
737 tu6_emit_window_offset(struct tu_cmd_buffer *cmd,
738 struct tu_cs *cs,
739 uint32_t x1,
740 uint32_t y1)
741 {
742 tu_cs_emit_regs(cs,
743 A6XX_RB_WINDOW_OFFSET(.x = x1, .y = y1));
744
745 tu_cs_emit_regs(cs,
746 A6XX_RB_WINDOW_OFFSET2(.x = x1, .y = y1));
747
748 tu_cs_emit_regs(cs,
749 A6XX_SP_WINDOW_OFFSET(.x = x1, .y = y1));
750
751 tu_cs_emit_regs(cs,
752 A6XX_SP_TP_WINDOW_OFFSET(.x = x1, .y = y1));
753 }
754
755 static bool
756 use_hw_binning(struct tu_cmd_buffer *cmd)
757 {
758 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
759
760 if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_NOBIN))
761 return false;
762
763 if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN))
764 return true;
765
766 return (tiling->tile_count.width * tiling->tile_count.height) > 2;
767 }
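/* Hardware binning is skipped for very small renders (2 tiles or fewer),
 * where the binning pass would cost more than it saves; TU_DEBUG_NOBIN and
 * TU_DEBUG_FORCEBIN override the heuristic in either direction.
 */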
768
769 static bool
770 use_sysmem_rendering(struct tu_cmd_buffer *cmd)
771 {
772 if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_SYSMEM))
773 return true;
774
775 /* can't fit attachments into gmem */
776 if (!cmd->state.pass->gmem_pixels)
777 return true;
778
779 return cmd->state.tiling_config.force_sysmem;
780 }
781
782 static void
783 tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
784 struct tu_cs *cs,
785 const struct tu_tile *tile)
786 {
787 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
788 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_YIELD));
789
790 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
791 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));
792
793 const uint32_t x1 = tile->begin.x;
794 const uint32_t y1 = tile->begin.y;
795 const uint32_t x2 = tile->end.x - 1;
796 const uint32_t y2 = tile->end.y - 1;
797 tu6_emit_window_scissor(cmd, cs, x1, y1, x2, y2);
798 tu6_emit_window_offset(cmd, cs, x1, y1);
799
800 tu_cs_emit_regs(cs,
801 A6XX_VPC_SO_OVERRIDE(.so_disable = false));
802
803 if (use_hw_binning(cmd)) {
804 tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
805
806 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
807 tu_cs_emit(cs, 0x0);
808
809 tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
810 tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
811 A6XX_CP_REG_TEST_0_BIT(0) |
812 A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
813
814 tu_cs_reserve(cs, 3 + 11);
815 tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
816 tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
817 tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(11));
818
819 /* if (no overflow) */ {
820 tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5, 7);
821 tu_cs_emit(cs, cmd->state.tiling_config.pipe_sizes[tile->pipe] |
822 CP_SET_BIN_DATA5_0_VSC_N(tile->slot));
823 tu_cs_emit_qw(cs, cmd->vsc_data.iova + tile->pipe * cmd->vsc_data_pitch);
824 tu_cs_emit_qw(cs, cmd->vsc_data.iova + (tile->pipe * 4) + (32 * cmd->vsc_data_pitch));
825 tu_cs_emit_qw(cs, cmd->vsc_data2.iova + (tile->pipe * cmd->vsc_data2_pitch));
826
827 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
828 tu_cs_emit(cs, 0x0);
829
830 /* use a NOP packet to skip over the 'else' side: */
831 tu_cs_emit_pkt7(cs, CP_NOP, 2);
832 } /* else */ {
833 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
834 tu_cs_emit(cs, 0x1);
835 }
836
837 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
838 tu_cs_emit(cs, 0x0);
839
840 tu_cs_emit_regs(cs,
841 A6XX_RB_UNKNOWN_8804(0));
842
843 tu_cs_emit_regs(cs,
844 A6XX_SP_TP_UNKNOWN_B304(0));
845
846 tu_cs_emit_regs(cs,
847 A6XX_GRAS_UNKNOWN_80A4(0));
848 } else {
849 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
850 tu_cs_emit(cs, 0x1);
851
852 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
853 tu_cs_emit(cs, 0x0);
854 }
855 }
856
857 static void
858 tu6_emit_load_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs, uint32_t a)
859 {
860 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
861 const struct tu_framebuffer *fb = cmd->state.framebuffer;
862 const struct tu_image_view *iview = fb->attachments[a].attachment;
863 const struct tu_render_pass_attachment *attachment =
864 &cmd->state.pass->attachments[a];
865
866 if (attachment->gmem_offset < 0)
867 return;
868
869 const uint32_t x1 = tiling->render_area.offset.x;
870 const uint32_t y1 = tiling->render_area.offset.y;
871 const uint32_t x2 = x1 + tiling->render_area.extent.width;
872 const uint32_t y2 = y1 + tiling->render_area.extent.height;
873 const uint32_t tile_x2 =
874 tiling->tile0.offset.x + tiling->tile0.extent.width * tiling->tile_count.width;
875 const uint32_t tile_y2 =
876 tiling->tile0.offset.y + tiling->tile0.extent.height * tiling->tile_count.height;
877 bool need_load =
878 x1 != tiling->tile0.offset.x || x2 != MIN2(fb->width, tile_x2) ||
879 y1 != tiling->tile0.offset.y || y2 != MIN2(fb->height, tile_y2);
880
881 if (need_load)
882 tu_finishme("improve handling of unaligned render area");
883
884 if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_LOAD)
885 need_load = true;
886
887 if (vk_format_has_stencil(iview->vk_format) &&
888 attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_LOAD)
889 need_load = true;
890
891 if (need_load) {
892 tu6_emit_blit_info(cmd, cs, iview, attachment->gmem_offset, false);
893 tu6_emit_blit(cmd, cs);
894 }
895 }
896
897 static void
898 tu6_emit_clear_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
899 uint32_t a,
900 const VkRenderPassBeginInfo *info)
901 {
902 const struct tu_framebuffer *fb = cmd->state.framebuffer;
903 const struct tu_image_view *iview = fb->attachments[a].attachment;
904 const struct tu_render_pass_attachment *attachment =
905 &cmd->state.pass->attachments[a];
906 unsigned clear_mask = 0;
907
908 /* note: this means it isn't used by any subpass and shouldn't be cleared anyway */
909 if (attachment->gmem_offset < 0)
910 return;
911
912 if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
913 clear_mask = 0xf;
914
915 if (vk_format_has_stencil(iview->vk_format)) {
916 clear_mask &= 0x1;
917 if (attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
918 clear_mask |= 0x2;
919 }
920 if (!clear_mask)
921 return;
922
923 tu_clear_gmem_attachment(cmd, cs, a, clear_mask,
924 &info->pClearValues[a]);
925 }
926
927 static void
928 tu6_emit_predicated_blit(struct tu_cmd_buffer *cmd,
929 struct tu_cs *cs,
930 uint32_t a,
931 uint32_t gmem_a,
932 bool resolve)
933 {
934 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);
935
936 tu6_emit_blit_info(cmd, cs,
937 cmd->state.framebuffer->attachments[a].attachment,
938 cmd->state.pass->attachments[gmem_a].gmem_offset, resolve);
939 tu6_emit_blit(cmd, cs);
940
941 tu_cond_exec_end(cs);
942 }
943
944 static void
945 tu6_emit_sysmem_resolve(struct tu_cmd_buffer *cmd,
946 struct tu_cs *cs,
947 uint32_t a,
948 uint32_t gmem_a)
949 {
950 const struct tu_framebuffer *fb = cmd->state.framebuffer;
951 const struct tu_image_view *dst = fb->attachments[a].attachment;
952 const struct tu_image_view *src = fb->attachments[gmem_a].attachment;
953
954 tu_blit(cmd, cs, &(struct tu_blit) {
955 .dst = sysmem_attachment_surf(dst, dst->base_layer,
956 &cmd->state.tiling_config.render_area),
957 .src = sysmem_attachment_surf(src, src->base_layer,
958 &cmd->state.tiling_config.render_area),
959 .layers = fb->layers,
960 });
961 }
962
963
964 /* Emit a MSAA resolve operation, with both gmem and sysmem paths. */
965 static void tu6_emit_resolve(struct tu_cmd_buffer *cmd,
966 struct tu_cs *cs,
967 uint32_t a,
968 uint32_t gmem_a)
969 {
970 if (cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE)
971 return;
972
973 tu6_emit_predicated_blit(cmd, cs, a, gmem_a, true);
974
975 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
976 tu6_emit_sysmem_resolve(cmd, cs, a, gmem_a);
977 tu_cond_exec_end(cs);
978 }
979
980 static void
981 tu6_emit_store_attachment(struct tu_cmd_buffer *cmd,
982 struct tu_cs *cs,
983 uint32_t a,
984 uint32_t gmem_a)
985 {
986 if (cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE)
987 return;
988
989 tu6_emit_blit_info(cmd, cs,
990 cmd->state.framebuffer->attachments[a].attachment,
991 cmd->state.pass->attachments[gmem_a].gmem_offset, true);
992 tu6_emit_blit(cmd, cs);
993 }
994
995 static void
996 tu6_emit_tile_store(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
997 {
998 const struct tu_render_pass *pass = cmd->state.pass;
999 const struct tu_subpass *subpass = &pass->subpasses[pass->subpass_count-1];
1000
1001 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
1002 tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
1003 CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
1004 CP_SET_DRAW_STATE__0_GROUP_ID(0));
1005 tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
1006 tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));
1007
1008 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1009 tu_cs_emit(cs, 0x0);
1010
1011 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
1012 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE));
1013
1014 tu6_emit_blit_scissor(cmd, cs, true);
1015
1016 for (uint32_t a = 0; a < pass->attachment_count; ++a) {
1017 if (pass->attachments[a].gmem_offset >= 0)
1018 tu6_emit_store_attachment(cmd, cs, a, a);
1019 }
1020
1021 if (subpass->resolve_attachments) {
1022 for (unsigned i = 0; i < subpass->color_count; i++) {
1023 uint32_t a = subpass->resolve_attachments[i].attachment;
1024 if (a != VK_ATTACHMENT_UNUSED)
1025 tu6_emit_store_attachment(cmd, cs, a,
1026 subpass->color_attachments[i].attachment);
1027 }
1028 }
1029 }
1030
1031 static void
1032 tu6_emit_restart_index(struct tu_cs *cs, uint32_t restart_index)
1033 {
1034 tu_cs_emit_regs(cs,
1035 A6XX_PC_RESTART_INDEX(restart_index));
1036 }
1037
1038 static void
1039 tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1040 {
1041 tu6_emit_cache_flush(cmd, cs);
1042
1043 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 0xfffff);
1044
1045 tu_cs_emit_write_reg(cs, REG_A6XX_RB_CCU_CNTL, 0x10000000);
1046 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
1047 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
1048 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
1049 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE0F, 0x3f);
1050 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B605, 0x44);
1051 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B600, 0x100000);
1052 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE00, 0x80);
1053 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE01, 0);
1054
1055 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9600, 0);
1056 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8600, 0x880);
1057 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE04, 0);
1058 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE03, 0x00000410);
1059 tu_cs_emit_write_reg(cs, REG_A6XX_SP_IBO_COUNT, 0);
1060 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B182, 0);
1061 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BB11, 0);
1062 tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
1063 tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
1064 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
1065 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A982, 0);
1066 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A9A8, 0);
1067 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AB00, 0x5);
1068 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_GS_SIV_CNTL, 0x0000ffff);
1069
1070 tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
1071 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
1072 tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x1f);
1073
1074 tu_cs_emit_write_reg(cs, REG_A6XX_RB_SRGB_CNTL, 0);
1075
1076 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8110, 0);
1077
1078 tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL0, 0x401);
1079 tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL1, 0);
1080 tu_cs_emit_write_reg(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 0);
1081 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8818, 0);
1082 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8819, 0);
1083 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881A, 0);
1084 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881B, 0);
1085 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881C, 0);
1086 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881D, 0);
1087 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881E, 0);
1088 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_88F0, 0);
1089
1090 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9101, 0xffff00);
1091 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9107, 0);
1092
1093 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9236, 1);
1094 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);
1095
1096 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_SO_OVERRIDE,
1097 A6XX_VPC_SO_OVERRIDE_SO_DISABLE);
1098
1099 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9801, 0);
1100 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9806, 0);
1101 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9980, 0);
1102 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9990, 0);
1103
1104 tu_cs_emit_write_reg(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 0);
1105 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9B07, 0);
1106
1107 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A81B, 0);
1108
1109 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B183, 0);
1110
1111 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8099, 0);
1112 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_809B, 0);
1113 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A0, 2);
1114 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80AF, 0);
1115 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
1116 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9211, 0);
1117 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9602, 0);
1118 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9981, 0x3);
1119 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9E72, 0);
1120 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9108, 0x3);
1121 tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B304, 0);
1122 tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B309, 0x000000a2);
1123 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8804, 0);
1124 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A4, 0);
1125 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A5, 0);
1126 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A6, 0);
1127 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8805, 0);
1128 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8806, 0);
1129 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8878, 0);
1130 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8879, 0);
1131 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);
1132
1133 tu_cs_emit_write_reg(cs, REG_A6XX_VFD_MODE_CNTL, 0x00000000);
1134
1135 tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A008, 0);
1136
1137 tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x0000001f);
1138
1139 /* we don't use this yet.. probably best to disable.. */
1140 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
1141 tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
1142 CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
1143 CP_SET_DRAW_STATE__0_GROUP_ID(0));
1144 tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
1145 tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));
1146
1147 /* Disable streamout by default: */
1148 tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 4);
1149 tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
1150 tu_cs_emit(cs, 0);
1151 tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
1152 tu_cs_emit(cs, 0);
1153
1154 tu_cs_emit_regs(cs,
1155 A6XX_SP_HS_CTRL_REG0(0));
1156
1157 tu_cs_emit_regs(cs,
1158 A6XX_SP_GS_CTRL_REG0(0));
1159
1160 tu_cs_emit_regs(cs,
1161 A6XX_GRAS_LRZ_CNTL(0));
1162
1163 tu_cs_emit_regs(cs,
1164 A6XX_RB_LRZ_CNTL(0));
1165
1166 tu_cs_sanity_check(cs);
1167 }
1168
1169 static void
1170 tu6_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1171 {
1172 unsigned seqno;
1173
1174 seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_AND_INV_EVENT, true);
1175
1176 tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
1177 tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
1178 CP_WAIT_REG_MEM_0_POLL_MEMORY);
1179 tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
1180 tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(seqno));
1181 tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
1182 tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));
1183
1184 seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);
1185
1186 tu_cs_emit_pkt7(cs, CP_WAIT_MEM_GTE, 4);
1187 tu_cs_emit(cs, CP_WAIT_MEM_GTE_0_RESERVED(0));
1188 tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
1189 tu_cs_emit(cs, CP_WAIT_MEM_GTE_3_REF(seqno));
1190 }
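/* The two waits above stall the CP until the flush events (which write their
 * sequence numbers to the scratch BO) have actually landed, so anything
 * emitted afterwards sees the flushed data.
 */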
1191
1192 static void
1193 update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1194 {
1195 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
1196
1197 tu_cs_emit_regs(cs,
1198 A6XX_VSC_BIN_SIZE(.width = tiling->tile0.extent.width,
1199 .height = tiling->tile0.extent.height),
1200 A6XX_VSC_SIZE_ADDRESS(.bo = &cmd->vsc_data,
1201 .bo_offset = 32 * cmd->vsc_data_pitch));
1202
1203 tu_cs_emit_regs(cs,
1204 A6XX_VSC_BIN_COUNT(.nx = tiling->tile_count.width,
1205 .ny = tiling->tile_count.height));
1206
1207 tu_cs_emit_pkt4(cs, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
1208 for (unsigned i = 0; i < 32; i++)
1209 tu_cs_emit(cs, tiling->pipe_config[i]);
1210
1211 tu_cs_emit_regs(cs,
1212 A6XX_VSC_PIPE_DATA2_ADDRESS(.bo = &cmd->vsc_data2),
1213 A6XX_VSC_PIPE_DATA2_PITCH(cmd->vsc_data2_pitch),
1214 A6XX_VSC_PIPE_DATA2_ARRAY_PITCH(cmd->vsc_data2.size));
1215
1216 tu_cs_emit_regs(cs,
1217 A6XX_VSC_PIPE_DATA_ADDRESS(.bo = &cmd->vsc_data),
1218 A6XX_VSC_PIPE_DATA_PITCH(cmd->vsc_data_pitch),
1219 A6XX_VSC_PIPE_DATA_ARRAY_PITCH(cmd->vsc_data.size));
1220 }
1221
1222 static void
1223 emit_vsc_overflow_test(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1224 {
1225 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
1226 const uint32_t used_pipe_count =
1227 tiling->pipe_count.width * tiling->pipe_count.height;
1228
1229 /* Clear vsc_scratch: */
1230 tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
1231 tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
1232 tu_cs_emit(cs, 0x0);
1233
1234 /* Check for overflow, write vsc_scratch if detected: */
1235 for (int i = 0; i < used_pipe_count; i++) {
1236 tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
1237 tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
1238 CP_COND_WRITE5_0_WRITE_MEMORY);
1239 tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE_REG(i)));
1240 tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
1241 tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data_pitch));
1242 tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
1243 tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
1244 tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(1 + cmd->vsc_data_pitch));
1245
1246 tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
1247 tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
1248 CP_COND_WRITE5_0_WRITE_MEMORY);
1249 tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE2_REG(i)));
1250 tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
1251 tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data2_pitch));
1252 tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
1253 tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
1254 tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(3 + cmd->vsc_data2_pitch));
1255 }
1256
1257 tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
1258
1259 tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
1260
1261 tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
1262 tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(OVERFLOW_FLAG_REG) |
1263 CP_MEM_TO_REG_0_CNT(1 - 1));
1264 tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
1265
1266 /*
1267 * This is a bit awkward, we really want a way to invert the
1268 * CP_REG_TEST/CP_COND_REG_EXEC logic, so that we can conditionally
1269 * execute cmds to use hwbinning when a bit is *not* set. This
1270 * dance is to invert OVERFLOW_FLAG_REG
1271 *
1272 * A CP_NOP packet is used to skip executing the 'else' clause
1273 * if (b0 set)..
1274 */
1275
1276 /* b0 will be set if VSC_DATA or VSC_DATA2 overflow: */
1277 tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
1278 tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
1279 A6XX_CP_REG_TEST_0_BIT(0) |
1280 A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
1281
1282 tu_cs_reserve(cs, 3 + 7);
1283 tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
1284 tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
1285 tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(7));
1286
1287 /* if (b0 set) */ {
1288 /*
1289 * On overflow, mirror the value to control->vsc_overflow
1290 * which CPU is checking to detect overflow (see
1291 * check_vsc_overflow())
1292 */
1293 tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
1294 tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(OVERFLOW_FLAG_REG) |
1295 CP_REG_TO_MEM_0_CNT(0));
1296 tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_OVERFLOW);
1297
1298 tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
1299 tu_cs_emit(cs, 0x0);
1300
1301 tu_cs_emit_pkt7(cs, CP_NOP, 2); /* skip 'else' when 'if' is taken */
1302 } /* else */ {
1303 tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
1304 tu_cs_emit(cs, 0x1);
1305 }
1306 }
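/* The value conditionally written to vsc_scratch above encodes both which
 * stream overflowed and the pitch that was in use (1 + vsc_data_pitch or
 * 3 + vsc_data2_pitch); the CPU-side check referenced in the comment can use
 * it to tell which stream overflowed and how large it needs to be.
 */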
1307
1308 static void
1309 tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1310 {
1311 struct tu_physical_device *phys_dev = cmd->device->physical_device;
1312 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
1313
1314 uint32_t x1 = tiling->tile0.offset.x;
1315 uint32_t y1 = tiling->tile0.offset.y;
1316 uint32_t x2 = tiling->render_area.offset.x + tiling->render_area.extent.width - 1;
1317 uint32_t y2 = tiling->render_area.offset.y + tiling->render_area.extent.height - 1;
1318
1319 tu6_emit_window_scissor(cmd, cs, x1, y1, x2, y2);
1320
1321 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
1322 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
1323
1324 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
1325 tu_cs_emit(cs, 0x1);
1326
1327 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
1328 tu_cs_emit(cs, 0x1);
1329
1330 tu_cs_emit_wfi(cs);
1331
1332 tu_cs_emit_regs(cs,
1333 A6XX_VFD_MODE_CNTL(.binning_pass = true));
1334
1335 update_vsc_pipe(cmd, cs);
1336
1337 tu_cs_emit_regs(cs,
1338 A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));
1339
1340 tu_cs_emit_regs(cs,
1341 A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));
1342
1343 tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
1344 tu_cs_emit(cs, UNK_2C);
1345
1346 tu_cs_emit_regs(cs,
1347 A6XX_RB_WINDOW_OFFSET(.x = 0, .y = 0));
1348
1349 tu_cs_emit_regs(cs,
1350 A6XX_SP_TP_WINDOW_OFFSET(.x = 0, .y = 0));
1351
1352 /* emit IB to binning drawcmds: */
1353 tu_cs_emit_call(cs, &cmd->draw_cs);
1354
1355 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
1356 tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
1357 CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
1358 CP_SET_DRAW_STATE__0_GROUP_ID(0));
1359 tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
1360 tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));
1361
1362 tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
1363 tu_cs_emit(cs, UNK_2D);
1364
1365 tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
1366 tu6_cache_flush(cmd, cs);
1367
1368 tu_cs_emit_wfi(cs);
1369
1370 tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
1371
1372 emit_vsc_overflow_test(cmd, cs);
1373
1374 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
1375 tu_cs_emit(cs, 0x0);
1376
1377 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
1378 tu_cs_emit(cs, 0x0);
1379
1380 tu_cs_emit_wfi(cs);
1381
1382 tu_cs_emit_regs(cs,
1383 A6XX_RB_CCU_CNTL(.unknown = phys_dev->magic.RB_CCU_CNTL_gmem));
1384
1385 cmd->wait_for_idle = false;
1386 }
1387
1388 static void
1389 tu_emit_sysmem_clear_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
1390 uint32_t a,
1391 const VkRenderPassBeginInfo *info)
1392 {
1393 const struct tu_framebuffer *fb = cmd->state.framebuffer;
1394 const struct tu_image_view *iview = fb->attachments[a].attachment;
1395 const struct tu_render_pass_attachment *attachment =
1396 &cmd->state.pass->attachments[a];
1397 unsigned clear_mask = 0;
1398
1399 /* note: this means it isn't used by any subpass and shouldn't be cleared anyway */
1400 if (attachment->gmem_offset < 0)
1401 return;
1402
1403 if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
1404 clear_mask = 0xf;
1405 }
1406
1407 if (vk_format_has_stencil(iview->vk_format)) {
1408 clear_mask &= 0x1;
1409 if (attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
1410 clear_mask |= 0x2;
1411 if (clear_mask != 0x3)
1412 tu_finishme("depth/stencil only load op");
1413 }
1414
1415 if (!clear_mask)
1416 return;
1417
1418 tu_clear_sysmem_attachment(cmd, cs, a,
1419 &info->pClearValues[a], &(struct VkClearRect) {
1420 .rect = info->renderArea,
1421 .baseArrayLayer = iview->base_layer,
1422 .layerCount = iview->layer_count,
1423 });
1424 }
1425
1426 static void
1427 tu_emit_load_clear(struct tu_cmd_buffer *cmd,
1428 const VkRenderPassBeginInfo *info)
1429 {
1430 struct tu_cs *cs = &cmd->draw_cs;
1431
1432 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);
1433
1434 tu6_emit_blit_scissor(cmd, cs, true);
1435
1436 for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
1437 tu6_emit_load_attachment(cmd, cs, i);
1438
1439 tu6_emit_blit_scissor(cmd, cs, false);
1440
1441 for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
1442 tu6_emit_clear_attachment(cmd, cs, i, info);
1443
1444 tu_cond_exec_end(cs);
1445
1446 /* invalidate because reading input attachments will cache GMEM and
1447 * the cache isn't updated when GMEM is written
1448 * TODO: is there a no-cache bit for textures?
1449 */
1450 if (cmd->state.subpass->input_count)
1451 tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
1452
1453 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
1454
1455 for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
1456 tu_emit_sysmem_clear_attachment(cmd, cs, i, info);
1457
1458 tu_cond_exec_end(cs);
1459 }
1460
1461 static void
1462 tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
1463 const struct VkRect2D *renderArea)
1464 {
1465 const struct tu_framebuffer *fb = cmd->state.framebuffer;
1466
1467 assert(fb->width > 0 && fb->height > 0);
1468 tu6_emit_window_scissor(cmd, cs, 0, 0, fb->width - 1, fb->height - 1);
1469 tu6_emit_window_offset(cmd, cs, 0, 0);
1470
1471 tu6_emit_bin_size(cs, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */
1472
1473 tu6_emit_lrz_flush(cmd, cs);
1474
1475 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
1476 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
1477
1478 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1479 tu_cs_emit(cs, 0x0);
1480
1481 tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
1482 tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);
1483 tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
1484
1485 tu6_emit_wfi(cmd, cs);
1486 tu_cs_emit_regs(cs,
1487 A6XX_RB_CCU_CNTL(0x10000000));
1488
1489 /* enable stream-out, with sysmem there is only one pass: */
1490 tu_cs_emit_regs(cs,
1491 A6XX_VPC_SO_OVERRIDE(.so_disable = false));
1492
1493 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
1494 tu_cs_emit(cs, 0x1);
1495
1496 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
1497 tu_cs_emit(cs, 0x0);
1498
1499 tu_cs_sanity_check(cs);
1500 }
1501
1502 static void
1503 tu6_sysmem_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1504 {
1505 /* Do any resolves of the last subpass. These are handled in the
1506 * tile_store_ib in the gmem path.
1507 */
1508
1509 const struct tu_subpass *subpass = cmd->state.subpass;
1510 if (subpass->resolve_attachments) {
1511 for (unsigned i = 0; i < subpass->color_count; i++) {
1512 uint32_t a = subpass->resolve_attachments[i].attachment;
1513 if (a != VK_ATTACHMENT_UNUSED)
1514 tu6_emit_sysmem_resolve(cmd, cs, a,
1515 subpass->color_attachments[i].attachment);
1516 }
1517 }
1518
1519 tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);
1520
1521 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1522 tu_cs_emit(cs, 0x0);
1523
1524 tu6_emit_lrz_flush(cmd, cs);
1525
1526 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
1527 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
1528
1529 tu_cs_sanity_check(cs);
1530 }
1531
1532
1533 static void
1534 tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1535 {
1536 struct tu_physical_device *phys_dev = cmd->device->physical_device;
1537
1538 tu6_emit_lrz_flush(cmd, cs);
1539
1540 /* lrz clear? */
1541
1542 tu6_emit_cache_flush(cmd, cs);
1543
1544 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1545 tu_cs_emit(cs, 0x0);
1546
1547 /* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
1548 tu6_emit_wfi(cmd, cs);
1549 tu_cs_emit_regs(cs,
1550 A6XX_RB_CCU_CNTL(phys_dev->magic.RB_CCU_CNTL_gmem));
1551
1552 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
1553 if (use_hw_binning(cmd)) {
1554 /* enable stream-out during binning pass: */
1555 tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=false));
1556
1557 tu6_emit_bin_size(cs,
1558 tiling->tile0.extent.width,
1559 tiling->tile0.extent.height,
1560 A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
1561
1562 tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, true);
1563
1564 tu6_emit_binning_pass(cmd, cs);
1565
1566 /* and disable stream-out for draw pass: */
1567 tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=true));
1568
1569 tu6_emit_bin_size(cs,
1570 tiling->tile0.extent.width,
1571 tiling->tile0.extent.height,
1572 A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);
1573
1574 tu_cs_emit_regs(cs,
1575 A6XX_VFD_MODE_CNTL(0));
1576
1577 tu_cs_emit_regs(cs, A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));
1578
1579 tu_cs_emit_regs(cs, A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));
1580
1581 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1582 tu_cs_emit(cs, 0x1);
1583 } else {
1584 /* no binning pass, so enable stream-out for draw pass: */
1585 tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=false));
1586
1587 tu6_emit_bin_size(cs,
1588 tiling->tile0.extent.width,
1589 tiling->tile0.extent.height,
1590 0x6000000);
1591 }
1592
1593 tu_cs_sanity_check(cs);
1594 }
1595
1596 static void
1597 tu6_render_tile(struct tu_cmd_buffer *cmd,
1598 struct tu_cs *cs,
1599 const struct tu_tile *tile)
1600 {
1601 tu6_emit_tile_select(cmd, cs, tile);
1602
1603 tu_cs_emit_call(cs, &cmd->draw_cs);
1604 cmd->wait_for_idle = true;
1605
1606 if (use_hw_binning(cmd)) {
1607 tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
1608 tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
1609 A6XX_CP_REG_TEST_0_BIT(0) |
1610 A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
1611
1612 tu_cs_reserve(cs, 3 + 2);
1613 tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
1614 tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
1615 tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(2));
1616
1617 /* if (no overflow) */ {
1618 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
1619 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
1620 }
1621 }
1622
1623 tu_cs_emit_ib(cs, &cmd->state.tile_store_ib);
1624
1625 tu_cs_sanity_check(cs);
1626 }
1627
1628 static void
1629 tu6_tile_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1630 {
1631 tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);
1632
1633 tu_cs_emit_regs(cs,
1634 A6XX_GRAS_LRZ_CNTL(0));
1635
1636 tu6_emit_lrz_flush(cmd, cs);
1637
1638 tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);
1639
1640 tu_cs_sanity_check(cs);
1641 }
1642
1643 static void
1644 tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
1645 {
1646 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
1647
1648 tu6_tile_render_begin(cmd, &cmd->cs);
1649
1650 for (uint32_t y = 0; y < tiling->tile_count.height; y++) {
1651 for (uint32_t x = 0; x < tiling->tile_count.width; x++) {
1652 struct tu_tile tile;
1653 tu_tiling_config_get_tile(tiling, cmd->device, x, y, &tile);
1654 tu6_render_tile(cmd, &cmd->cs, &tile);
1655 }
1656 }
1657
1658 tu6_tile_render_end(cmd, &cmd->cs);
1659 }
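/* GMEM rendering flow: tu6_tile_render_begin() sets up CCU/bin state and
 * optionally runs the binning pass, then each tile replays the recorded
 * draw_cs and appends the tile-store IB, and tu6_tile_render_end() flushes
 * caches and LRZ state.
 */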
1660
1661 static void
1662 tu_cmd_render_sysmem(struct tu_cmd_buffer *cmd)
1663 {
1664 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
1665
1666 tu6_sysmem_render_begin(cmd, &cmd->cs, &tiling->render_area);
1667
1668 tu_cs_emit_call(&cmd->cs, &cmd->draw_cs);
1669 cmd->wait_for_idle = true;
1670
1671 tu6_sysmem_render_end(cmd, &cmd->cs);
1672 }
1673
1674 static void
1675 tu_cmd_prepare_tile_store_ib(struct tu_cmd_buffer *cmd)
1676 {
1677 const uint32_t tile_store_space = 32 + 23 * cmd->state.pass->attachment_count;
1678 struct tu_cs sub_cs;
1679
1680 VkResult result =
1681 tu_cs_begin_sub_stream(&cmd->sub_cs, tile_store_space, &sub_cs);
1682 if (result != VK_SUCCESS) {
1683 cmd->record_result = result;
1684 return;
1685 }
1686
1687 /* emit to tile-store sub_cs */
1688 tu6_emit_tile_store(cmd, &sub_cs);
1689
1690 cmd->state.tile_store_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
1691 }
1692
1693 static void
1694 tu_cmd_update_tiling_config(struct tu_cmd_buffer *cmd,
1695 const VkRect2D *render_area)
1696 {
1697 const struct tu_device *dev = cmd->device;
1698 struct tu_tiling_config *tiling = &cmd->state.tiling_config;
1699
1700 tiling->render_area = *render_area;
1701 tiling->force_sysmem = force_sysmem(cmd, render_area);
1702
1703 tu_tiling_config_update_tile_layout(tiling, dev, cmd->state.pass->gmem_pixels);
1704 tu_tiling_config_update_pipe_layout(tiling, dev);
1705 tu_tiling_config_update_pipes(tiling, dev);
1706 }
1707
1708 const struct tu_dynamic_state default_dynamic_state = {
1709 .viewport =
1710 {
1711 .count = 0,
1712 },
1713 .scissor =
1714 {
1715 .count = 0,
1716 },
1717 .line_width = 1.0f,
1718 .depth_bias =
1719 {
1720 .bias = 0.0f,
1721 .clamp = 0.0f,
1722 .slope = 0.0f,
1723 },
1724 .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
1725 .depth_bounds =
1726 {
1727 .min = 0.0f,
1728 .max = 1.0f,
1729 },
1730 .stencil_compare_mask =
1731 {
1732 .front = ~0u,
1733 .back = ~0u,
1734 },
1735 .stencil_write_mask =
1736 {
1737 .front = ~0u,
1738 .back = ~0u,
1739 },
1740 .stencil_reference =
1741 {
1742 .front = 0u,
1743 .back = 0u,
1744 },
1745 };
1746
1747 static void UNUSED /* FINISHME */
1748 tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer,
1749 const struct tu_dynamic_state *src)
1750 {
1751 struct tu_dynamic_state *dest = &cmd_buffer->state.dynamic;
1752 uint32_t copy_mask = src->mask;
1753 uint32_t dest_mask = 0;
1754
1755 tu_use_args(cmd_buffer); /* FINISHME */
1756
1757 /* Make sure to copy the number of viewports/scissors because they can
1758 * only be specified at pipeline creation time.
1759 */
1760 dest->viewport.count = src->viewport.count;
1761 dest->scissor.count = src->scissor.count;
1762 dest->discard_rectangle.count = src->discard_rectangle.count;
1763
1764 if (copy_mask & TU_DYNAMIC_VIEWPORT) {
1765 if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
1766 src->viewport.count * sizeof(VkViewport))) {
1767 typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
1768 src->viewport.count);
1769 dest_mask |= TU_DYNAMIC_VIEWPORT;
1770 }
1771 }
1772
1773 if (copy_mask & TU_DYNAMIC_SCISSOR) {
1774 if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
1775 src->scissor.count * sizeof(VkRect2D))) {
1776 typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
1777 src->scissor.count);
1778 dest_mask |= TU_DYNAMIC_SCISSOR;
1779 }
1780 }
1781
1782 if (copy_mask & TU_DYNAMIC_LINE_WIDTH) {
1783 if (dest->line_width != src->line_width) {
1784 dest->line_width = src->line_width;
1785 dest_mask |= TU_DYNAMIC_LINE_WIDTH;
1786 }
1787 }
1788
1789 if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) {
1790 if (memcmp(&dest->depth_bias, &src->depth_bias,
1791 sizeof(src->depth_bias))) {
1792 dest->depth_bias = src->depth_bias;
1793 dest_mask |= TU_DYNAMIC_DEPTH_BIAS;
1794 }
1795 }
1796
1797 if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) {
1798 if (memcmp(&dest->blend_constants, &src->blend_constants,
1799 sizeof(src->blend_constants))) {
1800 typed_memcpy(dest->blend_constants, src->blend_constants, 4);
1801 dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS;
1802 }
1803 }
1804
1805 if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) {
1806 if (memcmp(&dest->depth_bounds, &src->depth_bounds,
1807 sizeof(src->depth_bounds))) {
1808 dest->depth_bounds = src->depth_bounds;
1809 dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS;
1810 }
1811 }
1812
1813 if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) {
1814 if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
1815 sizeof(src->stencil_compare_mask))) {
1816 dest->stencil_compare_mask = src->stencil_compare_mask;
1817 dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK;
1818 }
1819 }
1820
1821 if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) {
1822 if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
1823 sizeof(src->stencil_write_mask))) {
1824 dest->stencil_write_mask = src->stencil_write_mask;
1825 dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK;
1826 }
1827 }
1828
1829 if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) {
1830 if (memcmp(&dest->stencil_reference, &src->stencil_reference,
1831 sizeof(src->stencil_reference))) {
1832 dest->stencil_reference = src->stencil_reference;
1833 dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE;
1834 }
1835 }
1836
1837 if (copy_mask & TU_DYNAMIC_DISCARD_RECTANGLE) {
1838 if (memcmp(&dest->discard_rectangle.rectangles,
1839 &src->discard_rectangle.rectangles,
1840 src->discard_rectangle.count * sizeof(VkRect2D))) {
1841 typed_memcpy(dest->discard_rectangle.rectangles,
1842 src->discard_rectangle.rectangles,
1843 src->discard_rectangle.count);
1844 dest_mask |= TU_DYNAMIC_DISCARD_RECTANGLE;
1845 }
1846 }
1847 }
1848
1849 static VkResult
1850 tu_create_cmd_buffer(struct tu_device *device,
1851 struct tu_cmd_pool *pool,
1852 VkCommandBufferLevel level,
1853 VkCommandBuffer *pCommandBuffer)
1854 {
1855 struct tu_cmd_buffer *cmd_buffer;
1856 cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
1857 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1858 if (cmd_buffer == NULL)
1859 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1860
1861 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1862 cmd_buffer->device = device;
1863 cmd_buffer->pool = pool;
1864 cmd_buffer->level = level;
1865
1866 if (pool) {
1867 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
1868 cmd_buffer->queue_family_index = pool->queue_family_index;
1869
1870 } else {
1871 /* Init the pool_link so we can safely call list_del when we destroy
1872 * the command buffer
1873 */
1874 list_inithead(&cmd_buffer->pool_link);
1875 cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
1876 }
1877
1878 tu_bo_list_init(&cmd_buffer->bo_list);
1879 tu_cs_init(&cmd_buffer->cs, device, TU_CS_MODE_GROW, 4096);
1880 tu_cs_init(&cmd_buffer->draw_cs, device, TU_CS_MODE_GROW, 4096);
1881 tu_cs_init(&cmd_buffer->draw_epilogue_cs, device, TU_CS_MODE_GROW, 4096);
1882 tu_cs_init(&cmd_buffer->sub_cs, device, TU_CS_MODE_SUB_STREAM, 2048);
1883
1884 *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);
1885
1886 list_inithead(&cmd_buffer->upload.list);
1887
1888 VkResult result = tu_bo_init_new(device, &cmd_buffer->scratch_bo, 0x1000);
1889 if (result != VK_SUCCESS)
1890 goto fail_scratch_bo;
1891
1892 /* TODO: resize on overflow */
1893 cmd_buffer->vsc_data_pitch = device->vsc_data_pitch;
1894 cmd_buffer->vsc_data2_pitch = device->vsc_data2_pitch;
1895 cmd_buffer->vsc_data = device->vsc_data;
1896 cmd_buffer->vsc_data2 = device->vsc_data2;
1897
1898 return VK_SUCCESS;
1899
1900 fail_scratch_bo:
1901 list_del(&cmd_buffer->pool_link);
1902 return result;
1903 }
1904
1905 static void
1906 tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
1907 {
1908 tu_bo_finish(cmd_buffer->device, &cmd_buffer->scratch_bo);
1909
1910 list_del(&cmd_buffer->pool_link);
1911
1912 for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
1913 free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
1914
1915 tu_cs_finish(&cmd_buffer->cs);
1916 tu_cs_finish(&cmd_buffer->draw_cs);
1917 tu_cs_finish(&cmd_buffer->draw_epilogue_cs);
1918 tu_cs_finish(&cmd_buffer->sub_cs);
1919
1920 tu_bo_list_destroy(&cmd_buffer->bo_list);
1921 vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
1922 }
1923
1924 static VkResult
1925 tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
1926 {
1927 cmd_buffer->wait_for_idle = true;
1928
1929 cmd_buffer->record_result = VK_SUCCESS;
1930
1931 tu_bo_list_reset(&cmd_buffer->bo_list);
1932 tu_cs_reset(&cmd_buffer->cs);
1933 tu_cs_reset(&cmd_buffer->draw_cs);
1934 tu_cs_reset(&cmd_buffer->draw_epilogue_cs);
1935 tu_cs_reset(&cmd_buffer->sub_cs);
1936
1937 for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
1938 cmd_buffer->descriptors[i].valid = 0;
1939 cmd_buffer->descriptors[i].push_dirty = false;
1940 }
1941
1942 cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;
1943
1944 return cmd_buffer->record_result;
1945 }
1946
1947 VkResult
1948 tu_AllocateCommandBuffers(VkDevice _device,
1949 const VkCommandBufferAllocateInfo *pAllocateInfo,
1950 VkCommandBuffer *pCommandBuffers)
1951 {
1952 TU_FROM_HANDLE(tu_device, device, _device);
1953 TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);
1954
1955 VkResult result = VK_SUCCESS;
1956 uint32_t i;
1957
1958 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
1959
1960 if (!list_is_empty(&pool->free_cmd_buffers)) {
1961 struct tu_cmd_buffer *cmd_buffer = list_first_entry(
1962 &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);
1963
1964 list_del(&cmd_buffer->pool_link);
1965 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
1966
1967 result = tu_reset_cmd_buffer(cmd_buffer);
1968 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1969 cmd_buffer->level = pAllocateInfo->level;
1970
1971 pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
1972 } else {
1973 result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
1974 &pCommandBuffers[i]);
1975 }
1976 if (result != VK_SUCCESS)
1977 break;
1978 }
1979
1980 if (result != VK_SUCCESS) {
1981 tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
1982 pCommandBuffers);
1983
1984 /* From the Vulkan 1.0.66 spec:
1985 *
1986 * "vkAllocateCommandBuffers can be used to create multiple
1987 * command buffers. If the creation of any of those command
1988 * buffers fails, the implementation must destroy all
1989 * successfully created command buffer objects from this
1990 * command, set all entries of the pCommandBuffers array to
1991 * NULL and return the error."
1992 */
1993 memset(pCommandBuffers, 0,
1994 sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
1995 }
1996
1997 return result;
1998 }
1999
2000 void
2001 tu_FreeCommandBuffers(VkDevice device,
2002 VkCommandPool commandPool,
2003 uint32_t commandBufferCount,
2004 const VkCommandBuffer *pCommandBuffers)
2005 {
2006 for (uint32_t i = 0; i < commandBufferCount; i++) {
2007 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
2008
2009 if (cmd_buffer) {
2010 if (cmd_buffer->pool) {
2011 list_del(&cmd_buffer->pool_link);
2012 list_addtail(&cmd_buffer->pool_link,
2013 &cmd_buffer->pool->free_cmd_buffers);
2014 } else
2015 tu_cmd_buffer_destroy(cmd_buffer);
2016 }
2017 }
2018 }
2019
2020 VkResult
2021 tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
2022 VkCommandBufferResetFlags flags)
2023 {
2024 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
2025 return tu_reset_cmd_buffer(cmd_buffer);
2026 }
2027
2028 VkResult
2029 tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
2030 const VkCommandBufferBeginInfo *pBeginInfo)
2031 {
2032 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
2033 VkResult result = VK_SUCCESS;
2034
2035 if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
2036 /* If the command buffer has already been reset with
2037 * vkResetCommandBuffer, there is no need to do it again.
2038 */
2039 result = tu_reset_cmd_buffer(cmd_buffer);
2040 if (result != VK_SUCCESS)
2041 return result;
2042 }
2043
2044 memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
2045 cmd_buffer->usage_flags = pBeginInfo->flags;
2046
2047 tu_cs_begin(&cmd_buffer->cs);
2048 tu_cs_begin(&cmd_buffer->draw_cs);
2049 tu_cs_begin(&cmd_buffer->draw_epilogue_cs);
2050
2051 cmd_buffer->scratch_seqno = 0;
2052
2053 /* set up the initial configuration in the command buffer */
2054 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
2055 switch (cmd_buffer->queue_family_index) {
2056 case TU_QUEUE_GENERAL:
2057 tu6_init_hw(cmd_buffer, &cmd_buffer->cs);
2058 break;
2059 default:
2060 break;
2061 }
2062 } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
2063 (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
2064 assert(pBeginInfo->pInheritanceInfo);
2065 cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
2066 cmd_buffer->state.subpass = &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
2067 }
2068
2069 cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;
2070
2071 return VK_SUCCESS;
2072 }
2073
2074 void
2075 tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
2076 uint32_t firstBinding,
2077 uint32_t bindingCount,
2078 const VkBuffer *pBuffers,
2079 const VkDeviceSize *pOffsets)
2080 {
2081 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2082
2083 assert(firstBinding + bindingCount <= MAX_VBS);
2084
2085 for (uint32_t i = 0; i < bindingCount; i++) {
2086 cmd->state.vb.buffers[firstBinding + i] =
2087 tu_buffer_from_handle(pBuffers[i]);
2088 cmd->state.vb.offsets[firstBinding + i] = pOffsets[i];
2089 }
2090
2091 /* VB states depend on VkPipelineVertexInputStateCreateInfo */
2092 cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
2093 }
2094
2095 void
2096 tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
2097 VkBuffer buffer,
2098 VkDeviceSize offset,
2099 VkIndexType indexType)
2100 {
2101 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2102 TU_FROM_HANDLE(tu_buffer, buf, buffer);
2103
2104 /* initialize/update the restart index */
2105 if (!cmd->state.index_buffer || cmd->state.index_type != indexType) {
2106 struct tu_cs *draw_cs = &cmd->draw_cs;
2107
2108 tu6_emit_restart_index(
2109 draw_cs, indexType == VK_INDEX_TYPE_UINT32 ? 0xffffffff : 0xffff);
2110
2111 tu_cs_sanity_check(draw_cs);
2112 }
2113
2114 /* track the BO */
2115 if (cmd->state.index_buffer != buf)
2116 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
2117
2118 cmd->state.index_buffer = buf;
2119 cmd->state.index_offset = offset;
2120 cmd->state.index_type = indexType;
2121 }
2122
2123 void
2124 tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
2125 VkPipelineBindPoint pipelineBindPoint,
2126 VkPipelineLayout _layout,
2127 uint32_t firstSet,
2128 uint32_t descriptorSetCount,
2129 const VkDescriptorSet *pDescriptorSets,
2130 uint32_t dynamicOffsetCount,
2131 const uint32_t *pDynamicOffsets)
2132 {
2133 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
2134 TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
2135 unsigned dyn_idx = 0;
2136
2137 struct tu_descriptor_state *descriptors_state =
2138 tu_get_descriptors_state(cmd_buffer, pipelineBindPoint);
2139
2140 for (unsigned i = 0; i < descriptorSetCount; ++i) {
2141 unsigned idx = i + firstSet;
2142 TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);
2143
2144 descriptors_state->sets[idx] = set;
2145 descriptors_state->valid |= (1u << idx);
2146
2147 for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
2148 unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
2149 assert(dyn_idx < dynamicOffsetCount);
2150
2151 descriptors_state->dynamic_buffers[idx] =
2152 set->dynamic_descriptors[j].va + pDynamicOffsets[dyn_idx];
2153 }
2154 }
2155
2156 cmd_buffer->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS;
2157 }
2158
2159 void tu_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer,
2160 uint32_t firstBinding,
2161 uint32_t bindingCount,
2162 const VkBuffer *pBuffers,
2163 const VkDeviceSize *pOffsets,
2164 const VkDeviceSize *pSizes)
2165 {
2166 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2167 assert(firstBinding + bindingCount <= IR3_MAX_SO_BUFFERS);
2168
2169 for (uint32_t i = 0; i < bindingCount; i++) {
2170 uint32_t idx = firstBinding + i;
2171 TU_FROM_HANDLE(tu_buffer, buf, pBuffers[i]);
2172
2173 if (pOffsets[i] != 0)
2174 cmd->state.streamout_reset |= 1 << idx;
2175
2176 cmd->state.streamout_buf.buffers[idx] = buf;
2177 cmd->state.streamout_buf.offsets[idx] = pOffsets[i];
2178 cmd->state.streamout_buf.sizes[idx] = pSizes[i];
2179
2180 cmd->state.streamout_enabled |= 1 << idx;
2181 }
2182
2183 cmd->state.dirty |= TU_CMD_DIRTY_STREAMOUT_BUFFERS;
2184 }
2185
2186 void tu_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer,
2187 uint32_t firstCounterBuffer,
2188 uint32_t counterBufferCount,
2189 const VkBuffer *pCounterBuffers,
2190 const VkDeviceSize *pCounterBufferOffsets)
2191 {
2192 assert(firstCounterBuffer + counterBufferCount <= IR3_MAX_SO_BUFFERS);
2193 /* TODO do something with counter buffer? */
2194 }
2195
2196 void tu_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer,
2197 uint32_t firstCounterBuffer,
2198 uint32_t counterBufferCount,
2199 const VkBuffer *pCounterBuffers,
2200 const VkDeviceSize *pCounterBufferOffsets)
2201 {
2202 assert(firstCounterBuffer + counterBufferCount <= IR3_MAX_SO_BUFFERS);
2203 /* TODO do something with counter buffer? */
2204
2205 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2206 cmd->state.streamout_enabled = 0;
2207 }
2208
2209 void
2210 tu_CmdPushConstants(VkCommandBuffer commandBuffer,
2211 VkPipelineLayout layout,
2212 VkShaderStageFlags stageFlags,
2213 uint32_t offset,
2214 uint32_t size,
2215 const void *pValues)
2216 {
2217 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2218 memcpy((void*) cmd->push_constants + offset, pValues, size);
2219 cmd->state.dirty |= TU_CMD_DIRTY_PUSH_CONSTANTS;
2220 }
2221
2222 VkResult
2223 tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
2224 {
2225 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
2226
2227 if (cmd_buffer->scratch_seqno) {
2228 tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
2229 MSM_SUBMIT_BO_WRITE);
2230 }
2231
2232 if (cmd_buffer->use_vsc_data) {
2233 tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data,
2234 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
2235 tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data2,
2236 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
2237 }
2238
2239 for (uint32_t i = 0; i < cmd_buffer->draw_cs.bo_count; i++) {
2240 tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_cs.bos[i],
2241 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2242 }
2243
2244 for (uint32_t i = 0; i < cmd_buffer->draw_epilogue_cs.bo_count; i++) {
2245 tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_epilogue_cs.bos[i],
2246 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2247 }
2248
2249 for (uint32_t i = 0; i < cmd_buffer->sub_cs.bo_count; i++) {
2250 tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->sub_cs.bos[i],
2251 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2252 }
2253
2254 tu_cs_end(&cmd_buffer->cs);
2255 tu_cs_end(&cmd_buffer->draw_cs);
2256 tu_cs_end(&cmd_buffer->draw_epilogue_cs);
2257
2258 cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;
2259
2260 return cmd_buffer->record_result;
2261 }
2262
2263 void
2264 tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
2265 VkPipelineBindPoint pipelineBindPoint,
2266 VkPipeline _pipeline)
2267 {
2268 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2269 TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);
2270
2271 switch (pipelineBindPoint) {
2272 case VK_PIPELINE_BIND_POINT_GRAPHICS:
2273 cmd->state.pipeline = pipeline;
2274 cmd->state.dirty |= TU_CMD_DIRTY_PIPELINE;
2275 break;
2276 case VK_PIPELINE_BIND_POINT_COMPUTE:
2277 cmd->state.compute_pipeline = pipeline;
2278 cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_PIPELINE;
2279 break;
2280 default:
2281 unreachable("unrecognized pipeline bind point");
2282 break;
2283 }
2284
2285 tu_bo_list_add(&cmd->bo_list, &pipeline->program.binary_bo,
2286 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2287 for (uint32_t i = 0; i < pipeline->cs.bo_count; i++) {
2288 tu_bo_list_add(&cmd->bo_list, pipeline->cs.bos[i],
2289 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2290 }
2291 }
2292
2293 void
2294 tu_CmdSetViewport(VkCommandBuffer commandBuffer,
2295 uint32_t firstViewport,
2296 uint32_t viewportCount,
2297 const VkViewport *pViewports)
2298 {
2299 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2300 struct tu_cs *draw_cs = &cmd->draw_cs;
2301
2302 assert(firstViewport == 0 && viewportCount == 1);
2303 tu6_emit_viewport(draw_cs, pViewports);
2304
2305 tu_cs_sanity_check(draw_cs);
2306 }
2307
2308 void
2309 tu_CmdSetScissor(VkCommandBuffer commandBuffer,
2310 uint32_t firstScissor,
2311 uint32_t scissorCount,
2312 const VkRect2D *pScissors)
2313 {
2314 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2315 struct tu_cs *draw_cs = &cmd->draw_cs;
2316
2317 assert(firstScissor == 0 && scissorCount == 1);
2318 tu6_emit_scissor(draw_cs, pScissors);
2319
2320 tu_cs_sanity_check(draw_cs);
2321 }
2322
2323 void
2324 tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
2325 {
2326 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2327
2328 cmd->state.dynamic.line_width = lineWidth;
2329
2330 /* line width depends on VkPipelineRasterizationStateCreateInfo */
2331 cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
2332 }
2333
2334 void
2335 tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
2336 float depthBiasConstantFactor,
2337 float depthBiasClamp,
2338 float depthBiasSlopeFactor)
2339 {
2340 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2341 struct tu_cs *draw_cs = &cmd->draw_cs;
2342
2343 tu6_emit_depth_bias(draw_cs, depthBiasConstantFactor, depthBiasClamp,
2344 depthBiasSlopeFactor);
2345
2346 tu_cs_sanity_check(draw_cs);
2347 }
2348
2349 void
2350 tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
2351 const float blendConstants[4])
2352 {
2353 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2354 struct tu_cs *draw_cs = &cmd->draw_cs;
2355
2356 tu6_emit_blend_constants(draw_cs, blendConstants);
2357
2358 tu_cs_sanity_check(draw_cs);
2359 }
2360
2361 void
2362 tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
2363 float minDepthBounds,
2364 float maxDepthBounds)
2365 {
2366 }
2367
2368 void
2369 tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
2370 VkStencilFaceFlags faceMask,
2371 uint32_t compareMask)
2372 {
2373 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2374
2375 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2376 cmd->state.dynamic.stencil_compare_mask.front = compareMask;
2377 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2378 cmd->state.dynamic.stencil_compare_mask.back = compareMask;
2379
2380 /* the front/back compare masks must be updated together */
2381 cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
2382 }
2383
2384 void
2385 tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
2386 VkStencilFaceFlags faceMask,
2387 uint32_t writeMask)
2388 {
2389 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2390
2391 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2392 cmd->state.dynamic.stencil_write_mask.front = writeMask;
2393 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2394 cmd->state.dynamic.stencil_write_mask.back = writeMask;
2395
2396 /* the front/back write masks must be updated together */
2397 cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
2398 }
2399
2400 void
2401 tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
2402 VkStencilFaceFlags faceMask,
2403 uint32_t reference)
2404 {
2405 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2406
2407 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2408 cmd->state.dynamic.stencil_reference.front = reference;
2409 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2410 cmd->state.dynamic.stencil_reference.back = reference;
2411
2412 /* the front/back references must be updated together */
2413 cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
2414 }
2415
2416 void
2417 tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
2418 uint32_t commandBufferCount,
2419 const VkCommandBuffer *pCmdBuffers)
2420 {
2421 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2422 VkResult result;
2423
2424 assert(commandBufferCount > 0);
2425
2426 for (uint32_t i = 0; i < commandBufferCount; i++) {
2427 TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);
2428
2429 result = tu_bo_list_merge(&cmd->bo_list, &secondary->bo_list);
2430 if (result != VK_SUCCESS) {
2431 cmd->record_result = result;
2432 break;
2433 }
2434
2435 if (secondary->usage_flags &
2436 VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
2437 assert(tu_cs_is_empty(&secondary->cs));
2438
2439 result = tu_cs_add_entries(&cmd->draw_cs, &secondary->draw_cs);
2440 if (result != VK_SUCCESS) {
2441 cmd->record_result = result;
2442 break;
2443 }
2444
2445 result = tu_cs_add_entries(&cmd->draw_epilogue_cs,
2446 &secondary->draw_epilogue_cs);
2447 if (result != VK_SUCCESS) {
2448 cmd->record_result = result;
2449 break;
2450 }
2451 } else {
2452 assert(tu_cs_is_empty(&secondary->draw_cs));
2453 assert(tu_cs_is_empty(&secondary->draw_epilogue_cs));
2454
2455 for (uint32_t j = 0; j < secondary->cs.bo_count; j++) {
2456 tu_bo_list_add(&cmd->bo_list, secondary->cs.bos[j],
2457 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2458 }
2459
2460 tu_cs_emit_call(&cmd->cs, &secondary->cs);
2461 }
2462 }
2463 cmd->state.dirty = ~0u; /* TODO: set dirty only what needs to be */
2464 }
2465
2466 VkResult
2467 tu_CreateCommandPool(VkDevice _device,
2468 const VkCommandPoolCreateInfo *pCreateInfo,
2469 const VkAllocationCallbacks *pAllocator,
2470 VkCommandPool *pCmdPool)
2471 {
2472 TU_FROM_HANDLE(tu_device, device, _device);
2473 struct tu_cmd_pool *pool;
2474
2475 pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
2476 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2477 if (pool == NULL)
2478 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2479
2480 if (pAllocator)
2481 pool->alloc = *pAllocator;
2482 else
2483 pool->alloc = device->alloc;
2484
2485 list_inithead(&pool->cmd_buffers);
2486 list_inithead(&pool->free_cmd_buffers);
2487
2488 pool->queue_family_index = pCreateInfo->queueFamilyIndex;
2489
2490 *pCmdPool = tu_cmd_pool_to_handle(pool);
2491
2492 return VK_SUCCESS;
2493 }
2494
2495 void
2496 tu_DestroyCommandPool(VkDevice _device,
2497 VkCommandPool commandPool,
2498 const VkAllocationCallbacks *pAllocator)
2499 {
2500 TU_FROM_HANDLE(tu_device, device, _device);
2501 TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
2502
2503 if (!pool)
2504 return;
2505
2506 list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
2507 &pool->cmd_buffers, pool_link)
2508 {
2509 tu_cmd_buffer_destroy(cmd_buffer);
2510 }
2511
2512 list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
2513 &pool->free_cmd_buffers, pool_link)
2514 {
2515 tu_cmd_buffer_destroy(cmd_buffer);
2516 }
2517
2518 vk_free2(&device->alloc, pAllocator, pool);
2519 }
2520
2521 VkResult
2522 tu_ResetCommandPool(VkDevice device,
2523 VkCommandPool commandPool,
2524 VkCommandPoolResetFlags flags)
2525 {
2526 TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
2527 VkResult result;
2528
2529 list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
2530 pool_link)
2531 {
2532 result = tu_reset_cmd_buffer(cmd_buffer);
2533 if (result != VK_SUCCESS)
2534 return result;
2535 }
2536
2537 return VK_SUCCESS;
2538 }
2539
2540 void
2541 tu_TrimCommandPool(VkDevice device,
2542 VkCommandPool commandPool,
2543 VkCommandPoolTrimFlags flags)
2544 {
2545 TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
2546
2547 if (!pool)
2548 return;
2549
2550 list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
2551 &pool->free_cmd_buffers, pool_link)
2552 {
2553 tu_cmd_buffer_destroy(cmd_buffer);
2554 }
2555 }
2556
2557 void
2558 tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
2559 const VkRenderPassBeginInfo *pRenderPassBegin,
2560 VkSubpassContents contents)
2561 {
2562 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2563 TU_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
2564 TU_FROM_HANDLE(tu_framebuffer, fb, pRenderPassBegin->framebuffer);
2565
2566 cmd->state.pass = pass;
2567 cmd->state.subpass = pass->subpasses;
2568 cmd->state.framebuffer = fb;
2569
2570 tu_cmd_update_tiling_config(cmd, &pRenderPassBegin->renderArea);
2571 tu_cmd_prepare_tile_store_ib(cmd);
2572
2573 tu_emit_load_clear(cmd, pRenderPassBegin);
2574
2575 tu6_emit_zs(cmd, cmd->state.subpass, &cmd->draw_cs);
2576 tu6_emit_mrt(cmd, cmd->state.subpass, &cmd->draw_cs);
2577 tu6_emit_msaa(cmd, cmd->state.subpass, &cmd->draw_cs);
2578 tu6_emit_render_cntl(cmd, cmd->state.subpass, &cmd->draw_cs, false);
2579
2580 /* note: use_hw_binning only checks tiling config */
2581 if (use_hw_binning(cmd))
2582 cmd->use_vsc_data = true;
2583
2584 for (uint32_t i = 0; i < fb->attachment_count; ++i) {
2585 const struct tu_image_view *iview = fb->attachments[i].attachment;
2586 tu_bo_list_add(&cmd->bo_list, iview->image->bo,
2587 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
2588 }
2589 }
2590
2591 void
2592 tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
2593 const VkRenderPassBeginInfo *pRenderPassBeginInfo,
2594 const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
2595 {
2596 tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
2597 pSubpassBeginInfo->contents);
2598 }
2599
2600 void
2601 tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
2602 {
2603 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2604 const struct tu_render_pass *pass = cmd->state.pass;
2605 struct tu_cs *cs = &cmd->draw_cs;
2606
2607 const struct tu_subpass *subpass = cmd->state.subpass++;
2608 /* TODO:
2609 * if msaa samples change between subpasses,
2610 * attachment store is broken for some attachments
2611 */
2612 if (subpass->resolve_attachments) {
2613 tu6_emit_blit_scissor(cmd, cs, true);
2614 for (unsigned i = 0; i < subpass->color_count; i++) {
2615 uint32_t a = subpass->resolve_attachments[i].attachment;
2616 if (a != VK_ATTACHMENT_UNUSED) {
2617 tu6_emit_resolve(cmd, cs, a,
2618 subpass->color_attachments[i].attachment);
2619 }
2620 }
2621 }
2622
2623 /* invalidate because reading input attachments will cache GMEM and
2624 * the cache isn't updated when GMEM is written
2625 * TODO: is there a no-cache bit for textures?
2626 */
2627 if (cmd->state.subpass->input_count)
2628 tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
2629
2630 /* emit mrt/zs/msaa/ubwc state for the subpass that is starting */
2631 tu6_emit_zs(cmd, cmd->state.subpass, cs);
2632 tu6_emit_mrt(cmd, cmd->state.subpass, cs);
2633 tu6_emit_msaa(cmd, cmd->state.subpass, cs);
2634 tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, false);
2635
2636 /* Emit flushes so that input attachments will read the correct value. This
2637 * is for sysmem only, although it shouldn't do much harm on gmem.
2638 */
2639 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
2640 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
2641
2642 /* TODO:
2643 * since we don't know how to do GMEM->GMEM resolve,
2644 * resolve attachments are resolved to memory and then loaded back into GMEM if needed
2645 */
2646 if (subpass->resolve_attachments) {
2647 for (unsigned i = 0; i < subpass->color_count; i++) {
2648 uint32_t a = subpass->resolve_attachments[i].attachment;
2649 if (a != VK_ATTACHMENT_UNUSED && pass->attachments[a].gmem_offset >= 0) {
2650 tu_finishme("missing GMEM->GMEM resolve, performance will suffer\n");
2651 tu6_emit_predicated_blit(cmd, cs, a, a, false);
2652 }
2653 }
2654 }
2655 }
2656
2657 void
2658 tu_CmdNextSubpass2(VkCommandBuffer commandBuffer,
2659 const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
2660 const VkSubpassEndInfoKHR *pSubpassEndInfo)
2661 {
2662 tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
2663 }
2664
2665 struct tu_draw_info
2666 {
2667 /**
2668 * Number of vertices.
2669 */
2670 uint32_t count;
2671
2672 /**
2673 * Index of the first vertex.
2674 */
2675 int32_t vertex_offset;
2676
2677 /**
2678 * First instance id.
2679 */
2680 uint32_t first_instance;
2681
2682 /**
2683 * Number of instances.
2684 */
2685 uint32_t instance_count;
2686
2687 /**
2688 * First index (indexed draws only).
2689 */
2690 uint32_t first_index;
2691
2692 /**
2693 * Whether it's an indexed draw.
2694 */
2695 bool indexed;
2696
2697 /**
2698 * Indirect draw parameters resource.
2699 */
2700 struct tu_buffer *indirect;
2701 uint64_t indirect_offset;
2702 uint32_t stride;
2703
2704 /**
2705 * Draw count parameters resource.
2706 */
2707 struct tu_buffer *count_buffer;
2708 uint64_t count_buffer_offset;
2709
2710 /**
2711 * Stream output parameters resource.
2712 */
2713 struct tu_buffer *streamout_buffer;
2714 uint64_t streamout_buffer_offset;
2715 };
2716
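/* Enable masks for CP_SET_DRAW_STATE groups: ENABLE_ALL replays a group in
 * the binning, GMEM and sysmem passes; ENABLE_DRAW skips the binning pass.
 */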
2717 #define ENABLE_ALL (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
2718 #define ENABLE_DRAW (CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
2719
2720 enum tu_draw_state_group_id
2721 {
2722 TU_DRAW_STATE_PROGRAM,
2723 TU_DRAW_STATE_PROGRAM_BINNING,
2724 TU_DRAW_STATE_VI,
2725 TU_DRAW_STATE_VI_BINNING,
2726 TU_DRAW_STATE_VP,
2727 TU_DRAW_STATE_RAST,
2728 TU_DRAW_STATE_DS,
2729 TU_DRAW_STATE_BLEND,
2730 TU_DRAW_STATE_VS_CONST,
2731 TU_DRAW_STATE_FS_CONST,
2732 TU_DRAW_STATE_VS_TEX,
2733 TU_DRAW_STATE_FS_TEX_SYSMEM,
2734 TU_DRAW_STATE_FS_TEX_GMEM,
2735 TU_DRAW_STATE_FS_IBO,
2736 TU_DRAW_STATE_VS_PARAMS,
2737
2738 TU_DRAW_STATE_COUNT,
2739 };
2740
2741 struct tu_draw_state_group
2742 {
2743 enum tu_draw_state_group_id id;
2744 uint32_t enable_mask;
2745 struct tu_cs_entry ib;
2746 };
2747
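/* Return a pointer to the sampler state for element 'array_index' of
 * descriptor 'i' in the map, taken from the layout's immutable samplers when
 * present, otherwise from the descriptor set contents.
 */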
2748 static const struct tu_sampler*
2749 sampler_ptr(struct tu_descriptor_state *descriptors_state,
2750 const struct tu_descriptor_map *map, unsigned i,
2751 unsigned array_index)
2752 {
2753 assert(descriptors_state->valid & (1 << map->set[i]));
2754
2755 struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
2756 assert(map->binding[i] < set->layout->binding_count);
2757
2758 const struct tu_descriptor_set_binding_layout *layout =
2759 &set->layout->binding[map->binding[i]];
2760
2761 if (layout->immutable_samplers_offset) {
2762 const struct tu_sampler *immutable_samplers =
2763 tu_immutable_samplers(set->layout, layout);
2764
2765 return &immutable_samplers[array_index];
2766 }
2767
2768 switch (layout->type) {
2769 case VK_DESCRIPTOR_TYPE_SAMPLER:
2770 return (struct tu_sampler*) &set->mapped_ptr[layout->offset / 4];
2771 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2772 return (struct tu_sampler*) &set->mapped_ptr[layout->offset / 4 + A6XX_TEX_CONST_DWORDS +
2773 array_index *
2774 (A6XX_TEX_CONST_DWORDS +
2775 sizeof(struct tu_sampler) / 4)];
2776 default:
2777 unreachable("unimplemented descriptor type");
2778 break;
2779 }
2780 }
2781
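/* Copy the texture descriptor for element 'array_index' of descriptor 'i'
 * into 'dst'.  For input attachments read from GMEM, the descriptor is
 * patched into a tiled 2D view at the attachment's GMEM offset, with the
 * tile width as the pitch.
 */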
2782 static void
2783 write_tex_const(struct tu_cmd_buffer *cmd,
2784 uint32_t *dst,
2785 struct tu_descriptor_state *descriptors_state,
2786 const struct tu_descriptor_map *map,
2787 unsigned i, unsigned array_index, bool is_sysmem)
2788 {
2789 assert(descriptors_state->valid & (1 << map->set[i]));
2790
2791 struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
2792 assert(map->binding[i] < set->layout->binding_count);
2793
2794 const struct tu_descriptor_set_binding_layout *layout =
2795 &set->layout->binding[map->binding[i]];
2796
2797 switch (layout->type) {
2798 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
2799 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2800 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2801 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
2802 memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
2803 array_index * A6XX_TEX_CONST_DWORDS],
2804 A6XX_TEX_CONST_DWORDS * 4);
2805 break;
2806 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2807 memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
2808 array_index *
2809 (A6XX_TEX_CONST_DWORDS +
2810 sizeof(struct tu_sampler) / 4)],
2811 A6XX_TEX_CONST_DWORDS * 4);
2812 break;
2813 default:
2814 unreachable("unimplemented descriptor type");
2815 break;
2816 }
2817
2818 if (layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT && !is_sysmem) {
2819 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
2820 uint32_t a = cmd->state.subpass->input_attachments[map->value[i] +
2821 array_index].attachment;
2822 const struct tu_render_pass_attachment *att = &cmd->state.pass->attachments[a];
2823
2824 assert(att->gmem_offset >= 0);
2825
2826 dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
2827 dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
2828 dst[2] &= ~(A6XX_TEX_CONST_2_TYPE__MASK | A6XX_TEX_CONST_2_PITCH__MASK);
2829 dst[2] |=
2830 A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
2831 A6XX_TEX_CONST_2_PITCH(tiling->tile0.extent.width * att->cpp);
2832 dst[3] = 0;
2833 dst[4] = cmd->device->physical_device->gmem_base + att->gmem_offset;
2834 dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
2835 for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
2836 dst[i] = 0;
2837
2838 if (cmd->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
2839 tu_finishme("patch input attachment pitch for secondary cmd buffer");
2840 }
2841 }
2842
2843 static void
2844 write_image_ibo(struct tu_cmd_buffer *cmd,
2845 uint32_t *dst,
2846 struct tu_descriptor_state *descriptors_state,
2847 const struct tu_descriptor_map *map,
2848 unsigned i, unsigned array_index)
2849 {
2850 assert(descriptors_state->valid & (1 << map->set[i]));
2851
2852 struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
2853 assert(map->binding[i] < set->layout->binding_count);
2854
2855 const struct tu_descriptor_set_binding_layout *layout =
2856 &set->layout->binding[map->binding[i]];
2857
2858 assert(layout->type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
2859
2860 memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
2861 (array_index * 2 + 1) * A6XX_TEX_CONST_DWORDS],
2862 A6XX_TEX_CONST_DWORDS * 4);
2863 }
2864
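/* Return the 64-bit device address stored in a buffer descriptor; dynamic
 * uniform/storage buffers come from the dynamic_buffers array filled in by
 * vkCmdBindDescriptorSets.
 */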
2865 static uint64_t
2866 buffer_ptr(struct tu_descriptor_state *descriptors_state,
2867 const struct tu_descriptor_map *map,
2868 unsigned i, unsigned array_index)
2869 {
2870 assert(descriptors_state->valid & (1 << map->set[i]));
2871
2872 struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
2873 assert(map->binding[i] < set->layout->binding_count);
2874
2875 const struct tu_descriptor_set_binding_layout *layout =
2876 &set->layout->binding[map->binding[i]];
2877
2878 switch (layout->type) {
2879 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2880 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2881 return descriptors_state->dynamic_buffers[layout->dynamic_offset_offset +
2882 array_index];
2883 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2884 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2885 return (uint64_t) set->mapped_ptr[layout->offset / 4 + array_index * 2 + 1] << 32 |
2886 set->mapped_ptr[layout->offset / 4 + array_index * 2];
2887 default:
2888 unreachable("unimplemented descriptor type");
2889 break;
2890 }
2891 }
2892
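/* Map a shader stage to the CP_LOAD_STATE6 variant used to load its state:
 * geometry-pipeline stages use the GEOM opcode, fragment and compute use
 * FRAG.
 */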
2893 static inline uint32_t
2894 tu6_stage2opcode(gl_shader_stage type)
2895 {
2896 switch (type) {
2897 case MESA_SHADER_VERTEX:
2898 case MESA_SHADER_TESS_CTRL:
2899 case MESA_SHADER_TESS_EVAL:
2900 case MESA_SHADER_GEOMETRY:
2901 return CP_LOAD_STATE6_GEOM;
2902 case MESA_SHADER_FRAGMENT:
2903 case MESA_SHADER_COMPUTE:
2904 case MESA_SHADER_KERNEL:
2905 return CP_LOAD_STATE6_FRAG;
2906 default:
2907 unreachable("bad shader type");
2908 }
2909 }
2910
2911 static inline enum a6xx_state_block
2912 tu6_stage2shadersb(gl_shader_stage type)
2913 {
2914 switch (type) {
2915 case MESA_SHADER_VERTEX:
2916 return SB6_VS_SHADER;
2917 case MESA_SHADER_FRAGMENT:
2918 return SB6_FS_SHADER;
2919 case MESA_SHADER_COMPUTE:
2920 case MESA_SHADER_KERNEL:
2921 return SB6_CS_SHADER;
2922 default:
2923 unreachable("bad shader type");
2924 return ~0;
2925 }
2926 }
2927
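/* Upload UBO ranges that the compiler promoted into the constant file:
 * range 0 holds the push constants and is emitted inline, the remaining
 * ranges are loaded indirectly from the corresponding UBO's device address.
 */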
2928 static void
2929 tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
2930 struct tu_descriptor_state *descriptors_state,
2931 gl_shader_stage type,
2932 uint32_t *push_constants)
2933 {
2934 const struct tu_program_descriptor_linkage *link =
2935 &pipeline->program.link[type];
2936 const struct ir3_ubo_analysis_state *state = &link->ubo_state;
2937
2938 for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
2939 if (state->range[i].start < state->range[i].end) {
2940 uint32_t size = state->range[i].end - state->range[i].start;
2941 uint32_t offset = state->range[i].start;
2942
2943 /* even if the start of the range fits within the shader's constlen,
2944 * the end may not, so clamp the size:
2945 */
2946 size = MIN2(size, (16 * link->constlen) - state->range[i].offset);
2947
2948 if (size == 0)
2949 continue;
2950
2951 /* things should be aligned to vec4: */
2952 debug_assert((state->range[i].offset % 16) == 0);
2953 debug_assert((size % 16) == 0);
2954 debug_assert((offset % 16) == 0);
2955
2956 if (i == 0) {
2957 /* push constants */
2958 tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (size / 4));
2959 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
2960 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
2961 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
2962 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
2963 CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
2964 tu_cs_emit(cs, 0);
2965 tu_cs_emit(cs, 0);
2966 for (unsigned i = 0; i < size / 4; i++)
2967 tu_cs_emit(cs, push_constants[i + offset / 4]);
2968 continue;
2969 }
2970
2971 /* Look through the UBO map to find our UBO index, and get the VA for
2972 * that UBO.
2973 */
2974 uint64_t va = 0;
2975 uint32_t ubo_idx = i - 1;
2976 uint32_t ubo_map_base = 0;
2977 for (int j = 0; j < link->ubo_map.num; j++) {
2978 if (ubo_idx >= ubo_map_base &&
2979 ubo_idx < ubo_map_base + link->ubo_map.array_size[j]) {
2980 va = buffer_ptr(descriptors_state, &link->ubo_map, j,
2981 ubo_idx - ubo_map_base);
2982 break;
2983 }
2984 ubo_map_base += link->ubo_map.array_size[j];
2985 }
2986 assert(va);
2987
2988 tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
2989 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
2990 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
2991 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
2992 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
2993 CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
2994 tu_cs_emit_qw(cs, va + offset);
2995 }
2996 }
2997 }
2998
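/* Emit the table of UBO base addresses at the constant offset the shader
 * expects (const_state.offsets.ubo), padded to an even number of entries
 * with 0xffffffff.
 */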
2999 static void
3000 tu6_emit_ubos(struct tu_cs *cs, const struct tu_pipeline *pipeline,
3001 struct tu_descriptor_state *descriptors_state,
3002 gl_shader_stage type)
3003 {
3004 const struct tu_program_descriptor_linkage *link =
3005 &pipeline->program.link[type];
3006
3007 uint32_t num = MIN2(link->ubo_map.num_desc, link->const_state.num_ubos);
3008 uint32_t anum = align(num, 2);
3009
3010 if (!num)
3011 return;
3012
3013 tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (2 * anum));
3014 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(link->const_state.offsets.ubo) |
3015 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
3016 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
3017 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
3018 CP_LOAD_STATE6_0_NUM_UNIT(anum/2));
3019 tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
3020 tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
3021
3022 unsigned emitted = 0;
3023 for (unsigned i = 0; emitted < num && i < link->ubo_map.num; i++) {
3024 for (unsigned j = 0; emitted < num && j < link->ubo_map.array_size[i]; j++) {
3025 tu_cs_emit_qw(cs, buffer_ptr(descriptors_state, &link->ubo_map, i, j));
3026 emitted++;
3027 }
3028 }
3029
3030 for (; emitted < anum; emitted++) {
3031 tu_cs_emit(cs, 0xffffffff);
3032 tu_cs_emit(cs, 0xffffffff);
3033 }
3034 }
3035
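/* Build a sub-stream IB containing both the promoted constant ranges and
 * the UBO address table for one shader stage.
 */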
3036 static struct tu_cs_entry
3037 tu6_emit_consts(struct tu_cmd_buffer *cmd,
3038 const struct tu_pipeline *pipeline,
3039 struct tu_descriptor_state *descriptors_state,
3040 gl_shader_stage type)
3041 {
3042 struct tu_cs cs;
3043 tu_cs_begin_sub_stream(&cmd->sub_cs, 512, &cs); /* TODO: maximum size? */
3044
3045 tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);
3046 tu6_emit_ubos(&cs, pipeline, descriptors_state, type);
3047
3048 return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
3049 }
3050
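/* Emit one vec4 of VS driver params; only the base-instance slot
 * (IR3_DP_INSTID_BASE) is filled in for now, the rest are zero.  Nothing is
 * emitted when the shader's constlen leaves no room for driver params.
 */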
3051 static VkResult
3052 tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
3053 const struct tu_draw_info *draw,
3054 struct tu_cs_entry *entry)
3055 {
3056 /* TODO: fill out more than just base instance */
3057 const struct tu_program_descriptor_linkage *link =
3058 &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
3059 const struct ir3_const_state *const_state = &link->const_state;
3060 struct tu_cs cs;
3061
3062 if (const_state->offsets.driver_param >= link->constlen) {
3063 *entry = (struct tu_cs_entry) {};
3064 return VK_SUCCESS;
3065 }
3066
3067 VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 8, &cs);
3068 if (result != VK_SUCCESS)
3069 return result;
3070
3071 tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
3072 tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(const_state->offsets.driver_param) |
3073 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
3074 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
3075 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
3076 CP_LOAD_STATE6_0_NUM_UNIT(1));
3077 tu_cs_emit(&cs, 0);
3078 tu_cs_emit(&cs, 0);
3079
3080 STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);
3081
3082 tu_cs_emit(&cs, 0);
3083 tu_cs_emit(&cs, 0);
3084 tu_cs_emit(&cs, draw->first_instance);
3085 tu_cs_emit(&cs, 0);
3086
3087 *entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
3088 return VK_SUCCESS;
3089 }
3090
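/* Build the texture descriptor and sampler state arrays for a stage in
 * sub-stream memory, then emit the CP_LOAD_STATE6 packets and the per-stage
 * TEX_SAMP/TEX_CONST/TEX_COUNT registers pointing at them.
 */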
3091 static VkResult
3092 tu6_emit_textures(struct tu_cmd_buffer *cmd,
3093 const struct tu_pipeline *pipeline,
3094 struct tu_descriptor_state *descriptors_state,
3095 gl_shader_stage type,
3096 struct tu_cs_entry *entry,
3097 bool *needs_border,
3098 bool is_sysmem)
3099 {
3100 struct tu_cs *draw_state = &cmd->sub_cs;
3101 const struct tu_program_descriptor_linkage *link =
3102 &pipeline->program.link[type];
3103 VkResult result;
3104
3105 if (link->texture_map.num_desc == 0 && link->sampler_map.num_desc == 0) {
3106 *entry = (struct tu_cs_entry) {};
3107 return VK_SUCCESS;
3108 }
3109
3110 /* allocate and fill texture state */
3111 struct ts_cs_memory tex_const;
3112 result = tu_cs_alloc(draw_state, link->texture_map.num_desc,
3113 A6XX_TEX_CONST_DWORDS, &tex_const);
3114 if (result != VK_SUCCESS)
3115 return result;
3116
3117 int tex_index = 0;
3118 for (unsigned i = 0; i < link->texture_map.num; i++) {
3119 for (int j = 0; j < link->texture_map.array_size[i]; j++) {
3120 write_tex_const(cmd,
3121 &tex_const.map[A6XX_TEX_CONST_DWORDS * tex_index++],
3122 descriptors_state, &link->texture_map, i, j,
3123 is_sysmem);
3124 }
3125 }
3126
3127 /* allocate and fill sampler state */
3128 struct ts_cs_memory tex_samp = { 0 };
3129 if (link->sampler_map.num_desc) {
3130 result = tu_cs_alloc(draw_state, link->sampler_map.num_desc,
3131 A6XX_TEX_SAMP_DWORDS, &tex_samp);
3132 if (result != VK_SUCCESS)
3133 return result;
3134
3135 int sampler_index = 0;
3136 for (unsigned i = 0; i < link->sampler_map.num; i++) {
3137 for (int j = 0; j < link->sampler_map.array_size[i]; j++) {
3138 const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
3139 &link->sampler_map,
3140 i, j);
3141 memcpy(&tex_samp.map[A6XX_TEX_SAMP_DWORDS * sampler_index++],
3142 sampler->state, sizeof(sampler->state));
3143 *needs_border |= sampler->needs_border;
3144 }
3145 }
3146 }
3147
3148 unsigned tex_samp_reg, tex_const_reg, tex_count_reg;
3149 enum a6xx_state_block sb;
3150
3151 switch (type) {
3152 case MESA_SHADER_VERTEX:
3153 sb = SB6_VS_TEX;
3154 tex_samp_reg = REG_A6XX_SP_VS_TEX_SAMP_LO;
3155 tex_const_reg = REG_A6XX_SP_VS_TEX_CONST_LO;
3156 tex_count_reg = REG_A6XX_SP_VS_TEX_COUNT;
3157 break;
3158 case MESA_SHADER_FRAGMENT:
3159 sb = SB6_FS_TEX;
3160 tex_samp_reg = REG_A6XX_SP_FS_TEX_SAMP_LO;
3161 tex_const_reg = REG_A6XX_SP_FS_TEX_CONST_LO;
3162 tex_count_reg = REG_A6XX_SP_FS_TEX_COUNT;
3163 break;
3164 case MESA_SHADER_COMPUTE:
3165 sb = SB6_CS_TEX;
3166 tex_samp_reg = REG_A6XX_SP_CS_TEX_SAMP_LO;
3167 tex_const_reg = REG_A6XX_SP_CS_TEX_CONST_LO;
3168 tex_count_reg = REG_A6XX_SP_CS_TEX_COUNT;
3169 break;
3170 default:
3171 unreachable("bad state block");
3172 }
3173
3174 struct tu_cs cs;
3175 result = tu_cs_begin_sub_stream(draw_state, 16, &cs);
3176 if (result != VK_SUCCESS)
3177 return result;
3178
3179 if (link->sampler_map.num_desc) {
3180 /* output sampler state: */
3181 tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
3182 tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
3183 CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
3184 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
3185 CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
3186 CP_LOAD_STATE6_0_NUM_UNIT(link->sampler_map.num_desc));
3187 tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
3188
3189 tu_cs_emit_pkt4(&cs, tex_samp_reg, 2);
3190 tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
3191 }
3192
3193 /* emit texture state: */
3194 tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
3195 tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
3196 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
3197 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
3198 CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
3199 CP_LOAD_STATE6_0_NUM_UNIT(link->texture_map.num_desc));
3200 tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */
3201
3202 tu_cs_emit_pkt4(&cs, tex_const_reg, 2);
3203 tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */
3204
3205 tu_cs_emit_pkt4(&cs, tex_count_reg, 1);
3206 tu_cs_emit(&cs, link->texture_map.num_desc);
3207
3208 *entry = tu_cs_end_sub_stream(draw_state, &cs);
3209 return VK_SUCCESS;
3210 }
3211
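/* Build the IBO descriptor array for a stage: SSBOs are described as
 * unsized FMT6_32_UINT buffers (robustBufferAccess is not exposed),
 * followed by storage-image descriptors; then emit the load packet and the
 * SP_*_IBO pointer registers.
 */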
3212 static VkResult
3213 tu6_emit_ibo(struct tu_cmd_buffer *cmd,
3214 const struct tu_pipeline *pipeline,
3215 struct tu_descriptor_state *descriptors_state,
3216 gl_shader_stage type,
3217 struct tu_cs_entry *entry)
3218 {
3219 struct tu_cs *draw_state = &cmd->sub_cs;
3220 const struct tu_program_descriptor_linkage *link =
3221 &pipeline->program.link[type];
3222 VkResult result;
3223
3224 unsigned num_desc = link->ssbo_map.num_desc + link->image_map.num_desc;
3225
3226 if (num_desc == 0) {
3227 *entry = (struct tu_cs_entry) {};
3228 return VK_SUCCESS;
3229 }
3230
3231 struct ts_cs_memory ibo_const;
3232 result = tu_cs_alloc(draw_state, num_desc,
3233 A6XX_TEX_CONST_DWORDS, &ibo_const);
3234 if (result != VK_SUCCESS)
3235 return result;
3236
3237 int ssbo_index = 0;
3238 for (unsigned i = 0; i < link->ssbo_map.num; i++) {
3239 for (int j = 0; j < link->ssbo_map.array_size[i]; j++) {
3240 uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];
3241
3242 uint64_t va = buffer_ptr(descriptors_state, &link->ssbo_map, i, j);
3243 /* We don't expose robustBufferAccess, so leave the size unlimited. */
3244 uint32_t sz = MAX_STORAGE_BUFFER_RANGE / 4;
3245
3246 dst[0] = A6XX_IBO_0_FMT(FMT6_32_UINT);
3247 dst[1] = A6XX_IBO_1_WIDTH(sz & MASK(15)) |
3248 A6XX_IBO_1_HEIGHT(sz >> 15);
3249 dst[2] = A6XX_IBO_2_UNK4 |
3250 A6XX_IBO_2_UNK31 |
3251 A6XX_IBO_2_TYPE(A6XX_TEX_1D);
3252 dst[3] = 0;
3253 dst[4] = va;
3254 dst[5] = va >> 32;
3255 for (int i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
3256 dst[i] = 0;
3257
3258 ssbo_index++;
3259 }
3260 }
3261
3262 for (unsigned i = 0; i < link->image_map.num; i++) {
3263 for (int j = 0; j < link->image_map.array_size[i]; j++) {
3264 uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];
3265
3266 write_image_ibo(cmd, dst,
3267 descriptors_state, &link->image_map, i, j);
3268
3269 ssbo_index++;
3270 }
3271 }
3272
3273 assert(ssbo_index == num_desc);
3274
3275 struct tu_cs cs;
3276 result = tu_cs_begin_sub_stream(draw_state, 7, &cs);
3277 if (result != VK_SUCCESS)
3278 return result;
3279
3280 uint32_t opcode, ibo_addr_reg;
3281 enum a6xx_state_block sb;
3282 enum a6xx_state_type st;
3283
3284 switch (type) {
3285 case MESA_SHADER_FRAGMENT:
3286 opcode = CP_LOAD_STATE6;
3287 st = ST6_SHADER;
3288 sb = SB6_IBO;
3289 ibo_addr_reg = REG_A6XX_SP_IBO_LO;
3290 break;
3291 case MESA_SHADER_COMPUTE:
3292 opcode = CP_LOAD_STATE6_FRAG;
3293 st = ST6_IBO;
3294 sb = SB6_CS_SHADER;
3295 ibo_addr_reg = REG_A6XX_SP_CS_IBO_LO;
3296 break;
3297 default:
3298 unreachable("unsupported stage for ibos");
3299 }
3300
3301 /* emit ibo state: */
3302 tu_cs_emit_pkt7(&cs, opcode, 3);
3303 tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
3304 CP_LOAD_STATE6_0_STATE_TYPE(st) |
3305 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
3306 CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
3307 CP_LOAD_STATE6_0_NUM_UNIT(num_desc));
3308 tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */
3309
3310 tu_cs_emit_pkt4(&cs, ibo_addr_reg, 2);
3311 tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */
3312
3313 *entry = tu_cs_end_sub_stream(draw_state, &cs);
3314 return VK_SUCCESS;
3315 }
3316
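/* Border color table: one 128-byte entry per VkBorderColor, with the value
 * pre-encoded in the various formats the hardware may sample it in.
 */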
3317 struct PACKED bcolor_entry {
3318 uint32_t fp32[4];
3319 uint16_t ui16[4];
3320 int16_t si16[4];
3321 uint16_t fp16[4];
3322 uint16_t rgb565;
3323 uint16_t rgb5a1;
3324 uint16_t rgba4;
3325 uint8_t __pad0[2];
3326 uint8_t ui8[4];
3327 int8_t si8[4];
3328 uint32_t rgb10a2;
3329 uint32_t z24; /* also s8? */
3330 uint16_t srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
3331 uint8_t __pad1[56];
3332 } border_color[] = {
3333 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
3334 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
3335 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
3336 .fp32[3] = 0x3f800000,
3337 .ui16[3] = 0xffff,
3338 .si16[3] = 0x7fff,
3339 .fp16[3] = 0x3c00,
3340 .rgb5a1 = 0x8000,
3341 .rgba4 = 0xf000,
3342 .ui8[3] = 0xff,
3343 .si8[3] = 0x7f,
3344 .rgb10a2 = 0xc0000000,
3345 .srgb[3] = 0x3c00,
3346 },
3347 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
3348 .fp32[3] = 1,
3349 .fp16[3] = 1,
3350 },
3351 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
3352 .fp32[0 ... 3] = 0x3f800000,
3353 .ui16[0 ... 3] = 0xffff,
3354 .si16[0 ... 3] = 0x7fff,
3355 .fp16[0 ... 3] = 0x3c00,
3356 .rgb565 = 0xffff,
3357 .rgb5a1 = 0xffff,
3358 .rgba4 = 0xffff,
3359 .ui8[0 ... 3] = 0xff,
3360 .si8[0 ... 3] = 0x7f,
3361 .rgb10a2 = 0xffffffff,
3362 .z24 = 0xffffff,
3363 .srgb[0 ... 3] = 0x3c00,
3364 },
3365 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
3366 .fp32[0 ... 3] = 1,
3367 .fp16[0 ... 3] = 1,
3368 },
3369 };
3370
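/* Copy the border color entry for every VS and FS sampler into a sub-stream
 * buffer, in sampler order, and point SP_TP_BORDER_COLOR_BASE_ADDR at it.
 */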
3371 static VkResult
3372 tu6_emit_border_color(struct tu_cmd_buffer *cmd,
3373 struct tu_cs *cs)
3374 {
3375 STATIC_ASSERT(sizeof(struct bcolor_entry) == 128);
3376
3377 const struct tu_pipeline *pipeline = cmd->state.pipeline;
3378 struct tu_descriptor_state *descriptors_state =
3379 &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];
3380 const struct tu_descriptor_map *vs_sampler =
3381 &pipeline->program.link[MESA_SHADER_VERTEX].sampler_map;
3382 const struct tu_descriptor_map *fs_sampler =
3383 &pipeline->program.link[MESA_SHADER_FRAGMENT].sampler_map;
3384 struct ts_cs_memory ptr;
3385
3386 VkResult result = tu_cs_alloc(&cmd->sub_cs,
3387 vs_sampler->num_desc + fs_sampler->num_desc,
3388 128 / 4,
3389 &ptr);
3390 if (result != VK_SUCCESS)
3391 return result;
3392
3393 for (unsigned i = 0; i < vs_sampler->num; i++) {
3394 for (unsigned j = 0; j < vs_sampler->array_size[i]; j++) {
3395 const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
3396 vs_sampler, i, j);
3397 memcpy(ptr.map, &border_color[sampler->border], 128);
3398 ptr.map += 128 / 4;
3399 }
3400 }
3401
3402 for (unsigned i = 0; i < fs_sampler->num; i++) {
3403 for (unsigned j = 0; j < fs_sampler->array_size[i]; j++) {
3404 const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
3405 fs_sampler, i, j);
3406 memcpy(ptr.map, &border_color[sampler->border], 128);
3407 ptr.map += 128 / 4;
3408 }
3409 }
3410
3411 tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_LO, 2);
3412 tu_cs_emit_qw(cs, ptr.iova);
3413 return VK_SUCCESS;
3414 }
3415
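/* Program the stream-out buffers.  A buffer bound with a non-zero offset has
 * its offset written directly (scaled by the stride); otherwise the offset
 * is reloaded with CP_MEM_TO_REG from the scratch BO location that
 * VPC_SO_FLUSH_BASE points at.  When stream-out is enabled, the SO_PROG and
 * SO_NCOMP state from the pipeline is also emitted.
 */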
3416 static void
3417 tu6_emit_streamout(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
3418 {
3419 struct tu_streamout_state *tf = &cmd->state.pipeline->streamout;
3420
3421 for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
3422 struct tu_buffer *buf = cmd->state.streamout_buf.buffers[i];
3423 if (!buf)
3424 continue;
3425
3426 uint32_t offset;
3427 offset = cmd->state.streamout_buf.offsets[i];
3428
3429 tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_BASE(i, .bo = buf->bo,
3430 .bo_offset = buf->bo_offset));
3431 tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_SIZE(i, buf->size));
3432
3433 if (cmd->state.streamout_reset & (1 << i)) {
3434 offset *= tf->stride[i];
3435
3436 tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_OFFSET(i, offset));
3437 cmd->state.streamout_reset &= ~(1 << i);
3438 } else {
3439 tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
3440 tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(i)) |
3441 CP_MEM_TO_REG_0_SHIFT_BY_2 | CP_MEM_TO_REG_0_UNK31 |
3442 CP_MEM_TO_REG_0_CNT(0));
3443 tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_FLUSH * (i + 1));
3444 }
3445
3446 tu_cs_emit_regs(cs, A6XX_VPC_SO_FLUSH_BASE(i, .bo = &cmd->scratch_bo,
3447 .bo_offset = VSC_FLUSH * (i + 1)));
3448 }
3449
3450 if (cmd->state.streamout_enabled) {
3451 tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 12 + (2 * tf->prog_count));
3452 tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
3453 tu_cs_emit(cs, tf->vpc_so_buf_cntl);
3454 tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(0));
3455 tu_cs_emit(cs, tf->ncomp[0]);
3456 tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(1));
3457 tu_cs_emit(cs, tf->ncomp[1]);
3458 tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(2));
3459 tu_cs_emit(cs, tf->ncomp[2]);
3460 tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(3));
3461 tu_cs_emit(cs, tf->ncomp[3]);
3462 tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
3463 tu_cs_emit(cs, A6XX_VPC_SO_CNTL_ENABLE);
3464 for (unsigned i = 0; i < tf->prog_count; i++) {
3465 tu_cs_emit(cs, REG_A6XX_VPC_SO_PROG);
3466 tu_cs_emit(cs, tf->prog[i]);
3467 }
3468 } else {
3469 tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 4);
3470 tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
3471 tu_cs_emit(cs, 0);
3472 tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
3473 tu_cs_emit(cs, 0);
3474 }
3475 }
3476
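/* Emit the draw-time state that depends on the pipeline, dynamic state,
 * vertex buffers and descriptors, collecting per-group IBs (program, VI,
 * viewport, rasterizer, ...) tagged with the passes (binning/GMEM/sysmem)
 * that should replay them.
 */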
3477 static VkResult
3478 tu6_bind_draw_states(struct tu_cmd_buffer *cmd,
3479 struct tu_cs *cs,
3480 const struct tu_draw_info *draw)
3481 {
3482 const struct tu_pipeline *pipeline = cmd->state.pipeline;
3483 const struct tu_dynamic_state *dynamic = &cmd->state.dynamic;
3484 struct tu_draw_state_group draw_state_groups[TU_DRAW_STATE_COUNT];
3485 uint32_t draw_state_group_count = 0;
3486 VkResult result;
3487
3488 struct tu_descriptor_state *descriptors_state =
3489 &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];
3490
3491 /* TODO lrz */
3492
3493 tu_cs_emit_regs(cs,
3494 A6XX_PC_PRIMITIVE_CNTL_0(.primitive_restart =
3495 pipeline->ia.primitive_restart && draw->indexed));
3496
3497 if (cmd->state.dirty &
3498 (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH) &&
3499 (pipeline->dynamic_state.mask & TU_DYNAMIC_LINE_WIDTH)) {
3500 tu6_emit_gras_su_cntl(cs, pipeline->rast.gras_su_cntl,
3501 dynamic->line_width);
3502 }
3503
3504 if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK) &&
3505 (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_COMPARE_MASK)) {
3506 tu6_emit_stencil_compare_mask(cs, dynamic->stencil_compare_mask.front,
3507 dynamic->stencil_compare_mask.back);
3508 }
3509
3510 if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK) &&
3511 (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_WRITE_MASK)) {
3512 tu6_emit_stencil_write_mask(cs, dynamic->stencil_write_mask.front,
3513 dynamic->stencil_write_mask.back);
3514 }
3515
3516 if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE) &&
3517 (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_REFERENCE)) {
3518 tu6_emit_stencil_reference(cs, dynamic->stencil_reference.front,
3519 dynamic->stencil_reference.back);
3520 }
3521
3522 if (cmd->state.dirty &
3523 (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_VERTEX_BUFFERS)) {
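      /* Vertex buffer bindings are plain registers rather than a draw state
       * group, so they are re-emitted whenever the pipeline or the bindings
       * change; the fetch size is clamped so a binding never reads past the
       * end of its BO.
       */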
3524 for (uint32_t i = 0; i < pipeline->vi.count; i++) {
3525 const uint32_t binding = pipeline->vi.bindings[i];
3526 const uint32_t stride = pipeline->vi.strides[i];
3527 const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
3528 const VkDeviceSize offset = buf->bo_offset +
3529 cmd->state.vb.offsets[binding] +
3530 pipeline->vi.offsets[i];
3531 const VkDeviceSize size =
3532 offset < buf->bo->size ? buf->bo->size - offset : 0;
3533
3534 tu_cs_emit_regs(cs,
3535 A6XX_VFD_FETCH_BASE(i, .bo = buf->bo, .bo_offset = offset),
3536 A6XX_VFD_FETCH_SIZE(i, size),
3537 A6XX_VFD_FETCH_STRIDE(i, stride));
3538 }
3539 }
3540
3541 if (cmd->state.dirty & TU_CMD_DIRTY_PIPELINE) {
3542 draw_state_groups[draw_state_group_count++] =
3543 (struct tu_draw_state_group) {
3544 .id = TU_DRAW_STATE_PROGRAM,
3545 .enable_mask = ENABLE_DRAW,
3546 .ib = pipeline->program.state_ib,
3547 };
3548 draw_state_groups[draw_state_group_count++] =
3549 (struct tu_draw_state_group) {
3550 .id = TU_DRAW_STATE_PROGRAM_BINNING,
3551 .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
3552 .ib = pipeline->program.binning_state_ib,
3553 };
3554 draw_state_groups[draw_state_group_count++] =
3555 (struct tu_draw_state_group) {
3556 .id = TU_DRAW_STATE_VI,
3557 .enable_mask = ENABLE_DRAW,
3558 .ib = pipeline->vi.state_ib,
3559 };
3560 draw_state_groups[draw_state_group_count++] =
3561 (struct tu_draw_state_group) {
3562 .id = TU_DRAW_STATE_VI_BINNING,
3563 .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
3564 .ib = pipeline->vi.binning_state_ib,
3565 };
3566 draw_state_groups[draw_state_group_count++] =
3567 (struct tu_draw_state_group) {
3568 .id = TU_DRAW_STATE_VP,
3569 .enable_mask = ENABLE_ALL,
3570 .ib = pipeline->vp.state_ib,
3571 };
3572 draw_state_groups[draw_state_group_count++] =
3573 (struct tu_draw_state_group) {
3574 .id = TU_DRAW_STATE_RAST,
3575 .enable_mask = ENABLE_ALL,
3576 .ib = pipeline->rast.state_ib,
3577 };
3578 draw_state_groups[draw_state_group_count++] =
3579 (struct tu_draw_state_group) {
3580 .id = TU_DRAW_STATE_DS,
3581 .enable_mask = ENABLE_ALL,
3582 .ib = pipeline->ds.state_ib,
3583 };
3584 draw_state_groups[draw_state_group_count++] =
3585 (struct tu_draw_state_group) {
3586 .id = TU_DRAW_STATE_BLEND,
3587 .enable_mask = ENABLE_ALL,
3588 .ib = pipeline->blend.state_ib,
3589 };
3590 }
3591
3592 if (cmd->state.dirty &
3593 (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS | TU_CMD_DIRTY_PUSH_CONSTANTS)) {
3594 draw_state_groups[draw_state_group_count++] =
3595 (struct tu_draw_state_group) {
3596 .id = TU_DRAW_STATE_VS_CONST,
3597 .enable_mask = ENABLE_ALL,
3598 .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX)
3599 };
3600 draw_state_groups[draw_state_group_count++] =
3601 (struct tu_draw_state_group) {
3602 .id = TU_DRAW_STATE_FS_CONST,
3603 .enable_mask = ENABLE_DRAW,
3604 .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT)
3605 };
3606 }
3607
3608 if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS)
3609 tu6_emit_streamout(cmd, cs);
3610
3611 if (cmd->state.dirty &
3612 (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS)) {
3613 bool needs_border = false;
3614 struct tu_cs_entry vs_tex, fs_tex_sysmem, fs_tex_gmem, fs_ibo;
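      /* Fragment texture state is emitted in two variants: the GMEM one is
       * used when input attachments are read from the tile buffer, the
       * sysmem one when they are read from the attachment images directly.
       */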
3615
3616 result = tu6_emit_textures(cmd, pipeline, descriptors_state,
3617 MESA_SHADER_VERTEX, &vs_tex, &needs_border,
3618 false);
3619 if (result != VK_SUCCESS)
3620 return result;
3621
3622 /* TODO: we could emit just one texture descriptor draw state when there
3623 * are no input attachments, which is the most common case. We could
3624 * also split out the sampler state, which doesn't change even for input
3625 * attachments.
3626 */
3627 result = tu6_emit_textures(cmd, pipeline, descriptors_state,
3628 MESA_SHADER_FRAGMENT, &fs_tex_sysmem,
3629 &needs_border, true);
3630 if (result != VK_SUCCESS)
3631 return result;
3632
3633 result = tu6_emit_textures(cmd, pipeline, descriptors_state,
3634 MESA_SHADER_FRAGMENT, &fs_tex_gmem,
3635 &needs_border, false);
3636 if (result != VK_SUCCESS)
3637 return result;
3638
3639 result = tu6_emit_ibo(cmd, pipeline, descriptors_state,
3640 MESA_SHADER_FRAGMENT, &fs_ibo);
3641 if (result != VK_SUCCESS)
3642 return result;
3643
3644 draw_state_groups[draw_state_group_count++] =
3645 (struct tu_draw_state_group) {
3646 .id = TU_DRAW_STATE_VS_TEX,
3647 .enable_mask = ENABLE_ALL,
3648 .ib = vs_tex,
3649 };
3650 draw_state_groups[draw_state_group_count++] =
3651 (struct tu_draw_state_group) {
3652 .id = TU_DRAW_STATE_FS_TEX_GMEM,
3653 .enable_mask = CP_SET_DRAW_STATE__0_GMEM,
3654 .ib = fs_tex_gmem,
3655 };
3656 draw_state_groups[draw_state_group_count++] =
3657 (struct tu_draw_state_group) {
3658 .id = TU_DRAW_STATE_FS_TEX_SYSMEM,
3659 .enable_mask = CP_SET_DRAW_STATE__0_SYSMEM,
3660 .ib = fs_tex_sysmem,
3661 };
3662 draw_state_groups[draw_state_group_count++] =
3663 (struct tu_draw_state_group) {
3664 .id = TU_DRAW_STATE_FS_IBO,
3665 .enable_mask = ENABLE_DRAW,
3666 .ib = fs_ibo,
3667 };
3668
3669 if (needs_border) {
3670 result = tu6_emit_border_color(cmd, cs);
3671 if (result != VK_SUCCESS)
3672 return result;
3673 }
3674 }
3675
3676 struct tu_cs_entry vs_params;
3677 result = tu6_emit_vs_params(cmd, draw, &vs_params);
3678 if (result != VK_SUCCESS)
3679 return result;
3680
3681 draw_state_groups[draw_state_group_count++] =
3682 (struct tu_draw_state_group) {
3683 .id = TU_DRAW_STATE_VS_PARAMS,
3684 .enable_mask = ENABLE_ALL,
3685 .ib = vs_params,
3686 };
3687
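   /* Each CP_SET_DRAW_STATE group is three dwords: a header with the IB size
    * in dwords, the enable mask and the group id, followed by the 64-bit IB
    * address. Empty groups are emitted with the DISABLE bit set so that any
    * stale group from a previous bind is cleared.
    */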
3688 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_group_count);
3689 for (uint32_t i = 0; i < draw_state_group_count; i++) {
3690 const struct tu_draw_state_group *group = &draw_state_groups[i];
3691 debug_assert((group->enable_mask & ~ENABLE_ALL) == 0);
3692 uint32_t cp_set_draw_state =
3693 CP_SET_DRAW_STATE__0_COUNT(group->ib.size / 4) |
3694 group->enable_mask |
3695 CP_SET_DRAW_STATE__0_GROUP_ID(group->id);
3696 uint64_t iova;
3697 if (group->ib.size) {
3698 iova = group->ib.bo->iova + group->ib.offset;
3699 } else {
3700 cp_set_draw_state |= CP_SET_DRAW_STATE__0_DISABLE;
3701 iova = 0;
3702 }
3703
3704 tu_cs_emit(cs, cp_set_draw_state);
3705 tu_cs_emit_qw(cs, iova);
3706 }
3707
3708 tu_cs_sanity_check(cs);
3709
3710 /* track BOs */
3711 if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) {
3712 for (uint32_t i = 0; i < MAX_VBS; i++) {
3713 const struct tu_buffer *buf = cmd->state.vb.buffers[i];
3714 if (buf)
3715 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
3716 }
3717 }
3718 if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
3719 unsigned i;
3720 for_each_bit(i, descriptors_state->valid) {
3721 struct tu_descriptor_set *set = descriptors_state->sets[i];
3722 for (unsigned j = 0; j < set->layout->buffer_count; ++j)
3723 if (set->descriptors[j]) {
3724 tu_bo_list_add(&cmd->bo_list, set->descriptors[j],
3725 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
3726 }
3727 }
3728 }
3729 if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS) {
3730 for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
3731 const struct tu_buffer *buf = cmd->state.streamout_buf.buffers[i];
3732 if (buf) {
3733 tu_bo_list_add(&cmd->bo_list, buf->bo,
3734 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
3735 }
3736 }
3737 }
3738
3739 /* Fragment shader state overwrites compute shader state, so flag the
3740 * compute pipeline for re-emit.
3741 */
3742 cmd->state.dirty = TU_CMD_DIRTY_COMPUTE_PIPELINE;
3743 return VK_SUCCESS;
3744 }
3745
3746 static void
3747 tu6_emit_draw_indirect(struct tu_cmd_buffer *cmd,
3748 struct tu_cs *cs,
3749 const struct tu_draw_info *draw)
3750 {
3751 const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;
3752
3753 tu_cs_emit_regs(cs,
3754 A6XX_VFD_INDEX_OFFSET(draw->vertex_offset),
3755 A6XX_VFD_INSTANCE_START_OFFSET(draw->first_instance));
3756
3757 if (draw->indexed) {
3758 const enum a4xx_index_size index_size =
3759 tu6_index_size(cmd->state.index_type);
3760 const uint32_t index_bytes =
3761 (cmd->state.index_type == VK_INDEX_TYPE_UINT32) ? 4 : 2;
3762 const struct tu_buffer *index_buf = cmd->state.index_buffer;
3763       unsigned max_indices =
3764          (index_buf->size - cmd->state.index_offset) / index_bytes;
3765
3766 const uint32_t cp_draw_indx =
3767 CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
3768 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
3769 CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
3770 CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) | 0x2000;
3771
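      /* CP_DRAW_INDX_INDIRECT: control dword, 64-bit index buffer address,
       * maximum index count, then the 64-bit address of the
       * VkDrawIndexedIndirectCommand in the indirect buffer. The 0x2000 bit
       * in the control dword has no symbolic name in the register headers
       * used here.
       */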
3772 tu_cs_emit_pkt7(cs, CP_DRAW_INDX_INDIRECT, 6);
3773 tu_cs_emit(cs, cp_draw_indx);
3774 tu_cs_emit_qw(cs, index_buf->bo->iova + cmd->state.index_offset);
3775       tu_cs_emit(cs, A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(max_indices));
3776 tu_cs_emit_qw(cs, draw->indirect->bo->iova + draw->indirect_offset);
3777 } else {
3778 const uint32_t cp_draw_indx =
3779 CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
3780 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
3781 CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) | 0x2000;
3782
3783 tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT, 3);
3784 tu_cs_emit(cs, cp_draw_indx);
3785 tu_cs_emit_qw(cs, draw->indirect->bo->iova + draw->indirect_offset);
3786 }
3787
3788 tu_bo_list_add(&cmd->bo_list, draw->indirect->bo, MSM_SUBMIT_BO_READ);
3789 }
3790
3791 static void
3792 tu6_emit_draw_direct(struct tu_cmd_buffer *cmd,
3793 struct tu_cs *cs,
3794 const struct tu_draw_info *draw)
3795 {
3796
3797 const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;
3798
3799 tu_cs_emit_regs(cs,
3800 A6XX_VFD_INDEX_OFFSET(draw->vertex_offset),
3801 A6XX_VFD_INSTANCE_START_OFFSET(draw->first_instance));
3802
3803 /* TODO hw binning */
3804 if (draw->indexed) {
3805 const enum a4xx_index_size index_size =
3806 tu6_index_size(cmd->state.index_type);
3807 const uint32_t index_bytes =
3808 (cmd->state.index_type == VK_INDEX_TYPE_UINT32) ? 4 : 2;
3809 const struct tu_buffer *buf = cmd->state.index_buffer;
3810 const VkDeviceSize offset = buf->bo_offset + cmd->state.index_offset +
3811 index_bytes * draw->first_index;
3812 const uint32_t size = index_bytes * draw->count;
3813
3814 const uint32_t cp_draw_indx =
3815 CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
3816 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
3817 CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
3818 CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) | 0x2000;
3819
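      /* Indexed direct draw: control dword, instance count, index count, a
       * dword that is always written as zero here, the 64-bit index buffer
       * address and the index buffer size in bytes.
       */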
3820 tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 7);
3821 tu_cs_emit(cs, cp_draw_indx);
3822 tu_cs_emit(cs, draw->instance_count);
3823 tu_cs_emit(cs, draw->count);
3824 tu_cs_emit(cs, 0x0); /* XXX */
3825 tu_cs_emit_qw(cs, buf->bo->iova + offset);
3826 tu_cs_emit(cs, size);
3827 } else {
3828 const uint32_t cp_draw_indx =
3829 CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
3830 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
3831 CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) | 0x2000;
3832
3833 tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
3834 tu_cs_emit(cs, cp_draw_indx);
3835 tu_cs_emit(cs, draw->instance_count);
3836 tu_cs_emit(cs, draw->count);
3837 }
3838 }
3839
3840 static void
3841 tu_draw(struct tu_cmd_buffer *cmd, const struct tu_draw_info *draw)
3842 {
3843 struct tu_cs *cs = &cmd->draw_cs;
3844 VkResult result;
3845
3846 result = tu6_bind_draw_states(cmd, cs, draw);
3847 if (result != VK_SUCCESS) {
3848 cmd->record_result = result;
3849 return;
3850 }
3851
3852 if (draw->indirect)
3853 tu6_emit_draw_indirect(cmd, cs, draw);
3854 else
3855 tu6_emit_draw_direct(cmd, cs, draw);
3856
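   /* For each active streamout buffer, FLUSH_SO_n has the hardware write the
    * current buffer offset back to the scratch BO programmed through
    * VPC_SO_FLUSH_BASE, so that a later tu6_emit_streamout can resume where
    * this draw ended.
    */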
3857 if (cmd->state.streamout_enabled) {
3858 for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
3859 if (cmd->state.streamout_enabled & (1 << i))
3860 tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i, false);
3861 }
3862 }
3863
3864 cmd->wait_for_idle = true;
3865
3866 tu_cs_sanity_check(cs);
3867 }
3868
3869 void
3870 tu_CmdDraw(VkCommandBuffer commandBuffer,
3871 uint32_t vertexCount,
3872 uint32_t instanceCount,
3873 uint32_t firstVertex,
3874 uint32_t firstInstance)
3875 {
3876 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3877 struct tu_draw_info info = {};
3878
3879 info.count = vertexCount;
3880 info.instance_count = instanceCount;
3881 info.first_instance = firstInstance;
3882 info.vertex_offset = firstVertex;
3883
3884 tu_draw(cmd_buffer, &info);
3885 }
3886
3887 void
3888 tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
3889 uint32_t indexCount,
3890 uint32_t instanceCount,
3891 uint32_t firstIndex,
3892 int32_t vertexOffset,
3893 uint32_t firstInstance)
3894 {
3895 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3896 struct tu_draw_info info = {};
3897
3898 info.indexed = true;
3899 info.count = indexCount;
3900 info.instance_count = instanceCount;
3901 info.first_index = firstIndex;
3902 info.vertex_offset = vertexOffset;
3903 info.first_instance = firstInstance;
3904
3905 tu_draw(cmd_buffer, &info);
3906 }
3907
3908 void
3909 tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
3910 VkBuffer _buffer,
3911 VkDeviceSize offset,
3912 uint32_t drawCount,
3913 uint32_t stride)
3914 {
3915 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3916 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
3917 struct tu_draw_info info = {};
3918
3919 info.count = drawCount;
3920 info.indirect = buffer;
3921 info.indirect_offset = offset;
3922 info.stride = stride;
3923
3924 tu_draw(cmd_buffer, &info);
3925 }
3926
3927 void
3928 tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
3929 VkBuffer _buffer,
3930 VkDeviceSize offset,
3931 uint32_t drawCount,
3932 uint32_t stride)
3933 {
3934 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3935 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
3936 struct tu_draw_info info = {};
3937
3938 info.indexed = true;
3939 info.count = drawCount;
3940 info.indirect = buffer;
3941 info.indirect_offset = offset;
3942 info.stride = stride;
3943
3944 tu_draw(cmd_buffer, &info);
3945 }
3946
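/* VK_EXT_transform_feedback: the vertex count for this draw comes from a
 * counter buffer written by a previous transform feedback. For now the
 * counter buffer, its offset and the vertex stride are simply stashed in
 * tu_draw_info alongside the instance parameters.
 */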
3947 void tu_CmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer,
3948 uint32_t instanceCount,
3949 uint32_t firstInstance,
3950 VkBuffer _counterBuffer,
3951 VkDeviceSize counterBufferOffset,
3952 uint32_t counterOffset,
3953 uint32_t vertexStride)
3954 {
3955 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3956 TU_FROM_HANDLE(tu_buffer, buffer, _counterBuffer);
3957
3958 struct tu_draw_info info = {};
3959
3960 info.instance_count = instanceCount;
3961 info.first_instance = firstInstance;
3962 info.streamout_buffer = buffer;
3963 info.streamout_buffer_offset = counterBufferOffset;
3964 info.stride = vertexStride;
3965
3966 tu_draw(cmd_buffer, &info);
3967 }
3968
3969 struct tu_dispatch_info
3970 {
3971 /**
3972 * Determine the layout of the grid (in block units) to be used.
3973 */
3974 uint32_t blocks[3];
3975
3976 /**
3977 * A starting offset for the grid. If unaligned is set, the offset
3978 * must still be aligned.
3979 */
3980 uint32_t offsets[3];
3981 /**
3982 * Whether it's an unaligned compute dispatch.
3983 */
3984 bool unaligned;
3985
3986 /**
3987 * Indirect compute parameters resource.
3988 */
3989 struct tu_buffer *indirect;
3990 uint64_t indirect_offset;
3991 };
3992
3993 static void
3994 tu_emit_compute_driver_params(struct tu_cs *cs, struct tu_pipeline *pipeline,
3995 const struct tu_dispatch_info *info)
3996 {
3997 gl_shader_stage type = MESA_SHADER_COMPUTE;
3998 const struct tu_program_descriptor_linkage *link =
3999 &pipeline->program.link[type];
4000 const struct ir3_const_state *const_state = &link->const_state;
4001 uint32_t offset = const_state->offsets.driver_param;
4002
4003 if (link->constlen <= offset)
4004 return;
4005
4006 if (!info->indirect) {
4007 uint32_t driver_params[IR3_DP_CS_COUNT] = {
4008 [IR3_DP_NUM_WORK_GROUPS_X] = info->blocks[0],
4009 [IR3_DP_NUM_WORK_GROUPS_Y] = info->blocks[1],
4010 [IR3_DP_NUM_WORK_GROUPS_Z] = info->blocks[2],
4011 [IR3_DP_LOCAL_GROUP_SIZE_X] = pipeline->compute.local_size[0],
4012 [IR3_DP_LOCAL_GROUP_SIZE_Y] = pipeline->compute.local_size[1],
4013 [IR3_DP_LOCAL_GROUP_SIZE_Z] = pipeline->compute.local_size[2],
4014 };
4015
4016 uint32_t num_consts = MIN2(const_state->num_driver_params,
4017 (link->constlen - offset) * 4);
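      /* constlen is in vec4 units, so clamp the driver-param dword count to
       * the space the shader actually has; CP_LOAD_STATE6 NUM_UNIT is also
       * in vec4s, hence the divide by four below.
       */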
4018 /* push constants */
4019 tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_consts);
4020 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
4021 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
4022 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
4023 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
4024 CP_LOAD_STATE6_0_NUM_UNIT(num_consts / 4));
4025 tu_cs_emit(cs, 0);
4026 tu_cs_emit(cs, 0);
4027 uint32_t i;
4028 for (i = 0; i < num_consts; i++)
4029 tu_cs_emit(cs, driver_params[i]);
4030 } else {
4031 tu_finishme("Indirect driver params");
4032 }
4033 }
4034
4035 static void
4036 tu_dispatch(struct tu_cmd_buffer *cmd,
4037 const struct tu_dispatch_info *info)
4038 {
4039 struct tu_cs *cs = &cmd->cs;
4040 struct tu_pipeline *pipeline = cmd->state.compute_pipeline;
4041 struct tu_descriptor_state *descriptors_state =
4042 &cmd->descriptors[VK_PIPELINE_BIND_POINT_COMPUTE];
4043 VkResult result;
4044
4045 if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_PIPELINE)
4046 tu_cs_emit_ib(cs, &pipeline->program.state_ib);
4047
4048 struct tu_cs_entry ib;
4049
4050 ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE);
4051 if (ib.size)
4052 tu_cs_emit_ib(cs, &ib);
4053
4054 tu_emit_compute_driver_params(cs, pipeline, info);
4055
4056 bool needs_border;
4057 result = tu6_emit_textures(cmd, pipeline, descriptors_state,
4058 MESA_SHADER_COMPUTE, &ib, &needs_border, false);
4059 if (result != VK_SUCCESS) {
4060 cmd->record_result = result;
4061 return;
4062 }
4063
4064 if (ib.size)
4065 tu_cs_emit_ib(cs, &ib);
4066
4067 if (needs_border)
4068 tu_finishme("compute border color");
4069
4070 result = tu6_emit_ibo(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE, &ib);
4071 if (result != VK_SUCCESS) {
4072 cmd->record_result = result;
4073 return;
4074 }
4075
4076 if (ib.size)
4077 tu_cs_emit_ib(cs, &ib);
4078
4079 /* track BOs */
4080 if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
4081 unsigned i;
4082 for_each_bit(i, descriptors_state->valid) {
4083 struct tu_descriptor_set *set = descriptors_state->sets[i];
4084 for (unsigned j = 0; j < set->layout->buffer_count; ++j)
4085 if (set->descriptors[j]) {
4086 tu_bo_list_add(&cmd->bo_list, set->descriptors[j],
4087 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
4088 }
4089 }
4090 }
4091
4092 /* Compute shader state overwrites fragment shader state, so we flag the
4093 * graphics pipeline for re-emit.
4094 */
4095 cmd->state.dirty = TU_CMD_DIRTY_PIPELINE;
4096
4097 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
4098 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));
4099
4100 const uint32_t *local_size = pipeline->compute.local_size;
4101 const uint32_t *num_groups = info->blocks;
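   /* The ND-range is programmed with the local size encoded minus one and
    * the global size as local size times the workgroup count; the offsets in
    * info->offsets are not applied to the globaloff fields yet.
    */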
4102 tu_cs_emit_regs(cs,
4103 A6XX_HLSQ_CS_NDRANGE_0(.kerneldim = 3,
4104 .localsizex = local_size[0] - 1,
4105 .localsizey = local_size[1] - 1,
4106 .localsizez = local_size[2] - 1),
4107 A6XX_HLSQ_CS_NDRANGE_1(.globalsize_x = local_size[0] * num_groups[0]),
4108 A6XX_HLSQ_CS_NDRANGE_2(.globaloff_x = 0),
4109 A6XX_HLSQ_CS_NDRANGE_3(.globalsize_y = local_size[1] * num_groups[1]),
4110 A6XX_HLSQ_CS_NDRANGE_4(.globaloff_y = 0),
4111 A6XX_HLSQ_CS_NDRANGE_5(.globalsize_z = local_size[2] * num_groups[2]),
4112 A6XX_HLSQ_CS_NDRANGE_6(.globaloff_z = 0));
4113
4114 tu_cs_emit_regs(cs,
4115 A6XX_HLSQ_CS_KERNEL_GROUP_X(1),
4116 A6XX_HLSQ_CS_KERNEL_GROUP_Y(1),
4117 A6XX_HLSQ_CS_KERNEL_GROUP_Z(1));
4118
4119 if (info->indirect) {
4120 uint64_t iova = tu_buffer_iova(info->indirect) + info->indirect_offset;
4121
4122 tu_bo_list_add(&cmd->bo_list, info->indirect->bo,
4123 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
4124
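      /* Indirect dispatch: the CP reads the workgroup counts from the
       * VkDispatchIndirectCommand at iova, while the local size is still
       * encoded in the packet itself.
       */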
4125 tu_cs_emit_pkt7(cs, CP_EXEC_CS_INDIRECT, 4);
4126 tu_cs_emit(cs, 0x00000000);
4127 tu_cs_emit_qw(cs, iova);
4128 tu_cs_emit(cs,
4129 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(local_size[0] - 1) |
4130 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(local_size[1] - 1) |
4131 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(local_size[2] - 1));
4132 } else {
4133 tu_cs_emit_pkt7(cs, CP_EXEC_CS, 4);
4134 tu_cs_emit(cs, 0x00000000);
4135 tu_cs_emit(cs, CP_EXEC_CS_1_NGROUPS_X(info->blocks[0]));
4136 tu_cs_emit(cs, CP_EXEC_CS_2_NGROUPS_Y(info->blocks[1]));
4137 tu_cs_emit(cs, CP_EXEC_CS_3_NGROUPS_Z(info->blocks[2]));
4138 }
4139
4140 tu_cs_emit_wfi(cs);
4141
4142 tu6_emit_cache_flush(cmd, cs);
4143 }
4144
4145 void
4146 tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
4147 uint32_t base_x,
4148 uint32_t base_y,
4149 uint32_t base_z,
4150 uint32_t x,
4151 uint32_t y,
4152 uint32_t z)
4153 {
4154 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
4155 struct tu_dispatch_info info = {};
4156
4157 info.blocks[0] = x;
4158 info.blocks[1] = y;
4159 info.blocks[2] = z;
4160
4161 info.offsets[0] = base_x;
4162 info.offsets[1] = base_y;
4163 info.offsets[2] = base_z;
4164 tu_dispatch(cmd_buffer, &info);
4165 }
4166
4167 void
4168 tu_CmdDispatch(VkCommandBuffer commandBuffer,
4169 uint32_t x,
4170 uint32_t y,
4171 uint32_t z)
4172 {
4173 tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
4174 }
4175
4176 void
4177 tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
4178 VkBuffer _buffer,
4179 VkDeviceSize offset)
4180 {
4181 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
4182 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
4183 struct tu_dispatch_info info = {};
4184
4185 info.indirect = buffer;
4186 info.indirect_offset = offset;
4187
4188 tu_dispatch(cmd_buffer, &info);
4189 }
4190
4191 void
4192 tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
4193 {
4194 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
4195
4196 tu_cs_end(&cmd_buffer->draw_cs);
4197 tu_cs_end(&cmd_buffer->draw_epilogue_cs);
4198
4199 if (use_sysmem_rendering(cmd_buffer))
4200 tu_cmd_render_sysmem(cmd_buffer);
4201 else
4202 tu_cmd_render_tiles(cmd_buffer);
4203
4204 /* discard draw_cs and draw_epilogue_cs entries now that the tiles are
4205 rendered */
4206 tu_cs_discard_entries(&cmd_buffer->draw_cs);
4207 tu_cs_begin(&cmd_buffer->draw_cs);
4208 tu_cs_discard_entries(&cmd_buffer->draw_epilogue_cs);
4209 tu_cs_begin(&cmd_buffer->draw_epilogue_cs);
4210
4211 cmd_buffer->state.pass = NULL;
4212 cmd_buffer->state.subpass = NULL;
4213 cmd_buffer->state.framebuffer = NULL;
4214 }
4215
4216 void
4217 tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
4218 const VkSubpassEndInfoKHR *pSubpassEndInfo)
4219 {
4220 tu_CmdEndRenderPass(commandBuffer);
4221 }
4222
4223 struct tu_barrier_info
4224 {
4225 uint32_t eventCount;
4226 const VkEvent *pEvents;
4227 VkPipelineStageFlags srcStageMask;
4228 };
4229
4230 static void
4231 tu_barrier(struct tu_cmd_buffer *cmd_buffer,
4232 uint32_t memoryBarrierCount,
4233 const VkMemoryBarrier *pMemoryBarriers,
4234 uint32_t bufferMemoryBarrierCount,
4235 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
4236 uint32_t imageMemoryBarrierCount,
4237 const VkImageMemoryBarrier *pImageMemoryBarriers,
4238 const struct tu_barrier_info *info)
4239 {
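   /* TODO: barriers are not implemented yet; all parameters are ignored. */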
4240 }
4241
4242 void
4243 tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
4244 VkPipelineStageFlags srcStageMask,
4245 VkPipelineStageFlags destStageMask,
4246 VkBool32 byRegion,
4247 uint32_t memoryBarrierCount,
4248 const VkMemoryBarrier *pMemoryBarriers,
4249 uint32_t bufferMemoryBarrierCount,
4250 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
4251 uint32_t imageMemoryBarrierCount,
4252 const VkImageMemoryBarrier *pImageMemoryBarriers)
4253 {
4254 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
4255 struct tu_barrier_info info;
4256
4257 info.eventCount = 0;
4258 info.pEvents = NULL;
4259 info.srcStageMask = srcStageMask;
4260
4261 tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
4262 bufferMemoryBarrierCount, pBufferMemoryBarriers,
4263 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
4264 }
4265
4266 static void
4267 write_event(struct tu_cmd_buffer *cmd, struct tu_event *event, unsigned value)
4268 {
4269 struct tu_cs *cs = &cmd->cs;
4270
4271 tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_WRITE);
4272
4273    /* TODO: any flush required before/after? */
4274
4275 tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
4276 tu_cs_emit_qw(cs, event->bo.iova); /* ADDR_LO/HI */
4277 tu_cs_emit(cs, value);
4278 }
4279
4280 void
4281 tu_CmdSetEvent(VkCommandBuffer commandBuffer,
4282 VkEvent _event,
4283 VkPipelineStageFlags stageMask)
4284 {
4285 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
4286 TU_FROM_HANDLE(tu_event, event, _event);
4287
4288 write_event(cmd, event, 1);
4289 }
4290
4291 void
4292 tu_CmdResetEvent(VkCommandBuffer commandBuffer,
4293 VkEvent _event,
4294 VkPipelineStageFlags stageMask)
4295 {
4296 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
4297 TU_FROM_HANDLE(tu_event, event, _event);
4298
4299 write_event(cmd, event, 0);
4300 }
4301
4302 void
4303 tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
4304 uint32_t eventCount,
4305 const VkEvent *pEvents,
4306 VkPipelineStageFlags srcStageMask,
4307 VkPipelineStageFlags dstStageMask,
4308 uint32_t memoryBarrierCount,
4309 const VkMemoryBarrier *pMemoryBarriers,
4310 uint32_t bufferMemoryBarrierCount,
4311 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
4312 uint32_t imageMemoryBarrierCount,
4313 const VkImageMemoryBarrier *pImageMemoryBarriers)
4314 {
4315 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
4316 struct tu_cs *cs = &cmd->cs;
4317
4318 /* TODO: any flush required before/after? (CP_WAIT_FOR_ME?) */
4319
4320 for (uint32_t i = 0; i < eventCount; i++) {
4321 TU_FROM_HANDLE(tu_event, event, pEvents[i]);
4322
4323 tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_READ);
4324
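      /* Stall the CP until the event value in memory, ANDed with the mask,
       * equals the reference value of 1, i.e. until tu_CmdSetEvent's
       * CP_MEM_WRITE has landed.
       */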
4325 tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
4326 tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
4327 CP_WAIT_REG_MEM_0_POLL_MEMORY);
4328 tu_cs_emit_qw(cs, event->bo.iova); /* POLL_ADDR_LO/HI */
4329 tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(1));
4330 tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0u));
4331 tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(20));
4332 }
4333 }
4334
4335 void
4336 tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
4337 {
4338 /* No-op */
4339 }