turnip: Implement stream-out emit and vkApis for transform feedback
[mesa.git] src/freedreno/vulkan/tu_cmd_buffer.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include "registers/adreno_pm4.xml.h"
31 #include "registers/adreno_common.xml.h"
32
33 #include "vk_format.h"
34
35 #include "tu_cs.h"
36 #include "tu_blit.h"
37
38 #define OVERFLOW_FLAG_REG REG_A6XX_CP_SCRATCH_REG(0)
39
40 void
41 tu_bo_list_init(struct tu_bo_list *list)
42 {
43 list->count = list->capacity = 0;
44 list->bo_infos = NULL;
45 }
46
47 void
48 tu_bo_list_destroy(struct tu_bo_list *list)
49 {
50 free(list->bo_infos);
51 }
52
53 void
54 tu_bo_list_reset(struct tu_bo_list *list)
55 {
56 list->count = 0;
57 }
58
59 /**
60 * \a flags is a bitmask of MSM_SUBMIT_BO_FLAGS.
61 */
62 static uint32_t
63 tu_bo_list_add_info(struct tu_bo_list *list,
64 const struct drm_msm_gem_submit_bo *bo_info)
65 {
66 assert(bo_info->handle != 0);
67
68 for (uint32_t i = 0; i < list->count; ++i) {
69 if (list->bo_infos[i].handle == bo_info->handle) {
70 assert(list->bo_infos[i].presumed == bo_info->presumed);
71 list->bo_infos[i].flags |= bo_info->flags;
72 return i;
73 }
74 }
75
76 /* grow list->bo_infos if needed */
77 if (list->count == list->capacity) {
78 uint32_t new_capacity = MAX2(2 * list->count, 16);
79 struct drm_msm_gem_submit_bo *new_bo_infos = realloc(
80 list->bo_infos, new_capacity * sizeof(struct drm_msm_gem_submit_bo));
81 if (!new_bo_infos)
82 return TU_BO_LIST_FAILED;
83 list->bo_infos = new_bo_infos;
84 list->capacity = new_capacity;
85 }
86
87 list->bo_infos[list->count] = *bo_info;
88 return list->count++;
89 }
90
91 uint32_t
92 tu_bo_list_add(struct tu_bo_list *list,
93 const struct tu_bo *bo,
94 uint32_t flags)
95 {
96 return tu_bo_list_add_info(list, &(struct drm_msm_gem_submit_bo) {
97 .flags = flags,
98 .handle = bo->gem_handle,
99 .presumed = bo->iova,
100 });
101 }
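/* Usage sketch (illustrative, not taken from a caller in this file): adding
 * the same BO twice does not create a duplicate entry, the flags are simply
 * OR'd into the existing one, e.g.
 *
 *   uint32_t idx = tu_bo_list_add(list, bo, MSM_SUBMIT_BO_READ);
 *   tu_bo_list_add(list, bo, MSM_SUBMIT_BO_WRITE); // same idx, flags now R|W
 */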
102
103 VkResult
104 tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other)
105 {
106 for (uint32_t i = 0; i < other->count; i++) {
107 if (tu_bo_list_add_info(list, other->bo_infos + i) == TU_BO_LIST_FAILED)
108 return VK_ERROR_OUT_OF_HOST_MEMORY;
109 }
110
111 return VK_SUCCESS;
112 }
113
114 static bool
115 is_linear_mipmapped(const struct tu_image_view *iview)
116 {
117 return iview->image->layout.tile_mode == TILE6_LINEAR &&
118 iview->base_mip != iview->image->level_count - 1;
119 }
120
121 static bool
122 force_sysmem(const struct tu_cmd_buffer *cmd,
123 const struct VkRect2D *render_area)
124 {
125 const struct tu_framebuffer *fb = cmd->state.framebuffer;
126 const struct tu_physical_device *device = cmd->device->physical_device;
127 bool has_linear_mipmapped_store = false;
128 const struct tu_render_pass *pass = cmd->state.pass;
129
130 /* Iterate over all the places we call tu6_emit_store_attachment() */
131 for (unsigned i = 0; i < pass->subpass_count; i++) {
132 const struct tu_subpass *subpass = &pass->subpasses[i];
133 if (subpass->resolve_attachments) {
134 for (unsigned i = 0; i < subpass->color_count; i++) {
135 uint32_t a = subpass->resolve_attachments[i].attachment;
136 if (a != VK_ATTACHMENT_UNUSED &&
137 cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
138 const struct tu_image_view *iview = fb->attachments[a].attachment;
139 if (is_linear_mipmapped(iview)) {
140 has_linear_mipmapped_store = true;
141 break;
142 }
143 }
144 }
145 }
146 }
147
148 for (unsigned i = 0; i < pass->attachment_count; i++) {
149 if (pass->attachments[i].gmem_offset >= 0 &&
150 cmd->state.pass->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
151 const struct tu_image_view *iview = fb->attachments[i].attachment;
152 if (is_linear_mipmapped(iview)) {
153 has_linear_mipmapped_store = true;
154 break;
155 }
156 }
157 }
158
159 /* Linear textures cannot have any padding between mipmap levels and their
160 * height isn't padded, while at the same time the GMEM->MEM resolve does
161 * not have per-pixel granularity, so if the image height isn't aligned to
162 * the resolve granularity and the render area is tall enough, we may wind
163 * up writing past the bottom of the image into the next miplevel or even
164 * past the end of the image. For the last miplevel, the layout code should
165 * insert enough padding so that the overdraw writes to the padding. To
166 * work around this, we force-enable sysmem rendering.
167 */
168 const uint32_t y2 = render_area->offset.y + render_area->extent.height;
169 const uint32_t aligned_y2 = ALIGN_POT(y2, device->tile_align_h);
170
171 return has_linear_mipmapped_store && aligned_y2 > fb->height;
172 }
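/* Illustrative example (tile_align_h is an assumed value, the real one comes
 * from the physical device): with tile_align_h = 16 and a 100-pixel-tall
 * framebuffer, a render area reaching y = 100 is rounded up to
 * aligned_y2 = 112, so a GMEM store of a linear mipmapped attachment could
 * overdraw up to 12 rows past the image, and the check above falls back to
 * sysmem rendering.
 */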
173
174 static void
175 tu_tiling_config_update_tile_layout(struct tu_tiling_config *tiling,
176 const struct tu_device *dev,
177 uint32_t pixels)
178 {
179 const uint32_t tile_align_w = dev->physical_device->tile_align_w;
180 const uint32_t tile_align_h = dev->physical_device->tile_align_h;
181 const uint32_t max_tile_width = 1024; /* A6xx */
182
183 /* note: don't offset the tiling config by render_area.offset,
184 * because the binning pass can't deal with it.
185 * This means we might end up with more tiles than necessary,
186 * but load/store/etc. are still scissored to the render_area.
187 */
188 tiling->tile0.offset = (VkOffset2D) {};
189
190 const uint32_t ra_width =
191 tiling->render_area.extent.width +
192 (tiling->render_area.offset.x - tiling->tile0.offset.x);
193 const uint32_t ra_height =
194 tiling->render_area.extent.height +
195 (tiling->render_area.offset.y - tiling->tile0.offset.y);
196
197 /* start from 1 tile */
198 tiling->tile_count = (VkExtent2D) {
199 .width = 1,
200 .height = 1,
201 };
202 tiling->tile0.extent = (VkExtent2D) {
203 .width = align(ra_width, tile_align_w),
204 .height = align(ra_height, tile_align_h),
205 };
206
207 if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN)) {
208 /* start with 2x2 tiles */
209 tiling->tile_count.width = 2;
210 tiling->tile_count.height = 2;
211 tiling->tile0.extent.width = align(DIV_ROUND_UP(ra_width, 2), tile_align_w);
212 tiling->tile0.extent.height = align(DIV_ROUND_UP(ra_height, 2), tile_align_h);
213 }
214
215 /* do not exceed max tile width */
216 while (tiling->tile0.extent.width > max_tile_width) {
217 tiling->tile_count.width++;
218 tiling->tile0.extent.width =
219 align(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
220 }
221
222 /* sysmem rendering will be forced, so don't bother computing a valid tile config
223 * TODO: just skip all GMEM stuff when sysmem is forced?
224 */
225 if (!pixels)
226 return;
227
228 /* do not exceed gmem size */
229 while (tiling->tile0.extent.width * tiling->tile0.extent.height > pixels) {
230 if (tiling->tile0.extent.width > MAX2(tile_align_w, tiling->tile0.extent.height)) {
231 tiling->tile_count.width++;
232 tiling->tile0.extent.width =
233 align(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
234 } else {
235 /* if this assert fails then layout is impossible.. */
236 assert(tiling->tile0.extent.height > tile_align_h);
237 tiling->tile_count.height++;
238 tiling->tile0.extent.height =
239 align(DIV_ROUND_UP(ra_height, tiling->tile_count.height), tile_align_h);
240 }
241 }
242 }
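/* Worked example (alignment and per-tile pixel budget are assumed numbers,
 * the real ones come from the physical device and render pass): for a
 * 1920x1080 render area with 32x32 tile alignment and a budget of 128K
 * pixels per tile, the width is first split to stay under the 1024-pixel
 * max tile width (2 tiles of 960x1088), then the larger dimension keeps
 * being split until a tile fits the budget, ending with a 5x4 grid of
 * 384x288-pixel tiles.
 */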
243
244 static void
245 tu_tiling_config_update_pipe_layout(struct tu_tiling_config *tiling,
246 const struct tu_device *dev)
247 {
248 const uint32_t max_pipe_count = 32; /* A6xx */
249
250 /* start from 1 tile per pipe */
251 tiling->pipe0 = (VkExtent2D) {
252 .width = 1,
253 .height = 1,
254 };
255 tiling->pipe_count = tiling->tile_count;
256
257 while (tiling->pipe_count.width * tiling->pipe_count.height > max_pipe_count) {
258 if (tiling->pipe0.width < tiling->pipe0.height) {
259 tiling->pipe0.width += 1;
260 tiling->pipe_count.width =
261 DIV_ROUND_UP(tiling->tile_count.width, tiling->pipe0.width);
262 } else {
263 tiling->pipe0.height += 1;
264 tiling->pipe_count.height =
265 DIV_ROUND_UP(tiling->tile_count.height, tiling->pipe0.height);
266 }
267 }
268 }
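/* Example with assumed numbers: a 9x8 tile grid (72 tiles) does not fit in
 * 32 pipes, so pipe0 grows to 2x2 tiles per pipe, giving a 5x4 pipe grid
 * (20 pipes).
 */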
269
270 static void
271 tu_tiling_config_update_pipes(struct tu_tiling_config *tiling,
272 const struct tu_device *dev)
273 {
274 const uint32_t max_pipe_count = 32; /* A6xx */
275 const uint32_t used_pipe_count =
276 tiling->pipe_count.width * tiling->pipe_count.height;
277 const VkExtent2D last_pipe = {
278 .width = (tiling->tile_count.width - 1) % tiling->pipe0.width + 1,
279 .height = (tiling->tile_count.height - 1) % tiling->pipe0.height + 1,
280 };
281
282 assert(used_pipe_count <= max_pipe_count);
283 assert(max_pipe_count <= ARRAY_SIZE(tiling->pipe_config));
284
285 for (uint32_t y = 0; y < tiling->pipe_count.height; y++) {
286 for (uint32_t x = 0; x < tiling->pipe_count.width; x++) {
287 const uint32_t pipe_x = tiling->pipe0.width * x;
288 const uint32_t pipe_y = tiling->pipe0.height * y;
289 const uint32_t pipe_w = (x == tiling->pipe_count.width - 1)
290 ? last_pipe.width
291 : tiling->pipe0.width;
292 const uint32_t pipe_h = (y == tiling->pipe_count.height - 1)
293 ? last_pipe.height
294 : tiling->pipe0.height;
295 const uint32_t n = tiling->pipe_count.width * y + x;
296
297 tiling->pipe_config[n] = A6XX_VSC_PIPE_CONFIG_REG_X(pipe_x) |
298 A6XX_VSC_PIPE_CONFIG_REG_Y(pipe_y) |
299 A6XX_VSC_PIPE_CONFIG_REG_W(pipe_w) |
300 A6XX_VSC_PIPE_CONFIG_REG_H(pipe_h);
301 tiling->pipe_sizes[n] = CP_SET_BIN_DATA5_0_VSC_SIZE(pipe_w * pipe_h);
302 }
303 }
304
305 memset(tiling->pipe_config + used_pipe_count, 0,
306 sizeof(uint32_t) * (max_pipe_count - used_pipe_count));
307 }
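/* Continuing the assumed 9x8 tile / 2x2 pipe0 example above: the rightmost
 * column of pipes covers only one tile column (last_pipe.width =
 * (9 - 1) % 2 + 1 = 1), and the unused VSC_PIPE_CONFIG_REG slots up to 32
 * are zeroed.
 */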
308
309 static void
310 tu_tiling_config_get_tile(const struct tu_tiling_config *tiling,
311 const struct tu_device *dev,
312 uint32_t tx,
313 uint32_t ty,
314 struct tu_tile *tile)
315 {
316 /* find the pipe and the slot for tile (tx, ty) */
317 const uint32_t px = tx / tiling->pipe0.width;
318 const uint32_t py = ty / tiling->pipe0.height;
319 const uint32_t sx = tx - tiling->pipe0.width * px;
320 const uint32_t sy = ty - tiling->pipe0.height * py;
321 /* last pipe has different width */
322 const uint32_t pipe_width =
323 MIN2(tiling->pipe0.width,
324 tiling->tile_count.width - px * tiling->pipe0.width);
325
326 assert(tx < tiling->tile_count.width && ty < tiling->tile_count.height);
327 assert(px < tiling->pipe_count.width && py < tiling->pipe_count.height);
328 assert(sx < tiling->pipe0.width && sy < tiling->pipe0.height);
329
330 /* convert to 1D indices */
331 tile->pipe = tiling->pipe_count.width * py + px;
332 tile->slot = pipe_width * sy + sx;
333
334 /* get the blit area for the tile */
335 tile->begin = (VkOffset2D) {
336 .x = tiling->tile0.offset.x + tiling->tile0.extent.width * tx,
337 .y = tiling->tile0.offset.y + tiling->tile0.extent.height * ty,
338 };
339 tile->end.x =
340 (tx == tiling->tile_count.width - 1)
341 ? tiling->render_area.offset.x + tiling->render_area.extent.width
342 : tile->begin.x + tiling->tile0.extent.width;
343 tile->end.y =
344 (ty == tiling->tile_count.height - 1)
345 ? tiling->render_area.offset.y + tiling->render_area.extent.height
346 : tile->begin.y + tiling->tile0.extent.height;
347 }
348
349 enum a3xx_msaa_samples
350 tu_msaa_samples(uint32_t samples)
351 {
352 switch (samples) {
353 case 1:
354 return MSAA_ONE;
355 case 2:
356 return MSAA_TWO;
357 case 4:
358 return MSAA_FOUR;
359 case 8:
360 return MSAA_EIGHT;
361 default:
362 assert(!"invalid sample count");
363 return MSAA_ONE;
364 }
365 }
366
367 static enum a4xx_index_size
368 tu6_index_size(VkIndexType type)
369 {
370 switch (type) {
371 case VK_INDEX_TYPE_UINT16:
372 return INDEX4_SIZE_16_BIT;
373 case VK_INDEX_TYPE_UINT32:
374 return INDEX4_SIZE_32_BIT;
375 default:
376 unreachable("invalid VkIndexType");
377 return INDEX4_SIZE_8_BIT;
378 }
379 }
380
381 unsigned
382 tu6_emit_event_write(struct tu_cmd_buffer *cmd,
383 struct tu_cs *cs,
384 enum vgt_event_type event,
385 bool need_seqno)
386 {
387 unsigned seqno = 0;
388
389 tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
390 tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
391 if (need_seqno) {
392 tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
393 seqno = ++cmd->scratch_seqno;
394 tu_cs_emit(cs, seqno);
395 }
396
397 return seqno;
398 }
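/* When need_seqno is set, the event also writes a fresh seqno to
 * cmd->scratch_bo; callers (e.g. tu6_cache_flush() below) then poll that
 * location with CP_WAIT_REG_MEM/CP_WAIT_MEM_GTE to stall until the event
 * has actually landed.
 */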
399
400 static void
401 tu6_emit_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
402 {
403 tu6_emit_event_write(cmd, cs, 0x31, false);
404 }
405
406 static void
407 tu6_emit_lrz_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
408 {
409 tu6_emit_event_write(cmd, cs, LRZ_FLUSH, false);
410 }
411
412 static void
413 tu6_emit_wfi(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
414 {
415 if (cmd->wait_for_idle) {
416 tu_cs_emit_wfi(cs);
417 cmd->wait_for_idle = false;
418 }
419 }
420
421 #define tu_image_view_ubwc_pitches(iview) \
422 .pitch = tu_image_ubwc_pitch(iview->image, iview->base_mip), \
423 .array_pitch = tu_image_ubwc_size(iview->image, iview->base_mip) >> 2
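/* The macro above is only meant to be expanded inside the register builders
 * below; it just fills in the .pitch/.array_pitch fields from the image's
 * UBWC layout.
 */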
424
425 static void
426 tu6_emit_zs(struct tu_cmd_buffer *cmd,
427 const struct tu_subpass *subpass,
428 struct tu_cs *cs)
429 {
430 const struct tu_framebuffer *fb = cmd->state.framebuffer;
431
432 const uint32_t a = subpass->depth_stencil_attachment.attachment;
433 if (a == VK_ATTACHMENT_UNUSED) {
434 tu_cs_emit_regs(cs,
435 A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE),
436 A6XX_RB_DEPTH_BUFFER_PITCH(0),
437 A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(0),
438 A6XX_RB_DEPTH_BUFFER_BASE(0),
439 A6XX_RB_DEPTH_BUFFER_BASE_GMEM(0));
440
441 tu_cs_emit_regs(cs,
442 A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE));
443
444 tu_cs_emit_regs(cs,
445 A6XX_GRAS_LRZ_BUFFER_BASE(0),
446 A6XX_GRAS_LRZ_BUFFER_PITCH(0),
447 A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));
448
449 tu_cs_emit_regs(cs, A6XX_RB_STENCIL_INFO(0));
450
451 return;
452 }
453
454 const struct tu_image_view *iview = fb->attachments[a].attachment;
455 enum a6xx_depth_format fmt = tu6_pipe2depth(iview->vk_format);
456
457 tu_cs_emit_regs(cs,
458 A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt),
459 A6XX_RB_DEPTH_BUFFER_PITCH(tu_image_stride(iview->image, iview->base_mip)),
460 A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(iview->image->layout.layer_size),
461 A6XX_RB_DEPTH_BUFFER_BASE(tu_image_view_base_ref(iview)),
462 A6XX_RB_DEPTH_BUFFER_BASE_GMEM(cmd->state.pass->attachments[a].gmem_offset));
463
464 tu_cs_emit_regs(cs,
465 A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = fmt));
466
467 tu_cs_emit_regs(cs,
468 A6XX_RB_DEPTH_FLAG_BUFFER_BASE(tu_image_view_ubwc_base_ref(iview)),
469 A6XX_RB_DEPTH_FLAG_BUFFER_PITCH(tu_image_view_ubwc_pitches(iview)));
470
471 tu_cs_emit_regs(cs,
472 A6XX_GRAS_LRZ_BUFFER_BASE(0),
473 A6XX_GRAS_LRZ_BUFFER_PITCH(0),
474 A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));
475
476 tu_cs_emit_regs(cs,
477 A6XX_RB_STENCIL_INFO(0));
478
479 /* enable zs? */
480 }
481
482 static void
483 tu6_emit_mrt(struct tu_cmd_buffer *cmd,
484 const struct tu_subpass *subpass,
485 struct tu_cs *cs)
486 {
487 const struct tu_framebuffer *fb = cmd->state.framebuffer;
488 unsigned char mrt_comp[MAX_RTS] = { 0 };
489 unsigned srgb_cntl = 0;
490
491 for (uint32_t i = 0; i < subpass->color_count; ++i) {
492 uint32_t a = subpass->color_attachments[i].attachment;
493 if (a == VK_ATTACHMENT_UNUSED)
494 continue;
495
496 const struct tu_image_view *iview = fb->attachments[a].attachment;
497 const enum a6xx_tile_mode tile_mode =
498 tu6_get_image_tile_mode(iview->image, iview->base_mip);
499
500 mrt_comp[i] = 0xf;
501
502 if (vk_format_is_srgb(iview->vk_format))
503 srgb_cntl |= (1 << i);
504
505 const struct tu_native_format format =
506 tu6_format_color(iview->vk_format, iview->image->layout.tile_mode);
507
508 tu_cs_emit_regs(cs,
509 A6XX_RB_MRT_BUF_INFO(i,
510 .color_tile_mode = tile_mode,
511 .color_format = format.fmt,
512 .color_swap = format.swap),
513 A6XX_RB_MRT_PITCH(i, tu_image_stride(iview->image, iview->base_mip)),
514 A6XX_RB_MRT_ARRAY_PITCH(i, iview->image->layout.layer_size),
515 A6XX_RB_MRT_BASE(i, tu_image_view_base_ref(iview)),
516 A6XX_RB_MRT_BASE_GMEM(i, cmd->state.pass->attachments[a].gmem_offset));
517
518 tu_cs_emit_regs(cs,
519 A6XX_SP_FS_MRT_REG(i,
520 .color_format = format.fmt,
521 .color_sint = vk_format_is_sint(iview->vk_format),
522 .color_uint = vk_format_is_uint(iview->vk_format)));
523
524 tu_cs_emit_regs(cs,
525 A6XX_RB_MRT_FLAG_BUFFER_ADDR(i, tu_image_view_ubwc_base_ref(iview)),
526 A6XX_RB_MRT_FLAG_BUFFER_PITCH(i, tu_image_view_ubwc_pitches(iview)));
527 }
528
529 tu_cs_emit_regs(cs,
530 A6XX_RB_SRGB_CNTL(.dword = srgb_cntl));
531
532 tu_cs_emit_regs(cs,
533 A6XX_SP_SRGB_CNTL(.dword = srgb_cntl));
534
535 tu_cs_emit_regs(cs,
536 A6XX_RB_RENDER_COMPONENTS(
537 .rt0 = mrt_comp[0],
538 .rt1 = mrt_comp[1],
539 .rt2 = mrt_comp[2],
540 .rt3 = mrt_comp[3],
541 .rt4 = mrt_comp[4],
542 .rt5 = mrt_comp[5],
543 .rt6 = mrt_comp[6],
544 .rt7 = mrt_comp[7]));
545
546 tu_cs_emit_regs(cs,
547 A6XX_SP_FS_RENDER_COMPONENTS(
548 .rt0 = mrt_comp[0],
549 .rt1 = mrt_comp[1],
550 .rt2 = mrt_comp[2],
551 .rt3 = mrt_comp[3],
552 .rt4 = mrt_comp[4],
553 .rt5 = mrt_comp[5],
554 .rt6 = mrt_comp[6],
555 .rt7 = mrt_comp[7]));
556 }
557
558 static void
559 tu6_emit_msaa(struct tu_cmd_buffer *cmd,
560 const struct tu_subpass *subpass,
561 struct tu_cs *cs)
562 {
563 const enum a3xx_msaa_samples samples = tu_msaa_samples(subpass->samples);
564 bool msaa_disable = samples == MSAA_ONE;
565
566 tu_cs_emit_regs(cs,
567 A6XX_SP_TP_RAS_MSAA_CNTL(samples),
568 A6XX_SP_TP_DEST_MSAA_CNTL(.samples = samples,
569 .msaa_disable = msaa_disable));
570
571 tu_cs_emit_regs(cs,
572 A6XX_GRAS_RAS_MSAA_CNTL(samples),
573 A6XX_GRAS_DEST_MSAA_CNTL(.samples = samples,
574 .msaa_disable = msaa_disable));
575
576 tu_cs_emit_regs(cs,
577 A6XX_RB_RAS_MSAA_CNTL(samples),
578 A6XX_RB_DEST_MSAA_CNTL(.samples = samples,
579 .msaa_disable = msaa_disable));
580
581 tu_cs_emit_regs(cs,
582 A6XX_RB_MSAA_CNTL(samples));
583 }
584
585 static void
586 tu6_emit_bin_size(struct tu_cs *cs,
587 uint32_t bin_w, uint32_t bin_h, uint32_t flags)
588 {
589 tu_cs_emit_regs(cs,
590 A6XX_GRAS_BIN_CONTROL(.binw = bin_w,
591 .binh = bin_h,
592 .dword = flags));
593
594 tu_cs_emit_regs(cs,
595 A6XX_RB_BIN_CONTROL(.binw = bin_w,
596 .binh = bin_h,
597 .dword = flags));
598
599 /* no flag for RB_BIN_CONTROL2... */
600 tu_cs_emit_regs(cs,
601 A6XX_RB_BIN_CONTROL2(.binw = bin_w,
602 .binh = bin_h));
603 }
604
605 static void
606 tu6_emit_render_cntl(struct tu_cmd_buffer *cmd,
607 const struct tu_subpass *subpass,
608 struct tu_cs *cs,
609 bool binning)
610 {
611 const struct tu_framebuffer *fb = cmd->state.framebuffer;
612 uint32_t cntl = 0;
613 cntl |= A6XX_RB_RENDER_CNTL_UNK4;
614 if (binning) {
615 cntl |= A6XX_RB_RENDER_CNTL_BINNING;
616 } else {
617 uint32_t mrts_ubwc_enable = 0;
618 for (uint32_t i = 0; i < subpass->color_count; ++i) {
619 uint32_t a = subpass->color_attachments[i].attachment;
620 if (a == VK_ATTACHMENT_UNUSED)
621 continue;
622
623 const struct tu_image_view *iview = fb->attachments[a].attachment;
624 if (iview->image->layout.ubwc_layer_size != 0)
625 mrts_ubwc_enable |= 1 << i;
626 }
627
628 cntl |= A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable);
629
630 const uint32_t a = subpass->depth_stencil_attachment.attachment;
631 if (a != VK_ATTACHMENT_UNUSED) {
632 const struct tu_image_view *iview = fb->attachments[a].attachment;
633 if (iview->image->layout.ubwc_layer_size != 0)
634 cntl |= A6XX_RB_RENDER_CNTL_FLAG_DEPTH;
635 }
636
637 /* In the !binning case, we need to set RB_RENDER_CNTL in the draw_cs
638 * in order to set it correctly for the different subpasses. However,
639 * that means the packets we're emitting also happen during binning. So
640 * we need to guard the write on !BINNING at CP execution time.
641 */
642 tu_cs_reserve(cs, 3 + 4);
643 tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
644 tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
645 CP_COND_REG_EXEC_0_GMEM | CP_COND_REG_EXEC_0_SYSMEM);
646 tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(4));
647 }
648
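/* In the !binning case, the CP_REG_WRITE below is 1 header + 3 payload
 * dwords, which is exactly the 4-dword window the CP_COND_REG_EXEC above
 * covers, so the CP skips just this write while binning.
 */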
649 tu_cs_emit_pkt7(cs, CP_REG_WRITE, 3);
650 tu_cs_emit(cs, CP_REG_WRITE_0_TRACKER(TRACK_RENDER_CNTL));
651 tu_cs_emit(cs, REG_A6XX_RB_RENDER_CNTL);
652 tu_cs_emit(cs, cntl);
653 }
654
655 static void
656 tu6_emit_blit_scissor(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool align)
657 {
658 const VkRect2D *render_area = &cmd->state.tiling_config.render_area;
659 uint32_t x1 = render_area->offset.x;
660 uint32_t y1 = render_area->offset.y;
661 uint32_t x2 = x1 + render_area->extent.width - 1;
662 uint32_t y2 = y1 + render_area->extent.height - 1;
663
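/* note: the blit scissor BR coordinates seem to be inclusive, hence the -1
 * here and again after aligning below */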
664 /* TODO: alignment requirement seems to be less than tile_align_w/h */
665 if (align) {
666 x1 = x1 & ~(cmd->device->physical_device->tile_align_w - 1);
667 y1 = y1 & ~(cmd->device->physical_device->tile_align_h - 1);
668 x2 = ALIGN_POT(x2 + 1, cmd->device->physical_device->tile_align_w) - 1;
669 y2 = ALIGN_POT(y2 + 1, cmd->device->physical_device->tile_align_h) - 1;
670 }
671
672 tu_cs_emit_regs(cs,
673 A6XX_RB_BLIT_SCISSOR_TL(.x = x1, .y = y1),
674 A6XX_RB_BLIT_SCISSOR_BR(.x = x2, .y = y2));
675 }
676
677 static void
678 tu6_emit_blit_info(struct tu_cmd_buffer *cmd,
679 struct tu_cs *cs,
680 const struct tu_image_view *iview,
681 uint32_t gmem_offset,
682 bool resolve)
683 {
684 tu_cs_emit_regs(cs,
685 A6XX_RB_BLIT_INFO(.unk0 = !resolve, .gmem = !resolve));
686
687 const struct tu_native_format format =
688 tu6_format_color(iview->vk_format, iview->image->layout.tile_mode);
689
690 enum a6xx_tile_mode tile_mode =
691 tu6_get_image_tile_mode(iview->image, iview->base_mip);
692 tu_cs_emit_regs(cs,
693 A6XX_RB_BLIT_DST_INFO(
694 .tile_mode = tile_mode,
695 .samples = tu_msaa_samples(iview->image->samples),
696 .color_format = format.fmt,
697 .color_swap = format.swap,
698 .flags = iview->image->layout.ubwc_layer_size != 0),
699 A6XX_RB_BLIT_DST(tu_image_view_base_ref(iview)),
700 A6XX_RB_BLIT_DST_PITCH(tu_image_stride(iview->image, iview->base_mip)),
701 A6XX_RB_BLIT_DST_ARRAY_PITCH(iview->image->layout.layer_size));
702
703 if (iview->image->layout.ubwc_layer_size) {
704 tu_cs_emit_regs(cs,
705 A6XX_RB_BLIT_FLAG_DST(tu_image_view_ubwc_base_ref(iview)),
706 A6XX_RB_BLIT_FLAG_DST_PITCH(tu_image_view_ubwc_pitches(iview)));
707 }
708
709 tu_cs_emit_regs(cs,
710 A6XX_RB_BLIT_BASE_GMEM(gmem_offset));
711 }
712
713 static void
714 tu6_emit_blit(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
715 {
716 tu6_emit_event_write(cmd, cs, BLIT, false);
717 }
718
719 static void
720 tu6_emit_window_scissor(struct tu_cmd_buffer *cmd,
721 struct tu_cs *cs,
722 uint32_t x1,
723 uint32_t y1,
724 uint32_t x2,
725 uint32_t y2)
726 {
727 tu_cs_emit_regs(cs,
728 A6XX_GRAS_SC_WINDOW_SCISSOR_TL(.x = x1, .y = y1),
729 A6XX_GRAS_SC_WINDOW_SCISSOR_BR(.x = x2, .y = y2));
730
731 tu_cs_emit_regs(cs,
732 A6XX_GRAS_RESOLVE_CNTL_1(.x = x1, .y = y1),
733 A6XX_GRAS_RESOLVE_CNTL_2(.x = x2, .y = y2));
734 }
735
736 static void
737 tu6_emit_window_offset(struct tu_cmd_buffer *cmd,
738 struct tu_cs *cs,
739 uint32_t x1,
740 uint32_t y1)
741 {
742 tu_cs_emit_regs(cs,
743 A6XX_RB_WINDOW_OFFSET(.x = x1, .y = y1));
744
745 tu_cs_emit_regs(cs,
746 A6XX_RB_WINDOW_OFFSET2(.x = x1, .y = y1));
747
748 tu_cs_emit_regs(cs,
749 A6XX_SP_WINDOW_OFFSET(.x = x1, .y = y1));
750
751 tu_cs_emit_regs(cs,
752 A6XX_SP_TP_WINDOW_OFFSET(.x = x1, .y = y1));
753 }
754
755 static bool
756 use_hw_binning(struct tu_cmd_buffer *cmd)
757 {
758 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
759
760 if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_NOBIN))
761 return false;
762
763 if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN))
764 return true;
765
766 return (tiling->tile_count.width * tiling->tile_count.height) > 2;
767 }
768
769 static bool
770 use_sysmem_rendering(struct tu_cmd_buffer *cmd)
771 {
772 if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_SYSMEM))
773 return true;
774
775 /* can't fit attachments into gmem */
776 if (!cmd->state.pass->gmem_pixels)
777 return true;
778
779 return cmd->state.tiling_config.force_sysmem;
780 }
781
782 static void
783 tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
784 struct tu_cs *cs,
785 const struct tu_tile *tile)
786 {
787 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
788 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_YIELD));
789
790 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
791 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));
792
793 const uint32_t x1 = tile->begin.x;
794 const uint32_t y1 = tile->begin.y;
795 const uint32_t x2 = tile->end.x - 1;
796 const uint32_t y2 = tile->end.y - 1;
797 tu6_emit_window_scissor(cmd, cs, x1, y1, x2, y2);
798 tu6_emit_window_offset(cmd, cs, x1, y1);
799
800 tu_cs_emit_regs(cs,
801 A6XX_VPC_SO_OVERRIDE(.so_disable = false));
802
803 if (use_hw_binning(cmd)) {
804 tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
805
806 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
807 tu_cs_emit(cs, 0x0);
808
809 tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
810 tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
811 A6XX_CP_REG_TEST_0_BIT(0) |
812 A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
813
814 tu_cs_reserve(cs, 3 + 11);
815 tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
816 tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
817 tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(11));
818
819 /* if (no overflow) */ {
820 tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5, 7);
821 tu_cs_emit(cs, cmd->state.tiling_config.pipe_sizes[tile->pipe] |
822 CP_SET_BIN_DATA5_0_VSC_N(tile->slot));
823 tu_cs_emit_qw(cs, cmd->vsc_data.iova + tile->pipe * cmd->vsc_data_pitch);
824 tu_cs_emit_qw(cs, cmd->vsc_data.iova + (tile->pipe * 4) + (32 * cmd->vsc_data_pitch));
825 tu_cs_emit_qw(cs, cmd->vsc_data2.iova + (tile->pipe * cmd->vsc_data2_pitch));
826
827 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
828 tu_cs_emit(cs, 0x0);
829
830 /* use a NOP packet to skip over the 'else' side: */
831 tu_cs_emit_pkt7(cs, CP_NOP, 2);
832 } /* else */ {
833 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
834 tu_cs_emit(cs, 0x1);
835 }
836
837 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
838 tu_cs_emit(cs, 0x0);
839
840 tu_cs_emit_regs(cs,
841 A6XX_RB_UNKNOWN_8804(0));
842
843 tu_cs_emit_regs(cs,
844 A6XX_SP_TP_UNKNOWN_B304(0));
845
846 tu_cs_emit_regs(cs,
847 A6XX_GRAS_UNKNOWN_80A4(0));
848 } else {
849 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
850 tu_cs_emit(cs, 0x1);
851
852 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
853 tu_cs_emit(cs, 0x0);
854 }
855 }
856
857 static void
858 tu6_emit_load_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs, uint32_t a)
859 {
860 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
861 const struct tu_framebuffer *fb = cmd->state.framebuffer;
862 const struct tu_image_view *iview = fb->attachments[a].attachment;
863 const struct tu_render_pass_attachment *attachment =
864 &cmd->state.pass->attachments[a];
865
866 if (attachment->gmem_offset < 0)
867 return;
868
869 const uint32_t x1 = tiling->render_area.offset.x;
870 const uint32_t y1 = tiling->render_area.offset.y;
871 const uint32_t x2 = x1 + tiling->render_area.extent.width;
872 const uint32_t y2 = y1 + tiling->render_area.extent.height;
873 const uint32_t tile_x2 =
874 tiling->tile0.offset.x + tiling->tile0.extent.width * tiling->tile_count.width;
875 const uint32_t tile_y2 =
876 tiling->tile0.offset.y + tiling->tile0.extent.height * tiling->tile_count.height;
877 bool need_load =
878 x1 != tiling->tile0.offset.x || x2 != MIN2(fb->width, tile_x2) ||
879 y1 != tiling->tile0.offset.y || y2 != MIN2(fb->height, tile_y2);
880
881 if (need_load)
882 tu_finishme("improve handling of unaligned render area");
883
884 if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_LOAD)
885 need_load = true;
886
887 if (vk_format_has_stencil(iview->vk_format) &&
888 attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_LOAD)
889 need_load = true;
890
891 if (need_load) {
892 tu6_emit_blit_info(cmd, cs, iview, attachment->gmem_offset, false);
893 tu6_emit_blit(cmd, cs);
894 }
895 }
896
897 static void
898 tu6_emit_clear_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
899 uint32_t a,
900 const VkRenderPassBeginInfo *info)
901 {
902 const struct tu_framebuffer *fb = cmd->state.framebuffer;
903 const struct tu_image_view *iview = fb->attachments[a].attachment;
904 const struct tu_render_pass_attachment *attachment =
905 &cmd->state.pass->attachments[a];
906 unsigned clear_mask = 0;
907
908 /* note: this means it isn't used by any subpass and shouldn't be cleared anyway */
909 if (attachment->gmem_offset < 0)
910 return;
911
912 if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
913 clear_mask = 0xf;
914
915 if (vk_format_has_stencil(iview->vk_format)) {
916 clear_mask &= 0x1;
917 if (attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
918 clear_mask |= 0x2;
919 }
920 if (!clear_mask)
921 return;
922
923 tu_clear_gmem_attachment(cmd, cs, a, clear_mask,
924 &info->pClearValues[a]);
925 }
926
927 static void
928 tu6_emit_predicated_blit(struct tu_cmd_buffer *cmd,
929 struct tu_cs *cs,
930 uint32_t a,
931 uint32_t gmem_a,
932 bool resolve)
933 {
934 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);
935
936 tu6_emit_blit_info(cmd, cs,
937 cmd->state.framebuffer->attachments[a].attachment,
938 cmd->state.pass->attachments[gmem_a].gmem_offset, resolve);
939 tu6_emit_blit(cmd, cs);
940
941 tu_cond_exec_end(cs);
942 }
943
944 static void
945 tu6_emit_sysmem_resolve(struct tu_cmd_buffer *cmd,
946 struct tu_cs *cs,
947 uint32_t a,
948 uint32_t gmem_a)
949 {
950 const struct tu_framebuffer *fb = cmd->state.framebuffer;
951 const struct tu_image_view *dst = fb->attachments[a].attachment;
952 const struct tu_image_view *src = fb->attachments[gmem_a].attachment;
953
954 tu_blit(cmd, cs, &(struct tu_blit) {
955 .dst = sysmem_attachment_surf(dst, dst->base_layer,
956 &cmd->state.tiling_config.render_area),
957 .src = sysmem_attachment_surf(src, src->base_layer,
958 &cmd->state.tiling_config.render_area),
959 .layers = fb->layers,
960 });
961 }
962
963
964 /* Emit a MSAA resolve operation, with both gmem and sysmem paths. */
965 static void tu6_emit_resolve(struct tu_cmd_buffer *cmd,
966 struct tu_cs *cs,
967 uint32_t a,
968 uint32_t gmem_a)
969 {
970 if (cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE)
971 return;
972
973 tu6_emit_predicated_blit(cmd, cs, a, gmem_a, true);
974
975 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
976 tu6_emit_sysmem_resolve(cmd, cs, a, gmem_a);
977 tu_cond_exec_end(cs);
978 }
979
980 static void
981 tu6_emit_store_attachment(struct tu_cmd_buffer *cmd,
982 struct tu_cs *cs,
983 uint32_t a,
984 uint32_t gmem_a)
985 {
986 if (cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE)
987 return;
988
989 tu6_emit_blit_info(cmd, cs,
990 cmd->state.framebuffer->attachments[a].attachment,
991 cmd->state.pass->attachments[gmem_a].gmem_offset, true);
992 tu6_emit_blit(cmd, cs);
993 }
994
995 static void
996 tu6_emit_tile_store(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
997 {
998 const struct tu_render_pass *pass = cmd->state.pass;
999 const struct tu_subpass *subpass = &pass->subpasses[pass->subpass_count-1];
1000
1001 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
1002 tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
1003 CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
1004 CP_SET_DRAW_STATE__0_GROUP_ID(0));
1005 tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
1006 tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));
1007
1008 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1009 tu_cs_emit(cs, 0x0);
1010
1011 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
1012 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE));
1013
1014 tu6_emit_blit_scissor(cmd, cs, true);
1015
1016 for (uint32_t a = 0; a < pass->attachment_count; ++a) {
1017 if (pass->attachments[a].gmem_offset >= 0)
1018 tu6_emit_store_attachment(cmd, cs, a, a);
1019 }
1020
1021 if (subpass->resolve_attachments) {
1022 for (unsigned i = 0; i < subpass->color_count; i++) {
1023 uint32_t a = subpass->resolve_attachments[i].attachment;
1024 if (a != VK_ATTACHMENT_UNUSED)
1025 tu6_emit_store_attachment(cmd, cs, a,
1026 subpass->color_attachments[i].attachment);
1027 }
1028 }
1029 }
1030
1031 static void
1032 tu6_emit_restart_index(struct tu_cs *cs, uint32_t restart_index)
1033 {
1034 tu_cs_emit_regs(cs,
1035 A6XX_PC_RESTART_INDEX(restart_index));
1036 }
1037
1038 static void
1039 tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1040 {
1041 tu6_emit_cache_flush(cmd, cs);
1042
1043 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 0xfffff);
1044
1045 tu_cs_emit_write_reg(cs, REG_A6XX_RB_CCU_CNTL, 0x10000000);
1046 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
1047 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
1048 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
1049 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE0F, 0x3f);
1050 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B605, 0x44);
1051 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B600, 0x100000);
1052 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE00, 0x80);
1053 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE01, 0);
1054
1055 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9600, 0);
1056 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8600, 0x880);
1057 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE04, 0);
1058 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE03, 0x00000410);
1059 tu_cs_emit_write_reg(cs, REG_A6XX_SP_IBO_COUNT, 0);
1060 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B182, 0);
1061 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BB11, 0);
1062 tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
1063 tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
1064 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
1065 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A982, 0);
1066 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A9A8, 0);
1067 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AB00, 0x5);
1068 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_GS_SIV_CNTL, 0x0000ffff);
1069
1070 tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
1071 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
1072 tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x1f);
1073
1074 tu_cs_emit_write_reg(cs, REG_A6XX_RB_SRGB_CNTL, 0);
1075
1076 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8110, 0);
1077
1078 tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL0, 0x401);
1079 tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL1, 0);
1080 tu_cs_emit_write_reg(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 0);
1081 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8818, 0);
1082 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8819, 0);
1083 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881A, 0);
1084 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881B, 0);
1085 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881C, 0);
1086 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881D, 0);
1087 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881E, 0);
1088 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_88F0, 0);
1089
1090 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9101, 0xffff00);
1091 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9107, 0);
1092
1093 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9236, 1);
1094 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);
1095
1096 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_SO_OVERRIDE,
1097 A6XX_VPC_SO_OVERRIDE_SO_DISABLE);
1098
1099 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9801, 0);
1100 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9806, 0);
1101 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9980, 0);
1102 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9990, 0);
1103
1104 tu_cs_emit_write_reg(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 0);
1105 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9B07, 0);
1106
1107 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A81B, 0);
1108
1109 tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B183, 0);
1110
1111 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8099, 0);
1112 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_809B, 0);
1113 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A0, 2);
1114 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80AF, 0);
1115 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
1116 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9211, 0);
1117 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9602, 0);
1118 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9981, 0x3);
1119 tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9E72, 0);
1120 tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9108, 0x3);
1121 tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B304, 0);
1122 tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B309, 0x000000a2);
1123 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8804, 0);
1124 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A4, 0);
1125 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A5, 0);
1126 tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A6, 0);
1127 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8805, 0);
1128 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8806, 0);
1129 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8878, 0);
1130 tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8879, 0);
1131 tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);
1132
1133 tu_cs_emit_write_reg(cs, REG_A6XX_VFD_MODE_CNTL, 0x00000000);
1134
1135 tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A008, 0);
1136
1137 tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x0000001f);
1138
1139 /* we don't use this yet.. probably best to disable.. */
1140 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
1141 tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
1142 CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
1143 CP_SET_DRAW_STATE__0_GROUP_ID(0));
1144 tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
1145 tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));
1146
1147 /* Disable stream-out by default: */
1148 tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 4);
1149 tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
1150 tu_cs_emit(cs, 0);
1151 tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
1152 tu_cs_emit(cs, 0);
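/* Presumably a pipeline using transform feedback re-emits VPC_SO_CNTL /
 * VPC_SO_BUF_CNTL with real values; here they are just zeroed so nothing is
 * streamed out until then.
 */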
1153
1154 tu_cs_emit_regs(cs,
1155 A6XX_SP_HS_CTRL_REG0(0));
1156
1157 tu_cs_emit_regs(cs,
1158 A6XX_SP_GS_CTRL_REG0(0));
1159
1160 tu_cs_emit_regs(cs,
1161 A6XX_GRAS_LRZ_CNTL(0));
1162
1163 tu_cs_emit_regs(cs,
1164 A6XX_RB_LRZ_CNTL(0));
1165
1166 tu_cs_sanity_check(cs);
1167 }
1168
1169 static void
1170 tu6_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1171 {
1172 unsigned seqno;
1173
1174 seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_AND_INV_EVENT, true);
1175
1176 tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
1177 tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
1178 CP_WAIT_REG_MEM_0_POLL_MEMORY);
1179 tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
1180 tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(seqno));
1181 tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
1182 tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));
1183
1184 seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);
1185
1186 tu_cs_emit_pkt7(cs, CP_WAIT_MEM_GTE, 4);
1187 tu_cs_emit(cs, CP_WAIT_MEM_GTE_0_RESERVED(0));
1188 tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
1189 tu_cs_emit(cs, CP_WAIT_MEM_GTE_3_REF(seqno));
1190 }
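/* The pattern above: fire a flush event that writes a fresh seqno to the
 * scratch BO, then have the CP poll that location until the write lands,
 * i.e. until the flush has actually finished, before continuing.
 */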
1191
1192 static void
1193 update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1194 {
1195 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
1196
1197 tu_cs_emit_regs(cs,
1198 A6XX_VSC_BIN_SIZE(.width = tiling->tile0.extent.width,
1199 .height = tiling->tile0.extent.height),
1200 A6XX_VSC_SIZE_ADDRESS(.bo = &cmd->vsc_data,
1201 .bo_offset = 32 * cmd->vsc_data_pitch));
1202
1203 tu_cs_emit_regs(cs,
1204 A6XX_VSC_BIN_COUNT(.nx = tiling->tile_count.width,
1205 .ny = tiling->tile_count.height));
1206
1207 tu_cs_emit_pkt4(cs, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
1208 for (unsigned i = 0; i < 32; i++)
1209 tu_cs_emit(cs, tiling->pipe_config[i]);
1210
1211 tu_cs_emit_regs(cs,
1212 A6XX_VSC_PIPE_DATA2_ADDRESS(.bo = &cmd->vsc_data2),
1213 A6XX_VSC_PIPE_DATA2_PITCH(cmd->vsc_data2_pitch),
1214 A6XX_VSC_PIPE_DATA2_ARRAY_PITCH(cmd->vsc_data2.size));
1215
1216 tu_cs_emit_regs(cs,
1217 A6XX_VSC_PIPE_DATA_ADDRESS(.bo = &cmd->vsc_data),
1218 A6XX_VSC_PIPE_DATA_PITCH(cmd->vsc_data_pitch),
1219 A6XX_VSC_PIPE_DATA_ARRAY_PITCH(cmd->vsc_data.size));
1220 }
1221
1222 static void
1223 emit_vsc_overflow_test(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1224 {
1225 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
1226 const uint32_t used_pipe_count =
1227 tiling->pipe_count.width * tiling->pipe_count.height;
1228
1229 /* Clear vsc_scratch: */
1230 tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
1231 tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
1232 tu_cs_emit(cs, 0x0);
1233
1234 /* Check for overflow, write vsc_scratch if detected: */
1235 for (int i = 0; i < used_pipe_count; i++) {
1236 tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
1237 tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
1238 CP_COND_WRITE5_0_WRITE_MEMORY);
1239 tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE_REG(i)));
1240 tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
1241 tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data_pitch));
1242 tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
1243 tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
1244 tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(1 + cmd->vsc_data_pitch));
1245
1246 tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
1247 tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
1248 CP_COND_WRITE5_0_WRITE_MEMORY);
1249 tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE2_REG(i)));
1250 tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
1251 tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data2_pitch));
1252 tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
1253 tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
1254 tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(3 + cmd->vsc_data2_pitch));
1255 }
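/* The value written on overflow is the current pitch plus a small tag (1 for
 * VSC_DATA, 3 for VSC_DATA2); presumably check_vsc_overflow() on the CPU
 * side uses the tag to tell which buffer overflowed and the pitch to size
 * the replacement buffer.
 */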
1256
1257 tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
1258
1259 tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
1260
1261 tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
1262 tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(OVERFLOW_FLAG_REG) |
1263 CP_MEM_TO_REG_0_CNT(1 - 1));
1264 tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
1265
1266 /*
1267 * This is a bit awkward: we really want a way to invert the
1268 * CP_REG_TEST/CP_COND_REG_EXEC logic, so that we can conditionally
1269 * execute cmds to use hwbinning when a bit is *not* set. This
1270 * dance is to invert OVERFLOW_FLAG_REG.
1271 *
1272 * A CP_NOP packet is used to skip executing the 'else' clause
1273 * when b0 is set.
1274 */
1275
1276 /* b0 will be set if VSC_DATA or VSC_DATA2 overflow: */
1277 tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
1278 tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
1279 A6XX_CP_REG_TEST_0_BIT(0) |
1280 A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
1281
1282 tu_cs_reserve(cs, 3 + 7);
1283 tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
1284 tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
1285 tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(7));
1286
1287 /* if (b0 set) */ {
1288 /*
1289 * On overflow, mirror the value to control->vsc_overflow
1290 * which CPU is checking to detect overflow (see
1291 * check_vsc_overflow())
1292 */
1293 tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
1294 tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(OVERFLOW_FLAG_REG) |
1295 CP_REG_TO_MEM_0_CNT(0));
1296 tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_OVERFLOW);
1297
1298 tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
1299 tu_cs_emit(cs, 0x0);
1300
1301 tu_cs_emit_pkt7(cs, CP_NOP, 2); /* skip 'else' when 'if' is taken */
1302 } /* else */ {
1303 tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
1304 tu_cs_emit(cs, 0x1);
1305 }
1306 }
1307
1308 static void
1309 tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1310 {
1311 struct tu_physical_device *phys_dev = cmd->device->physical_device;
1312 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
1313
1314 uint32_t x1 = tiling->tile0.offset.x;
1315 uint32_t y1 = tiling->tile0.offset.y;
1316 uint32_t x2 = tiling->render_area.offset.x + tiling->render_area.extent.width - 1;
1317 uint32_t y2 = tiling->render_area.offset.y + tiling->render_area.extent.height - 1;
1318
1319 tu6_emit_window_scissor(cmd, cs, x1, y1, x2, y2);
1320
1321 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
1322 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
1323
1324 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
1325 tu_cs_emit(cs, 0x1);
1326
1327 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
1328 tu_cs_emit(cs, 0x1);
1329
1330 tu_cs_emit_wfi(cs);
1331
1332 tu_cs_emit_regs(cs,
1333 A6XX_VFD_MODE_CNTL(.binning_pass = true));
1334
1335 update_vsc_pipe(cmd, cs);
1336
1337 tu_cs_emit_regs(cs,
1338 A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));
1339
1340 tu_cs_emit_regs(cs,
1341 A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));
1342
1343 tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
1344 tu_cs_emit(cs, UNK_2C);
1345
1346 tu_cs_emit_regs(cs,
1347 A6XX_RB_WINDOW_OFFSET(.x = 0, .y = 0));
1348
1349 tu_cs_emit_regs(cs,
1350 A6XX_SP_TP_WINDOW_OFFSET(.x = 0, .y = 0));
1351
1352 /* emit IB to binning drawcmds: */
1353 tu_cs_emit_call(cs, &cmd->draw_cs);
1354
1355 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
1356 tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
1357 CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
1358 CP_SET_DRAW_STATE__0_GROUP_ID(0));
1359 tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
1360 tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));
1361
1362 tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
1363 tu_cs_emit(cs, UNK_2D);
1364
1365 tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
1366 tu6_cache_flush(cmd, cs);
1367
1368 tu_cs_emit_wfi(cs);
1369
1370 tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
1371
1372 emit_vsc_overflow_test(cmd, cs);
1373
1374 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
1375 tu_cs_emit(cs, 0x0);
1376
1377 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
1378 tu_cs_emit(cs, 0x0);
1379
1380 tu_cs_emit_wfi(cs);
1381
1382 tu_cs_emit_regs(cs,
1383 A6XX_RB_CCU_CNTL(.unknown = phys_dev->magic.RB_CCU_CNTL_gmem));
1384
1385 cmd->wait_for_idle = false;
1386 }
1387
1388 static void
1389 tu_emit_sysmem_clear_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
1390 uint32_t a,
1391 const VkRenderPassBeginInfo *info)
1392 {
1393 const struct tu_framebuffer *fb = cmd->state.framebuffer;
1394 const struct tu_image_view *iview = fb->attachments[a].attachment;
1395 const struct tu_render_pass_attachment *attachment =
1396 &cmd->state.pass->attachments[a];
1397 unsigned clear_mask = 0;
1398
1399 /* note: this means it isn't used by any subpass and shouldn't be cleared anyway */
1400 if (attachment->gmem_offset < 0)
1401 return;
1402
1403 if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
1404 clear_mask = 0xf;
1405 }
1406
1407 if (vk_format_has_stencil(iview->vk_format)) {
1408 clear_mask &= 0x1;
1409 if (attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
1410 clear_mask |= 0x2;
1411 if (clear_mask != 0x3)
1412 tu_finishme("depth/stencil only load op");
1413 }
1414
1415 if (!clear_mask)
1416 return;
1417
1418 tu_clear_sysmem_attachment(cmd, cs, a,
1419 &info->pClearValues[a], &(struct VkClearRect) {
1420 .rect = info->renderArea,
1421 .baseArrayLayer = iview->base_layer,
1422 .layerCount = iview->layer_count,
1423 });
1424 }
1425
1426 static void
1427 tu_emit_load_clear(struct tu_cmd_buffer *cmd,
1428 const VkRenderPassBeginInfo *info)
1429 {
1430 struct tu_cs *cs = &cmd->draw_cs;
1431
1432 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);
1433
1434 tu6_emit_blit_scissor(cmd, cs, true);
1435
1436 for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
1437 tu6_emit_load_attachment(cmd, cs, i);
1438
1439 tu6_emit_blit_scissor(cmd, cs, false);
1440
1441 for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
1442 tu6_emit_clear_attachment(cmd, cs, i, info);
1443
1444 tu_cond_exec_end(cs);
1445
1446 /* invalidate because reading input attachments will cache GMEM and
1447 * the cache isn't updated when GMEM is written
1448 * TODO: is there a no-cache bit for textures?
1449 */
1450 if (cmd->state.subpass->input_count)
1451 tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
1452
1453 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
1454
1455 for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
1456 tu_emit_sysmem_clear_attachment(cmd, cs, i, info);
1457
1458 tu_cond_exec_end(cs);
1459 }
1460
1461 static void
1462 tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
1463 const struct VkRect2D *renderArea)
1464 {
1465 const struct tu_framebuffer *fb = cmd->state.framebuffer;
1466
1467 assert(fb->width > 0 && fb->height > 0);
1468 tu6_emit_window_scissor(cmd, cs, 0, 0, fb->width - 1, fb->height - 1);
1469 tu6_emit_window_offset(cmd, cs, 0, 0);
1470
1471 tu6_emit_bin_size(cs, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */
1472
1473 tu6_emit_lrz_flush(cmd, cs);
1474
1475 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
1476 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
1477
1478 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1479 tu_cs_emit(cs, 0x0);
1480
1481 tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
1482 tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);
1483 tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
1484
1485 tu6_emit_wfi(cmd, cs);
1486 tu_cs_emit_regs(cs,
1487 A6XX_RB_CCU_CNTL(0x10000000));
1488
1489 /* enable stream-out; with sysmem there is only one pass: */
1490 tu_cs_emit_regs(cs,
1491 A6XX_VPC_SO_OVERRIDE(.so_disable = false));
1492
1493 tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
1494 tu_cs_emit(cs, 0x1);
1495
1496 tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
1497 tu_cs_emit(cs, 0x0);
1498
1499 tu_cs_sanity_check(cs);
1500 }
1501
1502 static void
1503 tu6_sysmem_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1504 {
1505 /* Do any resolves of the last subpass. These are handled in the
1506 * tile_store_ib in the gmem path.
1507 */
1508
1509 const struct tu_subpass *subpass = cmd->state.subpass;
1510 if (subpass->resolve_attachments) {
1511 for (unsigned i = 0; i < subpass->color_count; i++) {
1512 uint32_t a = subpass->resolve_attachments[i].attachment;
1513 if (a != VK_ATTACHMENT_UNUSED)
1514 tu6_emit_sysmem_resolve(cmd, cs, a,
1515 subpass->color_attachments[i].attachment);
1516 }
1517 }
1518
1519 tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);
1520
1521 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1522 tu_cs_emit(cs, 0x0);
1523
1524 tu6_emit_lrz_flush(cmd, cs);
1525
1526 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
1527 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
1528
1529 tu_cs_sanity_check(cs);
1530 }
1531
1532
1533 static void
1534 tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1535 {
1536 struct tu_physical_device *phys_dev = cmd->device->physical_device;
1537
1538 tu6_emit_lrz_flush(cmd, cs);
1539
1540 /* lrz clear? */
1541
1542 tu6_emit_cache_flush(cmd, cs);
1543
1544 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1545 tu_cs_emit(cs, 0x0);
1546
1547 /* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
1548 tu6_emit_wfi(cmd, cs);
1549 tu_cs_emit_regs(cs,
1550 A6XX_RB_CCU_CNTL(phys_dev->magic.RB_CCU_CNTL_gmem));
1551
1552 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
1553 if (use_hw_binning(cmd)) {
1554 /* enable stream-out during binning pass: */
1555 tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=false));
1556
1557 tu6_emit_bin_size(cs,
1558 tiling->tile0.extent.width,
1559 tiling->tile0.extent.height,
1560 A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
1561
1562 tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, true);
1563
1564 tu6_emit_binning_pass(cmd, cs);
1565
1566 /* and disable stream-out for draw pass: */
1567 tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=true));
1568
1569 tu6_emit_bin_size(cs,
1570 tiling->tile0.extent.width,
1571 tiling->tile0.extent.height,
1572 A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);
1573
1574 tu_cs_emit_regs(cs,
1575 A6XX_VFD_MODE_CNTL(0));
1576
1577 tu_cs_emit_regs(cs, A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));
1578
1579 tu_cs_emit_regs(cs, A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));
1580
1581 tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1582 tu_cs_emit(cs, 0x1);
1583 } else {
1584 /* no binning pass, so enable stream-out for draw pass: */
1585 tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=false));
1586
1587 tu6_emit_bin_size(cs,
1588 tiling->tile0.extent.width,
1589 tiling->tile0.extent.height,
1590 0x6000000);
1591 }
1592
1593 tu_cs_sanity_check(cs);
1594 }
1595
1596 static void
1597 tu6_render_tile(struct tu_cmd_buffer *cmd,
1598 struct tu_cs *cs,
1599 const struct tu_tile *tile)
1600 {
1601 tu6_emit_tile_select(cmd, cs, tile);
1602
1603 tu_cs_emit_call(cs, &cmd->draw_cs);
1604 cmd->wait_for_idle = true;
1605
1606 if (use_hw_binning(cmd)) {
1607 tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
1608 tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
1609 A6XX_CP_REG_TEST_0_BIT(0) |
1610 A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
1611
1612 tu_cs_reserve(cs, 3 + 2);
1613 tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
1614 tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
1615 tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(2));
1616
1617 /* if (no overflow) */ {
1618 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
1619 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
1620 }
1621 }
1622
1623 tu_cs_emit_ib(cs, &cmd->state.tile_store_ib);
1624
1625 tu_cs_sanity_check(cs);
1626 }
1627
1628 static void
1629 tu6_tile_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1630 {
1631 tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);
1632
1633 tu_cs_emit_regs(cs,
1634 A6XX_GRAS_LRZ_CNTL(0));
1635
1636 tu6_emit_lrz_flush(cmd, cs);
1637
1638 tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);
1639
1640 tu_cs_sanity_check(cs);
1641 }
1642
1643 static void
1644 tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
1645 {
1646 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
1647
1648 tu6_tile_render_begin(cmd, &cmd->cs);
1649
1650 for (uint32_t y = 0; y < tiling->tile_count.height; y++) {
1651 for (uint32_t x = 0; x < tiling->tile_count.width; x++) {
1652 struct tu_tile tile;
1653 tu_tiling_config_get_tile(tiling, cmd->device, x, y, &tile);
1654 tu6_render_tile(cmd, &cmd->cs, &tile);
1655 }
1656 }
1657
1658 tu6_tile_render_end(cmd, &cmd->cs);
1659 }
1660
1661 static void
1662 tu_cmd_render_sysmem(struct tu_cmd_buffer *cmd)
1663 {
1664 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
1665
1666 tu6_sysmem_render_begin(cmd, &cmd->cs, &tiling->render_area);
1667
1668 tu_cs_emit_call(&cmd->cs, &cmd->draw_cs);
1669 cmd->wait_for_idle = true;
1670
1671 tu6_sysmem_render_end(cmd, &cmd->cs);
1672 }
1673
1674 static void
1675 tu_cmd_prepare_tile_store_ib(struct tu_cmd_buffer *cmd)
1676 {
1677 const uint32_t tile_store_space = 32 + 23 * cmd->state.pass->attachment_count;
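/* Presumably 23 dwords is a conservative upper bound on what
 * tu6_emit_tile_store() emits per attachment and 32 covers its fixed
 * overhead; tu_cs_begin_sub_stream() just needs enough room for the whole
 * sub-IB.
 */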
1678 struct tu_cs sub_cs;
1679
1680 VkResult result =
1681 tu_cs_begin_sub_stream(&cmd->sub_cs, tile_store_space, &sub_cs);
1682 if (result != VK_SUCCESS) {
1683 cmd->record_result = result;
1684 return;
1685 }
1686
1687 /* emit to tile-store sub_cs */
1688 tu6_emit_tile_store(cmd, &sub_cs);
1689
1690 cmd->state.tile_store_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
1691 }
1692
1693 static void
1694 tu_cmd_update_tiling_config(struct tu_cmd_buffer *cmd,
1695 const VkRect2D *render_area)
1696 {
1697 const struct tu_device *dev = cmd->device;
1698 struct tu_tiling_config *tiling = &cmd->state.tiling_config;
1699
1700 tiling->render_area = *render_area;
1701 tiling->force_sysmem = force_sysmem(cmd, render_area);
1702
1703 tu_tiling_config_update_tile_layout(tiling, dev, cmd->state.pass->gmem_pixels);
1704 tu_tiling_config_update_pipe_layout(tiling, dev);
1705 tu_tiling_config_update_pipes(tiling, dev);
1706 }
1707
1708 const struct tu_dynamic_state default_dynamic_state = {
1709 .viewport =
1710 {
1711 .count = 0,
1712 },
1713 .scissor =
1714 {
1715 .count = 0,
1716 },
1717 .line_width = 1.0f,
1718 .depth_bias =
1719 {
1720 .bias = 0.0f,
1721 .clamp = 0.0f,
1722 .slope = 0.0f,
1723 },
1724 .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
1725 .depth_bounds =
1726 {
1727 .min = 0.0f,
1728 .max = 1.0f,
1729 },
1730 .stencil_compare_mask =
1731 {
1732 .front = ~0u,
1733 .back = ~0u,
1734 },
1735 .stencil_write_mask =
1736 {
1737 .front = ~0u,
1738 .back = ~0u,
1739 },
1740 .stencil_reference =
1741 {
1742 .front = 0u,
1743 .back = 0u,
1744 },
1745 };
1746
1747 static void UNUSED /* FINISHME */
1748 tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer,
1749 const struct tu_dynamic_state *src)
1750 {
1751 struct tu_dynamic_state *dest = &cmd_buffer->state.dynamic;
1752 uint32_t copy_mask = src->mask;
1753 uint32_t dest_mask = 0;
1754
1755 tu_use_args(cmd_buffer); /* FINISHME */
1756
1757 /* Make sure to copy the number of viewports/scissors because they can
1758 * only be specified at pipeline creation time.
1759 */
1760 dest->viewport.count = src->viewport.count;
1761 dest->scissor.count = src->scissor.count;
1762 dest->discard_rectangle.count = src->discard_rectangle.count;
1763
1764 if (copy_mask & TU_DYNAMIC_VIEWPORT) {
1765 if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
1766 src->viewport.count * sizeof(VkViewport))) {
1767 typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
1768 src->viewport.count);
1769 dest_mask |= TU_DYNAMIC_VIEWPORT;
1770 }
1771 }
1772
1773 if (copy_mask & TU_DYNAMIC_SCISSOR) {
1774 if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
1775 src->scissor.count * sizeof(VkRect2D))) {
1776 typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
1777 src->scissor.count);
1778 dest_mask |= TU_DYNAMIC_SCISSOR;
1779 }
1780 }
1781
1782 if (copy_mask & TU_DYNAMIC_LINE_WIDTH) {
1783 if (dest->line_width != src->line_width) {
1784 dest->line_width = src->line_width;
1785 dest_mask |= TU_DYNAMIC_LINE_WIDTH;
1786 }
1787 }
1788
1789 if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) {
1790 if (memcmp(&dest->depth_bias, &src->depth_bias,
1791 sizeof(src->depth_bias))) {
1792 dest->depth_bias = src->depth_bias;
1793 dest_mask |= TU_DYNAMIC_DEPTH_BIAS;
1794 }
1795 }
1796
1797 if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) {
1798 if (memcmp(&dest->blend_constants, &src->blend_constants,
1799 sizeof(src->blend_constants))) {
1800 typed_memcpy(dest->blend_constants, src->blend_constants, 4);
1801 dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS;
1802 }
1803 }
1804
1805 if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) {
1806 if (memcmp(&dest->depth_bounds, &src->depth_bounds,
1807 sizeof(src->depth_bounds))) {
1808 dest->depth_bounds = src->depth_bounds;
1809 dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS;
1810 }
1811 }
1812
1813 if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) {
1814 if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
1815 sizeof(src->stencil_compare_mask))) {
1816 dest->stencil_compare_mask = src->stencil_compare_mask;
1817 dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK;
1818 }
1819 }
1820
1821 if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) {
1822 if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
1823 sizeof(src->stencil_write_mask))) {
1824 dest->stencil_write_mask = src->stencil_write_mask;
1825 dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK;
1826 }
1827 }
1828
1829 if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) {
1830 if (memcmp(&dest->stencil_reference, &src->stencil_reference,
1831 sizeof(src->stencil_reference))) {
1832 dest->stencil_reference = src->stencil_reference;
1833 dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE;
1834 }
1835 }
1836
1837 if (copy_mask & TU_DYNAMIC_DISCARD_RECTANGLE) {
1838 if (memcmp(&dest->discard_rectangle.rectangles,
1839 &src->discard_rectangle.rectangles,
1840 src->discard_rectangle.count * sizeof(VkRect2D))) {
1841 typed_memcpy(dest->discard_rectangle.rectangles,
1842 src->discard_rectangle.rectangles,
1843 src->discard_rectangle.count);
1844 dest_mask |= TU_DYNAMIC_DISCARD_RECTANGLE;
1845 }
1846 }
1847 }
1848
1849 static VkResult
1850 tu_create_cmd_buffer(struct tu_device *device,
1851 struct tu_cmd_pool *pool,
1852 VkCommandBufferLevel level,
1853 VkCommandBuffer *pCommandBuffer)
1854 {
1855 struct tu_cmd_buffer *cmd_buffer;
1856 cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
1857 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1858 if (cmd_buffer == NULL)
1859 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1860
1861 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1862 cmd_buffer->device = device;
1863 cmd_buffer->pool = pool;
1864 cmd_buffer->level = level;
1865
1866 if (pool) {
1867 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
1868 cmd_buffer->queue_family_index = pool->queue_family_index;
1869
1870 } else {
1871 /* Init the pool_link so we can safely call list_del when we destroy
1872 * the command buffer
1873 */
1874 list_inithead(&cmd_buffer->pool_link);
1875 cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
1876 }
1877
1878 tu_bo_list_init(&cmd_buffer->bo_list);
1879 tu_cs_init(&cmd_buffer->cs, device, TU_CS_MODE_GROW, 4096);
1880 tu_cs_init(&cmd_buffer->draw_cs, device, TU_CS_MODE_GROW, 4096);
1881 tu_cs_init(&cmd_buffer->draw_epilogue_cs, device, TU_CS_MODE_GROW, 4096);
1882 tu_cs_init(&cmd_buffer->sub_cs, device, TU_CS_MODE_SUB_STREAM, 2048);
1883
1884 *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);
1885
1886 list_inithead(&cmd_buffer->upload.list);
1887
1888 VkResult result = tu_bo_init_new(device, &cmd_buffer->scratch_bo, 0x1000);
1889 if (result != VK_SUCCESS)
1890 goto fail_scratch_bo;
1891
1892 /* TODO: resize on overflow */
1893 cmd_buffer->vsc_data_pitch = device->vsc_data_pitch;
1894 cmd_buffer->vsc_data2_pitch = device->vsc_data2_pitch;
1895 cmd_buffer->vsc_data = device->vsc_data;
1896 cmd_buffer->vsc_data2 = device->vsc_data2;
1897
1898 return VK_SUCCESS;
1899
1900 fail_scratch_bo:
1901 list_del(&cmd_buffer->pool_link);
1902 return result;
1903 }
1904
1905 static void
1906 tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
1907 {
1908 tu_bo_finish(cmd_buffer->device, &cmd_buffer->scratch_bo);
1909
1910 list_del(&cmd_buffer->pool_link);
1911
1912 for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
1913 free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
1914
1915 tu_cs_finish(&cmd_buffer->cs);
1916 tu_cs_finish(&cmd_buffer->draw_cs);
1917 tu_cs_finish(&cmd_buffer->draw_epilogue_cs);
1918 tu_cs_finish(&cmd_buffer->sub_cs);
1919
1920 tu_bo_list_destroy(&cmd_buffer->bo_list);
1921 vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
1922 }
1923
1924 static VkResult
1925 tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
1926 {
1927 cmd_buffer->wait_for_idle = true;
1928
1929 cmd_buffer->record_result = VK_SUCCESS;
1930
1931 tu_bo_list_reset(&cmd_buffer->bo_list);
1932 tu_cs_reset(&cmd_buffer->cs);
1933 tu_cs_reset(&cmd_buffer->draw_cs);
1934 tu_cs_reset(&cmd_buffer->draw_epilogue_cs);
1935 tu_cs_reset(&cmd_buffer->sub_cs);
1936
1937 for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
1938 cmd_buffer->descriptors[i].valid = 0;
1939 cmd_buffer->descriptors[i].push_dirty = false;
1940 }
1941
1942 cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;
1943
1944 return cmd_buffer->record_result;
1945 }
1946
1947 VkResult
1948 tu_AllocateCommandBuffers(VkDevice _device,
1949 const VkCommandBufferAllocateInfo *pAllocateInfo,
1950 VkCommandBuffer *pCommandBuffers)
1951 {
1952 TU_FROM_HANDLE(tu_device, device, _device);
1953 TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);
1954
1955 VkResult result = VK_SUCCESS;
1956 uint32_t i;
1957
1958 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
1959
1960 if (!list_is_empty(&pool->free_cmd_buffers)) {
1961 struct tu_cmd_buffer *cmd_buffer = list_first_entry(
1962 &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);
1963
1964 list_del(&cmd_buffer->pool_link);
1965 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
1966
1967 result = tu_reset_cmd_buffer(cmd_buffer);
1968 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1969 cmd_buffer->level = pAllocateInfo->level;
1970
1971 pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
1972 } else {
1973 result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
1974 &pCommandBuffers[i]);
1975 }
1976 if (result != VK_SUCCESS)
1977 break;
1978 }
1979
1980 if (result != VK_SUCCESS) {
1981 tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
1982 pCommandBuffers);
1983
1984 /* From the Vulkan 1.0.66 spec:
1985 *
1986 * "vkAllocateCommandBuffers can be used to create multiple
1987 * command buffers. If the creation of any of those command
1988 * buffers fails, the implementation must destroy all
1989 * successfully created command buffer objects from this
1990 * command, set all entries of the pCommandBuffers array to
1991 * NULL and return the error."
1992 */
1993 memset(pCommandBuffers, 0,
1994 sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
1995 }
1996
1997 return result;
1998 }
1999
2000 void
2001 tu_FreeCommandBuffers(VkDevice device,
2002 VkCommandPool commandPool,
2003 uint32_t commandBufferCount,
2004 const VkCommandBuffer *pCommandBuffers)
2005 {
2006 for (uint32_t i = 0; i < commandBufferCount; i++) {
2007 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
2008
2009 if (cmd_buffer) {
2010 if (cmd_buffer->pool) {
2011 list_del(&cmd_buffer->pool_link);
2012 list_addtail(&cmd_buffer->pool_link,
2013 &cmd_buffer->pool->free_cmd_buffers);
2014 } else
2015 tu_cmd_buffer_destroy(cmd_buffer);
2016 }
2017 }
2018 }
2019
2020 VkResult
2021 tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
2022 VkCommandBufferResetFlags flags)
2023 {
2024 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
2025 return tu_reset_cmd_buffer(cmd_buffer);
2026 }
2027
2028 VkResult
2029 tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
2030 const VkCommandBufferBeginInfo *pBeginInfo)
2031 {
2032 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
2033 VkResult result = VK_SUCCESS;
2034
2035 if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
2036    /* If the command buffer has already been reset with
2037 * vkResetCommandBuffer, no need to do it again.
2038 */
2039 result = tu_reset_cmd_buffer(cmd_buffer);
2040 if (result != VK_SUCCESS)
2041 return result;
2042 }
2043
2044 memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
2045 cmd_buffer->usage_flags = pBeginInfo->flags;
2046
2047 tu_cs_begin(&cmd_buffer->cs);
2048 tu_cs_begin(&cmd_buffer->draw_cs);
2049 tu_cs_begin(&cmd_buffer->draw_epilogue_cs);
2050
2051 cmd_buffer->scratch_seqno = 0;
2052
2053    /* set up the initial configuration for the command buffer */
2054 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
2055 switch (cmd_buffer->queue_family_index) {
2056 case TU_QUEUE_GENERAL:
2057 tu6_init_hw(cmd_buffer, &cmd_buffer->cs);
2058 break;
2059 default:
2060 break;
2061 }
2062 } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
2063 (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
2064 assert(pBeginInfo->pInheritanceInfo);
2065 cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
2066 cmd_buffer->state.subpass = &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
2067 }
2068
2069 cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;
2070
2071 return VK_SUCCESS;
2072 }
2073
2074 void
2075 tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
2076 uint32_t firstBinding,
2077 uint32_t bindingCount,
2078 const VkBuffer *pBuffers,
2079 const VkDeviceSize *pOffsets)
2080 {
2081 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2082
2083 assert(firstBinding + bindingCount <= MAX_VBS);
2084
2085 for (uint32_t i = 0; i < bindingCount; i++) {
2086 cmd->state.vb.buffers[firstBinding + i] =
2087 tu_buffer_from_handle(pBuffers[i]);
2088 cmd->state.vb.offsets[firstBinding + i] = pOffsets[i];
2089 }
2090
2091 /* VB states depend on VkPipelineVertexInputStateCreateInfo */
2092 cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
2093 }
2094
2095 void
2096 tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
2097 VkBuffer buffer,
2098 VkDeviceSize offset,
2099 VkIndexType indexType)
2100 {
2101 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2102 TU_FROM_HANDLE(tu_buffer, buf, buffer);
2103
2104 /* initialize/update the restart index */
2105 if (!cmd->state.index_buffer || cmd->state.index_type != indexType) {
2106 struct tu_cs *draw_cs = &cmd->draw_cs;
2107
2108 tu6_emit_restart_index(
2109 draw_cs, indexType == VK_INDEX_TYPE_UINT32 ? 0xffffffff : 0xffff);
2110
2111 tu_cs_sanity_check(draw_cs);
2112 }
2113
2114 /* track the BO */
2115 if (cmd->state.index_buffer != buf)
2116 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
2117
2118 cmd->state.index_buffer = buf;
2119 cmd->state.index_offset = offset;
2120 cmd->state.index_type = indexType;
2121 }
2122
2123 void
2124 tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
2125 VkPipelineBindPoint pipelineBindPoint,
2126 VkPipelineLayout _layout,
2127 uint32_t firstSet,
2128 uint32_t descriptorSetCount,
2129 const VkDescriptorSet *pDescriptorSets,
2130 uint32_t dynamicOffsetCount,
2131 const uint32_t *pDynamicOffsets)
2132 {
2133 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
2134 TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
2135 unsigned dyn_idx = 0;
2136
2137 struct tu_descriptor_state *descriptors_state =
2138 tu_get_descriptors_state(cmd_buffer, pipelineBindPoint);
2139
2140 for (unsigned i = 0; i < descriptorSetCount; ++i) {
2141 unsigned idx = i + firstSet;
2142 TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);
2143
2144 descriptors_state->sets[idx] = set;
2145 descriptors_state->valid |= (1u << idx);
2146
2147       for (unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
2148 unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
2149 assert(dyn_idx < dynamicOffsetCount);
2150
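              /* resolve the dynamic descriptor now: its GPU address is the
               * base VA recorded in the set plus the offset supplied at
               * bind time
               */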
2151 descriptors_state->dynamic_buffers[idx] =
2152 set->dynamic_descriptors[j].va + pDynamicOffsets[dyn_idx];
2153 }
2154 }
2155
2156 cmd_buffer->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS;
2157 }
2158
2159 void tu_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer,
2160 uint32_t firstBinding,
2161 uint32_t bindingCount,
2162 const VkBuffer *pBuffers,
2163 const VkDeviceSize *pOffsets,
2164 const VkDeviceSize *pSizes)
2165 {
2166 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2167 assert(firstBinding + bindingCount <= IR3_MAX_SO_BUFFERS);
2168
2169 for (uint32_t i = 0; i < bindingCount; i++) {
2170 uint32_t idx = firstBinding + i;
2171 TU_FROM_HANDLE(tu_buffer, buf, pBuffers[i]);
2172
2173 if (pOffsets[i] != 0)
2174 cmd->state.streamout_reset |= 1 << idx;
2175
2176 cmd->state.streamout_buf.buffers[idx] = buf;
2177 cmd->state.streamout_buf.offsets[idx] = pOffsets[i];
2178 cmd->state.streamout_buf.sizes[idx] = pSizes[i];
2179
2180 cmd->state.streamout_enabled |= 1 << idx;
2181 }
2182
2183 cmd->state.dirty |= TU_CMD_DIRTY_STREAMOUT_BUFFERS;
2184 }
2185
2186 void tu_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer,
2187 uint32_t firstCounterBuffer,
2188 uint32_t counterBufferCount,
2189 const VkBuffer *pCounterBuffers,
2190 const VkDeviceSize *pCounterBufferOffsets)
2191 {
2192 assert(firstCounterBuffer + counterBufferCount <= IR3_MAX_SO_BUFFERS);
2193    /* TODO: do something with the counter buffers? */
2194 }
2195
2196 void tu_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer,
2197 uint32_t firstCounterBuffer,
2198 uint32_t counterBufferCount,
2199 const VkBuffer *pCounterBuffers,
2200 const VkDeviceSize *pCounterBufferOffsets)
2201 {
2202 assert(firstCounterBuffer + counterBufferCount <= IR3_MAX_SO_BUFFERS);
2203    /* TODO: do something with the counter buffers? */
2204
2205 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2206 cmd->state.streamout_enabled = 0;
2207 }
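
     /* Illustrative sketch only (editor addition, not a driver code path):
      * the expected application-side call order for the transform feedback
      * entry points above.  The command buffer and capture buffer handles
      * used here are hypothetical.
      */
     static void UNUSED
     tu_example_xfb_usage(VkCommandBuffer cb, VkBuffer xfb_buf)
     {
        const VkDeviceSize offset = 0;
        const VkDeviceSize size = VK_WHOLE_SIZE;

        /* bind one stream-out buffer at binding 0 */
        tu_CmdBindTransformFeedbackBuffersEXT(cb, 0, 1, &xfb_buf, &offset, &size);

        /* capture is only active between Begin/End; the counter buffers are
         * currently ignored (see the TODOs above)
         */
        tu_CmdBeginTransformFeedbackEXT(cb, 0, 0, NULL, NULL);
        /* ... record draws here ... */
        tu_CmdEndTransformFeedbackEXT(cb, 0, 0, NULL, NULL);
     }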
2208
2209 void
2210 tu_CmdPushConstants(VkCommandBuffer commandBuffer,
2211 VkPipelineLayout layout,
2212 VkShaderStageFlags stageFlags,
2213 uint32_t offset,
2214 uint32_t size,
2215 const void *pValues)
2216 {
2217 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2218 memcpy((void*) cmd->push_constants + offset, pValues, size);
2219 cmd->state.dirty |= TU_CMD_DIRTY_PUSH_CONSTANTS;
2220 }
2221
2222 VkResult
2223 tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
2224 {
2225 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
2226
2227 if (cmd_buffer->scratch_seqno) {
2228 tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
2229 MSM_SUBMIT_BO_WRITE);
2230 }
2231
2232 if (cmd_buffer->use_vsc_data) {
2233 tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data,
2234 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
2235 tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data2,
2236 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
2237 }
2238
2239 for (uint32_t i = 0; i < cmd_buffer->draw_cs.bo_count; i++) {
2240 tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_cs.bos[i],
2241 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2242 }
2243
2244 for (uint32_t i = 0; i < cmd_buffer->draw_epilogue_cs.bo_count; i++) {
2245 tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_epilogue_cs.bos[i],
2246 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2247 }
2248
2249 for (uint32_t i = 0; i < cmd_buffer->sub_cs.bo_count; i++) {
2250 tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->sub_cs.bos[i],
2251 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2252 }
2253
2254 tu_cs_end(&cmd_buffer->cs);
2255 tu_cs_end(&cmd_buffer->draw_cs);
2256 tu_cs_end(&cmd_buffer->draw_epilogue_cs);
2257
2258 cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;
2259
2260 return cmd_buffer->record_result;
2261 }
2262
2263 void
2264 tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
2265 VkPipelineBindPoint pipelineBindPoint,
2266 VkPipeline _pipeline)
2267 {
2268 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2269 TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);
2270
2271 switch (pipelineBindPoint) {
2272 case VK_PIPELINE_BIND_POINT_GRAPHICS:
2273 cmd->state.pipeline = pipeline;
2274 cmd->state.dirty |= TU_CMD_DIRTY_PIPELINE;
2275 break;
2276 case VK_PIPELINE_BIND_POINT_COMPUTE:
2277 cmd->state.compute_pipeline = pipeline;
2278 cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_PIPELINE;
2279 break;
2280 default:
2281 unreachable("unrecognized pipeline bind point");
2282 break;
2283 }
2284
2285 tu_bo_list_add(&cmd->bo_list, &pipeline->program.binary_bo,
2286 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2287 for (uint32_t i = 0; i < pipeline->cs.bo_count; i++) {
2288 tu_bo_list_add(&cmd->bo_list, pipeline->cs.bos[i],
2289 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2290 }
2291 }
2292
2293 void
2294 tu_CmdSetViewport(VkCommandBuffer commandBuffer,
2295 uint32_t firstViewport,
2296 uint32_t viewportCount,
2297 const VkViewport *pViewports)
2298 {
2299 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2300 struct tu_cs *draw_cs = &cmd->draw_cs;
2301
2302 assert(firstViewport == 0 && viewportCount == 1);
2303 tu6_emit_viewport(draw_cs, pViewports);
2304
2305 tu_cs_sanity_check(draw_cs);
2306 }
2307
2308 void
2309 tu_CmdSetScissor(VkCommandBuffer commandBuffer,
2310 uint32_t firstScissor,
2311 uint32_t scissorCount,
2312 const VkRect2D *pScissors)
2313 {
2314 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2315 struct tu_cs *draw_cs = &cmd->draw_cs;
2316
2317 assert(firstScissor == 0 && scissorCount == 1);
2318 tu6_emit_scissor(draw_cs, pScissors);
2319
2320 tu_cs_sanity_check(draw_cs);
2321 }
2322
2323 void
2324 tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
2325 {
2326 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2327
2328 cmd->state.dynamic.line_width = lineWidth;
2329
2330 /* line width depends on VkPipelineRasterizationStateCreateInfo */
2331 cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
2332 }
2333
2334 void
2335 tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
2336 float depthBiasConstantFactor,
2337 float depthBiasClamp,
2338 float depthBiasSlopeFactor)
2339 {
2340 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2341 struct tu_cs *draw_cs = &cmd->draw_cs;
2342
2343 tu6_emit_depth_bias(draw_cs, depthBiasConstantFactor, depthBiasClamp,
2344 depthBiasSlopeFactor);
2345
2346 tu_cs_sanity_check(draw_cs);
2347 }
2348
2349 void
2350 tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
2351 const float blendConstants[4])
2352 {
2353 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2354 struct tu_cs *draw_cs = &cmd->draw_cs;
2355
2356 tu6_emit_blend_constants(draw_cs, blendConstants);
2357
2358 tu_cs_sanity_check(draw_cs);
2359 }
2360
2361 void
2362 tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
2363 float minDepthBounds,
2364 float maxDepthBounds)
2365 {
2366 }
2367
2368 void
2369 tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
2370 VkStencilFaceFlags faceMask,
2371 uint32_t compareMask)
2372 {
2373 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2374
2375 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2376 cmd->state.dynamic.stencil_compare_mask.front = compareMask;
2377 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2378 cmd->state.dynamic.stencil_compare_mask.back = compareMask;
2379
2380 /* the front/back compare masks must be updated together */
2381 cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
2382 }
2383
2384 void
2385 tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
2386 VkStencilFaceFlags faceMask,
2387 uint32_t writeMask)
2388 {
2389 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2390
2391 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2392 cmd->state.dynamic.stencil_write_mask.front = writeMask;
2393 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2394 cmd->state.dynamic.stencil_write_mask.back = writeMask;
2395
2396 /* the front/back write masks must be updated together */
2397 cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
2398 }
2399
2400 void
2401 tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
2402 VkStencilFaceFlags faceMask,
2403 uint32_t reference)
2404 {
2405 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2406
2407 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2408 cmd->state.dynamic.stencil_reference.front = reference;
2409 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2410 cmd->state.dynamic.stencil_reference.back = reference;
2411
2412 /* the front/back references must be updated together */
2413 cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
2414 }
2415
2416 void
2417 tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
2418 uint32_t commandBufferCount,
2419 const VkCommandBuffer *pCmdBuffers)
2420 {
2421 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2422 VkResult result;
2423
2424 assert(commandBufferCount > 0);
2425
2426 for (uint32_t i = 0; i < commandBufferCount; i++) {
2427 TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);
2428
2429 result = tu_bo_list_merge(&cmd->bo_list, &secondary->bo_list);
2430 if (result != VK_SUCCESS) {
2431 cmd->record_result = result;
2432 break;
2433 }
2434
2435 if (secondary->usage_flags &
2436 VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
2437 assert(tu_cs_is_empty(&secondary->cs));
2438
2439 result = tu_cs_add_entries(&cmd->draw_cs, &secondary->draw_cs);
2440 if (result != VK_SUCCESS) {
2441 cmd->record_result = result;
2442 break;
2443 }
2444
2445 result = tu_cs_add_entries(&cmd->draw_epilogue_cs,
2446 &secondary->draw_epilogue_cs);
2447 if (result != VK_SUCCESS) {
2448 cmd->record_result = result;
2449 break;
2450 }
2451 } else {
2452 assert(tu_cs_is_empty(&secondary->draw_cs));
2453 assert(tu_cs_is_empty(&secondary->draw_epilogue_cs));
2454
2455 for (uint32_t j = 0; j < secondary->cs.bo_count; j++) {
2456 tu_bo_list_add(&cmd->bo_list, secondary->cs.bos[j],
2457 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
2458 }
2459
2460 tu_cs_emit_call(&cmd->cs, &secondary->cs);
2461 }
2462 }
2463    cmd->state.dirty = ~0u; /* TODO: only set the dirty bits that are actually needed */
2464 }
2465
2466 VkResult
2467 tu_CreateCommandPool(VkDevice _device,
2468 const VkCommandPoolCreateInfo *pCreateInfo,
2469 const VkAllocationCallbacks *pAllocator,
2470 VkCommandPool *pCmdPool)
2471 {
2472 TU_FROM_HANDLE(tu_device, device, _device);
2473 struct tu_cmd_pool *pool;
2474
2475 pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
2476 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2477 if (pool == NULL)
2478 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2479
2480 if (pAllocator)
2481 pool->alloc = *pAllocator;
2482 else
2483 pool->alloc = device->alloc;
2484
2485 list_inithead(&pool->cmd_buffers);
2486 list_inithead(&pool->free_cmd_buffers);
2487
2488 pool->queue_family_index = pCreateInfo->queueFamilyIndex;
2489
2490 *pCmdPool = tu_cmd_pool_to_handle(pool);
2491
2492 return VK_SUCCESS;
2493 }
2494
2495 void
2496 tu_DestroyCommandPool(VkDevice _device,
2497 VkCommandPool commandPool,
2498 const VkAllocationCallbacks *pAllocator)
2499 {
2500 TU_FROM_HANDLE(tu_device, device, _device);
2501 TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
2502
2503 if (!pool)
2504 return;
2505
2506 list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
2507 &pool->cmd_buffers, pool_link)
2508 {
2509 tu_cmd_buffer_destroy(cmd_buffer);
2510 }
2511
2512 list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
2513 &pool->free_cmd_buffers, pool_link)
2514 {
2515 tu_cmd_buffer_destroy(cmd_buffer);
2516 }
2517
2518 vk_free2(&device->alloc, pAllocator, pool);
2519 }
2520
2521 VkResult
2522 tu_ResetCommandPool(VkDevice device,
2523 VkCommandPool commandPool,
2524 VkCommandPoolResetFlags flags)
2525 {
2526 TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
2527 VkResult result;
2528
2529 list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
2530 pool_link)
2531 {
2532 result = tu_reset_cmd_buffer(cmd_buffer);
2533 if (result != VK_SUCCESS)
2534 return result;
2535 }
2536
2537 return VK_SUCCESS;
2538 }
2539
2540 void
2541 tu_TrimCommandPool(VkDevice device,
2542 VkCommandPool commandPool,
2543 VkCommandPoolTrimFlags flags)
2544 {
2545 TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
2546
2547 if (!pool)
2548 return;
2549
2550 list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
2551 &pool->free_cmd_buffers, pool_link)
2552 {
2553 tu_cmd_buffer_destroy(cmd_buffer);
2554 }
2555 }
2556
2557 void
2558 tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
2559 const VkRenderPassBeginInfo *pRenderPassBegin,
2560 VkSubpassContents contents)
2561 {
2562 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2563 TU_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
2564 TU_FROM_HANDLE(tu_framebuffer, fb, pRenderPassBegin->framebuffer);
2565
2566 cmd->state.pass = pass;
2567 cmd->state.subpass = pass->subpasses;
2568 cmd->state.framebuffer = fb;
2569
2570 tu_cmd_update_tiling_config(cmd, &pRenderPassBegin->renderArea);
2571 tu_cmd_prepare_tile_store_ib(cmd);
2572
2573 tu_emit_load_clear(cmd, pRenderPassBegin);
2574
2575 tu6_emit_zs(cmd, cmd->state.subpass, &cmd->draw_cs);
2576 tu6_emit_mrt(cmd, cmd->state.subpass, &cmd->draw_cs);
2577 tu6_emit_msaa(cmd, cmd->state.subpass, &cmd->draw_cs);
2578 tu6_emit_render_cntl(cmd, cmd->state.subpass, &cmd->draw_cs, false);
2579
2580 /* note: use_hw_binning only checks tiling config */
2581 if (use_hw_binning(cmd))
2582 cmd->use_vsc_data = true;
2583
2584 for (uint32_t i = 0; i < fb->attachment_count; ++i) {
2585 const struct tu_image_view *iview = fb->attachments[i].attachment;
2586 tu_bo_list_add(&cmd->bo_list, iview->image->bo,
2587 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
2588 }
2589 }
2590
2591 void
2592 tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
2593 const VkRenderPassBeginInfo *pRenderPassBeginInfo,
2594 const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
2595 {
2596 tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
2597 pSubpassBeginInfo->contents);
2598 }
2599
2600 void
2601 tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
2602 {
2603 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2604 const struct tu_render_pass *pass = cmd->state.pass;
2605 struct tu_cs *cs = &cmd->draw_cs;
2606
2607 const struct tu_subpass *subpass = cmd->state.subpass++;
2608 /* TODO:
2609 * if msaa samples change between subpasses,
2610 * attachment store is broken for some attachments
2611 */
2612 if (subpass->resolve_attachments) {
2613 tu6_emit_blit_scissor(cmd, cs, true);
2614 for (unsigned i = 0; i < subpass->color_count; i++) {
2615 uint32_t a = subpass->resolve_attachments[i].attachment;
2616 if (a != VK_ATTACHMENT_UNUSED) {
2617 tu6_emit_resolve(cmd, cs, a,
2618 subpass->color_attachments[i].attachment);
2619 }
2620 }
2621 }
2622
2623 /* invalidate because reading input attachments will cache GMEM and
2624     * the cache isn't updated when GMEM is written
2625 * TODO: is there a no-cache bit for textures?
2626 */
2627 if (cmd->state.subpass->input_count)
2628 tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
2629
2630 /* emit mrt/zs/msaa/ubwc state for the subpass that is starting */
2631 tu6_emit_zs(cmd, cmd->state.subpass, cs);
2632 tu6_emit_mrt(cmd, cmd->state.subpass, cs);
2633 tu6_emit_msaa(cmd, cmd->state.subpass, cs);
2634 tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, false);
2635
2636 /* Emit flushes so that input attachments will read the correct value. This
2637 * is for sysmem only, although it shouldn't do much harm on gmem.
2638 */
2639 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
2640 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
2641
2642 /* TODO:
2643 * since we don't know how to do GMEM->GMEM resolve,
2644     * resolve attachments are resolved to memory and then loaded back into GMEM if needed
2645 */
2646 if (subpass->resolve_attachments) {
2647 for (unsigned i = 0; i < subpass->color_count; i++) {
2648 uint32_t a = subpass->resolve_attachments[i].attachment;
2649 if (a != VK_ATTACHMENT_UNUSED && pass->attachments[a].gmem_offset >= 0) {
2650 tu_finishme("missing GMEM->GMEM resolve, performance will suffer\n");
2651 tu6_emit_predicated_blit(cmd, cs, a, a, false);
2652 }
2653 }
2654 }
2655 }
2656
2657 void
2658 tu_CmdNextSubpass2(VkCommandBuffer commandBuffer,
2659 const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
2660 const VkSubpassEndInfoKHR *pSubpassEndInfo)
2661 {
2662 tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
2663 }
2664
2665 struct tu_draw_info
2666 {
2667 /**
2668 * Number of vertices.
2669 */
2670 uint32_t count;
2671
2672 /**
2673 * Index of the first vertex.
2674 */
2675 int32_t vertex_offset;
2676
2677 /**
2678 * First instance id.
2679 */
2680 uint32_t first_instance;
2681
2682 /**
2683 * Number of instances.
2684 */
2685 uint32_t instance_count;
2686
2687 /**
2688 * First index (indexed draws only).
2689 */
2690 uint32_t first_index;
2691
2692 /**
2693 * Whether it's an indexed draw.
2694 */
2695 bool indexed;
2696
2697 /**
2698 * Indirect draw parameters resource.
2699 */
2700 struct tu_buffer *indirect;
2701 uint64_t indirect_offset;
2702 uint32_t stride;
2703
2704 /**
2705 * Draw count parameters resource.
2706 */
2707 struct tu_buffer *count_buffer;
2708 uint64_t count_buffer_offset;
2709 };
2710
2711 #define ENABLE_ALL (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
2712 #define ENABLE_DRAW (CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
2713
2714 enum tu_draw_state_group_id
2715 {
2716 TU_DRAW_STATE_PROGRAM,
2717 TU_DRAW_STATE_PROGRAM_BINNING,
2718 TU_DRAW_STATE_VI,
2719 TU_DRAW_STATE_VI_BINNING,
2720 TU_DRAW_STATE_VP,
2721 TU_DRAW_STATE_RAST,
2722 TU_DRAW_STATE_DS,
2723 TU_DRAW_STATE_BLEND,
2724 TU_DRAW_STATE_VS_CONST,
2725 TU_DRAW_STATE_FS_CONST,
2726 TU_DRAW_STATE_VS_TEX,
2727 TU_DRAW_STATE_FS_TEX_SYSMEM,
2728 TU_DRAW_STATE_FS_TEX_GMEM,
2729 TU_DRAW_STATE_FS_IBO,
2730 TU_DRAW_STATE_VS_PARAMS,
2731
2732 TU_DRAW_STATE_COUNT,
2733 };
2734
2735 struct tu_draw_state_group
2736 {
2737 enum tu_draw_state_group_id id;
2738 uint32_t enable_mask;
2739 struct tu_cs_entry ib;
2740 };
2741
2742 static const struct tu_sampler*
2743 sampler_ptr(struct tu_descriptor_state *descriptors_state,
2744 const struct tu_descriptor_map *map, unsigned i,
2745 unsigned array_index)
2746 {
2747 assert(descriptors_state->valid & (1 << map->set[i]));
2748
2749 struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
2750 assert(map->binding[i] < set->layout->binding_count);
2751
2752 const struct tu_descriptor_set_binding_layout *layout =
2753 &set->layout->binding[map->binding[i]];
2754
2755 if (layout->immutable_samplers_offset) {
2756 const struct tu_sampler *immutable_samplers =
2757 tu_immutable_samplers(set->layout, layout);
2758
2759 return &immutable_samplers[array_index];
2760 }
2761
2762 switch (layout->type) {
2763 case VK_DESCRIPTOR_TYPE_SAMPLER:
2764 return (struct tu_sampler*) &set->mapped_ptr[layout->offset / 4];
2765 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2766 return (struct tu_sampler*) &set->mapped_ptr[layout->offset / 4 + A6XX_TEX_CONST_DWORDS +
2767 array_index *
2768 (A6XX_TEX_CONST_DWORDS +
2769 sizeof(struct tu_sampler) / 4)];
2770 default:
2771 unreachable("unimplemented descriptor type");
2772 break;
2773 }
2774 }
2775
2776 static void
2777 write_tex_const(struct tu_cmd_buffer *cmd,
2778 uint32_t *dst,
2779 struct tu_descriptor_state *descriptors_state,
2780 const struct tu_descriptor_map *map,
2781 unsigned i, unsigned array_index, bool is_sysmem)
2782 {
2783 assert(descriptors_state->valid & (1 << map->set[i]));
2784
2785 struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
2786 assert(map->binding[i] < set->layout->binding_count);
2787
2788 const struct tu_descriptor_set_binding_layout *layout =
2789 &set->layout->binding[map->binding[i]];
2790
2791 switch (layout->type) {
2792 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
2793 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2794 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2795 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
2796 memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
2797 array_index * A6XX_TEX_CONST_DWORDS],
2798 A6XX_TEX_CONST_DWORDS * 4);
2799 break;
2800 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2801 memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
2802 array_index *
2803 (A6XX_TEX_CONST_DWORDS +
2804 sizeof(struct tu_sampler) / 4)],
2805 A6XX_TEX_CONST_DWORDS * 4);
2806 break;
2807 default:
2808 unreachable("unimplemented descriptor type");
2809 break;
2810 }
2811
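        /* When rendering to GMEM, input attachments are sampled directly from
         * GMEM, so patch the descriptor: switch it to the GMEM tile mode, set
         * the pitch to one tile width and point the base address at
         * gmem_base plus the attachment's gmem_offset.
         */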
2812 if (layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT && !is_sysmem) {
2813 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
2814 uint32_t a = cmd->state.subpass->input_attachments[map->value[i] +
2815 array_index].attachment;
2816 const struct tu_render_pass_attachment *att = &cmd->state.pass->attachments[a];
2817
2818 assert(att->gmem_offset >= 0);
2819
2820 dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
2821 dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
2822 dst[2] &= ~(A6XX_TEX_CONST_2_TYPE__MASK | A6XX_TEX_CONST_2_PITCH__MASK);
2823 dst[2] |=
2824 A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
2825 A6XX_TEX_CONST_2_PITCH(tiling->tile0.extent.width * att->cpp);
2826 dst[3] = 0;
2827 dst[4] = cmd->device->physical_device->gmem_base + att->gmem_offset;
2828 dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
2829 for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
2830 dst[i] = 0;
2831
2832 if (cmd->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
2833 tu_finishme("patch input attachment pitch for secondary cmd buffer");
2834 }
2835 }
2836
2837 static void
2838 write_image_ibo(struct tu_cmd_buffer *cmd,
2839 uint32_t *dst,
2840 struct tu_descriptor_state *descriptors_state,
2841 const struct tu_descriptor_map *map,
2842 unsigned i, unsigned array_index)
2843 {
2844 assert(descriptors_state->valid & (1 << map->set[i]));
2845
2846 struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
2847 assert(map->binding[i] < set->layout->binding_count);
2848
2849 const struct tu_descriptor_set_binding_layout *layout =
2850 &set->layout->binding[map->binding[i]];
2851
2852 assert(layout->type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
2853
2854 memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
2855 (array_index * 2 + 1) * A6XX_TEX_CONST_DWORDS],
2856 A6XX_TEX_CONST_DWORDS * 4);
2857 }
2858
2859 static uint64_t
2860 buffer_ptr(struct tu_descriptor_state *descriptors_state,
2861 const struct tu_descriptor_map *map,
2862 unsigned i, unsigned array_index)
2863 {
2864 assert(descriptors_state->valid & (1 << map->set[i]));
2865
2866 struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
2867 assert(map->binding[i] < set->layout->binding_count);
2868
2869 const struct tu_descriptor_set_binding_layout *layout =
2870 &set->layout->binding[map->binding[i]];
2871
2872 switch (layout->type) {
2873 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2874 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2875 return descriptors_state->dynamic_buffers[layout->dynamic_offset_offset +
2876 array_index];
2877 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2878 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
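           /* plain UBO/SSBO descriptors store the buffer VA as two dwords
            * (lo, then hi) in the set's mapped memory
            */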
2879 return (uint64_t) set->mapped_ptr[layout->offset / 4 + array_index * 2 + 1] << 32 |
2880 set->mapped_ptr[layout->offset / 4 + array_index * 2];
2881 default:
2882 unreachable("unimplemented descriptor type");
2883 break;
2884 }
2885 }
2886
2887 static inline uint32_t
2888 tu6_stage2opcode(gl_shader_stage type)
2889 {
2890 switch (type) {
2891 case MESA_SHADER_VERTEX:
2892 case MESA_SHADER_TESS_CTRL:
2893 case MESA_SHADER_TESS_EVAL:
2894 case MESA_SHADER_GEOMETRY:
2895 return CP_LOAD_STATE6_GEOM;
2896 case MESA_SHADER_FRAGMENT:
2897 case MESA_SHADER_COMPUTE:
2898 case MESA_SHADER_KERNEL:
2899 return CP_LOAD_STATE6_FRAG;
2900 default:
2901 unreachable("bad shader type");
2902 }
2903 }
2904
2905 static inline enum a6xx_state_block
2906 tu6_stage2shadersb(gl_shader_stage type)
2907 {
2908 switch (type) {
2909 case MESA_SHADER_VERTEX:
2910 return SB6_VS_SHADER;
2911 case MESA_SHADER_FRAGMENT:
2912 return SB6_FS_SHADER;
2913 case MESA_SHADER_COMPUTE:
2914 case MESA_SHADER_KERNEL:
2915 return SB6_CS_SHADER;
2916 default:
2917 unreachable("bad shader type");
2918 return ~0;
2919 }
2920 }
2921
2922 static void
2923 tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
2924 struct tu_descriptor_state *descriptors_state,
2925 gl_shader_stage type,
2926 uint32_t *push_constants)
2927 {
2928 const struct tu_program_descriptor_linkage *link =
2929 &pipeline->program.link[type];
2930 const struct ir3_ubo_analysis_state *state = &link->ubo_state;
2931
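        /* range[0] is the push constant block and is uploaded inline
         * (SS6_DIRECT); the remaining ranges are UBOs uploaded by GPU address
         * (SS6_INDIRECT).
         */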
2932 for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
2933 if (state->range[i].start < state->range[i].end) {
2934 uint32_t size = state->range[i].end - state->range[i].start;
2935 uint32_t offset = state->range[i].start;
2936
2937 /* and even if the start of the const buffer is before
2938 * first_immediate, the end may not be:
2939 */
2940 size = MIN2(size, (16 * link->constlen) - state->range[i].offset);
2941
2942 if (size == 0)
2943 continue;
2944
2945 /* things should be aligned to vec4: */
2946 debug_assert((state->range[i].offset % 16) == 0);
2947 debug_assert((size % 16) == 0);
2948 debug_assert((offset % 16) == 0);
2949
2950 if (i == 0) {
2951 /* push constants */
2952 tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (size / 4));
2953 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
2954 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
2955 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
2956 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
2957 CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
2958 tu_cs_emit(cs, 0);
2959 tu_cs_emit(cs, 0);
2960 for (unsigned i = 0; i < size / 4; i++)
2961 tu_cs_emit(cs, push_constants[i + offset / 4]);
2962 continue;
2963 }
2964
2965 /* Look through the UBO map to find our UBO index, and get the VA for
2966 * that UBO.
2967 */
2968 uint64_t va = 0;
2969 uint32_t ubo_idx = i - 1;
2970 uint32_t ubo_map_base = 0;
2971 for (int j = 0; j < link->ubo_map.num; j++) {
2972 if (ubo_idx >= ubo_map_base &&
2973 ubo_idx < ubo_map_base + link->ubo_map.array_size[j]) {
2974 va = buffer_ptr(descriptors_state, &link->ubo_map, j,
2975 ubo_idx - ubo_map_base);
2976 break;
2977 }
2978 ubo_map_base += link->ubo_map.array_size[j];
2979 }
2980 assert(va);
2981
2982 tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
2983 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
2984 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
2985 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
2986 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
2987 CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
2988 tu_cs_emit_qw(cs, va + offset);
2989 }
2990 }
2991 }
2992
2993 static void
2994 tu6_emit_ubos(struct tu_cs *cs, const struct tu_pipeline *pipeline,
2995 struct tu_descriptor_state *descriptors_state,
2996 gl_shader_stage type)
2997 {
2998 const struct tu_program_descriptor_linkage *link =
2999 &pipeline->program.link[type];
3000
3001 uint32_t num = MIN2(link->ubo_map.num_desc, link->const_state.num_ubos);
3002 uint32_t anum = align(num, 2);
3003
3004 if (!num)
3005 return;
3006
3007 tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (2 * anum));
3008 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(link->const_state.offsets.ubo) |
3009 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
3010 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
3011 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
3012 CP_LOAD_STATE6_0_NUM_UNIT(anum/2));
3013 tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
3014 tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
3015
3016 unsigned emitted = 0;
3017 for (unsigned i = 0; emitted < num && i < link->ubo_map.num; i++) {
3018 for (unsigned j = 0; emitted < num && j < link->ubo_map.array_size[i]; j++) {
3019 tu_cs_emit_qw(cs, buffer_ptr(descriptors_state, &link->ubo_map, i, j));
3020 emitted++;
3021 }
3022 }
3023
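        /* pad to an even number of pointers: NUM_UNIT above counts pairs of
         * UBO addresses (anum / 2)
         */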
3024 for (; emitted < anum; emitted++) {
3025 tu_cs_emit(cs, 0xffffffff);
3026 tu_cs_emit(cs, 0xffffffff);
3027 }
3028 }
3029
3030 static struct tu_cs_entry
3031 tu6_emit_consts(struct tu_cmd_buffer *cmd,
3032 const struct tu_pipeline *pipeline,
3033 struct tu_descriptor_state *descriptors_state,
3034 gl_shader_stage type)
3035 {
3036 struct tu_cs cs;
3037 tu_cs_begin_sub_stream(&cmd->sub_cs, 512, &cs); /* TODO: maximum size? */
3038
3039 tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);
3040 tu6_emit_ubos(&cs, pipeline, descriptors_state, type);
3041
3042 return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
3043 }
3044
3045 static VkResult
3046 tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
3047 const struct tu_draw_info *draw,
3048 struct tu_cs_entry *entry)
3049 {
3050 /* TODO: fill out more than just base instance */
3051 const struct tu_program_descriptor_linkage *link =
3052 &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
3053 const struct ir3_const_state *const_state = &link->const_state;
3054 struct tu_cs cs;
3055
3056 if (const_state->offsets.driver_param >= link->constlen) {
3057 *entry = (struct tu_cs_entry) {};
3058 return VK_SUCCESS;
3059 }
3060
3061 VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 8, &cs);
3062 if (result != VK_SUCCESS)
3063 return result;
3064
3065 tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
3066 tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(const_state->offsets.driver_param) |
3067 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
3068 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
3069 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
3070 CP_LOAD_STATE6_0_NUM_UNIT(1));
3071 tu_cs_emit(&cs, 0);
3072 tu_cs_emit(&cs, 0);
3073
3074 STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);
3075
3076 tu_cs_emit(&cs, 0);
3077 tu_cs_emit(&cs, 0);
3078 tu_cs_emit(&cs, draw->first_instance);
3079 tu_cs_emit(&cs, 0);
3080
3081 *entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
3082 return VK_SUCCESS;
3083 }
3084
3085 static VkResult
3086 tu6_emit_textures(struct tu_cmd_buffer *cmd,
3087 const struct tu_pipeline *pipeline,
3088 struct tu_descriptor_state *descriptors_state,
3089 gl_shader_stage type,
3090 struct tu_cs_entry *entry,
3091 bool *needs_border,
3092 bool is_sysmem)
3093 {
3094 struct tu_cs *draw_state = &cmd->sub_cs;
3095 const struct tu_program_descriptor_linkage *link =
3096 &pipeline->program.link[type];
3097 VkResult result;
3098
3099 if (link->texture_map.num_desc == 0 && link->sampler_map.num_desc == 0) {
3100 *entry = (struct tu_cs_entry) {};
3101 return VK_SUCCESS;
3102 }
3103
3104 /* allocate and fill texture state */
3105 struct ts_cs_memory tex_const;
3106 result = tu_cs_alloc(draw_state, link->texture_map.num_desc,
3107 A6XX_TEX_CONST_DWORDS, &tex_const);
3108 if (result != VK_SUCCESS)
3109 return result;
3110
3111 int tex_index = 0;
3112 for (unsigned i = 0; i < link->texture_map.num; i++) {
3113 for (int j = 0; j < link->texture_map.array_size[i]; j++) {
3114 write_tex_const(cmd,
3115 &tex_const.map[A6XX_TEX_CONST_DWORDS * tex_index++],
3116 descriptors_state, &link->texture_map, i, j,
3117 is_sysmem);
3118 }
3119 }
3120
3121 /* allocate and fill sampler state */
3122 struct ts_cs_memory tex_samp = { 0 };
3123 if (link->sampler_map.num_desc) {
3124 result = tu_cs_alloc(draw_state, link->sampler_map.num_desc,
3125 A6XX_TEX_SAMP_DWORDS, &tex_samp);
3126 if (result != VK_SUCCESS)
3127 return result;
3128
3129 int sampler_index = 0;
3130 for (unsigned i = 0; i < link->sampler_map.num; i++) {
3131 for (int j = 0; j < link->sampler_map.array_size[i]; j++) {
3132 const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
3133 &link->sampler_map,
3134 i, j);
3135 memcpy(&tex_samp.map[A6XX_TEX_SAMP_DWORDS * sampler_index++],
3136 sampler->state, sizeof(sampler->state));
3137 *needs_border |= sampler->needs_border;
3138 }
3139 }
3140 }
3141
3142 unsigned tex_samp_reg, tex_const_reg, tex_count_reg;
3143 enum a6xx_state_block sb;
3144
3145 switch (type) {
3146 case MESA_SHADER_VERTEX:
3147 sb = SB6_VS_TEX;
3148 tex_samp_reg = REG_A6XX_SP_VS_TEX_SAMP_LO;
3149 tex_const_reg = REG_A6XX_SP_VS_TEX_CONST_LO;
3150 tex_count_reg = REG_A6XX_SP_VS_TEX_COUNT;
3151 break;
3152 case MESA_SHADER_FRAGMENT:
3153 sb = SB6_FS_TEX;
3154 tex_samp_reg = REG_A6XX_SP_FS_TEX_SAMP_LO;
3155 tex_const_reg = REG_A6XX_SP_FS_TEX_CONST_LO;
3156 tex_count_reg = REG_A6XX_SP_FS_TEX_COUNT;
3157 break;
3158 case MESA_SHADER_COMPUTE:
3159 sb = SB6_CS_TEX;
3160 tex_samp_reg = REG_A6XX_SP_CS_TEX_SAMP_LO;
3161 tex_const_reg = REG_A6XX_SP_CS_TEX_CONST_LO;
3162 tex_count_reg = REG_A6XX_SP_CS_TEX_COUNT;
3163 break;
3164 default:
3165 unreachable("bad state block");
3166 }
3167
3168 struct tu_cs cs;
3169 result = tu_cs_begin_sub_stream(draw_state, 16, &cs);
3170 if (result != VK_SUCCESS)
3171 return result;
3172
3173 if (link->sampler_map.num_desc) {
3174 /* output sampler state: */
3175 tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
3176 tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
3177 CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
3178 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
3179 CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
3180 CP_LOAD_STATE6_0_NUM_UNIT(link->sampler_map.num_desc));
3181 tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
3182
3183 tu_cs_emit_pkt4(&cs, tex_samp_reg, 2);
3184 tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
3185 }
3186
3187 /* emit texture state: */
3188 tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
3189 tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
3190 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
3191 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
3192 CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
3193 CP_LOAD_STATE6_0_NUM_UNIT(link->texture_map.num_desc));
3194 tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */
3195
3196 tu_cs_emit_pkt4(&cs, tex_const_reg, 2);
3197 tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */
3198
3199 tu_cs_emit_pkt4(&cs, tex_count_reg, 1);
3200 tu_cs_emit(&cs, link->texture_map.num_desc);
3201
3202 *entry = tu_cs_end_sub_stream(draw_state, &cs);
3203 return VK_SUCCESS;
3204 }
3205
3206 static VkResult
3207 tu6_emit_ibo(struct tu_cmd_buffer *cmd,
3208 const struct tu_pipeline *pipeline,
3209 struct tu_descriptor_state *descriptors_state,
3210 gl_shader_stage type,
3211 struct tu_cs_entry *entry)
3212 {
3213 struct tu_cs *draw_state = &cmd->sub_cs;
3214 const struct tu_program_descriptor_linkage *link =
3215 &pipeline->program.link[type];
3216 VkResult result;
3217
3218 unsigned num_desc = link->ssbo_map.num_desc + link->image_map.num_desc;
3219
3220 if (num_desc == 0) {
3221 *entry = (struct tu_cs_entry) {};
3222 return VK_SUCCESS;
3223 }
3224
3225 struct ts_cs_memory ibo_const;
3226 result = tu_cs_alloc(draw_state, num_desc,
3227 A6XX_TEX_CONST_DWORDS, &ibo_const);
3228 if (result != VK_SUCCESS)
3229 return result;
3230
3231 int ssbo_index = 0;
3232 for (unsigned i = 0; i < link->ssbo_map.num; i++) {
3233 for (int j = 0; j < link->ssbo_map.array_size[i]; j++) {
3234 uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];
3235
3236 uint64_t va = buffer_ptr(descriptors_state, &link->ssbo_map, i, j);
3237 /* We don't expose robustBufferAccess, so leave the size unlimited. */
3238 uint32_t sz = MAX_STORAGE_BUFFER_RANGE / 4;
3239
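              /* describe the SSBO as a buffer of 32-bit UINT elements; the
               * element count is split across the WIDTH (low 15 bits) and
               * HEIGHT fields
               */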
3240 dst[0] = A6XX_IBO_0_FMT(FMT6_32_UINT);
3241 dst[1] = A6XX_IBO_1_WIDTH(sz & MASK(15)) |
3242 A6XX_IBO_1_HEIGHT(sz >> 15);
3243 dst[2] = A6XX_IBO_2_UNK4 |
3244 A6XX_IBO_2_UNK31 |
3245 A6XX_IBO_2_TYPE(A6XX_TEX_1D);
3246 dst[3] = 0;
3247 dst[4] = va;
3248 dst[5] = va >> 32;
3249 for (int i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
3250 dst[i] = 0;
3251
3252 ssbo_index++;
3253 }
3254 }
3255
3256 for (unsigned i = 0; i < link->image_map.num; i++) {
3257 for (int j = 0; j < link->image_map.array_size[i]; j++) {
3258 uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];
3259
3260 write_image_ibo(cmd, dst,
3261 descriptors_state, &link->image_map, i, j);
3262
3263 ssbo_index++;
3264 }
3265 }
3266
3267 assert(ssbo_index == num_desc);
3268
3269 struct tu_cs cs;
3270 result = tu_cs_begin_sub_stream(draw_state, 7, &cs);
3271 if (result != VK_SUCCESS)
3272 return result;
3273
3274 uint32_t opcode, ibo_addr_reg;
3275 enum a6xx_state_block sb;
3276 enum a6xx_state_type st;
3277
3278 switch (type) {
3279 case MESA_SHADER_FRAGMENT:
3280 opcode = CP_LOAD_STATE6;
3281 st = ST6_SHADER;
3282 sb = SB6_IBO;
3283 ibo_addr_reg = REG_A6XX_SP_IBO_LO;
3284 break;
3285 case MESA_SHADER_COMPUTE:
3286 opcode = CP_LOAD_STATE6_FRAG;
3287 st = ST6_IBO;
3288 sb = SB6_CS_SHADER;
3289 ibo_addr_reg = REG_A6XX_SP_CS_IBO_LO;
3290 break;
3291 default:
3292 unreachable("unsupported stage for ibos");
3293 }
3294
3295 /* emit texture state: */
3296 tu_cs_emit_pkt7(&cs, opcode, 3);
3297 tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
3298 CP_LOAD_STATE6_0_STATE_TYPE(st) |
3299 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
3300 CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
3301 CP_LOAD_STATE6_0_NUM_UNIT(num_desc));
3302 tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */
3303
3304 tu_cs_emit_pkt4(&cs, ibo_addr_reg, 2);
3305 tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */
3306
3307 *entry = tu_cs_end_sub_stream(draw_state, &cs);
3308 return VK_SUCCESS;
3309 }
3310
3311 struct PACKED bcolor_entry {
3312 uint32_t fp32[4];
3313 uint16_t ui16[4];
3314 int16_t si16[4];
3315 uint16_t fp16[4];
3316 uint16_t rgb565;
3317 uint16_t rgb5a1;
3318 uint16_t rgba4;
3319 uint8_t __pad0[2];
3320 uint8_t ui8[4];
3321 int8_t si8[4];
3322 uint32_t rgb10a2;
3323 uint32_t z24; /* also s8? */
3324 uint16_t srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
3325 uint8_t __pad1[56];
3326 } border_color[] = {
3327 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
3328 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
3329 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
3330 .fp32[3] = 0x3f800000,
3331 .ui16[3] = 0xffff,
3332 .si16[3] = 0x7fff,
3333 .fp16[3] = 0x3c00,
3334 .rgb5a1 = 0x8000,
3335 .rgba4 = 0xf000,
3336 .ui8[3] = 0xff,
3337 .si8[3] = 0x7f,
3338 .rgb10a2 = 0xc0000000,
3339 .srgb[3] = 0x3c00,
3340 },
3341 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
3342 .fp32[3] = 1,
3343 .fp16[3] = 1,
3344 },
3345 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
3346 .fp32[0 ... 3] = 0x3f800000,
3347 .ui16[0 ... 3] = 0xffff,
3348 .si16[0 ... 3] = 0x7fff,
3349 .fp16[0 ... 3] = 0x3c00,
3350 .rgb565 = 0xffff,
3351 .rgb5a1 = 0xffff,
3352 .rgba4 = 0xffff,
3353 .ui8[0 ... 3] = 0xff,
3354 .si8[0 ... 3] = 0x7f,
3355 .rgb10a2 = 0xffffffff,
3356 .z24 = 0xffffff,
3357 .srgb[0 ... 3] = 0x3c00,
3358 },
3359 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
3360 .fp32[0 ... 3] = 1,
3361 .fp16[0 ... 3] = 1,
3362 },
3363 };
3364
3365 static VkResult
3366 tu6_emit_border_color(struct tu_cmd_buffer *cmd,
3367 struct tu_cs *cs)
3368 {
3369 STATIC_ASSERT(sizeof(struct bcolor_entry) == 128);
3370
3371 const struct tu_pipeline *pipeline = cmd->state.pipeline;
3372 struct tu_descriptor_state *descriptors_state =
3373 &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];
3374 const struct tu_descriptor_map *vs_sampler =
3375 &pipeline->program.link[MESA_SHADER_VERTEX].sampler_map;
3376 const struct tu_descriptor_map *fs_sampler =
3377 &pipeline->program.link[MESA_SHADER_FRAGMENT].sampler_map;
3378 struct ts_cs_memory ptr;
3379
3380 VkResult result = tu_cs_alloc(&cmd->sub_cs,
3381 vs_sampler->num_desc + fs_sampler->num_desc,
3382 128 / 4,
3383 &ptr);
3384 if (result != VK_SUCCESS)
3385 return result;
3386
3387 for (unsigned i = 0; i < vs_sampler->num; i++) {
3388 for (unsigned j = 0; j < vs_sampler->array_size[i]; j++) {
3389 const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
3390 vs_sampler, i, j);
3391 memcpy(ptr.map, &border_color[sampler->border], 128);
3392 ptr.map += 128 / 4;
3393 }
3394 }
3395
3396 for (unsigned i = 0; i < fs_sampler->num; i++) {
3397 for (unsigned j = 0; j < fs_sampler->array_size[i]; j++) {
3398 const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
3399 fs_sampler, i, j);
3400 memcpy(ptr.map, &border_color[sampler->border], 128);
3401 ptr.map += 128 / 4;
3402 }
3403 }
3404
3405 tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_LO, 2);
3406 tu_cs_emit_qw(cs, ptr.iova);
3407 return VK_SUCCESS;
3408 }
3409
3410 static void
3411 tu6_emit_streamout(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
3412 {
3413 struct tu_streamout_state *tf = &cmd->state.pipeline->streamout;
3414
3415 for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
3416 struct tu_buffer *buf = cmd->state.streamout_buf.buffers[i];
3417 if (!buf)
3418 continue;
3419
3420 uint32_t offset;
3421 offset = cmd->state.streamout_buf.offsets[i];
3422
3423 tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_BASE(i, .bo = buf->bo,
3424 .bo_offset = buf->bo_offset));
3425 tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_SIZE(i, buf->size));
3426
3427 if (cmd->state.streamout_reset & (1 << i)) {
3428 offset *= tf->stride[i];
3429
3430 tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_OFFSET(i, offset));
3431 cmd->state.streamout_reset &= ~(1 << i);
3432 } else {
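              /* no offset reset is pending for this buffer, so reload the
               * write offset that the hardware saved to the scratch BO
               * (via VPC_SO_FLUSH_BASE below) on the previous flush
               */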
3433 tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
3434 tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(i)) |
3435 CP_MEM_TO_REG_0_SHIFT_BY_2 | CP_MEM_TO_REG_0_UNK31 |
3436 CP_MEM_TO_REG_0_CNT(0));
3437 tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_FLUSH * (i + 1));
3438 }
3439
3440 tu_cs_emit_regs(cs, A6XX_VPC_SO_FLUSH_BASE(i, .bo = &cmd->scratch_bo,
3441 .bo_offset = VSC_FLUSH * (i + 1)));
3442 }
3443
3444 if (cmd->state.streamout_enabled) {
3445 tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 12 + (2 * tf->prog_count));
3446 tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
3447 tu_cs_emit(cs, tf->vpc_so_buf_cntl);
3448 tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(0));
3449 tu_cs_emit(cs, tf->ncomp[0]);
3450 tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(1));
3451 tu_cs_emit(cs, tf->ncomp[1]);
3452 tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(2));
3453 tu_cs_emit(cs, tf->ncomp[2]);
3454 tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(3));
3455 tu_cs_emit(cs, tf->ncomp[3]);
3456 tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
3457 tu_cs_emit(cs, A6XX_VPC_SO_CNTL_ENABLE);
3458 for (unsigned i = 0; i < tf->prog_count; i++) {
3459 tu_cs_emit(cs, REG_A6XX_VPC_SO_PROG);
3460 tu_cs_emit(cs, tf->prog[i]);
3461 }
3462 } else {
3463 tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 4);
3464 tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
3465 tu_cs_emit(cs, 0);
3466 tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
3467 tu_cs_emit(cs, 0);
3468 }
3469 }
3470
3471 static VkResult
3472 tu6_bind_draw_states(struct tu_cmd_buffer *cmd,
3473 struct tu_cs *cs,
3474 const struct tu_draw_info *draw)
3475 {
3476 const struct tu_pipeline *pipeline = cmd->state.pipeline;
3477 const struct tu_dynamic_state *dynamic = &cmd->state.dynamic;
3478 struct tu_draw_state_group draw_state_groups[TU_DRAW_STATE_COUNT];
3479 uint32_t draw_state_group_count = 0;
3480 VkResult result;
3481
3482 struct tu_descriptor_state *descriptors_state =
3483 &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];
3484
3485 /* TODO lrz */
3486
3487 tu_cs_emit_regs(cs,
3488 A6XX_PC_PRIMITIVE_CNTL_0(.primitive_restart =
3489 pipeline->ia.primitive_restart && draw->indexed));
3490
3491 if (cmd->state.dirty &
3492 (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH) &&
3493 (pipeline->dynamic_state.mask & TU_DYNAMIC_LINE_WIDTH)) {
3494 tu6_emit_gras_su_cntl(cs, pipeline->rast.gras_su_cntl,
3495 dynamic->line_width);
3496 }
3497
3498 if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK) &&
3499 (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_COMPARE_MASK)) {
3500 tu6_emit_stencil_compare_mask(cs, dynamic->stencil_compare_mask.front,
3501 dynamic->stencil_compare_mask.back);
3502 }
3503
3504 if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK) &&
3505 (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_WRITE_MASK)) {
3506 tu6_emit_stencil_write_mask(cs, dynamic->stencil_write_mask.front,
3507 dynamic->stencil_write_mask.back);
3508 }
3509
3510 if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE) &&
3511 (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_REFERENCE)) {
3512 tu6_emit_stencil_reference(cs, dynamic->stencil_reference.front,
3513 dynamic->stencil_reference.back);
3514 }
3515
3516 if (cmd->state.dirty &
3517 (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_VERTEX_BUFFERS)) {
3518 for (uint32_t i = 0; i < pipeline->vi.count; i++) {
3519 const uint32_t binding = pipeline->vi.bindings[i];
3520 const uint32_t stride = pipeline->vi.strides[i];
3521 const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
3522 const VkDeviceSize offset = buf->bo_offset +
3523 cmd->state.vb.offsets[binding] +
3524 pipeline->vi.offsets[i];
3525 const VkDeviceSize size =
3526 offset < buf->bo->size ? buf->bo->size - offset : 0;
3527
3528 tu_cs_emit_regs(cs,
3529 A6XX_VFD_FETCH_BASE(i, .bo = buf->bo, .bo_offset = offset),
3530 A6XX_VFD_FETCH_SIZE(i, size),
3531 A6XX_VFD_FETCH_STRIDE(i, stride));
3532 }
3533 }
3534
3535 if (cmd->state.dirty & TU_CMD_DIRTY_PIPELINE) {
3536 draw_state_groups[draw_state_group_count++] =
3537 (struct tu_draw_state_group) {
3538 .id = TU_DRAW_STATE_PROGRAM,
3539 .enable_mask = ENABLE_DRAW,
3540 .ib = pipeline->program.state_ib,
3541 };
3542 draw_state_groups[draw_state_group_count++] =
3543 (struct tu_draw_state_group) {
3544 .id = TU_DRAW_STATE_PROGRAM_BINNING,
3545 .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
3546 .ib = pipeline->program.binning_state_ib,
3547 };
3548 draw_state_groups[draw_state_group_count++] =
3549 (struct tu_draw_state_group) {
3550 .id = TU_DRAW_STATE_VI,
3551 .enable_mask = ENABLE_DRAW,
3552 .ib = pipeline->vi.state_ib,
3553 };
3554 draw_state_groups[draw_state_group_count++] =
3555 (struct tu_draw_state_group) {
3556 .id = TU_DRAW_STATE_VI_BINNING,
3557 .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
3558 .ib = pipeline->vi.binning_state_ib,
3559 };
3560 draw_state_groups[draw_state_group_count++] =
3561 (struct tu_draw_state_group) {
3562 .id = TU_DRAW_STATE_VP,
3563 .enable_mask = ENABLE_ALL,
3564 .ib = pipeline->vp.state_ib,
3565 };
3566 draw_state_groups[draw_state_group_count++] =
3567 (struct tu_draw_state_group) {
3568 .id = TU_DRAW_STATE_RAST,
3569 .enable_mask = ENABLE_ALL,
3570 .ib = pipeline->rast.state_ib,
3571 };
3572 draw_state_groups[draw_state_group_count++] =
3573 (struct tu_draw_state_group) {
3574 .id = TU_DRAW_STATE_DS,
3575 .enable_mask = ENABLE_ALL,
3576 .ib = pipeline->ds.state_ib,
3577 };
3578 draw_state_groups[draw_state_group_count++] =
3579 (struct tu_draw_state_group) {
3580 .id = TU_DRAW_STATE_BLEND,
3581 .enable_mask = ENABLE_ALL,
3582 .ib = pipeline->blend.state_ib,
3583 };
3584 }
3585
3586 if (cmd->state.dirty &
3587 (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS | TU_CMD_DIRTY_PUSH_CONSTANTS)) {
3588 draw_state_groups[draw_state_group_count++] =
3589 (struct tu_draw_state_group) {
3590 .id = TU_DRAW_STATE_VS_CONST,
3591 .enable_mask = ENABLE_ALL,
3592 .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX)
3593 };
3594 draw_state_groups[draw_state_group_count++] =
3595 (struct tu_draw_state_group) {
3596 .id = TU_DRAW_STATE_FS_CONST,
3597 .enable_mask = ENABLE_DRAW,
3598 .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT)
3599 };
3600 }
3601
3602 if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS)
3603 tu6_emit_streamout(cmd, cs);
3604
3605 if (cmd->state.dirty &
3606 (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS)) {
3607 bool needs_border = false;
3608 struct tu_cs_entry vs_tex, fs_tex_sysmem, fs_tex_gmem, fs_ibo;
3609
3610 result = tu6_emit_textures(cmd, pipeline, descriptors_state,
3611 MESA_SHADER_VERTEX, &vs_tex, &needs_border,
3612 false);
3613 if (result != VK_SUCCESS)
3614 return result;
3615
3616 /* TODO: we could emit just one texture descriptor draw state when there
3617 * are no input attachments, which is the most common case. We could
3618 * also split out the sampler state, which doesn't change even for input
3619 * attachments.
3620 */
3621 result = tu6_emit_textures(cmd, pipeline, descriptors_state,
3622 MESA_SHADER_FRAGMENT, &fs_tex_sysmem,
3623 &needs_border, true);
3624 if (result != VK_SUCCESS)
3625 return result;
3626
3627 result = tu6_emit_textures(cmd, pipeline, descriptors_state,
3628 MESA_SHADER_FRAGMENT, &fs_tex_gmem,
3629 &needs_border, false);
3630 if (result != VK_SUCCESS)
3631 return result;
3632
3633 result = tu6_emit_ibo(cmd, pipeline, descriptors_state,
3634 MESA_SHADER_FRAGMENT, &fs_ibo);
3635 if (result != VK_SUCCESS)
3636 return result;
3637
3638 draw_state_groups[draw_state_group_count++] =
3639 (struct tu_draw_state_group) {
3640 .id = TU_DRAW_STATE_VS_TEX,
3641 .enable_mask = ENABLE_ALL,
3642 .ib = vs_tex,
3643 };
3644 draw_state_groups[draw_state_group_count++] =
3645 (struct tu_draw_state_group) {
3646 .id = TU_DRAW_STATE_FS_TEX_GMEM,
3647 .enable_mask = CP_SET_DRAW_STATE__0_GMEM,
3648 .ib = fs_tex_gmem,
3649 };
3650 draw_state_groups[draw_state_group_count++] =
3651 (struct tu_draw_state_group) {
3652 .id = TU_DRAW_STATE_FS_TEX_SYSMEM,
3653 .enable_mask = CP_SET_DRAW_STATE__0_SYSMEM,
3654 .ib = fs_tex_sysmem,
3655 };
3656 draw_state_groups[draw_state_group_count++] =
3657 (struct tu_draw_state_group) {
3658 .id = TU_DRAW_STATE_FS_IBO,
3659 .enable_mask = ENABLE_DRAW,
3660 .ib = fs_ibo,
3661 };
3662
3663 if (needs_border) {
3664 result = tu6_emit_border_color(cmd, cs);
3665 if (result != VK_SUCCESS)
3666 return result;
3667 }
3668 }
3669
3670 struct tu_cs_entry vs_params;
3671 result = tu6_emit_vs_params(cmd, draw, &vs_params);
3672 if (result != VK_SUCCESS)
3673 return result;
3674
3675 draw_state_groups[draw_state_group_count++] =
3676 (struct tu_draw_state_group) {
3677 .id = TU_DRAW_STATE_VS_PARAMS,
3678 .enable_mask = ENABLE_ALL,
3679 .ib = vs_params,
3680 };
3681
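   /* Each CP_SET_DRAW_STATE entry is 3 dwords: a control dword holding the
    * IB size, enable mask and group id, followed by the 64-bit IB address.
    * Empty IBs get the DISABLE bit so the group is cleared rather than
    * executed.
    */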
3682 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_group_count);
3683 for (uint32_t i = 0; i < draw_state_group_count; i++) {
3684 const struct tu_draw_state_group *group = &draw_state_groups[i];
3685 debug_assert((group->enable_mask & ~ENABLE_ALL) == 0);
3686 uint32_t cp_set_draw_state =
3687 CP_SET_DRAW_STATE__0_COUNT(group->ib.size / 4) |
3688 group->enable_mask |
3689 CP_SET_DRAW_STATE__0_GROUP_ID(group->id);
3690 uint64_t iova;
3691 if (group->ib.size) {
3692 iova = group->ib.bo->iova + group->ib.offset;
3693 } else {
3694 cp_set_draw_state |= CP_SET_DRAW_STATE__0_DISABLE;
3695 iova = 0;
3696 }
3697
3698 tu_cs_emit(cs, cp_set_draw_state);
3699 tu_cs_emit_qw(cs, iova);
3700 }
3701
3702 tu_cs_sanity_check(cs);
3703
3704 /* track BOs */
3705 if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) {
3706 for (uint32_t i = 0; i < MAX_VBS; i++) {
3707 const struct tu_buffer *buf = cmd->state.vb.buffers[i];
3708 if (buf)
3709 tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
3710 }
3711 }
3712 if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
3713 unsigned i;
3714 for_each_bit(i, descriptors_state->valid) {
3715 struct tu_descriptor_set *set = descriptors_state->sets[i];
3716 for (unsigned j = 0; j < set->layout->buffer_count; ++j)
3717 if (set->descriptors[j]) {
3718 tu_bo_list_add(&cmd->bo_list, set->descriptors[j],
3719 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
3720 }
3721 }
3722 }
3723 if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS) {
3724 for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
3725 const struct tu_buffer *buf = cmd->state.streamout_buf.buffers[i];
3726 if (buf) {
3727 tu_bo_list_add(&cmd->bo_list, buf->bo,
3728 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
3729 }
3730 }
3731 }
3732
3733 /* Fragment shader state overwrites compute shader state, so flag the
3734 * compute pipeline for re-emit.
3735 */
3736 cmd->state.dirty = TU_CMD_DIRTY_COMPUTE_PIPELINE;
3737 return VK_SUCCESS;
3738 }
3739
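/* Emit an indirect draw: the CP reads the draw parameters from
 * draw->indirect at draw->indirect_offset, using CP_DRAW_INDX_INDIRECT for
 * indexed draws and CP_DRAW_INDIRECT otherwise.
 */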
3740 static void
3741 tu6_emit_draw_indirect(struct tu_cmd_buffer *cmd,
3742 struct tu_cs *cs,
3743 const struct tu_draw_info *draw)
3744 {
3745 const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;
3746
3747 tu_cs_emit_regs(cs,
3748 A6XX_VFD_INDEX_OFFSET(draw->vertex_offset),
3749 A6XX_VFD_INSTANCE_START_OFFSET(draw->first_instance));
3750
3751 if (draw->indexed) {
3752 const enum a4xx_index_size index_size =
3753 tu6_index_size(cmd->state.index_type);
3754 const uint32_t index_bytes =
3755 (cmd->state.index_type == VK_INDEX_TYPE_UINT32) ? 4 : 2;
3756 const struct tu_buffer *index_buf = cmd->state.index_buffer;
3757 unsigned max_indices =
3758 (index_buf->size - cmd->state.index_offset) / index_bytes;
3759
3760 const uint32_t cp_draw_indx =
3761 CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
3762 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
3763 CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
3764 CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) | 0x2000;
3765
3766 tu_cs_emit_pkt7(cs, CP_DRAW_INDX_INDIRECT, 6);
3767 tu_cs_emit(cs, cp_draw_indx);
3768 tu_cs_emit_qw(cs, index_buf->bo->iova + cmd->state.index_offset);
3769 tu_cs_emit(cs, A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(max_indices));
3770 tu_cs_emit_qw(cs, draw->indirect->bo->iova + draw->indirect_offset);
3771 } else {
3772 const uint32_t cp_draw_indx =
3773 CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
3774 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
3775 CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) | 0x2000;
3776
3777 tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT, 3);
3778 tu_cs_emit(cs, cp_draw_indx);
3779 tu_cs_emit_qw(cs, draw->indirect->bo->iova + draw->indirect_offset);
3780 }
3781
3782 tu_bo_list_add(&cmd->bo_list, draw->indirect->bo, MSM_SUBMIT_BO_READ);
3783 }
3784
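/* Emit a direct draw with CP_DRAW_INDX_OFFSET, sourcing indices from the
 * bound index buffer when draw->indexed is set and using auto-generated
 * indices otherwise.
 */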
3785 static void
3786 tu6_emit_draw_direct(struct tu_cmd_buffer *cmd,
3787 struct tu_cs *cs,
3788 const struct tu_draw_info *draw)
3789 {
3790
3791 const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;
3792
3793 tu_cs_emit_regs(cs,
3794 A6XX_VFD_INDEX_OFFSET(draw->vertex_offset),
3795 A6XX_VFD_INSTANCE_START_OFFSET(draw->first_instance));
3796
3797 /* TODO hw binning */
3798 if (draw->indexed) {
3799 const enum a4xx_index_size index_size =
3800 tu6_index_size(cmd->state.index_type);
3801 const uint32_t index_bytes =
3802 (cmd->state.index_type == VK_INDEX_TYPE_UINT32) ? 4 : 2;
3803 const struct tu_buffer *buf = cmd->state.index_buffer;
3804 const VkDeviceSize offset = buf->bo_offset + cmd->state.index_offset +
3805 index_bytes * draw->first_index;
3806 const uint32_t size = index_bytes * draw->count;
3807
3808 const uint32_t cp_draw_indx =
3809 CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
3810 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
3811 CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
3812 CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) | 0x2000;
3813
3814 tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 7);
3815 tu_cs_emit(cs, cp_draw_indx);
3816 tu_cs_emit(cs, draw->instance_count);
3817 tu_cs_emit(cs, draw->count);
3818 tu_cs_emit(cs, 0x0); /* XXX */
3819 tu_cs_emit_qw(cs, buf->bo->iova + offset);
3820 tu_cs_emit(cs, size);
3821 } else {
3822 const uint32_t cp_draw_indx =
3823 CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
3824 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
3825 CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) | 0x2000;
3826
3827 tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
3828 tu_cs_emit(cs, cp_draw_indx);
3829 tu_cs_emit(cs, draw->instance_count);
3830 tu_cs_emit(cs, draw->count);
3831 }
3832 }
3833
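/* Common draw path for the vkCmdDraw* entry points: bind draw states, then
 * emit either an indirect or a direct draw into the draw command stream.
 */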
3834 static void
3835 tu_draw(struct tu_cmd_buffer *cmd, const struct tu_draw_info *draw)
3836 {
3837 struct tu_cs *cs = &cmd->draw_cs;
3838 VkResult result;
3839
3840 result = tu6_bind_draw_states(cmd, cs, draw);
3841 if (result != VK_SUCCESS) {
3842 cmd->record_result = result;
3843 return;
3844 }
3845
3846 if (draw->indirect)
3847 tu6_emit_draw_indirect(cmd, cs, draw);
3848 else
3849 tu6_emit_draw_direct(cmd, cs, draw);
3850
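   /* Flush the stream-out write offsets back to the scratch BO
    * (VPC_SO_FLUSH_BASE, programmed in tu6_emit_streamout()) so the next
    * draw can reload them and continue appending.
    */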
3851 if (cmd->state.streamout_enabled) {
3852 for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
3853 if (cmd->state.streamout_enabled & (1 << i))
3854 tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i, false);
3855 }
3856 }
3857
3858 cmd->wait_for_idle = true;
3859
3860 tu_cs_sanity_check(cs);
3861 }
3862
3863 void
3864 tu_CmdDraw(VkCommandBuffer commandBuffer,
3865 uint32_t vertexCount,
3866 uint32_t instanceCount,
3867 uint32_t firstVertex,
3868 uint32_t firstInstance)
3869 {
3870 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3871 struct tu_draw_info info = {};
3872
3873 info.count = vertexCount;
3874 info.instance_count = instanceCount;
3875 info.first_instance = firstInstance;
3876 info.vertex_offset = firstVertex;
3877
3878 tu_draw(cmd_buffer, &info);
3879 }
3880
3881 void
3882 tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
3883 uint32_t indexCount,
3884 uint32_t instanceCount,
3885 uint32_t firstIndex,
3886 int32_t vertexOffset,
3887 uint32_t firstInstance)
3888 {
3889 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3890 struct tu_draw_info info = {};
3891
3892 info.indexed = true;
3893 info.count = indexCount;
3894 info.instance_count = instanceCount;
3895 info.first_index = firstIndex;
3896 info.vertex_offset = vertexOffset;
3897 info.first_instance = firstInstance;
3898
3899 tu_draw(cmd_buffer, &info);
3900 }
3901
3902 void
3903 tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
3904 VkBuffer _buffer,
3905 VkDeviceSize offset,
3906 uint32_t drawCount,
3907 uint32_t stride)
3908 {
3909 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3910 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
3911 struct tu_draw_info info = {};
3912
3913 info.count = drawCount;
3914 info.indirect = buffer;
3915 info.indirect_offset = offset;
3916 info.stride = stride;
3917
3918 tu_draw(cmd_buffer, &info);
3919 }
3920
3921 void
3922 tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
3923 VkBuffer _buffer,
3924 VkDeviceSize offset,
3925 uint32_t drawCount,
3926 uint32_t stride)
3927 {
3928 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3929 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
3930 struct tu_draw_info info = {};
3931
3932 info.indexed = true;
3933 info.count = drawCount;
3934 info.indirect = buffer;
3935 info.indirect_offset = offset;
3936 info.stride = stride;
3937
3938 tu_draw(cmd_buffer, &info);
3939 }
3940
3941 struct tu_dispatch_info
3942 {
3943 /**
3944 * The number of workgroups (blocks) to dispatch in each dimension.
3945 */
3946 uint32_t blocks[3];
3947
3948 /**
3949 * A starting offset for the grid, in workgroups. Even when the dispatch
3950 * is unaligned, this offset must still be aligned.
3951 */
3952 uint32_t offsets[3];
3953 /**
3954 * Whether it's an unaligned compute dispatch.
3955 */
3956 bool unaligned;
3957
3958 /**
3959 * Indirect compute parameters resource.
3960 */
3961 struct tu_buffer *indirect;
3962 uint64_t indirect_offset;
3963 };
3964
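/* Upload the compute driver params (workgroup counts and local size) as
 * constants via CP_LOAD_STATE6, provided the shader's constlen reserves
 * space for them.  Indirect dispatches are not handled yet.
 */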
3965 static void
3966 tu_emit_compute_driver_params(struct tu_cs *cs, struct tu_pipeline *pipeline,
3967 const struct tu_dispatch_info *info)
3968 {
3969 gl_shader_stage type = MESA_SHADER_COMPUTE;
3970 const struct tu_program_descriptor_linkage *link =
3971 &pipeline->program.link[type];
3972 const struct ir3_const_state *const_state = &link->const_state;
3973 uint32_t offset = const_state->offsets.driver_param;
3974
3975 if (link->constlen <= offset)
3976 return;
3977
3978 if (!info->indirect) {
3979 uint32_t driver_params[IR3_DP_CS_COUNT] = {
3980 [IR3_DP_NUM_WORK_GROUPS_X] = info->blocks[0],
3981 [IR3_DP_NUM_WORK_GROUPS_Y] = info->blocks[1],
3982 [IR3_DP_NUM_WORK_GROUPS_Z] = info->blocks[2],
3983 [IR3_DP_LOCAL_GROUP_SIZE_X] = pipeline->compute.local_size[0],
3984 [IR3_DP_LOCAL_GROUP_SIZE_Y] = pipeline->compute.local_size[1],
3985 [IR3_DP_LOCAL_GROUP_SIZE_Z] = pipeline->compute.local_size[2],
3986 };
3987
3988 uint32_t num_consts = MIN2(const_state->num_driver_params,
3989 (link->constlen - offset) * 4);
3990 /* push constants */
3991 tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_consts);
3992 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
3993 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
3994 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
3995 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
3996 CP_LOAD_STATE6_0_NUM_UNIT(num_consts / 4));
3997 tu_cs_emit(cs, 0);
3998 tu_cs_emit(cs, 0);
3999 uint32_t i;
4000 for (i = 0; i < num_consts; i++)
4001 tu_cs_emit(cs, driver_params[i]);
4002 } else {
4003 tu_finishme("Indirect driver params");
4004 }
4005 }
4006
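/* Common dispatch path for the vkCmdDispatch* entry points: re-emit any
 * dirty compute pipeline and descriptor state into the main command stream,
 * program the HLSQ_CS_NDRANGE kernel dimensions and launch the grid with
 * CP_EXEC_CS or CP_EXEC_CS_INDIRECT.
 */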
4007 static void
4008 tu_dispatch(struct tu_cmd_buffer *cmd,
4009 const struct tu_dispatch_info *info)
4010 {
4011 struct tu_cs *cs = &cmd->cs;
4012 struct tu_pipeline *pipeline = cmd->state.compute_pipeline;
4013 struct tu_descriptor_state *descriptors_state =
4014 &cmd->descriptors[VK_PIPELINE_BIND_POINT_COMPUTE];
4015 VkResult result;
4016
4017 if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_PIPELINE)
4018 tu_cs_emit_ib(cs, &pipeline->program.state_ib);
4019
4020 struct tu_cs_entry ib;
4021
4022 ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE);
4023 if (ib.size)
4024 tu_cs_emit_ib(cs, &ib);
4025
4026 tu_emit_compute_driver_params(cs, pipeline, info);
4027
4028 bool needs_border;
4029 result = tu6_emit_textures(cmd, pipeline, descriptors_state,
4030 MESA_SHADER_COMPUTE, &ib, &needs_border, false);
4031 if (result != VK_SUCCESS) {
4032 cmd->record_result = result;
4033 return;
4034 }
4035
4036 if (ib.size)
4037 tu_cs_emit_ib(cs, &ib);
4038
4039 if (needs_border)
4040 tu_finishme("compute border color");
4041
4042 result = tu6_emit_ibo(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE, &ib);
4043 if (result != VK_SUCCESS) {
4044 cmd->record_result = result;
4045 return;
4046 }
4047
4048 if (ib.size)
4049 tu_cs_emit_ib(cs, &ib);
4050
4051 /* track BOs */
4052 if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
4053 unsigned i;
4054 for_each_bit(i, descriptors_state->valid) {
4055 struct tu_descriptor_set *set = descriptors_state->sets[i];
4056 for (unsigned j = 0; j < set->layout->buffer_count; ++j)
4057 if (set->descriptors[j]) {
4058 tu_bo_list_add(&cmd->bo_list, set->descriptors[j],
4059 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
4060 }
4061 }
4062 }
4063
4064 /* Compute shader state overwrites fragment shader state, so we flag the
4065 * graphics pipeline for re-emit.
4066 */
4067 cmd->state.dirty = TU_CMD_DIRTY_PIPELINE;
4068
4069 tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
4070 tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));
4071
4072 const uint32_t *local_size = pipeline->compute.local_size;
4073 const uint32_t *num_groups = info->blocks;
4074 tu_cs_emit_regs(cs,
4075 A6XX_HLSQ_CS_NDRANGE_0(.kerneldim = 3,
4076 .localsizex = local_size[0] - 1,
4077 .localsizey = local_size[1] - 1,
4078 .localsizez = local_size[2] - 1),
4079 A6XX_HLSQ_CS_NDRANGE_1(.globalsize_x = local_size[0] * num_groups[0]),
4080 A6XX_HLSQ_CS_NDRANGE_2(.globaloff_x = 0),
4081 A6XX_HLSQ_CS_NDRANGE_3(.globalsize_y = local_size[1] * num_groups[1]),
4082 A6XX_HLSQ_CS_NDRANGE_4(.globaloff_y = 0),
4083 A6XX_HLSQ_CS_NDRANGE_5(.globalsize_z = local_size[2] * num_groups[2]),
4084 A6XX_HLSQ_CS_NDRANGE_6(.globaloff_z = 0));
4085
4086 tu_cs_emit_regs(cs,
4087 A6XX_HLSQ_CS_KERNEL_GROUP_X(1),
4088 A6XX_HLSQ_CS_KERNEL_GROUP_Y(1),
4089 A6XX_HLSQ_CS_KERNEL_GROUP_Z(1));
4090
4091 if (info->indirect) {
4092 uint64_t iova = tu_buffer_iova(info->indirect) + info->indirect_offset;
4093
4094 tu_bo_list_add(&cmd->bo_list, info->indirect->bo,
4095 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
4096
4097 tu_cs_emit_pkt7(cs, CP_EXEC_CS_INDIRECT, 4);
4098 tu_cs_emit(cs, 0x00000000);
4099 tu_cs_emit_qw(cs, iova);
4100 tu_cs_emit(cs,
4101 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(local_size[0] - 1) |
4102 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(local_size[1] - 1) |
4103 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(local_size[2] - 1));
4104 } else {
4105 tu_cs_emit_pkt7(cs, CP_EXEC_CS, 4);
4106 tu_cs_emit(cs, 0x00000000);
4107 tu_cs_emit(cs, CP_EXEC_CS_1_NGROUPS_X(info->blocks[0]));
4108 tu_cs_emit(cs, CP_EXEC_CS_2_NGROUPS_Y(info->blocks[1]));
4109 tu_cs_emit(cs, CP_EXEC_CS_3_NGROUPS_Z(info->blocks[2]));
4110 }
4111
4112 tu_cs_emit_wfi(cs);
4113
4114 tu6_emit_cache_flush(cmd, cs);
4115 }
4116
4117 void
4118 tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
4119 uint32_t base_x,
4120 uint32_t base_y,
4121 uint32_t base_z,
4122 uint32_t x,
4123 uint32_t y,
4124 uint32_t z)
4125 {
4126 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
4127 struct tu_dispatch_info info = {};
4128
4129 info.blocks[0] = x;
4130 info.blocks[1] = y;
4131 info.blocks[2] = z;
4132
4133 info.offsets[0] = base_x;
4134 info.offsets[1] = base_y;
4135 info.offsets[2] = base_z;
4136 tu_dispatch(cmd_buffer, &info);
4137 }
4138
4139 void
4140 tu_CmdDispatch(VkCommandBuffer commandBuffer,
4141 uint32_t x,
4142 uint32_t y,
4143 uint32_t z)
4144 {
4145 tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
4146 }
4147
4148 void
4149 tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
4150 VkBuffer _buffer,
4151 VkDeviceSize offset)
4152 {
4153 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
4154 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
4155 struct tu_dispatch_info info = {};
4156
4157 info.indirect = buffer;
4158 info.indirect_offset = offset;
4159
4160 tu_dispatch(cmd_buffer, &info);
4161 }
4162
4163 void
4164 tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
4165 {
4166 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
4167
4168 tu_cs_end(&cmd_buffer->draw_cs);
4169 tu_cs_end(&cmd_buffer->draw_epilogue_cs);
4170
4171 if (use_sysmem_rendering(cmd_buffer))
4172 tu_cmd_render_sysmem(cmd_buffer);
4173 else
4174 tu_cmd_render_tiles(cmd_buffer);
4175
4176 /* discard draw_cs and draw_epilogue_cs entries now that the tiles are
4177  * rendered */
4178 tu_cs_discard_entries(&cmd_buffer->draw_cs);
4179 tu_cs_begin(&cmd_buffer->draw_cs);
4180 tu_cs_discard_entries(&cmd_buffer->draw_epilogue_cs);
4181 tu_cs_begin(&cmd_buffer->draw_epilogue_cs);
4182
4183 cmd_buffer->state.pass = NULL;
4184 cmd_buffer->state.subpass = NULL;
4185 cmd_buffer->state.framebuffer = NULL;
4186 }
4187
4188 void
4189 tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
4190 const VkSubpassEndInfoKHR *pSubpassEndInfo)
4191 {
4192 tu_CmdEndRenderPass(commandBuffer);
4193 }
4194
4195 struct tu_barrier_info
4196 {
4197 uint32_t eventCount;
4198 const VkEvent *pEvents;
4199 VkPipelineStageFlags srcStageMask;
4200 };
4201
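/* Barriers are not yet implemented; this is currently a no-op stub called
 * from vkCmdPipelineBarrier.
 */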
4202 static void
4203 tu_barrier(struct tu_cmd_buffer *cmd_buffer,
4204 uint32_t memoryBarrierCount,
4205 const VkMemoryBarrier *pMemoryBarriers,
4206 uint32_t bufferMemoryBarrierCount,
4207 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
4208 uint32_t imageMemoryBarrierCount,
4209 const VkImageMemoryBarrier *pImageMemoryBarriers,
4210 const struct tu_barrier_info *info)
4211 {
4212 }
4213
4214 void
4215 tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
4216 VkPipelineStageFlags srcStageMask,
4217 VkPipelineStageFlags destStageMask,
4218 VkBool32 byRegion,
4219 uint32_t memoryBarrierCount,
4220 const VkMemoryBarrier *pMemoryBarriers,
4221 uint32_t bufferMemoryBarrierCount,
4222 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
4223 uint32_t imageMemoryBarrierCount,
4224 const VkImageMemoryBarrier *pImageMemoryBarriers)
4225 {
4226 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
4227 struct tu_barrier_info info;
4228
4229 info.eventCount = 0;
4230 info.pEvents = NULL;
4231 info.srcStageMask = srcStageMask;
4232
4233 tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
4234 bufferMemoryBarrierCount, pBufferMemoryBarriers,
4235 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
4236 }
4237
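/* Write 'value' to the event BO from the command stream with CP_MEM_WRITE;
 * this backs vkCmdSetEvent (value = 1) and vkCmdResetEvent (value = 0).
 */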
4238 static void
4239 write_event(struct tu_cmd_buffer *cmd, struct tu_event *event, unsigned value)
4240 {
4241 struct tu_cs *cs = &cmd->cs;
4242
4243 tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_WRITE);
4244
4245 /* TODO: any flush required before/after? */
4246
4247 tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
4248 tu_cs_emit_qw(cs, event->bo.iova); /* ADDR_LO/HI */
4249 tu_cs_emit(cs, value);
4250 }
4251
4252 void
4253 tu_CmdSetEvent(VkCommandBuffer commandBuffer,
4254 VkEvent _event,
4255 VkPipelineStageFlags stageMask)
4256 {
4257 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
4258 TU_FROM_HANDLE(tu_event, event, _event);
4259
4260 write_event(cmd, event, 1);
4261 }
4262
4263 void
4264 tu_CmdResetEvent(VkCommandBuffer commandBuffer,
4265 VkEvent _event,
4266 VkPipelineStageFlags stageMask)
4267 {
4268 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
4269 TU_FROM_HANDLE(tu_event, event, _event);
4270
4271 write_event(cmd, event, 0);
4272 }
4273
4274 void
4275 tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
4276 uint32_t eventCount,
4277 const VkEvent *pEvents,
4278 VkPipelineStageFlags srcStageMask,
4279 VkPipelineStageFlags dstStageMask,
4280 uint32_t memoryBarrierCount,
4281 const VkMemoryBarrier *pMemoryBarriers,
4282 uint32_t bufferMemoryBarrierCount,
4283 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
4284 uint32_t imageMemoryBarrierCount,
4285 const VkImageMemoryBarrier *pImageMemoryBarriers)
4286 {
4287 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
4288 struct tu_cs *cs = &cmd->cs;
4289
4290 /* TODO: any flush required before/after? (CP_WAIT_FOR_ME?) */
4291
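   /* For each event, stall the CP until the event BO reads back 1, i.e.
    * until the corresponding CP_MEM_WRITE from write_event() has landed.
    */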
4292 for (uint32_t i = 0; i < eventCount; i++) {
4293 TU_FROM_HANDLE(tu_event, event, pEvents[i]);
4294
4295 tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_READ);
4296
4297 tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
4298 tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
4299 CP_WAIT_REG_MEM_0_POLL_MEMORY);
4300 tu_cs_emit_qw(cs, event->bo.iova); /* POLL_ADDR_LO/HI */
4301 tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(1));
4302 tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0u));
4303 tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(20));
4304 }
4305 }
4306
4307 void
4308 tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
4309 {
4310 /* No-op */
4311 }