/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "tu_private.h"

#include "registers/adreno_pm4.xml.h"
#include "registers/adreno_common.xml.h"

#include "vk_format.h"

#include "tu_cs.h"

#define OVERFLOW_FLAG_REG REG_A6XX_CP_SCRATCH_REG(0)
void
tu_bo_list_init(struct tu_bo_list *list)
{
   list->count = list->capacity = 0;
   list->bo_infos = NULL;
}
void
tu_bo_list_destroy(struct tu_bo_list *list)
{
   free(list->bo_infos);
}
void
tu_bo_list_reset(struct tu_bo_list *list)
{
   list->count = 0;
}
/**
 * \a flags consists of MSM_SUBMIT_BO_FLAGS.
 */
static uint32_t
tu_bo_list_add_info(struct tu_bo_list *list,
                    const struct drm_msm_gem_submit_bo *bo_info)
{
   assert(bo_info->handle != 0);

   for (uint32_t i = 0; i < list->count; ++i) {
      if (list->bo_infos[i].handle == bo_info->handle) {
         assert(list->bo_infos[i].presumed == bo_info->presumed);
         list->bo_infos[i].flags |= bo_info->flags;
         return i;
      }
   }
   /* grow list->bo_infos if needed */
   if (list->count == list->capacity) {
      uint32_t new_capacity = MAX2(2 * list->count, 16);
      struct drm_msm_gem_submit_bo *new_bo_infos = realloc(
         list->bo_infos, new_capacity * sizeof(struct drm_msm_gem_submit_bo));
      if (!new_bo_infos)
         return TU_BO_LIST_FAILED;
      list->bo_infos = new_bo_infos;
      list->capacity = new_capacity;
   }

   list->bo_infos[list->count] = *bo_info;
   return list->count++;
}
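/* Note: the list deduplicates by GEM handle and ORs the submit flags
 * together, so a BO referenced for both read and write ends up as a single
 * entry with both flags set.  TU_BO_LIST_FAILED is only returned on
 * allocation failure; tu_bo_list_merge() below maps it to
 * VK_ERROR_OUT_OF_HOST_MEMORY.
 */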
uint32_t
tu_bo_list_add(struct tu_bo_list *list,
               const struct tu_bo *bo,
               uint32_t flags)
{
   return tu_bo_list_add_info(list, &(struct drm_msm_gem_submit_bo) {
      .flags = flags,
      .handle = bo->gem_handle,
      .presumed = bo->iova,
   });
}
VkResult
tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other)
{
   for (uint32_t i = 0; i < other->count; i++) {
      if (tu_bo_list_add_info(list, other->bo_infos + i) == TU_BO_LIST_FAILED)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   return VK_SUCCESS;
}
static void
tu_tiling_config_update_tile_layout(struct tu_tiling_config *tiling,
                                    const struct tu_device *dev,
                                    const struct tu_render_pass *pass)
{
   const uint32_t tile_align_w = pass->tile_align_w;
   const uint32_t max_tile_width = 1024;

   /* note: don't offset the tiling config by render_area.offset,
    * because binning pass can't deal with it
    * this means we might end up with more tiles than necessary,
    * but load/store/etc are still scissored to the render_area
    */
   tiling->tile0.offset = (VkOffset2D) {};

   const uint32_t ra_width =
      tiling->render_area.extent.width +
      (tiling->render_area.offset.x - tiling->tile0.offset.x);
   const uint32_t ra_height =
      tiling->render_area.extent.height +
      (tiling->render_area.offset.y - tiling->tile0.offset.y);
   /* start from 1 tile */
   tiling->tile_count = (VkExtent2D) {
      .width = 1,
      .height = 1,
   };
   tiling->tile0.extent = (VkExtent2D) {
      .width = util_align_npot(ra_width, tile_align_w),
      .height = align(ra_height, TILE_ALIGN_H),
   };

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN)) {
      /* start with 2x2 tiles */
      tiling->tile_count.width = 2;
      tiling->tile_count.height = 2;
      tiling->tile0.extent.width = util_align_npot(DIV_ROUND_UP(ra_width, 2), tile_align_w);
      tiling->tile0.extent.height = align(DIV_ROUND_UP(ra_height, 2), TILE_ALIGN_H);
   }
   /* do not exceed max tile width */
   while (tiling->tile0.extent.width > max_tile_width) {
      tiling->tile_count.width++;
      tiling->tile0.extent.width =
         util_align_npot(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
   }
   /* will force to sysmem, don't bother trying to have a valid tile config
    * TODO: just skip all GMEM stuff when sysmem is forced?
    */
   if (!pass->gmem_pixels)
      return;

   /* do not exceed gmem size */
   while (tiling->tile0.extent.width * tiling->tile0.extent.height > pass->gmem_pixels) {
      if (tiling->tile0.extent.width > MAX2(tile_align_w, tiling->tile0.extent.height)) {
         tiling->tile_count.width++;
         tiling->tile0.extent.width =
            util_align_npot(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
      } else {
         /* if this assert fails then layout is impossible.. */
         assert(tiling->tile0.extent.height > TILE_ALIGN_H);
         tiling->tile_count.height++;
         tiling->tile0.extent.height =
            align(DIV_ROUND_UP(ra_height, tiling->tile_count.height), TILE_ALIGN_H);
      }
   }
}
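/* Worked example (a sketch; assumes tile_align_w = 32 and TILE_ALIGN_H = 16,
 * typical a6xx values): for a 1920x1080 render area at offset (0,0), the
 * initial single tile is 1920x1088.  Since 1920 exceeds max_tile_width, the
 * width is split once, giving a 2x1 tile_count with 960x1088 tiles.  The
 * gmem loop above then keeps splitting along the longer axis until
 * width * height fits in pass->gmem_pixels.
 */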
static void
tu_tiling_config_update_pipe_layout(struct tu_tiling_config *tiling,
                                    const struct tu_device *dev)
{
   const uint32_t max_pipe_count = 32; /* A6xx */

   /* start from 1 tile per pipe */
   tiling->pipe0 = (VkExtent2D) {
      .width = 1,
      .height = 1,
   };
   tiling->pipe_count = tiling->tile_count;

   while (tiling->pipe_count.width * tiling->pipe_count.height > max_pipe_count) {
      if (tiling->pipe0.width < tiling->pipe0.height) {
         tiling->pipe0.width += 1;
         tiling->pipe_count.width =
            DIV_ROUND_UP(tiling->tile_count.width, tiling->pipe0.width);
      } else {
         tiling->pipe0.height += 1;
         tiling->pipe_count.height =
            DIV_ROUND_UP(tiling->tile_count.height, tiling->pipe0.height);
      }
   }
}
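/* The loop above widens pipe0 along whichever axis is currently smaller, so
 * the pipe grid stays roughly square while the total pipe count is reduced
 * to the 32 VSC pipes the hardware provides.
 */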
static void
tu_tiling_config_update_pipes(struct tu_tiling_config *tiling,
                              const struct tu_device *dev)
{
   const uint32_t max_pipe_count = 32; /* A6xx */
   const uint32_t used_pipe_count =
      tiling->pipe_count.width * tiling->pipe_count.height;
   const VkExtent2D last_pipe = {
      .width = (tiling->tile_count.width - 1) % tiling->pipe0.width + 1,
      .height = (tiling->tile_count.height - 1) % tiling->pipe0.height + 1,
   };

   assert(used_pipe_count <= max_pipe_count);
   assert(max_pipe_count <= ARRAY_SIZE(tiling->pipe_config));

   for (uint32_t y = 0; y < tiling->pipe_count.height; y++) {
      for (uint32_t x = 0; x < tiling->pipe_count.width; x++) {
         const uint32_t pipe_x = tiling->pipe0.width * x;
         const uint32_t pipe_y = tiling->pipe0.height * y;
         const uint32_t pipe_w = (x == tiling->pipe_count.width - 1)
                                    ? last_pipe.width
                                    : tiling->pipe0.width;
         const uint32_t pipe_h = (y == tiling->pipe_count.height - 1)
                                    ? last_pipe.height
                                    : tiling->pipe0.height;
         const uint32_t n = tiling->pipe_count.width * y + x;

         tiling->pipe_config[n] = A6XX_VSC_PIPE_CONFIG_REG_X(pipe_x) |
                                  A6XX_VSC_PIPE_CONFIG_REG_Y(pipe_y) |
                                  A6XX_VSC_PIPE_CONFIG_REG_W(pipe_w) |
                                  A6XX_VSC_PIPE_CONFIG_REG_H(pipe_h);
         tiling->pipe_sizes[n] = CP_SET_BIN_DATA5_0_VSC_SIZE(pipe_w * pipe_h);
      }
   }

   memset(tiling->pipe_config + used_pipe_count, 0,
          sizeof(uint32_t) * (max_pipe_count - used_pipe_count));
}
static void
tu_tiling_config_get_tile(const struct tu_tiling_config *tiling,
                          const struct tu_device *dev,
                          uint32_t tx,
                          uint32_t ty,
                          struct tu_tile *tile)
{
   /* find the pipe and the slot for tile (tx, ty) */
   const uint32_t px = tx / tiling->pipe0.width;
   const uint32_t py = ty / tiling->pipe0.height;
   const uint32_t sx = tx - tiling->pipe0.width * px;
   const uint32_t sy = ty - tiling->pipe0.height * py;
   /* last pipe has different width */
   const uint32_t pipe_width =
      MIN2(tiling->pipe0.width,
           tiling->tile_count.width - px * tiling->pipe0.width);

   assert(tx < tiling->tile_count.width && ty < tiling->tile_count.height);
   assert(px < tiling->pipe_count.width && py < tiling->pipe_count.height);
   assert(sx < tiling->pipe0.width && sy < tiling->pipe0.height);

   /* convert to 1D indices */
   tile->pipe = tiling->pipe_count.width * py + px;
   tile->slot = pipe_width * sy + sx;

   /* get the blit area for the tile */
   tile->begin = (VkOffset2D) {
      .x = tiling->tile0.offset.x + tiling->tile0.extent.width * tx,
      .y = tiling->tile0.offset.y + tiling->tile0.extent.height * ty,
   };
   tile->end.x =
      (tx == tiling->tile_count.width - 1)
         ? tiling->render_area.offset.x + tiling->render_area.extent.width
         : tile->begin.x + tiling->tile0.extent.width;
   tile->end.y =
      (ty == tiling->tile_count.height - 1)
         ? tiling->render_area.offset.y + tiling->render_area.extent.height
         : tile->begin.y + tiling->tile0.extent.height;
}
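/* Indexing sketch (hypothetical numbers): with pipe0 = 2x2 tiles and a 4x2
 * tile grid, tile (tx, ty) = (3, 1) lands in pipe (px, py) = (1, 0) at slot
 * (sx, sy) = (1, 1), so tile->slot = pipe_width * sy + sx = 2 * 1 + 1 = 3.
 * The resulting pipe/slot pair is what tu6_emit_tile_select() feeds to
 * CP_SET_BIN_DATA5.
 */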
enum a3xx_msaa_samples
tu_msaa_samples(uint32_t samples)
{
   switch (samples) {
   case 1:
      return MSAA_ONE;
   case 2:
      return MSAA_TWO;
   case 4:
      return MSAA_FOUR;
   case 8:
      return MSAA_EIGHT;
   default:
      assert(!"invalid sample count");
      return MSAA_ONE;
   }
}
static enum a4xx_index_size
tu6_index_size(VkIndexType type)
{
   switch (type) {
   case VK_INDEX_TYPE_UINT16:
      return INDEX4_SIZE_16_BIT;
   case VK_INDEX_TYPE_UINT32:
      return INDEX4_SIZE_32_BIT;
   default:
      unreachable("invalid VkIndexType");
      return INDEX4_SIZE_8_BIT;
   }
}
unsigned
tu6_emit_event_write(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     enum vgt_event_type event,
                     bool need_seqno)
{
   unsigned seqno = 0;

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
   tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
   if (need_seqno) {
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
      seqno = ++cmd->scratch_seqno;
      tu_cs_emit(cs, seqno);
   }

   return seqno;
}
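/* When need_seqno is set, the CP writes an incrementing fence value into the
 * command buffer's scratch BO; tu6_cache_flush() below waits on that value
 * with CP_WAIT_REG_MEM / CP_WAIT_MEM_GTE to order the flush against
 * preceding work.
 */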
static void
tu6_emit_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu6_emit_event_write(cmd, cs, 0x31, false);
}
static void
tu6_emit_lrz_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu6_emit_event_write(cmd, cs, LRZ_FLUSH, false);
}
static void
tu6_emit_wfi(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   if (cmd->wait_for_idle) {
      tu_cs_emit_wfi(cs);
      cmd->wait_for_idle = false;
   }
}
static void
tu6_emit_zs(struct tu_cmd_buffer *cmd,
            const struct tu_subpass *subpass,
            struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   const uint32_t a = subpass->depth_stencil_attachment.attachment;
   if (a == VK_ATTACHMENT_UNUSED) {
      tu_cs_emit_regs(cs,
                      A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE),
                      A6XX_RB_DEPTH_BUFFER_PITCH(0),
                      A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(0),
                      A6XX_RB_DEPTH_BUFFER_BASE(0),
                      A6XX_RB_DEPTH_BUFFER_BASE_GMEM(0));

      tu_cs_emit_regs(cs,
                      A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE));

      tu_cs_emit_regs(cs,
                      A6XX_GRAS_LRZ_BUFFER_BASE(0),
                      A6XX_GRAS_LRZ_BUFFER_PITCH(0),
                      A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));

      tu_cs_emit_regs(cs, A6XX_RB_STENCIL_INFO(0));

      return;
   }

   const struct tu_image_view *iview = fb->attachments[a].attachment;
   const struct tu_render_pass_attachment *attachment =
      &cmd->state.pass->attachments[a];
   enum a6xx_depth_format fmt = tu6_pipe2depth(attachment->format);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
   tu_cs_emit(cs, A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt).value);
   tu_cs_image_ref(cs, iview, 0);
   tu_cs_emit(cs, attachment->gmem_offset);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = fmt));

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
   tu_cs_image_flag_ref(cs, iview, 0);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_BUFFER_BASE(0),
                   A6XX_GRAS_LRZ_BUFFER_PITCH(0),
                   A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));

   if (attachment->format == VK_FORMAT_S8_UINT) {
      tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCIL_INFO, 6);
      tu_cs_emit(cs, A6XX_RB_STENCIL_INFO(.separate_stencil = true).value);
      tu_cs_image_ref(cs, iview, 0);
      tu_cs_emit(cs, attachment->gmem_offset);
   } else {
      tu_cs_emit_regs(cs,
                      A6XX_RB_STENCIL_INFO(0));
   }
}
static void
tu6_emit_mrt(struct tu_cmd_buffer *cmd,
             const struct tu_subpass *subpass,
             struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   for (uint32_t i = 0; i < subpass->color_count; ++i) {
      uint32_t a = subpass->color_attachments[i].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      const struct tu_image_view *iview = fb->attachments[a].attachment;

      tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_BUF_INFO(i), 6);
      tu_cs_emit(cs, iview->RB_MRT_BUF_INFO);
      tu_cs_image_ref(cs, iview, 0);
      tu_cs_emit(cs, cmd->state.pass->attachments[a].gmem_offset);

      tu_cs_emit_regs(cs,
                      A6XX_SP_FS_MRT_REG(i, .dword = iview->SP_FS_MRT_REG));

      tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_LO(i), 3);
      tu_cs_image_flag_ref(cs, iview, 0);
   }

   tu_cs_emit_regs(cs,
                   A6XX_RB_SRGB_CNTL(.dword = subpass->srgb_cntl));
   tu_cs_emit_regs(cs,
                   A6XX_SP_SRGB_CNTL(.dword = subpass->srgb_cntl));

   tu_cs_emit_regs(cs, A6XX_GRAS_MAX_LAYER_INDEX(fb->layers - 1));
}
void
tu6_emit_msaa(struct tu_cs *cs, VkSampleCountFlagBits vk_samples)
{
   const enum a3xx_msaa_samples samples = tu_msaa_samples(vk_samples);
   bool msaa_disable = samples == MSAA_ONE;

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_RAS_MSAA_CNTL(samples),
                   A6XX_SP_TP_DEST_MSAA_CNTL(.samples = samples,
                                             .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_RAS_MSAA_CNTL(samples),
                   A6XX_GRAS_DEST_MSAA_CNTL(.samples = samples,
                                            .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_RB_RAS_MSAA_CNTL(samples),
                   A6XX_RB_DEST_MSAA_CNTL(.samples = samples,
                                          .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_RB_MSAA_CNTL(samples));
}
static void
tu6_emit_bin_size(struct tu_cs *cs,
                  uint32_t bin_w, uint32_t bin_h, uint32_t flags)
{
   tu_cs_emit_regs(cs,
                   A6XX_GRAS_BIN_CONTROL(.binw = bin_w,
                                         .binh = bin_h,
                                         .dword = flags));

   tu_cs_emit_regs(cs,
                   A6XX_RB_BIN_CONTROL(.binw = bin_w,
                                       .binh = bin_h,
                                       .dword = flags));

   /* no flag for RB_BIN_CONTROL2... */
   tu_cs_emit_regs(cs,
                   A6XX_RB_BIN_CONTROL2(.binw = bin_w,
                                        .binh = bin_h));
}
static void
tu6_emit_render_cntl(struct tu_cmd_buffer *cmd,
                     const struct tu_subpass *subpass,
                     struct tu_cs *cs,
                     bool binning)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   uint32_t cntl = 0;
   cntl |= A6XX_RB_RENDER_CNTL_UNK4;
   if (binning) {
      cntl |= A6XX_RB_RENDER_CNTL_BINNING;
   } else {
      uint32_t mrts_ubwc_enable = 0;
      for (uint32_t i = 0; i < subpass->color_count; ++i) {
         uint32_t a = subpass->color_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         const struct tu_image_view *iview = fb->attachments[a].attachment;
         if (iview->ubwc_enabled)
            mrts_ubwc_enable |= 1 << i;
      }

      cntl |= A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable);

      const uint32_t a = subpass->depth_stencil_attachment.attachment;
      if (a != VK_ATTACHMENT_UNUSED) {
         const struct tu_image_view *iview = fb->attachments[a].attachment;
         if (iview->ubwc_enabled)
            cntl |= A6XX_RB_RENDER_CNTL_FLAG_DEPTH;
      }

      /* In the !binning case, we need to set RB_RENDER_CNTL in the draw_cs
       * in order to set it correctly for the different subpasses. However,
       * that means the packets we're emitting also happen during binning. So
       * we need to guard the write on !BINNING at CP execution time.
       */
      tu_cs_reserve(cs, 3 + 4);
      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
      tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
                     CP_COND_REG_EXEC_0_GMEM | CP_COND_REG_EXEC_0_SYSMEM);
      tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(4));
   }

   tu_cs_emit_pkt7(cs, CP_REG_WRITE, 3);
   tu_cs_emit(cs, CP_REG_WRITE_0_TRACKER(TRACK_RENDER_CNTL));
   tu_cs_emit(cs, REG_A6XX_RB_RENDER_CNTL);
   tu_cs_emit(cs, cntl);
}
static void
tu6_emit_blit_scissor(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool align)
{
   const VkRect2D *render_area = &cmd->state.tiling_config.render_area;
   uint32_t x1 = render_area->offset.x;
   uint32_t y1 = render_area->offset.y;
   uint32_t x2 = x1 + render_area->extent.width - 1;
   uint32_t y2 = y1 + render_area->extent.height - 1;

   if (align) {
      x1 = x1 & ~(GMEM_ALIGN_W - 1);
      y1 = y1 & ~(GMEM_ALIGN_H - 1);
      x2 = ALIGN_POT(x2 + 1, GMEM_ALIGN_W) - 1;
      y2 = ALIGN_POT(y2 + 1, GMEM_ALIGN_H) - 1;
   }

   tu_cs_emit_regs(cs,
                   A6XX_RB_BLIT_SCISSOR_TL(.x = x1, .y = y1),
                   A6XX_RB_BLIT_SCISSOR_BR(.x = x2, .y = y2));
}
void
tu6_emit_window_scissor(struct tu_cs *cs,
                        uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2)
{
   tu_cs_emit_regs(cs,
                   A6XX_GRAS_SC_WINDOW_SCISSOR_TL(.x = x1, .y = y1),
                   A6XX_GRAS_SC_WINDOW_SCISSOR_BR(.x = x2, .y = y2));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_RESOLVE_CNTL_1(.x = x1, .y = y1),
                   A6XX_GRAS_RESOLVE_CNTL_2(.x = x2, .y = y2));
}
void
tu6_emit_window_offset(struct tu_cs *cs, uint32_t x1, uint32_t y1)
{
   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET2(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_SP_WINDOW_OFFSET(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_WINDOW_OFFSET(.x = x1, .y = y1));
}
static bool
use_hw_binning(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_NOBIN))
      return false;

   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN))
      return true;

   return (tiling->tile_count.width * tiling->tile_count.height) > 2;
}
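/* Heuristic: hardware binning only pays off once there are enough tiles for
 * the visibility stream to skip meaningful work, hence the "> 2 tiles"
 * cutoff (modulo the NOBIN/FORCEBIN debug overrides above).
 */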
static bool
use_sysmem_rendering(struct tu_cmd_buffer *cmd)
{
   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_SYSMEM))
      return true;

   /* can't fit attachments into gmem */
   if (!cmd->state.pass->gmem_pixels)
      return true;

   if (cmd->state.framebuffer->layers > 1)
      return true;

   return cmd->state.tiling_config.force_sysmem;
}
static void
tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     const struct tu_tile *tile)
{
   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_YIELD));

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));

   const uint32_t x1 = tile->begin.x;
   const uint32_t y1 = tile->begin.y;
   const uint32_t x2 = tile->end.x - 1;
   const uint32_t y2 = tile->end.y - 1;
   tu6_emit_window_scissor(cs, x1, y1, x2, y2);
   tu6_emit_window_offset(cs, x1, y1);

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_OVERRIDE(.so_disable = false));

   if (use_hw_binning(cmd)) {
      tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
      tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
                     A6XX_CP_REG_TEST_0_BIT(0) |
                     A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

      tu_cs_reserve(cs, 3 + 11);
      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
      tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
      tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(11));

      /* if (no overflow) */ {
         tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5, 7);
         tu_cs_emit(cs, cmd->state.tiling_config.pipe_sizes[tile->pipe] |
                        CP_SET_BIN_DATA5_0_VSC_N(tile->slot));
         tu_cs_emit_qw(cs, cmd->vsc_draw_strm.iova + tile->pipe * cmd->vsc_draw_strm_pitch);
         tu_cs_emit_qw(cs, cmd->vsc_draw_strm.iova + (tile->pipe * 4) + (32 * cmd->vsc_draw_strm_pitch));
         tu_cs_emit_qw(cs, cmd->vsc_prim_strm.iova + (tile->pipe * cmd->vsc_prim_strm_pitch));

         tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
         tu_cs_emit(cs, 0x0);

         /* use a NOP packet to skip over the 'else' side: */
         tu_cs_emit_pkt7(cs, CP_NOP, 2);
      } /* else */ {
         tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
         tu_cs_emit(cs, 0x1);
      }

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);
   } else {
      tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
      tu_cs_emit(cs, 0x1);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);
   }
}
static void
tu6_emit_sysmem_resolve(struct tu_cmd_buffer *cmd,
                        struct tu_cs *cs,
                        uint32_t a,
                        uint32_t gmem_a)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   struct tu_image_view *dst = fb->attachments[a].attachment;
   struct tu_image_view *src = fb->attachments[gmem_a].attachment;

   tu_resolve_sysmem(cmd, cs, src, dst, fb->layers, &cmd->state.tiling_config.render_area);
}
static void
tu6_emit_tile_store(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_render_pass *pass = cmd->state.pass;
   const struct tu_subpass *subpass = &pass->subpasses[pass->subpass_count-1];

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE));

   tu6_emit_blit_scissor(cmd, cs, true);

   for (uint32_t a = 0; a < pass->attachment_count; ++a) {
      if (pass->attachments[a].gmem_offset >= 0)
         tu_store_gmem_attachment(cmd, cs, a, a);
   }

   if (subpass->resolve_attachments) {
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a != VK_ATTACHMENT_UNUSED)
            tu_store_gmem_attachment(cmd, cs, a,
                                     subpass->color_attachments[i].attachment);
      }
   }
}
static void
tu6_emit_restart_index(struct tu_cs *cs, uint32_t restart_index)
{
   tu_cs_emit_regs(cs,
                   A6XX_PC_RESTART_INDEX(restart_index));
}
static void
tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_physical_device *phys_dev = cmd->device->physical_device;

   tu6_emit_cache_flush(cmd, cs);

   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 0xfffff);

   tu_cs_emit_regs(cs,
                   A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_bypass));
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE0F, 0x3f);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B605, 0x44);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B600, 0x100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE00, 0x80);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE01, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9600, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8600, 0x880);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE04, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE03, 0x00000410);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_IBO_COUNT, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B182, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BB11, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A982, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A9A8, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AB00, 0x5);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_GS_SIV_CNTL, 0x0000ffff);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x1f);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_SRGB_CNTL, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8110, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL0, 0x401);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL1, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8818, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8819, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881A, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881C, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881D, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881E, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_88F0, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9101, 0xffff00);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9107, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9236,
                        A6XX_VPC_UNKNOWN_9236_POINT_COORD_INVERT(0));
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_SO_OVERRIDE,
                        A6XX_VPC_SO_OVERRIDE_SO_DISABLE);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9801, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9980, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9990, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9B07, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A81B, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B183, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8099, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_809B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A0, 2);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80AF, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9211, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9602, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9981, 0x3);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9E72, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9108, 0x3);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B309, 0x000000a2);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8878, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8879, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_MODE_CNTL, 0x00000000);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A008, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x0000001f);

   /* we don't use this yet.. probably best to disable.. */
   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   /* Set not to use streamout by default, */
   tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 4);
   tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
   tu_cs_emit(cs, 0);
   tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
   tu_cs_emit(cs, 0);

   tu_cs_emit_regs(cs,
                   A6XX_SP_HS_CTRL_REG0(0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_GS_CTRL_REG0(0));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_CNTL(0));

   tu_cs_emit_regs(cs,
                   A6XX_RB_LRZ_CNTL(0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(.bo = &cmd->device->border_color));
   tu_cs_emit_regs(cs,
                   A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(.bo = &cmd->device->border_color));

   tu_cs_sanity_check(cs);
}
static void
tu6_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   unsigned seqno;

   seqno = tu6_emit_event_write(cmd, cs, RB_DONE_TS, true);

   tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
   tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                  CP_WAIT_REG_MEM_0_POLL_MEMORY);
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
   tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(seqno));
   tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
   tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));

   seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);

   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_GTE, 4);
   tu_cs_emit(cs, CP_WAIT_MEM_GTE_0_RESERVED(0));
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
   tu_cs_emit(cs, CP_WAIT_MEM_GTE_3_REF(seqno));
}
static void
update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tu_cs_emit_regs(cs,
                   A6XX_VSC_BIN_SIZE(.width = tiling->tile0.extent.width,
                                     .height = tiling->tile0.extent.height),
                   A6XX_VSC_DRAW_STRM_SIZE_ADDRESS(.bo = &cmd->vsc_draw_strm,
                                                   .bo_offset = 32 * cmd->vsc_draw_strm_pitch));

   tu_cs_emit_regs(cs,
                   A6XX_VSC_BIN_COUNT(.nx = tiling->tile_count.width,
                                      .ny = tiling->tile_count.height));

   tu_cs_emit_pkt4(cs, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
   for (unsigned i = 0; i < 32; i++)
      tu_cs_emit(cs, tiling->pipe_config[i]);

   tu_cs_emit_regs(cs,
                   A6XX_VSC_PRIM_STRM_ADDRESS(.bo = &cmd->vsc_prim_strm),
                   A6XX_VSC_PRIM_STRM_PITCH(cmd->vsc_prim_strm_pitch),
                   A6XX_VSC_PRIM_STRM_ARRAY_PITCH(cmd->vsc_prim_strm.size));

   tu_cs_emit_regs(cs,
                   A6XX_VSC_DRAW_STRM_ADDRESS(.bo = &cmd->vsc_draw_strm),
                   A6XX_VSC_DRAW_STRM_PITCH(cmd->vsc_draw_strm_pitch),
                   A6XX_VSC_DRAW_STRM_ARRAY_PITCH(cmd->vsc_draw_strm.size));
}
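/* Buffer layout note: each pipe gets one pitch-sized slice of the draw/prim
 * stream BOs, and the per-pipe stream sizes are written back starting at
 * offset 32 * pitch into the draw-stream BO (VSC_DRAW_STRM_SIZE_ADDRESS
 * above), which is where tu6_emit_tile_select() reads them from for
 * CP_SET_BIN_DATA5.
 */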
static void
emit_vsc_overflow_test(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
   const uint32_t used_pipe_count =
      tiling->pipe_count.width * tiling->pipe_count.height;

   /* Clear vsc_scratch: */
   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
   tu_cs_emit(cs, 0x0);

   /* Check for overflow, write vsc_scratch if detected: */
   for (int i = 0; i < used_pipe_count; i++) {
      tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
      tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
                     CP_COND_WRITE5_0_WRITE_MEMORY);
      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_DRAW_STRM_SIZE_REG(i)));
      tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_draw_strm_pitch));
      tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(1 + cmd->vsc_draw_strm_pitch));

      tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
      tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
                     CP_COND_WRITE5_0_WRITE_MEMORY);
      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_PRIM_STRM_SIZE_REG(i)));
      tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_prim_strm_pitch));
      tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(3 + cmd->vsc_prim_strm_pitch));
   }

   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);

   tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

   tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
   tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(OVERFLOW_FLAG_REG) |
                  CP_MEM_TO_REG_0_CNT(1 - 1));
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));

   /*
    * This is a bit awkward, we really want a way to invert the
    * CP_REG_TEST/CP_COND_REG_EXEC logic, so that we can conditionally
    * execute cmds to use hwbinning when a bit is *not* set.  This
    * dance is to invert OVERFLOW_FLAG_REG
    *
    * A CP_NOP packet is used to skip executing the 'else' clause
    * if (b0 set)..
    */

   /* b0 will be set if VSC_DRAW_STRM or VSC_PRIM_STRM overflow: */
   tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
   tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
                  A6XX_CP_REG_TEST_0_BIT(0) |
                  A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

   tu_cs_reserve(cs, 3 + 7);
   tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
   tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
   tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(7));

   /* if (b0 set) */ {
      /*
       * On overflow, mirror the value to control->vsc_overflow
       * which CPU is checking to detect overflow (see
       * check_vsc_overflow())
       */
      tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
      tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(OVERFLOW_FLAG_REG) |
                     CP_REG_TO_MEM_0_CNT(0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_overflow));

      tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_pkt7(cs, CP_NOP, 2); /* skip 'else' when 'if' is taken */
   } /* else */ {
      tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
      tu_cs_emit(cs, 0x1);
   }
}
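/* In pseudocode, the sequence above amounts to (a sketch):
 *
 *    vsc_scratch = 0;
 *    for (each used pipe i) {
 *       if (DRAW_STRM_SIZE(i) >= draw_pitch) vsc_scratch = 1 + draw_pitch;
 *       if (PRIM_STRM_SIZE(i) >= prim_pitch) vsc_scratch = 3 + prim_pitch;
 *    }
 *    OVERFLOW_FLAG_REG = vsc_scratch;
 *    if (OVERFLOW_FLAG_REG & 1) { control->vsc_overflow = OVERFLOW_FLAG_REG;
 *                                 OVERFLOW_FLAG_REG = 0; }
 *    else                       { OVERFLOW_FLAG_REG = 1; }
 *
 * i.e. after this block bit 0 of OVERFLOW_FLAG_REG means "no overflow",
 * which is what the CP_REG_TEST in tu6_emit_tile_select() and
 * tu6_render_tile() predicates on.
 */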
static void
tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_physical_device *phys_dev = cmd->device->physical_device;
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   uint32_t x1 = tiling->tile0.offset.x;
   uint32_t y1 = tiling->tile0.offset.y;
   uint32_t x2 = tiling->render_area.offset.x + tiling->render_area.extent.width - 1;
   uint32_t y2 = tiling->render_area.offset.y + tiling->render_area.extent.height - 1;

   tu6_emit_window_scissor(cs, x1, y1, x2, y2);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_wfi(cs);

   tu_cs_emit_regs(cs,
                   A6XX_VFD_MODE_CNTL(.binning_pass = true));

   update_vsc_pipe(cmd, cs);

   tu_cs_emit_regs(cs,
                   A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));

   tu_cs_emit_regs(cs,
                   A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
   tu_cs_emit(cs, UNK_2C);

   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET(.x = 0, .y = 0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_WINDOW_OFFSET(.x = 0, .y = 0));

   /* emit IB to binning drawcmds: */
   tu_cs_emit_call(cs, &cmd->draw_cs);

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
   tu_cs_emit(cs, UNK_2D);

   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
   tu6_cache_flush(cmd, cs);

   tu_cs_emit_wfi(cs);

   tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

   emit_vsc_overflow_test(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x0);

   cmd->wait_for_idle = false;
}
static void
tu_emit_load_clear(struct tu_cmd_buffer *cmd,
                   const VkRenderPassBeginInfo *info)
{
   struct tu_cs *cs = &cmd->draw_cs;

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);

   tu6_emit_blit_scissor(cmd, cs, true);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu_load_gmem_attachment(cmd, cs, i, false);

   tu6_emit_blit_scissor(cmd, cs, false);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu_clear_gmem_attachment(cmd, cs, i, info);

   tu_cond_exec_end(cs);

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu_clear_sysmem_attachment(cmd, cs, i, info);

   tu_cond_exec_end(cs);
}
static void
tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
                        const struct VkRect2D *renderArea)
{
   const struct tu_physical_device *phys_dev = cmd->device->physical_device;
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   assert(fb->width > 0 && fb->height > 0);
   tu6_emit_window_scissor(cs, 0, 0, fb->width - 1, fb->height - 1);
   tu6_emit_window_offset(cs, 0, 0);

   tu6_emit_bin_size(cs, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */

   tu6_emit_lrz_flush(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);
   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);

   tu6_emit_wfi(cmd, cs);
   tu_cs_emit_regs(cs,
                   A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_bypass));

   /* enable stream-out, with sysmem there is only one pass: */
   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_OVERRIDE(.so_disable = false));

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_sanity_check(cs);
}
static void
tu6_sysmem_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   /* Do any resolves of the last subpass. These are handled in the
    * tile_store_ib in the gmem path.
    */
   const struct tu_subpass *subpass = cmd->state.subpass;
   if (subpass->resolve_attachments) {
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a != VK_ATTACHMENT_UNUSED)
            tu6_emit_sysmem_resolve(cmd, cs, a,
                                    subpass->color_attachments[i].attachment);
      }
   }

   tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu6_emit_lrz_flush(cmd, cs);

   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);

   tu_cs_sanity_check(cs);
}
static void
tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_physical_device *phys_dev = cmd->device->physical_device;

   tu6_emit_lrz_flush(cmd, cs);

   tu6_emit_cache_flush(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   /* TODO: flushing with barriers instead of blindly always flushing */
   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);

   tu_cs_emit_regs(cs,
                   A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_gmem, .gmem = 1));

   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
   if (use_hw_binning(cmd)) {
      /* enable stream-out during binning pass: */
      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=false));

      tu6_emit_bin_size(cs,
                        tiling->tile0.extent.width,
                        tiling->tile0.extent.height,
                        A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);

      tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, true);

      tu6_emit_binning_pass(cmd, cs);

      /* and disable stream-out for draw pass: */
      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=true));

      tu6_emit_bin_size(cs,
                        tiling->tile0.extent.width,
                        tiling->tile0.extent.height,
                        A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);

      tu_cs_emit_regs(cs,
                      A6XX_VFD_MODE_CNTL(0));

      tu_cs_emit_regs(cs, A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));

      tu_cs_emit_regs(cs, A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));

      tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
      tu_cs_emit(cs, 0x1);
   } else {
      /* no binning pass, so enable stream-out for draw pass: */
      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=false));

      tu6_emit_bin_size(cs,
                        tiling->tile0.extent.width,
                        tiling->tile0.extent.height,
                        0x6000000);
   }

   tu_cs_sanity_check(cs);
}
static void
tu6_render_tile(struct tu_cmd_buffer *cmd,
                struct tu_cs *cs,
                const struct tu_tile *tile)
{
   tu6_emit_tile_select(cmd, cs, tile);

   tu_cs_emit_call(cs, &cmd->draw_cs);
   cmd->wait_for_idle = true;

   if (use_hw_binning(cmd)) {
      tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
      tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
                     A6XX_CP_REG_TEST_0_BIT(0) |
                     A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

      tu_cs_reserve(cs, 3 + 2);
      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
      tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
      tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(2));

      /* if (no overflow) */ {
         tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
         tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
      }
   }

   tu_cs_emit_ib(cs, &cmd->state.tile_store_ib);

   tu_cs_sanity_check(cs);
}
static void
tu6_tile_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_CNTL(0));

   tu6_emit_lrz_flush(cmd, cs);

   tu6_emit_event_write(cmd, cs, PC_CCU_RESOLVE_TS, true);

   tu_cs_sanity_check(cs);
}
static void
tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tu6_tile_render_begin(cmd, &cmd->cs);

   for (uint32_t y = 0; y < tiling->tile_count.height; y++) {
      for (uint32_t x = 0; x < tiling->tile_count.width; x++) {
         struct tu_tile tile;
         tu_tiling_config_get_tile(tiling, cmd->device, x, y, &tile);
         tu6_render_tile(cmd, &cmd->cs, &tile);
      }
   }

   tu6_tile_render_end(cmd, &cmd->cs);
}
static void
tu_cmd_render_sysmem(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tu6_sysmem_render_begin(cmd, &cmd->cs, &tiling->render_area);

   tu_cs_emit_call(&cmd->cs, &cmd->draw_cs);
   cmd->wait_for_idle = true;

   tu6_sysmem_render_end(cmd, &cmd->cs);
}
static void
tu_cmd_prepare_tile_store_ib(struct tu_cmd_buffer *cmd)
{
   const uint32_t tile_store_space = 11 + (35 * 2) * cmd->state.pass->attachment_count;
   struct tu_cs sub_cs;

   VkResult result =
      tu_cs_begin_sub_stream(&cmd->sub_cs, tile_store_space, &sub_cs);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   /* emit to tile-store sub_cs */
   tu6_emit_tile_store(cmd, &sub_cs);

   cmd->state.tile_store_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
}
static void
tu_cmd_update_tiling_config(struct tu_cmd_buffer *cmd,
                            const VkRect2D *render_area)
{
   const struct tu_device *dev = cmd->device;
   struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tiling->render_area = *render_area;
   tiling->force_sysmem = false;

   tu_tiling_config_update_tile_layout(tiling, dev, cmd->state.pass);
   tu_tiling_config_update_pipe_layout(tiling, dev);
   tu_tiling_config_update_pipes(tiling, dev);
}
const struct tu_dynamic_state default_dynamic_state = {
   .viewport =
     {
       .count = 0,
     },
   .scissor =
     {
       .count = 0,
     },
   .line_width = 1.0f,
   .depth_bias =
     {
       .bias = 0.0f,
       .clamp = 0.0f,
       .slope = 0.0f,
     },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds =
     {
       .min = 0.0f,
       .max = 1.0f,
     },
   .stencil_compare_mask =
     {
       .front = ~0u,
       .back = ~0u,
     },
   .stencil_write_mask =
     {
       .front = ~0u,
       .back = ~0u,
     },
   .stencil_reference =
     {
       .front = 0u,
       .back = 0u,
     },
};
static void UNUSED /* FINISHME */
tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer,
                      const struct tu_dynamic_state *src)
{
   struct tu_dynamic_state *dest = &cmd_buffer->state.dynamic;
   uint32_t copy_mask = src->mask;
   uint32_t dest_mask = 0;

   tu_use_args(cmd_buffer); /* FINISHME */

   /* Make sure to copy the number of viewports/scissors because they can
    * only be specified at pipeline creation time.
    */
   dest->viewport.count = src->viewport.count;
   dest->scissor.count = src->scissor.count;
   dest->discard_rectangle.count = src->discard_rectangle.count;

   if (copy_mask & TU_DYNAMIC_VIEWPORT) {
      if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
                 src->viewport.count * sizeof(VkViewport))) {
         typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                      src->viewport.count);
         dest_mask |= TU_DYNAMIC_VIEWPORT;
      }
   }

   if (copy_mask & TU_DYNAMIC_SCISSOR) {
      if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
                 src->scissor.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                      src->scissor.count);
         dest_mask |= TU_DYNAMIC_SCISSOR;
      }
   }

   if (copy_mask & TU_DYNAMIC_LINE_WIDTH) {
      if (dest->line_width != src->line_width) {
         dest->line_width = src->line_width;
         dest_mask |= TU_DYNAMIC_LINE_WIDTH;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) {
      if (memcmp(&dest->depth_bias, &src->depth_bias,
                 sizeof(src->depth_bias))) {
         dest->depth_bias = src->depth_bias;
         dest_mask |= TU_DYNAMIC_DEPTH_BIAS;
      }
   }

   if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) {
      if (memcmp(&dest->blend_constants, &src->blend_constants,
                 sizeof(src->blend_constants))) {
         typed_memcpy(dest->blend_constants, src->blend_constants, 4);
         dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) {
      if (memcmp(&dest->depth_bounds, &src->depth_bounds,
                 sizeof(src->depth_bounds))) {
         dest->depth_bounds = src->depth_bounds;
         dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) {
      if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
                 sizeof(src->stencil_compare_mask))) {
         dest->stencil_compare_mask = src->stencil_compare_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) {
      if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
                 sizeof(src->stencil_write_mask))) {
         dest->stencil_write_mask = src->stencil_write_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) {
      if (memcmp(&dest->stencil_reference, &src->stencil_reference,
                 sizeof(src->stencil_reference))) {
         dest->stencil_reference = src->stencil_reference;
         dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE;
      }
   }

   if (copy_mask & TU_DYNAMIC_DISCARD_RECTANGLE) {
      if (memcmp(&dest->discard_rectangle.rectangles,
                 &src->discard_rectangle.rectangles,
                 src->discard_rectangle.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->discard_rectangle.rectangles,
                      src->discard_rectangle.rectangles,
                      src->discard_rectangle.count);
         dest_mask |= TU_DYNAMIC_DISCARD_RECTANGLE;
      }
   }
}
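/* Note: dest_mask is accumulated but not yet consumed; presumably it is
 * meant to feed dirty-state tracking once this FINISHME is hooked up to the
 * rest of the dynamic-state path.
 */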
static VkResult
tu_create_cmd_buffer(struct tu_device *device,
                     struct tu_cmd_pool *pool,
                     VkCommandBufferLevel level,
                     VkCommandBuffer *pCommandBuffer)
{
   struct tu_cmd_buffer *cmd_buffer;
   cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
      cmd_buffer->queue_family_index = pool->queue_family_index;
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
      cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
   }

   tu_bo_list_init(&cmd_buffer->bo_list);
   tu_cs_init(&cmd_buffer->cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->draw_cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->draw_epilogue_cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->sub_cs, device, TU_CS_MODE_SUB_STREAM, 2048);

   *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);

   list_inithead(&cmd_buffer->upload.list);

   VkResult result = tu_bo_init_new(device, &cmd_buffer->scratch_bo, 0x1000);
   if (result != VK_SUCCESS)
      goto fail_scratch_bo;

   /* TODO: resize on overflow */
   cmd_buffer->vsc_draw_strm_pitch = device->vsc_draw_strm_pitch;
   cmd_buffer->vsc_prim_strm_pitch = device->vsc_prim_strm_pitch;
   cmd_buffer->vsc_draw_strm = device->vsc_draw_strm;
   cmd_buffer->vsc_prim_strm = device->vsc_prim_strm;

   return VK_SUCCESS;

fail_scratch_bo:
   list_del(&cmd_buffer->pool_link);
   return result;
}
static void
tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
{
   tu_bo_finish(cmd_buffer->device, &cmd_buffer->scratch_bo);

   list_del(&cmd_buffer->pool_link);

   for (unsigned i = 0; i < MAX_BIND_POINTS; i++)
      free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);

   tu_cs_finish(&cmd_buffer->cs);
   tu_cs_finish(&cmd_buffer->draw_cs);
   tu_cs_finish(&cmd_buffer->draw_epilogue_cs);
   tu_cs_finish(&cmd_buffer->sub_cs);

   tu_bo_list_destroy(&cmd_buffer->bo_list);
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
static VkResult
tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
{
   cmd_buffer->wait_for_idle = true;

   cmd_buffer->record_result = VK_SUCCESS;

   tu_bo_list_reset(&cmd_buffer->bo_list);
   tu_cs_reset(&cmd_buffer->cs);
   tu_cs_reset(&cmd_buffer->draw_cs);
   tu_cs_reset(&cmd_buffer->draw_epilogue_cs);
   tu_cs_reset(&cmd_buffer->sub_cs);

   for (unsigned i = 0; i < MAX_BIND_POINTS; i++) {
      cmd_buffer->descriptors[i].valid = 0;
      cmd_buffer->descriptors[i].push_dirty = false;
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;

   return cmd_buffer->record_result;
}
VkResult
tu_AllocateCommandBuffers(VkDevice _device,
                          const VkCommandBufferAllocateInfo *pAllocateInfo,
                          VkCommandBuffer *pCommandBuffers)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {

      if (!list_is_empty(&pool->free_cmd_buffers)) {
         struct tu_cmd_buffer *cmd_buffer = list_first_entry(
            &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);

         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

         result = tu_reset_cmd_buffer(cmd_buffer);
         cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
         cmd_buffer->level = pAllocateInfo->level;

         pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
      } else {
         result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                       &pCommandBuffers[i]);
      }
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
                            pCommandBuffers);

      /* From the Vulkan 1.0.66 spec:
       *
       * "vkAllocateCommandBuffers can be used to create multiple
       *  command buffers. If the creation of any of those command
       *  buffers fails, the implementation must destroy all
       *  successfully created command buffer objects from this
       *  command, set all entries of the pCommandBuffers array to
       *  NULL and return the error."
       */
      memset(pCommandBuffers, 0,
             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
   }

   return result;
}
void
tu_FreeCommandBuffers(VkDevice device,
                      VkCommandPool commandPool,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (cmd_buffer) {
         if (cmd_buffer->pool) {
            list_del(&cmd_buffer->pool_link);
            list_addtail(&cmd_buffer->pool_link,
                         &cmd_buffer->pool->free_cmd_buffers);
         } else
            tu_cmd_buffer_destroy(cmd_buffer);
      }
   }
}
VkResult
tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
                      VkCommandBufferResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   return tu_reset_cmd_buffer(cmd_buffer);
}
VkResult
tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
                      const VkCommandBufferBeginInfo *pBeginInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   VkResult result = VK_SUCCESS;

   if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
      /* If the command buffer has already been reset with
       * vkResetCommandBuffer, no need to do it again.
       */
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
   cmd_buffer->usage_flags = pBeginInfo->flags;

   tu_cs_begin(&cmd_buffer->cs);
   tu_cs_begin(&cmd_buffer->draw_cs);
   tu_cs_begin(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->scratch_seqno = 0;

   /* setup initial configuration into command buffer */
   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      switch (cmd_buffer->queue_family_index) {
      case TU_QUEUE_GENERAL:
         tu6_init_hw(cmd_buffer, &cmd_buffer->cs);
         break;
      default:
         break;
      }
   } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
              (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
      assert(pBeginInfo->pInheritanceInfo);
      cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
      cmd_buffer->state.subpass = &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;

   return VK_SUCCESS;
}
/* Sets vertex buffers to HW binding points.  We emit VBs in SDS (so that bin
 * rendering can skip over unused state), so we need to collect all the
 * bindings together into a single state emit at draw time.
 */
void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                        uint32_t firstBinding,
                        uint32_t bindingCount,
                        const VkBuffer *pBuffers,
                        const VkDeviceSize *pOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   assert(firstBinding + bindingCount <= MAX_VBS);

   for (uint32_t i = 0; i < bindingCount; i++) {
      struct tu_buffer *buf = tu_buffer_from_handle(pBuffers[i]);

      cmd->state.vb.buffers[firstBinding + i] = buf;
      cmd->state.vb.offsets[firstBinding + i] = pOffsets[i];

      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
   }

   cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
}
void
tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
                      VkBuffer buffer,
                      VkDeviceSize offset,
                      VkIndexType indexType)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buf, buffer);

   /* initialize/update the restart index */
   if (!cmd->state.index_buffer || cmd->state.index_type != indexType) {
      struct tu_cs *draw_cs = &cmd->draw_cs;

      tu6_emit_restart_index(
         draw_cs, indexType == VK_INDEX_TYPE_UINT32 ? 0xffffffff : 0xffff);

      tu_cs_sanity_check(draw_cs);
   }

   /* track the BO */
   if (cmd->state.index_buffer != buf)
      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);

   cmd->state.index_buffer = buf;
   cmd->state.index_offset = offset;
   cmd->state.index_type = indexType;
}
void
tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                         VkPipelineBindPoint pipelineBindPoint,
                         VkPipelineLayout _layout,
                         uint32_t firstSet,
                         uint32_t descriptorSetCount,
                         const VkDescriptorSet *pDescriptorSets,
                         uint32_t dynamicOffsetCount,
                         const uint32_t *pDynamicOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
   unsigned dyn_idx = 0;

   struct tu_descriptor_state *descriptors_state =
      tu_get_descriptors_state(cmd_buffer, pipelineBindPoint);

   for (unsigned i = 0; i < descriptorSetCount; ++i) {
      unsigned idx = i + firstSet;
      TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);

      descriptors_state->sets[idx] = set;
      descriptors_state->valid |= (1u << idx);

      /* Note: the actual input attachment indices come from the shader
       * itself, so we can't generate the patched versions of these until
       * draw time when both the pipeline and descriptors are bound and
       * we're inside the render pass.
       */
      unsigned dst_idx = layout->set[idx].input_attachment_start;
      memcpy(&descriptors_state->input_attachments[dst_idx * A6XX_TEX_CONST_DWORDS],
             set->dynamic_descriptors,
             set->layout->input_attachment_count * A6XX_TEX_CONST_DWORDS * 4);

      for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
         /* Dynamic buffers come after input attachments in the descriptor set
          * itself, but due to how the Vulkan descriptor set binding works, we
          * have to put input attachments and dynamic buffers in separate
          * buffers in the descriptor_state and then combine them at draw
          * time. Binding a descriptor set only invalidates the descriptor
          * sets after it, but if we try to tightly pack the descriptors after
          * the input attachments then we could corrupt dynamic buffers in the
          * descriptor set before it, or we'd have to move all the dynamic
          * buffers over. We just put them into separate buffers to make
          * binding as well as the later patching of input attachments easy.
          */
         unsigned src_idx = j + set->layout->input_attachment_count;
         unsigned dst_idx = j + layout->set[idx].dynamic_offset_start;
         assert(dyn_idx < dynamicOffsetCount);

         uint32_t *dst =
            &descriptors_state->dynamic_descriptors[dst_idx * A6XX_TEX_CONST_DWORDS];
         uint32_t *src =
            &set->dynamic_descriptors[src_idx * A6XX_TEX_CONST_DWORDS];
         uint32_t offset = pDynamicOffsets[dyn_idx];

         /* Patch the storage/uniform descriptors right away. */
         if (layout->set[idx].layout->dynamic_ubo & (1 << j)) {
            /* Note: we can assume here that the addition won't roll over and
             * change the SIZE field.
             */
            uint64_t va = src[0] | ((uint64_t)src[1] << 32);
            va += offset;
            dst[0] = va;
            dst[1] = va >> 32;
         } else {
            memcpy(dst, src, A6XX_TEX_CONST_DWORDS * 4);
            /* Note: A6XX_IBO_5_DEPTH is always 0 */
            uint64_t va = dst[4] | ((uint64_t)dst[5] << 32);
            va += offset;
            dst[4] = va;
            dst[5] = va >> 32;
         }
      }
   }

   if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE)
      cmd_buffer->state.dirty |= TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS;
   else
      cmd_buffer->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS;
}
void tu_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer,
                                           uint32_t firstBinding,
                                           uint32_t bindingCount,
                                           const VkBuffer *pBuffers,
                                           const VkDeviceSize *pOffsets,
                                           const VkDeviceSize *pSizes)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   assert(firstBinding + bindingCount <= IR3_MAX_SO_BUFFERS);

   for (uint32_t i = 0; i < bindingCount; i++) {
      uint32_t idx = firstBinding + i;
      TU_FROM_HANDLE(tu_buffer, buf, pBuffers[i]);

      if (pOffsets[i] != 0)
         cmd->state.streamout_reset |= 1 << idx;

      cmd->state.streamout_buf.buffers[idx] = buf;
      cmd->state.streamout_buf.offsets[idx] = pOffsets[i];
      cmd->state.streamout_buf.sizes[idx] = pSizes[i];

      cmd->state.streamout_enabled |= 1 << idx;
   }

   cmd->state.dirty |= TU_CMD_DIRTY_STREAMOUT_BUFFERS;
}
void tu_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer,
                                     uint32_t firstCounterBuffer,
                                     uint32_t counterBufferCount,
                                     const VkBuffer *pCounterBuffers,
                                     const VkDeviceSize *pCounterBufferOffsets)
{
   assert(firstCounterBuffer + counterBufferCount <= IR3_MAX_SO_BUFFERS);
   /* TODO do something with counter buffer? */
}

void tu_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer,
                                   uint32_t firstCounterBuffer,
                                   uint32_t counterBufferCount,
                                   const VkBuffer *pCounterBuffers,
                                   const VkDeviceSize *pCounterBufferOffsets)
{
   assert(firstCounterBuffer + counterBufferCount <= IR3_MAX_SO_BUFFERS);
   /* TODO do something with counter buffer? */

   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   cmd->state.streamout_enabled = 0;
}

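/* Push constants are staged in a CPU-side array on the command buffer and
 * only uploaded to the shader const space at draw time (see
 * tu6_emit_user_consts below), so all we do here is copy and mark dirty.
 */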
void
tu_CmdPushConstants(VkCommandBuffer commandBuffer,
                    VkPipelineLayout layout,
                    VkShaderStageFlags stageFlags,
                    uint32_t offset,
                    uint32_t size,
                    const void *pValues)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   memcpy((void*) cmd->push_constants + offset, pValues, size);
   cmd->state.dirty |= TU_CMD_DIRTY_PUSH_CONSTANTS;
}

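/* Finish recording: make sure every BO this submission touches (scratch,
 * VSC streams, border colors, and all command-stream BOs) is in the
 * bo_list so the kernel keeps it resident, then close the command streams.
 */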
VkResult
tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   if (cmd_buffer->scratch_seqno) {
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
                     MSM_SUBMIT_BO_WRITE);
   }

   if (cmd_buffer->use_vsc_data) {
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_draw_strm,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_prim_strm,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
   }

   tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->device->border_color,
                  MSM_SUBMIT_BO_READ);

   for (uint32_t i = 0; i < cmd_buffer->draw_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   for (uint32_t i = 0; i < cmd_buffer->draw_epilogue_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_epilogue_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   for (uint32_t i = 0; i < cmd_buffer->sub_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->sub_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   tu_cs_end(&cmd_buffer->cs);
   tu_cs_end(&cmd_buffer->draw_cs);
   tu_cs_end(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;

   return cmd_buffer->record_result;
}

void
tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
                   VkPipelineBindPoint pipelineBindPoint,
                   VkPipeline _pipeline)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd->state.pipeline = pipeline;
      cmd->state.dirty |= TU_CMD_DIRTY_PIPELINE;
      break;
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd->state.compute_pipeline = pipeline;
      cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_PIPELINE;
      break;
   default:
      unreachable("unrecognized pipeline bind point");
      break;
   }

   tu_bo_list_add(&cmd->bo_list, &pipeline->program.binary_bo,
                  MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   for (uint32_t i = 0; i < pipeline->cs.bo_count; i++) {
      tu_bo_list_add(&cmd->bo_list, pipeline->cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }
}

void
tu_CmdSetViewport(VkCommandBuffer commandBuffer,
                  uint32_t firstViewport,
                  uint32_t viewportCount,
                  const VkViewport *pViewports)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   assert(firstViewport == 0 && viewportCount == 1);
   cmd->state.dynamic.viewport.viewports[0] = pViewports[0];
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_VIEWPORT;
}

void
tu_CmdSetScissor(VkCommandBuffer commandBuffer,
                 uint32_t firstScissor,
                 uint32_t scissorCount,
                 const VkRect2D *pScissors)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   assert(firstScissor == 0 && scissorCount == 1);
   cmd->state.dynamic.scissor.scissors[0] = pScissors[0];
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_SCISSOR;
}

void
tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   cmd->state.dynamic.line_width = lineWidth;

   /* line width depends on VkPipelineRasterizationStateCreateInfo */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}

void
tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
                   float depthBiasConstantFactor,
                   float depthBiasClamp,
                   float depthBiasSlopeFactor)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *draw_cs = &cmd->draw_cs;

   tu6_emit_depth_bias(draw_cs, depthBiasConstantFactor, depthBiasClamp,
                       depthBiasSlopeFactor);

   tu_cs_sanity_check(draw_cs);
}

void
tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
                        const float blendConstants[4])
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *draw_cs = &cmd->draw_cs;

   tu6_emit_blend_constants(draw_cs, blendConstants);

   tu_cs_sanity_check(draw_cs);
}

void
tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
                     float minDepthBounds,
                     float maxDepthBounds)
{
}

void
tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
                            VkStencilFaceFlags faceMask,
                            uint32_t compareMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd->state.dynamic.stencil_compare_mask.front = compareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd->state.dynamic.stencil_compare_mask.back = compareMask;

   /* the front/back compare masks must be updated together */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}

void
tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t writeMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd->state.dynamic.stencil_write_mask.front = writeMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd->state.dynamic.stencil_write_mask.back = writeMask;

   /* the front/back write masks must be updated together */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}

void
tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t reference)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd->state.dynamic.stencil_reference.front = reference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd->state.dynamic.stencil_reference.back = reference;

   /* the front/back references must be updated together */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}

void
tu_CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
                            const VkSampleLocationsInfoEXT* pSampleLocationsInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   tu6_emit_sample_locations(&cmd->draw_cs, pSampleLocationsInfo);
}

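/* Execute secondary command buffers: merge their BO lists into the primary,
 * then either splice their draw_cs/draw_epilogue_cs entries into the current
 * render pass (RENDER_PASS_CONTINUE_BIT) or append their main command
 * stream.
 */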
void
tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCmdBuffers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   VkResult result;

   assert(commandBufferCount > 0);

   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);

      result = tu_bo_list_merge(&cmd->bo_list, &secondary->bo_list);
      if (result != VK_SUCCESS) {
         cmd->record_result = result;
         break;
      }

      if (secondary->usage_flags &
          VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
         assert(tu_cs_is_empty(&secondary->cs));

         result = tu_cs_add_entries(&cmd->draw_cs, &secondary->draw_cs);
         if (result != VK_SUCCESS) {
            cmd->record_result = result;
            break;
         }

         result = tu_cs_add_entries(&cmd->draw_epilogue_cs,
                                    &secondary->draw_epilogue_cs);
         if (result != VK_SUCCESS) {
            cmd->record_result = result;
            break;
         }
      } else {
         assert(tu_cs_is_empty(&secondary->draw_cs));
         assert(tu_cs_is_empty(&secondary->draw_epilogue_cs));

         for (uint32_t j = 0; j < secondary->cs.bo_count; j++) {
            tu_bo_list_add(&cmd->bo_list, secondary->cs.bos[j],
                           MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
         }

         tu_cs_add_entries(&cmd->cs, &secondary->cs);
      }
   }
   cmd->state.dirty = ~0u; /* TODO: set dirty only what needs to be */
}

VkResult
tu_CreateCommandPool(VkDevice _device,
                     const VkCommandPoolCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkCommandPool *pCmdPool)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_cmd_pool *pool;

   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);
   list_inithead(&pool->free_cmd_buffers);

   pool->queue_family_index = pCreateInfo->queueFamilyIndex;

   *pCmdPool = tu_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void
tu_DestroyCommandPool(VkDevice _device,
                      VkCommandPool commandPool,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   vk_free2(&device->alloc, pAllocator, pool);
}

VkResult
tu_ResetCommandPool(VkDevice device,
                    VkCommandPool commandPool,
                    VkCommandPoolResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
   VkResult result;

   list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
                       pool_link)
   {
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}

void
tu_TrimCommandPool(VkDevice device,
                   VkCommandPool commandPool,
                   VkCommandPoolTrimFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }
}

void
tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                      const VkRenderPassBeginInfo *pRenderPassBegin,
                      VkSubpassContents contents)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
   TU_FROM_HANDLE(tu_framebuffer, fb, pRenderPassBegin->framebuffer);

   cmd->state.pass = pass;
   cmd->state.subpass = pass->subpasses;
   cmd->state.framebuffer = fb;

   tu_cmd_update_tiling_config(cmd, &pRenderPassBegin->renderArea);
   tu_cmd_prepare_tile_store_ib(cmd);

   tu_emit_load_clear(cmd, pRenderPassBegin);

   tu6_emit_zs(cmd, cmd->state.subpass, &cmd->draw_cs);
   tu6_emit_mrt(cmd, cmd->state.subpass, &cmd->draw_cs);
   tu6_emit_msaa(&cmd->draw_cs, cmd->state.subpass->samples);
   tu6_emit_render_cntl(cmd, cmd->state.subpass, &cmd->draw_cs, false);

   /* note: use_hw_binning only checks tiling config */
   if (use_hw_binning(cmd))
      cmd->use_vsc_data = true;

   for (uint32_t i = 0; i < fb->attachment_count; ++i) {
      const struct tu_image_view *iview = fb->attachments[i].attachment;
      tu_bo_list_add(&cmd->bo_list, iview->image->bo,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
   }

   /* Flag input attachment descriptors for re-emission if necessary */
   cmd->state.dirty |= TU_CMD_DIRTY_INPUT_ATTACHMENTS;
}

void
tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
                       const VkRenderPassBeginInfo *pRenderPassBeginInfo,
                       const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
{
   tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
                         pSubpassBeginInfo->contents);
}

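/* Ending a subpass emits both a GMEM and a sysmem resolve path under
 * CP_COND_EXEC, since the choice between tiled and sysmem rendering is only
 * made when the render pass ends.
 */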
void
tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   const struct tu_render_pass *pass = cmd->state.pass;
   struct tu_cs *cs = &cmd->draw_cs;

   const struct tu_subpass *subpass = cmd->state.subpass++;

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);

   if (subpass->resolve_attachments) {
      tu6_emit_blit_scissor(cmd, cs, true);

      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         tu_store_gmem_attachment(cmd, cs, a,
                                  subpass->color_attachments[i].attachment);

         if (pass->attachments[a].gmem_offset < 0)
            continue;

         /* TODO:
          * check if the resolved attachment is needed by later subpasses,
          * if it is, should be doing a GMEM->GMEM resolve instead of GMEM->MEM->GMEM..
          */
         tu_finishme("missing GMEM->GMEM resolve path\n");
         tu_load_gmem_attachment(cmd, cs, a, true);
      }
   }

   tu_cond_exec_end(cs);

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);

   /* Emit flushes so that input attachments will read the correct value.
    * TODO: use subpass dependencies to flush or not
    */
   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);

   if (subpass->resolve_attachments) {
      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);

      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         tu6_emit_sysmem_resolve(cmd, cs, a,
                                 subpass->color_attachments[i].attachment);
      }

      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
   }

   tu_cond_exec_end(cs);

   /* if subpass->input_count > 0, a texture cache invalidate is likely needed */
   if (cmd->state.subpass->input_count)
      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);

   /* emit mrt/zs/msaa/ubwc state for the subpass that is starting */
   tu6_emit_zs(cmd, cmd->state.subpass, cs);
   tu6_emit_mrt(cmd, cmd->state.subpass, cs);
   tu6_emit_msaa(cs, cmd->state.subpass->samples);
   tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, false);

   /* Flag input attachment descriptors for re-emission if necessary */
   cmd->state.dirty |= TU_CMD_DIRTY_INPUT_ATTACHMENTS;
}

void
tu_CmdNextSubpass2(VkCommandBuffer commandBuffer,
                   const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
                   const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}

struct tu_draw_info
{
   /**
    * Number of vertices.
    */
   uint32_t count;

   /**
    * Index of the first vertex.
    */
   int32_t vertex_offset;

   /**
    * First instance id.
    */
   uint32_t first_instance;

   /**
    * Number of instances.
    */
   uint32_t instance_count;

   /**
    * First index (indexed draws only).
    */
   uint32_t first_index;

   /**
    * Whether it's an indexed draw.
    */
   bool indexed;

   /**
    * Indirect draw parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
   uint32_t stride;

   /**
    * Draw count parameters resource.
    */
   struct tu_buffer *count_buffer;
   uint64_t count_buffer_offset;

   /**
    * Stream output parameters resource.
    */
   struct tu_buffer *streamout_buffer;
   uint64_t streamout_buffer_offset;
};

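/* CP_SET_DRAW_STATE groups can be enabled for any subset of the binning,
 * GMEM-rendering, and sysmem-rendering passes; these masks name the common
 * combinations used below.
 */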
#define ENABLE_ALL (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
#define ENABLE_DRAW (CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
#define ENABLE_NON_GMEM (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_SYSMEM)

enum tu_draw_state_group_id
{
   TU_DRAW_STATE_PROGRAM,
   TU_DRAW_STATE_PROGRAM_BINNING,
   TU_DRAW_STATE_VB,
   TU_DRAW_STATE_VI,
   TU_DRAW_STATE_VI_BINNING,
   TU_DRAW_STATE_VP,
   TU_DRAW_STATE_RAST,
   TU_DRAW_STATE_DS,
   TU_DRAW_STATE_BLEND,
   TU_DRAW_STATE_VS_CONST,
   TU_DRAW_STATE_GS_CONST,
   TU_DRAW_STATE_FS_CONST,
   TU_DRAW_STATE_DESC_SETS,
   TU_DRAW_STATE_DESC_SETS_GMEM,
   TU_DRAW_STATE_DESC_SETS_LOAD,
   TU_DRAW_STATE_VS_PARAMS,

   TU_DRAW_STATE_COUNT,
};

struct tu_draw_state_group
{
   enum tu_draw_state_group_id id;
   uint32_t enable_mask;
   struct tu_cs_entry ib;
};

static inline uint32_t
tu6_stage2opcode(gl_shader_stage type)
{
   switch (type) {
   case MESA_SHADER_VERTEX:
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_TESS_EVAL:
   case MESA_SHADER_GEOMETRY:
      return CP_LOAD_STATE6_GEOM;
   case MESA_SHADER_FRAGMENT:
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_KERNEL:
      return CP_LOAD_STATE6_FRAG;
   default:
      unreachable("bad shader type");
   }
}

static inline enum a6xx_state_block
tu6_stage2shadersb(gl_shader_stage type)
{
   switch (type) {
   case MESA_SHADER_VERTEX:
      return SB6_VS_SHADER;
   case MESA_SHADER_GEOMETRY:
      return SB6_GS_SHADER;
   case MESA_SHADER_FRAGMENT:
      return SB6_FS_SHADER;
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_KERNEL:
      return SB6_CS_SHADER;
   default:
      unreachable("bad shader type");
   }
}

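/* Upload user constants for one shader stage: push constants are emitted
 * inline (SS6_DIRECT), while UBO ranges that the compiler decided to push
 * are emitted by GPU address (SS6_INDIRECT), with the address read back out
 * of the bound descriptors.
 */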
static void
tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
                     struct tu_descriptor_state *descriptors_state,
                     gl_shader_stage type,
                     uint32_t *push_constants)
{
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   const struct ir3_ubo_analysis_state *state = &link->ubo_state;

   if (link->push_consts.count > 0) {
      unsigned num_units = link->push_consts.count;
      unsigned offset = link->push_consts.lo;
      tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_units * 4);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                 CP_LOAD_STATE6_0_NUM_UNIT(num_units));
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, 0);
      for (unsigned i = 0; i < num_units * 4; i++)
         tu_cs_emit(cs, push_constants[i + offset * 4]);
   }

   for (uint32_t i = 0; i < state->num_enabled; i++) {
      uint32_t size = state->range[i].end - state->range[i].start;
      uint32_t offset = state->range[i].start;

      /* and even if the start of the const buffer is before
       * first_immediate, the end may not be:
       */
      size = MIN2(size, (16 * link->constlen) - state->range[i].offset);

      /* things should be aligned to vec4: */
      debug_assert((state->range[i].offset % 16) == 0);
      debug_assert((size % 16) == 0);
      debug_assert((offset % 16) == 0);

      /* Dig out the descriptor from the descriptor state and read the VA from
       * it.
       */
      assert(state->range[i].bindless);
      uint32_t *base = state->range[i].bindless_base == MAX_SETS ?
         descriptors_state->dynamic_descriptors :
         descriptors_state->sets[state->range[i].bindless_base]->mapped_ptr;
      unsigned block = state->range[i].block;
      /* If the block in the shader here is in the dynamic descriptor set, it
       * is an index into the dynamic descriptor set which is combined from
       * dynamic descriptors and input attachments on-the-fly, and we don't
       * have access to it here. Instead we work backwards to get the index
       * into dynamic_descriptors.
       */
      if (state->range[i].bindless_base == MAX_SETS)
         block -= pipeline->layout->input_attachment_count;
      uint32_t *desc = base + block * A6XX_TEX_CONST_DWORDS;
      uint64_t va = desc[0] | ((uint64_t)(desc[1] & A6XX_UBO_1_BASE_HI__MASK) << 32);

      tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                 CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
      tu_cs_emit_qw(cs, va + offset);
   }
}

static struct tu_cs_entry
tu6_emit_consts(struct tu_cmd_buffer *cmd,
                const struct tu_pipeline *pipeline,
                struct tu_descriptor_state *descriptors_state,
                gl_shader_stage type)
{
   struct tu_cs cs;
   tu_cs_begin_sub_stream(&cmd->sub_cs, 512, &cs); /* TODO: maximum size? */

   tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);

   return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
}

static VkResult
tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
                   const struct tu_draw_info *draw,
                   struct tu_cs_entry *entry)
{
   /* TODO: fill out more than just base instance */
   const struct tu_program_descriptor_linkage *link =
      &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
   const struct ir3_const_state *const_state = &link->const_state;
   struct tu_cs cs;

   if (const_state->offsets.driver_param >= link->constlen) {
      *entry = (struct tu_cs_entry) {};
      return VK_SUCCESS;
   }

   VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 8, &cs);
   if (result != VK_SUCCESS)
      return result;

   tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(const_state->offsets.driver_param) |
         CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
         CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
         CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
         CP_LOAD_STATE6_0_NUM_UNIT(1));
   tu_cs_emit(&cs, 0);
   tu_cs_emit(&cs, 0);

   STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);

   tu_cs_emit(&cs, 0);
   tu_cs_emit(&cs, 0);
   tu_cs_emit(&cs, draw->first_instance);
   tu_cs_emit(&cs, 0);

   *entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
   return VK_SUCCESS;
}

static struct tu_cs_entry
tu6_emit_vertex_buffers(struct tu_cmd_buffer *cmd,
                        const struct tu_pipeline *pipeline)
{
   struct tu_cs cs;
   tu_cs_begin_sub_stream(&cmd->sub_cs, 4 * MAX_VBS, &cs);

   for (uint32_t i = 0; i < pipeline->vi.count; i++) {
      const uint32_t binding = pipeline->vi.bindings[i];
      const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
      const VkDeviceSize offset = buf->bo_offset +
                                  cmd->state.vb.offsets[binding];
      const VkDeviceSize size =
         offset < buf->size ? buf->size - offset : 0;

      tu_cs_emit_regs(&cs,
                      A6XX_VFD_FETCH_BASE(i, .bo = buf->bo, .bo_offset = offset),
                      A6XX_VFD_FETCH_SIZE(i, size));
   }

   return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
}

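/* Emit the bindless base registers pointing at the bound descriptor sets.
 * If there are dynamic descriptors or input attachments, a combined
 * driver-internal set is allocated from sub_cs and, for the GMEM variant,
 * the input attachment descriptors are patched to read from GMEM instead of
 * system memory.
 */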
static VkResult
tu6_emit_descriptor_sets(struct tu_cmd_buffer *cmd,
                         const struct tu_pipeline *pipeline,
                         VkPipelineBindPoint bind_point,
                         struct tu_cs_entry *entry,
                         bool gmem)
{
   struct tu_cs *draw_state = &cmd->sub_cs;
   struct tu_pipeline_layout *layout = pipeline->layout;
   struct tu_descriptor_state *descriptors_state =
      tu_get_descriptors_state(cmd, bind_point);
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
   const uint32_t *input_attachment_idx =
      pipeline->program.input_attachment_idx;
   uint32_t num_dynamic_descs = layout->dynamic_offset_count +
      layout->input_attachment_count;
   struct ts_cs_memory dynamic_desc_set;
   VkResult result;

   if (num_dynamic_descs > 0) {
      /* allocate and fill out dynamic descriptor set */
      result = tu_cs_alloc(draw_state, num_dynamic_descs,
                           A6XX_TEX_CONST_DWORDS, &dynamic_desc_set);
      if (result != VK_SUCCESS)
         return result;

      memcpy(dynamic_desc_set.map, descriptors_state->input_attachments,
             layout->input_attachment_count * A6XX_TEX_CONST_DWORDS * 4);

      if (gmem) {
         /* Patch input attachments to refer to GMEM instead */
         for (unsigned i = 0; i < layout->input_attachment_count; i++) {
            uint32_t *dst =
               &dynamic_desc_set.map[A6XX_TEX_CONST_DWORDS * i];

            /* The compiler has already laid out input_attachment_idx in the
             * final order of input attachments, so there's no need to go
             * through the pipeline layout finding input attachments.
             */
            unsigned attachment_idx = input_attachment_idx[i];

            /* It's possible for the pipeline layout to include an input
             * attachment which doesn't actually exist for the current
             * subpass. Of course, this is only valid so long as the pipeline
             * doesn't try to actually load that attachment. Just skip
             * patching in that scenario to avoid out-of-bounds accesses.
             */
            if (attachment_idx >= cmd->state.subpass->input_count)
               continue;

            uint32_t a = cmd->state.subpass->input_attachments[attachment_idx].attachment;
            const struct tu_render_pass_attachment *att = &cmd->state.pass->attachments[a];

            assert(att->gmem_offset >= 0);

            dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
            dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
            dst[2] &= ~(A6XX_TEX_CONST_2_TYPE__MASK | A6XX_TEX_CONST_2_PITCH__MASK);
            dst[2] |=
               A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
               A6XX_TEX_CONST_2_PITCH(tiling->tile0.extent.width * att->cpp);
            dst[3] = 0;
            dst[4] = cmd->device->physical_device->gmem_base + att->gmem_offset;
            dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
            for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
               dst[i] = 0;

            if (cmd->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
               tu_finishme("patch input attachment pitch for secondary cmd buffer");
         }
      }

      memcpy(dynamic_desc_set.map + layout->input_attachment_count * A6XX_TEX_CONST_DWORDS,
             descriptors_state->dynamic_descriptors,
             layout->dynamic_offset_count * A6XX_TEX_CONST_DWORDS * 4);
   }

   uint32_t sp_bindless_base_reg, hlsq_bindless_base_reg;
   uint32_t hlsq_update_value;
   switch (bind_point) {
   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      sp_bindless_base_reg = REG_A6XX_SP_BINDLESS_BASE(0);
      hlsq_bindless_base_reg = REG_A6XX_HLSQ_BINDLESS_BASE(0);
      hlsq_update_value = 0x7c000;
      break;
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      sp_bindless_base_reg = REG_A6XX_SP_CS_BINDLESS_BASE(0);
      hlsq_bindless_base_reg = REG_A6XX_HLSQ_CS_BINDLESS_BASE(0);
      hlsq_update_value = 0x3e00;
      break;
   default:
      unreachable("bad bind point");
   }

   /* Be careful here to *not* refer to the pipeline, so that if only the
    * pipeline changes we don't have to emit this again (except if there are
    * dynamic descriptors in the pipeline layout). This means always emitting
    * all the valid descriptors, which means that we always have to put the
    * dynamic descriptor in the driver-only slot at the end
    */
   uint32_t num_user_sets = util_last_bit(descriptors_state->valid);
   uint32_t num_sets = num_user_sets;
   if (num_dynamic_descs > 0) {
      num_user_sets = MAX_SETS;
      num_sets = num_user_sets + 1;
   }

   unsigned regs[2] = { sp_bindless_base_reg, hlsq_bindless_base_reg };

   struct tu_cs cs;
   result = tu_cs_begin_sub_stream(draw_state, ARRAY_SIZE(regs) * (1 + num_sets * 2) + 2, &cs);
   if (result != VK_SUCCESS)
      return result;

   if (num_sets > 0) {
      for (unsigned i = 0; i < ARRAY_SIZE(regs); i++) {
         tu_cs_emit_pkt4(&cs, regs[i], num_sets * 2);
         for (unsigned j = 0; j < num_user_sets; j++) {
            if (descriptors_state->valid & (1 << j)) {
               /* magic | 3 copied from the blob */
               tu_cs_emit_qw(&cs, descriptors_state->sets[j]->va | 3);
            } else {
               tu_cs_emit_qw(&cs, 0 | 3);
            }
         }
         if (num_dynamic_descs > 0) {
            tu_cs_emit_qw(&cs, dynamic_desc_set.iova | 3);
         }
      }

      tu_cs_emit_regs(&cs, A6XX_HLSQ_UPDATE_CNTL(hlsq_update_value));
   }

   *entry = tu_cs_end_sub_stream(draw_state, &cs);
   return VK_SUCCESS;
}

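/* Program the VPC stream-output buffers. A buffer's offset register is
 * either reset from the API-provided offset or reloaded with CP_MEM_TO_REG
 * from the flush base previously saved into the scratch BO by a FLUSH_SO
 * event.
 */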
static void
tu6_emit_streamout(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_streamout_state *tf = &cmd->state.pipeline->streamout;

   for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
      struct tu_buffer *buf = cmd->state.streamout_buf.buffers[i];
      if (!buf)
         continue;

      uint32_t offset;
      offset = cmd->state.streamout_buf.offsets[i];

      tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_BASE(i, .bo = buf->bo,
                                                     .bo_offset = buf->bo_offset));
      tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_SIZE(i, buf->size));

      if (cmd->state.streamout_reset & (1 << i)) {
         tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_OFFSET(i, offset));
         cmd->state.streamout_reset &= ~(1 << i);
      } else {
         tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
         tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(i)) |
                        CP_MEM_TO_REG_0_SHIFT_BY_2 | CP_MEM_TO_REG_0_UNK31 |
                        CP_MEM_TO_REG_0_CNT(0));
         tu_cs_emit_qw(cs, cmd->scratch_bo.iova +
                           ctrl_offset(flush_base[i].offset));
      }

      tu_cs_emit_regs(cs, A6XX_VPC_SO_FLUSH_BASE(i, .bo = &cmd->scratch_bo,
                                                    .bo_offset =
                                                    ctrl_offset(flush_base[i])));
   }

   if (cmd->state.streamout_enabled) {
      tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 12 + (2 * tf->prog_count));
      tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
      tu_cs_emit(cs, tf->vpc_so_buf_cntl);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(0));
      tu_cs_emit(cs, tf->ncomp[0]);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(1));
      tu_cs_emit(cs, tf->ncomp[1]);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(2));
      tu_cs_emit(cs, tf->ncomp[2]);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(3));
      tu_cs_emit(cs, tf->ncomp[3]);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
      tu_cs_emit(cs, A6XX_VPC_SO_CNTL_ENABLE);
      for (unsigned i = 0; i < tf->prog_count; i++) {
         tu_cs_emit(cs, REG_A6XX_VPC_SO_PROG);
         tu_cs_emit(cs, tf->prog[i]);
      }
   } else {
      tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 4);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
      tu_cs_emit(cs, 0);
   }
}

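/* Gather all dirty draw state into CP_SET_DRAW_STATE groups, emit them in a
 * single packet, and then record the BOs referenced by the descriptors and
 * streamout buffers so the kernel keeps them resident for the submit.
 */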
static VkResult
tu6_bind_draw_states(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     const struct tu_draw_info *draw)
{
   const struct tu_pipeline *pipeline = cmd->state.pipeline;
   const struct tu_dynamic_state *dynamic = &cmd->state.dynamic;
   struct tu_draw_state_group draw_state_groups[TU_DRAW_STATE_COUNT];
   uint32_t draw_state_group_count = 0;
   VkResult result;

   struct tu_descriptor_state *descriptors_state =
      &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];

   tu_cs_emit_regs(cs,
                   A6XX_PC_PRIMITIVE_CNTL_0(.primitive_restart =
                                            pipeline->ia.primitive_restart && draw->indexed));

   if (cmd->state.dirty &
       (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_LINE_WIDTH)) {
      tu6_emit_gras_su_cntl(cs, pipeline->rast.gras_su_cntl,
                            dynamic->line_width);
   }

   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_COMPARE_MASK)) {
      tu6_emit_stencil_compare_mask(cs, dynamic->stencil_compare_mask.front,
                                    dynamic->stencil_compare_mask.back);
   }

   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_WRITE_MASK)) {
      tu6_emit_stencil_write_mask(cs, dynamic->stencil_write_mask.front,
                                  dynamic->stencil_write_mask.back);
   }

   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_REFERENCE)) {
      tu6_emit_stencil_reference(cs, dynamic->stencil_reference.front,
                                 dynamic->stencil_reference.back);
   }

   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_VIEWPORT) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_VIEWPORT)) {
      tu6_emit_viewport(cs, &cmd->state.dynamic.viewport.viewports[0]);
   }

   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_SCISSOR) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_SCISSOR)) {
      tu6_emit_scissor(cs, &cmd->state.dynamic.scissor.scissors[0]);
   }

   if (cmd->state.dirty & TU_CMD_DIRTY_PIPELINE) {
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_PROGRAM,
            .enable_mask = ENABLE_DRAW,
            .ib = pipeline->program.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_PROGRAM_BINNING,
            .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
            .ib = pipeline->program.binning_state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VI,
            .enable_mask = ENABLE_DRAW,
            .ib = pipeline->vi.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VI_BINNING,
            .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
            .ib = pipeline->vi.binning_state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VP,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->vp.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_RAST,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->rast.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_DS,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->ds.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_BLEND,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->blend.state_ib,
         };
   }

   if (cmd->state.dirty &
       (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS | TU_CMD_DIRTY_PUSH_CONSTANTS)) {
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VS_CONST,
            .enable_mask = ENABLE_ALL,
            .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX)
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_GS_CONST,
            .enable_mask = ENABLE_ALL,
            .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_GEOMETRY)
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_FS_CONST,
            .enable_mask = ENABLE_DRAW,
            .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT)
         };
   }

   if (cmd->state.dirty &
       (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_VERTEX_BUFFERS)) {
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VB,
            .enable_mask = ENABLE_ALL,
            .ib = tu6_emit_vertex_buffers(cmd, pipeline)
         };
   }

   if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS)
      tu6_emit_streamout(cmd, cs);

   /* If there are any dynamic descriptors, then we may need to re-emit
    * them after every pipeline change in case the number of input attachments
    * changes. We also always need to re-emit after a pipeline change if there
    * are any input attachments, because the input attachment index comes from
    * the pipeline. Finally, it can also happen that the subpass changes
    * without the pipeline changing, in which case the GMEM descriptors need
    * to be patched differently.
    *
    * TODO: We could probably be clever and avoid re-emitting state on
    * pipeline changes if the number of input attachments is always 0. We
    * could also only re-emit dynamic state.
    */
   if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS ||
       ((pipeline->layout->dynamic_offset_count +
         pipeline->layout->input_attachment_count > 0) &&
        cmd->state.dirty & TU_CMD_DIRTY_PIPELINE) ||
       (pipeline->layout->input_attachment_count > 0 &&
        cmd->state.dirty & TU_CMD_DIRTY_INPUT_ATTACHMENTS)) {
      struct tu_cs_entry desc_sets, desc_sets_gmem;
      bool need_gmem_desc_set = pipeline->layout->input_attachment_count > 0;

      result = tu6_emit_descriptor_sets(cmd, pipeline,
                                        VK_PIPELINE_BIND_POINT_GRAPHICS,
                                        &desc_sets, false);
      if (result != VK_SUCCESS)
         return result;

      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_DESC_SETS,
            .enable_mask = need_gmem_desc_set ? ENABLE_NON_GMEM : ENABLE_ALL,
            .ib = desc_sets,
         };

      if (need_gmem_desc_set) {
         result = tu6_emit_descriptor_sets(cmd, pipeline,
                                           VK_PIPELINE_BIND_POINT_GRAPHICS,
                                           &desc_sets_gmem, true);
         if (result != VK_SUCCESS)
            return result;

         draw_state_groups[draw_state_group_count++] =
            (struct tu_draw_state_group) {
               .id = TU_DRAW_STATE_DESC_SETS_GMEM,
               .enable_mask = CP_SET_DRAW_STATE__0_GMEM,
               .ib = desc_sets_gmem,
            };
      }

      /* We need to reload the descriptors every time the descriptor sets
       * change. However, the commands we send only depend on the pipeline
       * because the whole point is to cache descriptors which are used by the
       * pipeline. There's a problem here, in that the firmware has an
       * "optimization" which skips executing groups that are set to the same
       * value as the last draw. This means that if the descriptor sets change
       * but not the pipeline, we'd try to re-execute the same buffer which
       * the firmware would ignore and we wouldn't pre-load the new
       * descriptors. The blob seems to re-emit the LOAD_STATE group whenever
       * the descriptor sets change, which we emulate here by copying the
       * pre-prepared buffer.
       */
      const struct tu_cs_entry *load_entry = &pipeline->load_state.state_ib;
      if (load_entry->size > 0) {
         struct tu_cs load_cs;
         result = tu_cs_begin_sub_stream(&cmd->sub_cs, load_entry->size, &load_cs);
         if (result != VK_SUCCESS)
            return result;
         tu_cs_emit_array(&load_cs,
                          (uint32_t *)((char *)load_entry->bo->map + load_entry->offset),
                          load_entry->size / 4);
         struct tu_cs_entry load_copy = tu_cs_end_sub_stream(&cmd->sub_cs, &load_cs);

         draw_state_groups[draw_state_group_count++] =
            (struct tu_draw_state_group) {
               .id = TU_DRAW_STATE_DESC_SETS_LOAD,
               /* The blob seems to not enable this for binning, even when
                * resources would actually be used in the binning shader.
                * Presumably the overhead of prefetching the resources isn't
                * worth it.
                */
               .enable_mask = ENABLE_DRAW,
               .ib = load_copy,
            };
      }
   }

   struct tu_cs_entry vs_params;
   result = tu6_emit_vs_params(cmd, draw, &vs_params);
   if (result != VK_SUCCESS)
      return result;

   draw_state_groups[draw_state_group_count++] =
      (struct tu_draw_state_group) {
         .id = TU_DRAW_STATE_VS_PARAMS,
         .enable_mask = ENABLE_ALL,
         .ib = vs_params,
      };

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_group_count);
   for (uint32_t i = 0; i < draw_state_group_count; i++) {
      const struct tu_draw_state_group *group = &draw_state_groups[i];
      debug_assert((group->enable_mask & ~ENABLE_ALL) == 0);
      uint32_t cp_set_draw_state =
         CP_SET_DRAW_STATE__0_COUNT(group->ib.size / 4) |
         group->enable_mask |
         CP_SET_DRAW_STATE__0_GROUP_ID(group->id);
      uint64_t iova = 0;
      if (group->ib.size) {
         iova = group->ib.bo->iova + group->ib.offset;
      } else {
         cp_set_draw_state |= CP_SET_DRAW_STATE__0_DISABLE;
      }

      tu_cs_emit(cs, cp_set_draw_state);
      tu_cs_emit_qw(cs, iova);
   }

   tu_cs_sanity_check(cs);

   /* track BOs */
   if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
      unsigned i;
      for_each_bit(i, descriptors_state->valid) {
         struct tu_descriptor_set *set = descriptors_state->sets[i];
         for (unsigned j = 0; j < set->layout->buffer_count; ++j) {
            if (set->buffers[j]) {
               tu_bo_list_add(&cmd->bo_list, set->buffers[j],
                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
            }
         }
         if (set->size > 0) {
            tu_bo_list_add(&cmd->bo_list, &set->pool->bo,
                           MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
         }
      }
   }
   if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS) {
      for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
         const struct tu_buffer *buf = cmd->state.streamout_buf.buffers[i];
         if (buf) {
            tu_bo_list_add(&cmd->bo_list, buf->bo,
                           MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
         }
      }
   }

   /* There are too many graphics dirty bits to list here, so just list the
    * bits to preserve instead. The only things not emitted here are
    * compute-related state.
    */
   cmd->state.dirty &= TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS;

   /* Fragment shader state overwrites compute shader state, so flag the
    * compute pipeline for re-emit.
    */
   cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_PIPELINE;
   return VK_SUCCESS;
}

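/* Emit an indirect draw: CP_DRAW_INDX_INDIRECT for indexed draws (the
 * packet needs the index buffer base and a max index count) and
 * CP_DRAW_INDIRECT otherwise; either way the parameters come from the
 * indirect buffer.
 */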
static void
tu6_emit_draw_indirect(struct tu_cmd_buffer *cmd,
                       struct tu_cs *cs,
                       const struct tu_draw_info *draw)
{
   const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;
   bool has_gs = cmd->state.pipeline->active_stages &
                 VK_SHADER_STAGE_GEOMETRY_BIT;

   tu_cs_emit_regs(cs,
                   A6XX_VFD_INDEX_OFFSET(draw->vertex_offset),
                   A6XX_VFD_INSTANCE_START_OFFSET(draw->first_instance));

   if (draw->indexed) {
      const enum a4xx_index_size index_size =
         tu6_index_size(cmd->state.index_type);
      const uint32_t index_bytes =
         (cmd->state.index_type == VK_INDEX_TYPE_UINT32) ? 4 : 2;
      const struct tu_buffer *index_buf = cmd->state.index_buffer;
      unsigned max_indices =
         (index_buf->size - cmd->state.index_offset) / index_bytes;

      const uint32_t cp_draw_indx =
         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
         CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
         COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;

      tu_cs_emit_pkt7(cs, CP_DRAW_INDX_INDIRECT, 6);
      tu_cs_emit(cs, cp_draw_indx);
      tu_cs_emit_qw(cs, index_buf->bo->iova + cmd->state.index_offset);
      tu_cs_emit(cs, A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(max_indices));
      tu_cs_emit_qw(cs, draw->indirect->bo->iova + draw->indirect_offset);
   } else {
      const uint32_t cp_draw_indx =
         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
         COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;

      tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT, 3);
      tu_cs_emit(cs, cp_draw_indx);
      tu_cs_emit_qw(cs, draw->indirect->bo->iova + draw->indirect_offset);
   }

   tu_bo_list_add(&cmd->bo_list, draw->indirect->bo, MSM_SUBMIT_BO_READ);
}

static void
tu6_emit_draw_direct(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     const struct tu_draw_info *draw)
{
   const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;
   bool has_gs = cmd->state.pipeline->active_stages &
                 VK_SHADER_STAGE_GEOMETRY_BIT;

   tu_cs_emit_regs(cs,
                   A6XX_VFD_INDEX_OFFSET(draw->vertex_offset),
                   A6XX_VFD_INSTANCE_START_OFFSET(draw->first_instance));

   /* TODO hw binning */
   if (draw->indexed) {
      const enum a4xx_index_size index_size =
         tu6_index_size(cmd->state.index_type);
      const uint32_t index_bytes =
         (cmd->state.index_type == VK_INDEX_TYPE_UINT32) ? 4 : 2;
      const struct tu_buffer *buf = cmd->state.index_buffer;
      const VkDeviceSize offset = buf->bo_offset + cmd->state.index_offset +
                                  index_bytes * draw->first_index;
      const uint32_t size = index_bytes * draw->count;

      const uint32_t cp_draw_indx =
         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
         CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
         COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;

      tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 7);
      tu_cs_emit(cs, cp_draw_indx);
      tu_cs_emit(cs, draw->instance_count);
      tu_cs_emit(cs, draw->count);
      tu_cs_emit(cs, 0x0); /* XXX */
      tu_cs_emit_qw(cs, buf->bo->iova + offset);
      tu_cs_emit(cs, size);
   } else {
      const uint32_t cp_draw_indx =
         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
         COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;

      tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
      tu_cs_emit(cs, cp_draw_indx);
      tu_cs_emit(cs, draw->instance_count);
      tu_cs_emit(cs, draw->count);
   }
}

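/* Common draw path: bind all dirty state, emit the draw packet, and flush
 * stream-output counters for any enabled SO buffers.
 */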
static void
tu_draw(struct tu_cmd_buffer *cmd, const struct tu_draw_info *draw)
{
   struct tu_cs *cs = &cmd->draw_cs;
   VkResult result;

   result = tu6_bind_draw_states(cmd, cs, draw);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   if (draw->indirect)
      tu6_emit_draw_indirect(cmd, cs, draw);
   else
      tu6_emit_draw_direct(cmd, cs, draw);

   if (cmd->state.streamout_enabled) {
      for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
         if (cmd->state.streamout_enabled & (1 << i))
            tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i, false);
      }
   }

   cmd->wait_for_idle = true;

   tu_cs_sanity_check(cs);
}

void
tu_CmdDraw(VkCommandBuffer commandBuffer,
           uint32_t vertexCount,
           uint32_t instanceCount,
           uint32_t firstVertex,
           uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.count = vertexCount;
   info.instance_count = instanceCount;
   info.first_instance = firstInstance;
   info.vertex_offset = firstVertex;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
                  uint32_t indexCount,
                  uint32_t instanceCount,
                  uint32_t firstIndex,
                  int32_t vertexOffset,
                  uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = indexCount;
   info.instance_count = instanceCount;
   info.first_index = firstIndex;
   info.vertex_offset = vertexOffset;
   info.first_instance = firstInstance;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
                   VkBuffer _buffer,
                   VkDeviceSize offset,
                   uint32_t drawCount,
                   uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
                          VkBuffer _buffer,
                          VkDeviceSize offset,
                          uint32_t drawCount,
                          uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}

void tu_CmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer,
                                    uint32_t instanceCount,
                                    uint32_t firstInstance,
                                    VkBuffer _counterBuffer,
                                    VkDeviceSize counterBufferOffset,
                                    uint32_t counterOffset,
                                    uint32_t vertexStride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _counterBuffer);

   struct tu_draw_info info = {};

   info.instance_count = instanceCount;
   info.first_instance = firstInstance;
   info.streamout_buffer = buffer;
   info.streamout_buffer_offset = counterBufferOffset;
   info.stride = vertexStride;

   tu_draw(cmd_buffer, &info);
}

struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];

   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};

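/* Upload the compute driver params (workgroup counts and local size) as
 * stage constants, but only when the shader actually reads them, i.e. when
 * they fall inside its constlen.
 */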
static void
tu_emit_compute_driver_params(struct tu_cs *cs, struct tu_pipeline *pipeline,
                              const struct tu_dispatch_info *info)
{
   gl_shader_stage type = MESA_SHADER_COMPUTE;
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   const struct ir3_const_state *const_state = &link->const_state;
   uint32_t offset = const_state->offsets.driver_param;

   if (link->constlen <= offset)
      return;

   if (!info->indirect) {
      uint32_t driver_params[IR3_DP_CS_COUNT] = {
         [IR3_DP_NUM_WORK_GROUPS_X] = info->blocks[0],
         [IR3_DP_NUM_WORK_GROUPS_Y] = info->blocks[1],
         [IR3_DP_NUM_WORK_GROUPS_Z] = info->blocks[2],
         [IR3_DP_LOCAL_GROUP_SIZE_X] = pipeline->compute.local_size[0],
         [IR3_DP_LOCAL_GROUP_SIZE_Y] = pipeline->compute.local_size[1],
         [IR3_DP_LOCAL_GROUP_SIZE_Z] = pipeline->compute.local_size[2],
      };

      uint32_t num_consts = MIN2(const_state->num_driver_params,
                                 (link->constlen - offset) * 4);
      /* push constants */
      tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_consts);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                 CP_LOAD_STATE6_0_NUM_UNIT(num_consts / 4));
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, 0);
      uint32_t i;
      for (i = 0; i < num_consts; i++)
         tu_cs_emit(cs, driver_params[i]);
   } else {
      tu_finishme("Indirect driver params");
   }
}

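/* Common dispatch path: re-emit compute pipeline/descriptor state as
 * needed, program the HLSQ_CS_NDRANGE kernel dimensions, and launch with
 * CP_EXEC_CS or CP_EXEC_CS_INDIRECT.
 */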
static void
tu_dispatch(struct tu_cmd_buffer *cmd,
            const struct tu_dispatch_info *info)
{
   struct tu_cs *cs = &cmd->cs;
   struct tu_pipeline *pipeline = cmd->state.compute_pipeline;
   struct tu_descriptor_state *descriptors_state =
      &cmd->descriptors[VK_PIPELINE_BIND_POINT_COMPUTE];
   VkResult result;

   if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_PIPELINE)
      tu_cs_emit_ib(cs, &pipeline->program.state_ib);

   struct tu_cs_entry ib;

   ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE);
   if (ib.size)
      tu_cs_emit_ib(cs, &ib);

   tu_emit_compute_driver_params(cs, pipeline, info);

   if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS) {
      result = tu6_emit_descriptor_sets(cmd, pipeline,
                                        VK_PIPELINE_BIND_POINT_COMPUTE, &ib,
                                        false);
      if (result != VK_SUCCESS) {
         cmd->record_result = result;
         return;
      }

      /* track BOs */
      unsigned i;
      for_each_bit(i, descriptors_state->valid) {
         struct tu_descriptor_set *set = descriptors_state->sets[i];
         for (unsigned j = 0; j < set->layout->buffer_count; ++j) {
            if (set->buffers[j]) {
               tu_bo_list_add(&cmd->bo_list, set->buffers[j],
                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
            }
         }
         if (set->size > 0) {
            tu_bo_list_add(&cmd->bo_list, &set->pool->bo,
                           MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
         }
      }
   }

   if (ib.size)
      tu_cs_emit_ib(cs, &ib);

   if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS)
      tu_cs_emit_ib(cs, &pipeline->load_state.state_ib);

   cmd->state.dirty &=
      ~(TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS | TU_CMD_DIRTY_COMPUTE_PIPELINE);

   /* Compute shader state overwrites fragment shader state, so we flag the
    * graphics pipeline for re-emit.
    */
   cmd->state.dirty |= TU_CMD_DIRTY_PIPELINE;

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));

   const uint32_t *local_size = pipeline->compute.local_size;
   const uint32_t *num_groups = info->blocks;
   tu_cs_emit_regs(cs,
                   A6XX_HLSQ_CS_NDRANGE_0(.kerneldim = 3,
                                          .localsizex = local_size[0] - 1,
                                          .localsizey = local_size[1] - 1,
                                          .localsizez = local_size[2] - 1),
                   A6XX_HLSQ_CS_NDRANGE_1(.globalsize_x = local_size[0] * num_groups[0]),
                   A6XX_HLSQ_CS_NDRANGE_2(.globaloff_x = 0),
                   A6XX_HLSQ_CS_NDRANGE_3(.globalsize_y = local_size[1] * num_groups[1]),
                   A6XX_HLSQ_CS_NDRANGE_4(.globaloff_y = 0),
                   A6XX_HLSQ_CS_NDRANGE_5(.globalsize_z = local_size[2] * num_groups[2]),
                   A6XX_HLSQ_CS_NDRANGE_6(.globaloff_z = 0));

   tu_cs_emit_regs(cs,
                   A6XX_HLSQ_CS_KERNEL_GROUP_X(1),
                   A6XX_HLSQ_CS_KERNEL_GROUP_Y(1),
                   A6XX_HLSQ_CS_KERNEL_GROUP_Z(1));

   if (info->indirect) {
      uint64_t iova = tu_buffer_iova(info->indirect) + info->indirect_offset;

      tu_bo_list_add(&cmd->bo_list, info->indirect->bo,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);

      tu_cs_emit_pkt7(cs, CP_EXEC_CS_INDIRECT, 4);
      tu_cs_emit(cs, 0x00000000);
      tu_cs_emit_qw(cs, iova);
      tu_cs_emit(cs,
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(local_size[0] - 1) |
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(local_size[1] - 1) |
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(local_size[2] - 1));
   } else {
      tu_cs_emit_pkt7(cs, CP_EXEC_CS, 4);
      tu_cs_emit(cs, 0x00000000);
      tu_cs_emit(cs, CP_EXEC_CS_1_NGROUPS_X(info->blocks[0]));
      tu_cs_emit(cs, CP_EXEC_CS_2_NGROUPS_Y(info->blocks[1]));
      tu_cs_emit(cs, CP_EXEC_CS_3_NGROUPS_Z(info->blocks[2]));
   }

   tu6_emit_cache_flush(cmd, cs);
}

void
tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
                   uint32_t base_x,
                   uint32_t base_y,
                   uint32_t base_z,
                   uint32_t x,
                   uint32_t y,
                   uint32_t z)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_dispatch_info info = {};

   info.blocks[0] = x;
   info.blocks[1] = y;
   info.blocks[2] = z;

   info.offsets[0] = base_x;
   info.offsets[1] = base_y;
   info.offsets[2] = base_z;
   tu_dispatch(cmd_buffer, &info);
}

void
tu_CmdDispatch(VkCommandBuffer commandBuffer,
               uint32_t x,
               uint32_t y,
               uint32_t z)
{
   tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
}

void
tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
                       VkBuffer _buffer,
                       VkDeviceSize offset)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_dispatch_info info = {};

   info.indirect = buffer;
   info.indirect_offset = offset;

   tu_dispatch(cmd_buffer, &info);
}

void
tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   tu_cs_end(&cmd_buffer->draw_cs);
   tu_cs_end(&cmd_buffer->draw_epilogue_cs);

   if (use_sysmem_rendering(cmd_buffer))
      tu_cmd_render_sysmem(cmd_buffer);
   else
      tu_cmd_render_tiles(cmd_buffer);

   /* discard draw_cs and draw_epilogue_cs entries now that the tiles are
      rendered */
   tu_cs_discard_entries(&cmd_buffer->draw_cs);
   tu_cs_begin(&cmd_buffer->draw_cs);
   tu_cs_discard_entries(&cmd_buffer->draw_epilogue_cs);
   tu_cs_begin(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->state.pass = NULL;
   cmd_buffer->state.subpass = NULL;
   cmd_buffer->state.framebuffer = NULL;
}

void
tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
                     const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdEndRenderPass(commandBuffer);
}

struct tu_barrier_info
{
   uint32_t eventCount;
   const VkEvent *pEvents;
   VkPipelineStageFlags srcStageMask;
};

static void
tu_barrier(struct tu_cmd_buffer *cmd,
           uint32_t memoryBarrierCount,
           const VkMemoryBarrier *pMemoryBarriers,
           uint32_t bufferMemoryBarrierCount,
           const VkBufferMemoryBarrier *pBufferMemoryBarriers,
           uint32_t imageMemoryBarrierCount,
           const VkImageMemoryBarrier *pImageMemoryBarriers,
           const struct tu_barrier_info *info)
{
   /* renderpass case is only for subpass self-dependencies
    * which means syncing the render output with texture cache
    * note: only the CACHE_INVALIDATE is needed in GMEM mode
    * and in sysmem mode we might not need either color/depth flush
    */
   if (cmd->state.pass) {
      tu6_emit_event_write(cmd, &cmd->draw_cs, PC_CCU_FLUSH_COLOR_TS, true);
      tu6_emit_event_write(cmd, &cmd->draw_cs, PC_CCU_FLUSH_DEPTH_TS, true);
      tu6_emit_event_write(cmd, &cmd->draw_cs, CACHE_INVALIDATE, false);
      return;
   }
}

void
tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
                      VkPipelineStageFlags srcStageMask,
                      VkPipelineStageFlags dstStageMask,
                      VkDependencyFlags dependencyFlags,
                      uint32_t memoryBarrierCount,
                      const VkMemoryBarrier *pMemoryBarriers,
                      uint32_t bufferMemoryBarrierCount,
                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                      uint32_t imageMemoryBarrierCount,
                      const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = 0;
   info.pEvents = NULL;
   info.srcStageMask = srcStageMask;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}

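/* VkEvent is backed by a BO: setting/resetting is a CP_MEM_WRITE of 1/0,
 * and waiting is a CP_WAIT_REG_MEM poll on that address.
 */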
static void
write_event(struct tu_cmd_buffer *cmd, struct tu_event *event, unsigned value)
{
   struct tu_cs *cs = &cmd->cs;

   tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_WRITE);

   /* TODO: any flush required before/after ? */

   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
   tu_cs_emit_qw(cs, event->bo.iova); /* ADDR_LO/HI */
   tu_cs_emit(cs, value);
}

void
tu_CmdSetEvent(VkCommandBuffer commandBuffer,
               VkEvent _event,
               VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd, event, 1);
}

void
tu_CmdResetEvent(VkCommandBuffer commandBuffer,
                 VkEvent _event,
                 VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd, event, 0);
}

void
tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
                 uint32_t eventCount,
                 const VkEvent *pEvents,
                 VkPipelineStageFlags srcStageMask,
                 VkPipelineStageFlags dstStageMask,
                 uint32_t memoryBarrierCount,
                 const VkMemoryBarrier *pMemoryBarriers,
                 uint32_t bufferMemoryBarrierCount,
                 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                 uint32_t imageMemoryBarrierCount,
                 const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *cs = &cmd->cs;

   /* TODO: any flush required before/after? (CP_WAIT_FOR_ME?) */

   for (uint32_t i = 0; i < eventCount; i++) {
      TU_FROM_HANDLE(tu_event, event, pEvents[i]);

      tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_READ);

      tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
      tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                     CP_WAIT_REG_MEM_0_POLL_MEMORY);
      tu_cs_emit_qw(cs, event->bo.iova); /* POLL_ADDR_LO/HI */
      tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(1));
      tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0u));
      tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(20));
   }
}

void
tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)