/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "tu_private.h"

#include "registers/adreno_pm4.xml.h"
#include "registers/adreno_common.xml.h"

#include "vk_format.h"

#define OVERFLOW_FLAG_REG REG_A6XX_CP_SCRATCH_REG(0)
void
tu_bo_list_init(struct tu_bo_list *list)
{
   list->count = list->capacity = 0;
   list->bo_infos = NULL;
}

void
tu_bo_list_destroy(struct tu_bo_list *list)
{
   free(list->bo_infos);
}

void
tu_bo_list_reset(struct tu_bo_list *list)
{
   list->count = 0;
}
/*
 * \a flags consists of MSM_SUBMIT_BO_FLAGS.
 */
static uint32_t
tu_bo_list_add_info(struct tu_bo_list *list,
                    const struct drm_msm_gem_submit_bo *bo_info)
{
   assert(bo_info->handle != 0);

   for (uint32_t i = 0; i < list->count; ++i) {
      if (list->bo_infos[i].handle == bo_info->handle) {
         assert(list->bo_infos[i].presumed == bo_info->presumed);
         list->bo_infos[i].flags |= bo_info->flags;
         return i;
      }
   }

   /* grow list->bo_infos if needed */
   if (list->count == list->capacity) {
      uint32_t new_capacity = MAX2(2 * list->count, 16);
      struct drm_msm_gem_submit_bo *new_bo_infos = realloc(
         list->bo_infos, new_capacity * sizeof(struct drm_msm_gem_submit_bo));
      if (!new_bo_infos)
         return TU_BO_LIST_FAILED;
      list->bo_infos = new_bo_infos;
      list->capacity = new_capacity;
   }

   list->bo_infos[list->count] = *bo_info;
   return list->count++;
}
uint32_t
tu_bo_list_add(struct tu_bo_list *list,
               const struct tu_bo *bo,
               uint32_t flags)
{
   return tu_bo_list_add_info(list, &(struct drm_msm_gem_submit_bo) {
      .flags = flags,
      .handle = bo->gem_handle,
      .presumed = bo->iova,
   });
}
VkResult
tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other)
{
   for (uint32_t i = 0; i < other->count; i++) {
      if (tu_bo_list_add_info(list, other->bo_infos + i) == TU_BO_LIST_FAILED)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   return VK_SUCCESS;
}
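
/*
 * Usage sketch (hypothetical call sequence, not part of the driver): each
 * BO referenced by the command stream gets one entry in the list, and a
 * duplicate add just ORs in the new MSM_SUBMIT_BO_* flags:
 *
 *    uint32_t idx = tu_bo_list_add(&cmd->bo_list, &bo, MSM_SUBMIT_BO_READ);
 *    tu_bo_list_add(&cmd->bo_list, &bo, MSM_SUBMIT_BO_WRITE);
 *    // the single entry for `bo` now carries READ | WRITE
 */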
static void
tu_tiling_config_update_tile_layout(struct tu_tiling_config *tiling,
                                    const struct tu_device *dev,
                                    uint32_t pixels)
{
   const uint32_t tile_align_w = dev->physical_device->tile_align_w;
   const uint32_t tile_align_h = dev->physical_device->tile_align_h;
   const uint32_t max_tile_width = 1024; /* A6xx */

   tiling->tile0.offset = (VkOffset2D) {
      .x = tiling->render_area.offset.x & ~(tile_align_w - 1),
      .y = tiling->render_area.offset.y & ~(tile_align_h - 1),
   };

   const uint32_t ra_width =
      tiling->render_area.extent.width +
      (tiling->render_area.offset.x - tiling->tile0.offset.x);
   const uint32_t ra_height =
      tiling->render_area.extent.height +
      (tiling->render_area.offset.y - tiling->tile0.offset.y);

   /* start from 1 tile */
   tiling->tile_count = (VkExtent2D) {
      .width = 1,
      .height = 1,
   };
   tiling->tile0.extent = (VkExtent2D) {
      .width = align(ra_width, tile_align_w),
      .height = align(ra_height, tile_align_h),
   };

   /* do not exceed max tile width */
   while (tiling->tile0.extent.width > max_tile_width) {
      tiling->tile_count.width++;
      tiling->tile0.extent.width =
         align(ra_width / tiling->tile_count.width, tile_align_w);
   }

   /* do not exceed gmem size */
   while (tiling->tile0.extent.width * tiling->tile0.extent.height > pixels) {
      if (tiling->tile0.extent.width > MAX2(tile_align_w, tiling->tile0.extent.height)) {
         tiling->tile_count.width++;
         tiling->tile0.extent.width =
            align(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
      } else {
         /* if this assert fails then layout is impossible.. */
         assert(tiling->tile0.extent.height > tile_align_h);
         tiling->tile_count.height++;
         tiling->tile0.extent.height =
            align(DIV_ROUND_UP(ra_height, tiling->tile_count.height), tile_align_h);
      }
   }
}
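
/*
 * Worked example of the loop above (assumed values, for illustration
 * only): with tile_align_w/h = 32, max_tile_width = 1024, a 1920x1080
 * render area at offset (0, 0) and a hypothetical gmem budget of
 * pixels = 589824:
 *   - start: tile0 = align(1920, 32) x align(1080, 32) = 1920x1088, 1x1 tiles
 *   - width cap: 1920 > 1024, so split into 2 columns -> tile0.width = 960
 *   - gmem cap: 960 * 1088 > 589824, so keep splitting the larger
 *     dimension until tile0.width * tile0.height fits the budget.
 */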
static void
tu_tiling_config_update_pipe_layout(struct tu_tiling_config *tiling,
                                    const struct tu_device *dev)
{
   const uint32_t max_pipe_count = 32; /* A6xx */

   /* start from 1 tile per pipe */
   tiling->pipe0 = (VkExtent2D) {
      .width = 1,
      .height = 1,
   };
   tiling->pipe_count = tiling->tile_count;

   /* do not exceed max pipe count vertically */
   while (tiling->pipe_count.height > max_pipe_count) {
      tiling->pipe0.height += 2;
      tiling->pipe_count.height =
         (tiling->tile_count.height + tiling->pipe0.height - 1) /
         tiling->pipe0.height;
   }

   /* do not exceed max pipe count */
   while (tiling->pipe_count.width * tiling->pipe_count.height >
          max_pipe_count) {
      tiling->pipe0.width += 1;
      tiling->pipe_count.width =
         (tiling->tile_count.width + tiling->pipe0.width - 1) /
         tiling->pipe0.width;
   }
}
static void
tu_tiling_config_update_pipes(struct tu_tiling_config *tiling,
                              const struct tu_device *dev)
{
   const uint32_t max_pipe_count = 32; /* A6xx */
   const uint32_t used_pipe_count =
      tiling->pipe_count.width * tiling->pipe_count.height;
   const VkExtent2D last_pipe = {
      .width = (tiling->tile_count.width - 1) % tiling->pipe0.width + 1,
      .height = (tiling->tile_count.height - 1) % tiling->pipe0.height + 1,
   };

   assert(used_pipe_count <= max_pipe_count);
   assert(max_pipe_count <= ARRAY_SIZE(tiling->pipe_config));

   for (uint32_t y = 0; y < tiling->pipe_count.height; y++) {
      for (uint32_t x = 0; x < tiling->pipe_count.width; x++) {
         const uint32_t pipe_x = tiling->pipe0.width * x;
         const uint32_t pipe_y = tiling->pipe0.height * y;
         const uint32_t pipe_w = (x == tiling->pipe_count.width - 1)
                                    ? last_pipe.width
                                    : tiling->pipe0.width;
         const uint32_t pipe_h = (y == tiling->pipe_count.height - 1)
                                    ? last_pipe.height
                                    : tiling->pipe0.height;
         const uint32_t n = tiling->pipe_count.width * y + x;

         tiling->pipe_config[n] = A6XX_VSC_PIPE_CONFIG_REG_X(pipe_x) |
                                  A6XX_VSC_PIPE_CONFIG_REG_Y(pipe_y) |
                                  A6XX_VSC_PIPE_CONFIG_REG_W(pipe_w) |
                                  A6XX_VSC_PIPE_CONFIG_REG_H(pipe_h);
         tiling->pipe_sizes[n] = CP_SET_BIN_DATA5_0_VSC_SIZE(pipe_w * pipe_h);
      }
   }

   memset(tiling->pipe_config + used_pipe_count, 0,
          sizeof(uint32_t) * (max_pipe_count - used_pipe_count));
}
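
/*
 * Each VSC_PIPE_CONFIG_REG packs the pipe's position and size in tiles,
 * so e.g. a pipe covering tiles (2,0)..(3,1) would be (hypothetical
 * values):
 *
 *    A6XX_VSC_PIPE_CONFIG_REG_X(2) | A6XX_VSC_PIPE_CONFIG_REG_Y(0) |
 *    A6XX_VSC_PIPE_CONFIG_REG_W(2) | A6XX_VSC_PIPE_CONFIG_REG_H(2)
 *
 * Unused entries are zeroed above so stale configs never reach the GPU.
 */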
static void
tu_tiling_config_get_tile(const struct tu_tiling_config *tiling,
                          const struct tu_device *dev,
                          uint32_t tx,
                          uint32_t ty,
                          struct tu_tile *tile)
{
   /* find the pipe and the slot for tile (tx, ty) */
   const uint32_t px = tx / tiling->pipe0.width;
   const uint32_t py = ty / tiling->pipe0.height;
   const uint32_t sx = tx - tiling->pipe0.width * px;
   const uint32_t sy = ty - tiling->pipe0.height * py;

   assert(tx < tiling->tile_count.width && ty < tiling->tile_count.height);
   assert(px < tiling->pipe_count.width && py < tiling->pipe_count.height);
   assert(sx < tiling->pipe0.width && sy < tiling->pipe0.height);

   /* convert to 1D indices */
   tile->pipe = tiling->pipe_count.width * py + px;
   tile->slot = tiling->pipe0.width * sy + sx;

   /* get the blit area for the tile */
   tile->begin = (VkOffset2D) {
      .x = tiling->tile0.offset.x + tiling->tile0.extent.width * tx,
      .y = tiling->tile0.offset.y + tiling->tile0.extent.height * ty,
   };
   tile->end.x =
      (tx == tiling->tile_count.width - 1)
         ? tiling->render_area.offset.x + tiling->render_area.extent.width
         : tile->begin.x + tiling->tile0.extent.width;
   tile->end.y =
      (ty == tiling->tile_count.height - 1)
         ? tiling->render_area.offset.y + tiling->render_area.extent.height
         : tile->begin.y + tiling->tile0.extent.height;
}
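
/*
 * Index math sketch (illustrative numbers): with pipe0 = 2x2 tiles and
 * pipe_count = 3x2, tile (tx=3, ty=1) maps to pipe (px=1, py=0), i.e.
 * tile->pipe = 3*0 + 1 = 1, and to slot (sx=1, sy=1), i.e.
 * tile->slot = 2*1 + 1 = 3.
 */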
enum a3xx_msaa_samples
tu_msaa_samples(uint32_t samples)
{
   switch (samples) {
   case 1:
      return MSAA_ONE;
   case 2:
      return MSAA_TWO;
   case 4:
      return MSAA_FOUR;
   case 8:
      return MSAA_EIGHT;
   default:
      assert(!"invalid sample count");
      return MSAA_ONE;
   }
}

static enum a4xx_index_size
tu6_index_size(VkIndexType type)
{
   switch (type) {
   case VK_INDEX_TYPE_UINT16:
      return INDEX4_SIZE_16_BIT;
   case VK_INDEX_TYPE_UINT32:
      return INDEX4_SIZE_32_BIT;
   default:
      unreachable("invalid VkIndexType");
      return INDEX4_SIZE_8_BIT;
   }
}

static void
tu6_emit_marker(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu_cs_emit_write_reg(cs, cmd->marker_reg, ++cmd->marker_seqno);
}
static unsigned
tu6_emit_event_write(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     enum vgt_event_type event,
                     bool need_seqno)
{
   unsigned seqno = 0;

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
   tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
   if (need_seqno) {
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
      seqno = ++cmd->scratch_seqno;
      tu_cs_emit(cs, seqno);
   }

   return seqno;
}

static void
tu6_emit_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu6_emit_event_write(cmd, cs, 0x31, false);
}

static void
tu6_emit_lrz_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu6_emit_event_write(cmd, cs, LRZ_FLUSH, false);
}

static void
tu6_emit_wfi(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   if (cmd->wait_for_idle) {
      tu_cs_emit_wfi(cs);
      cmd->wait_for_idle = false;
   }
}
#define tu_image_view_ubwc_pitches(iview)                                \
        .pitch = tu_image_ubwc_pitch(iview->image, iview->base_mip),     \
        .array_pitch = tu_image_ubwc_size(iview->image, iview->base_mip) >> 2
static void
tu6_emit_zs(struct tu_cmd_buffer *cmd,
            const struct tu_subpass *subpass,
            struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   const uint32_t a = subpass->depth_stencil_attachment.attachment;
   if (a == VK_ATTACHMENT_UNUSED) {
      tu_cs_emit_regs(cs,
                      A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE),
                      A6XX_RB_DEPTH_BUFFER_PITCH(0),
                      A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(0),
                      A6XX_RB_DEPTH_BUFFER_BASE(0),
                      A6XX_RB_DEPTH_BUFFER_BASE_GMEM(0));

      tu_cs_emit_regs(cs,
                      A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE));

      tu_cs_emit_regs(cs,
                      A6XX_GRAS_LRZ_BUFFER_BASE(0),
                      A6XX_GRAS_LRZ_BUFFER_PITCH(0),
                      A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));

      tu_cs_emit_regs(cs, A6XX_RB_STENCIL_INFO(0));

      return;
   }

   const struct tu_image_view *iview = fb->attachments[a].attachment;
   enum a6xx_depth_format fmt = tu6_pipe2depth(iview->vk_format);

   tu_cs_emit_regs(cs,
                   A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt),
                   A6XX_RB_DEPTH_BUFFER_PITCH(tu_image_stride(iview->image, iview->base_mip)),
                   A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(iview->image->layout.layer_size),
                   A6XX_RB_DEPTH_BUFFER_BASE(tu_image_view_base_ref(iview)),
                   A6XX_RB_DEPTH_BUFFER_BASE_GMEM(cmd->state.pass->attachments[a].gmem_offset));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = fmt));

   tu_cs_emit_regs(cs,
                   A6XX_RB_DEPTH_FLAG_BUFFER_BASE(tu_image_view_ubwc_base_ref(iview)),
                   A6XX_RB_DEPTH_FLAG_BUFFER_PITCH(tu_image_view_ubwc_pitches(iview)));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_BUFFER_BASE(0),
                   A6XX_GRAS_LRZ_BUFFER_PITCH(0),
                   A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));

   tu_cs_emit_regs(cs,
                   A6XX_RB_STENCIL_INFO(0));
}
static void
tu6_emit_mrt(struct tu_cmd_buffer *cmd,
             const struct tu_subpass *subpass,
             struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   unsigned char mrt_comp[MAX_RTS] = { 0 };
   unsigned srgb_cntl = 0;

   for (uint32_t i = 0; i < subpass->color_count; ++i) {
      uint32_t a = subpass->color_attachments[i].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      const struct tu_image_view *iview = fb->attachments[a].attachment;
      const enum a6xx_tile_mode tile_mode =
         tu6_get_image_tile_mode(iview->image, iview->base_mip);

      mrt_comp[i] = 0xf;

      if (vk_format_is_srgb(iview->vk_format))
         srgb_cntl |= (1 << i);

      const struct tu_native_format *format =
         tu6_get_native_format(iview->vk_format);
      assert(format && format->rb >= 0);

      tu_cs_emit_regs(cs,
                      A6XX_RB_MRT_BUF_INFO(i,
                                           .color_tile_mode = tile_mode,
                                           .color_format = format->rb,
                                           .color_swap = format->swap),
                      A6XX_RB_MRT_PITCH(i, tu_image_stride(iview->image, iview->base_mip)),
                      A6XX_RB_MRT_ARRAY_PITCH(i, iview->image->layout.layer_size),
                      A6XX_RB_MRT_BASE(i, tu_image_view_base_ref(iview)),
                      A6XX_RB_MRT_BASE_GMEM(i, cmd->state.pass->attachments[a].gmem_offset));

      tu_cs_emit_regs(cs,
                      A6XX_SP_FS_MRT_REG(i,
                                         .color_format = format->rb,
                                         .color_sint = vk_format_is_sint(iview->vk_format),
                                         .color_uint = vk_format_is_uint(iview->vk_format)));

      tu_cs_emit_regs(cs,
                      A6XX_RB_MRT_FLAG_BUFFER_ADDR(i, tu_image_view_ubwc_base_ref(iview)),
                      A6XX_RB_MRT_FLAG_BUFFER_PITCH(i, tu_image_view_ubwc_pitches(iview)));
   }

   tu_cs_emit_regs(cs,
                   A6XX_RB_SRGB_CNTL(srgb_cntl));

   tu_cs_emit_regs(cs,
                   A6XX_SP_SRGB_CNTL(srgb_cntl));

   tu_cs_emit_regs(cs,
                   A6XX_RB_RENDER_COMPONENTS(
                      .rt0 = mrt_comp[0],
                      .rt1 = mrt_comp[1],
                      .rt2 = mrt_comp[2],
                      .rt3 = mrt_comp[3],
                      .rt4 = mrt_comp[4],
                      .rt5 = mrt_comp[5],
                      .rt6 = mrt_comp[6],
                      .rt7 = mrt_comp[7]));

   tu_cs_emit_regs(cs,
                   A6XX_SP_FS_RENDER_COMPONENTS(
                      .rt0 = mrt_comp[0],
                      .rt1 = mrt_comp[1],
                      .rt2 = mrt_comp[2],
                      .rt3 = mrt_comp[3],
                      .rt4 = mrt_comp[4],
                      .rt5 = mrt_comp[5],
                      .rt6 = mrt_comp[6],
                      .rt7 = mrt_comp[7]));
}
static void
tu6_emit_msaa(struct tu_cmd_buffer *cmd,
              const struct tu_subpass *subpass,
              struct tu_cs *cs)
{
   const enum a3xx_msaa_samples samples = tu_msaa_samples(subpass->samples);
   bool msaa_disable = samples == MSAA_ONE;

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_RAS_MSAA_CNTL(samples),
                   A6XX_SP_TP_DEST_MSAA_CNTL(.samples = samples,
                                             .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_RAS_MSAA_CNTL(samples),
                   A6XX_GRAS_DEST_MSAA_CNTL(.samples = samples,
                                            .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_RB_RAS_MSAA_CNTL(samples),
                   A6XX_RB_DEST_MSAA_CNTL(.samples = samples,
                                          .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_RB_MSAA_CNTL(samples));
}

static void
tu6_emit_bin_size(struct tu_cmd_buffer *cmd, struct tu_cs *cs, uint32_t flags)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
   const uint32_t bin_w = tiling->tile0.extent.width;
   const uint32_t bin_h = tiling->tile0.extent.height;

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_BIN_CONTROL(.binw = bin_w,
                                         .binh = bin_h,
                                         .dword = flags));

   tu_cs_emit_regs(cs,
                   A6XX_RB_BIN_CONTROL(.binw = bin_w,
                                       .binh = bin_h,
                                       .dword = flags));

   /* no flag for RB_BIN_CONTROL2... */
   tu_cs_emit_regs(cs,
                   A6XX_RB_BIN_CONTROL2(.binw = bin_w,
                                        .binh = bin_h));
}

static void
tu6_emit_render_cntl(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     bool binning)
{
   uint32_t cntl = 0;
   cntl |= A6XX_RB_RENDER_CNTL_UNK4;
   if (binning)
      cntl |= A6XX_RB_RENDER_CNTL_BINNING;

   tu_cs_emit_pkt7(cs, CP_REG_WRITE, 3);
   tu_cs_emit(cs, CP_REG_WRITE_0_TRACKER(TRACK_RENDER_CNTL));
   tu_cs_emit(cs, REG_A6XX_RB_RENDER_CNTL);
   tu_cs_emit(cs, cntl);
}
static void
tu6_emit_blit_scissor(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool align)
{
   const VkRect2D *render_area = &cmd->state.tiling_config.render_area;
   uint32_t x1 = render_area->offset.x;
   uint32_t y1 = render_area->offset.y;
   uint32_t x2 = x1 + render_area->extent.width - 1;
   uint32_t y2 = y1 + render_area->extent.height - 1;

   /* TODO: alignment requirement seems to be less than tile_align_w/h */
   if (align) {
      x1 = x1 & ~cmd->device->physical_device->tile_align_w;
      y1 = y1 & ~cmd->device->physical_device->tile_align_h;
      x2 = ALIGN_POT(x2 + 1, cmd->device->physical_device->tile_align_w) - 1;
      y2 = ALIGN_POT(y2 + 1, cmd->device->physical_device->tile_align_h) - 1;
   }

   tu_cs_emit_regs(cs,
                   A6XX_RB_BLIT_SCISSOR_TL(.x = x1, .y = y1),
                   A6XX_RB_BLIT_SCISSOR_BR(.x = x2, .y = y2));
}
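
/*
 * Alignment sketch (assumed tile_align_w/h = 32, illustrative only):
 * a render area at offset (40, 8) with extent 100x100 gives an unaligned
 * scissor of (40,8)..(139,107); with align=true the bottom-right corner
 * is rounded up via ALIGN_POT, so x2 becomes ALIGN_POT(140, 32) - 1 = 159.
 */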
static void
tu6_emit_blit_info(struct tu_cmd_buffer *cmd,
                   struct tu_cs *cs,
                   const struct tu_image_view *iview,
                   uint32_t gmem_offset,
                   bool resolve)
{
   tu_cs_emit_regs(cs,
                   A6XX_RB_BLIT_INFO(.unk0 = !resolve, .gmem = !resolve));

   const struct tu_native_format *format =
      tu6_get_native_format(iview->vk_format);
   assert(format && format->rb >= 0);

   enum a6xx_tile_mode tile_mode =
      tu6_get_image_tile_mode(iview->image, iview->base_mip);
   tu_cs_emit_regs(cs,
                   A6XX_RB_BLIT_DST_INFO(
                      .tile_mode = tile_mode,
                      .samples = tu_msaa_samples(iview->image->samples),
                      .color_format = format->rb,
                      .color_swap = format->swap,
                      .flags = iview->image->layout.ubwc_layer_size != 0),
                   A6XX_RB_BLIT_DST(tu_image_view_base_ref(iview)),
                   A6XX_RB_BLIT_DST_PITCH(tu_image_stride(iview->image, iview->base_mip)),
                   A6XX_RB_BLIT_DST_ARRAY_PITCH(iview->image->layout.layer_size));

   if (iview->image->layout.ubwc_layer_size) {
      tu_cs_emit_regs(cs,
                      A6XX_RB_BLIT_FLAG_DST(tu_image_view_ubwc_base_ref(iview)),
                      A6XX_RB_BLIT_FLAG_DST_PITCH(tu_image_view_ubwc_pitches(iview)));
   }

   tu_cs_emit_regs(cs,
                   A6XX_RB_BLIT_BASE_GMEM(gmem_offset));
}

static void
tu6_emit_blit(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu6_emit_marker(cmd, cs);
   tu6_emit_event_write(cmd, cs, BLIT, false);
   tu6_emit_marker(cmd, cs);
}

static void
tu6_emit_window_scissor(struct tu_cmd_buffer *cmd,
                        struct tu_cs *cs,
                        uint32_t x1,
                        uint32_t y1,
                        uint32_t x2,
                        uint32_t y2)
{
   tu_cs_emit_regs(cs,
                   A6XX_GRAS_SC_WINDOW_SCISSOR_TL(.x = x1, .y = y1),
                   A6XX_GRAS_SC_WINDOW_SCISSOR_BR(.x = x2, .y = y2));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_RESOLVE_CNTL_1(.x = x1, .y = y1),
                   A6XX_GRAS_RESOLVE_CNTL_2(.x = x2, .y = y2));
}

static void
tu6_emit_window_offset(struct tu_cmd_buffer *cmd,
                       struct tu_cs *cs,
                       uint32_t x1,
                       uint32_t y1)
{
   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET2(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_SP_WINDOW_OFFSET(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_WINDOW_OFFSET(.x = x1, .y = y1));
}

static bool
use_hw_binning(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_NOBIN))
      return false;

   return (tiling->tile_count.width * tiling->tile_count.height) > 2;
}
static void
tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     const struct tu_tile *tile)
{
   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(0x7));

   tu6_emit_marker(cmd, cs);
   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM) | 0x10);
   tu6_emit_marker(cmd, cs);

   const uint32_t x1 = tile->begin.x;
   const uint32_t y1 = tile->begin.y;
   const uint32_t x2 = tile->end.x - 1;
   const uint32_t y2 = tile->end.y - 1;
   tu6_emit_window_scissor(cmd, cs, x1, y1, x2, y2);
   tu6_emit_window_offset(cmd, cs, x1, y1);

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_OVERRIDE(.so_disable = true));

   if (use_hw_binning(cmd)) {
      tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
      tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
                     A6XX_CP_REG_TEST_0_BIT(0) |
                     A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
      tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
      tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(11));

      /* if (no overflow) */ {
         tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5, 7);
         tu_cs_emit(cs, cmd->state.tiling_config.pipe_sizes[tile->pipe] |
                        CP_SET_BIN_DATA5_0_VSC_N(tile->slot));
         tu_cs_emit_qw(cs, cmd->vsc_data.iova + tile->pipe * cmd->vsc_data_pitch);
         tu_cs_emit_qw(cs, cmd->vsc_data.iova + (tile->pipe * 4) + (32 * cmd->vsc_data_pitch));
         tu_cs_emit_qw(cs, cmd->vsc_data2.iova + (tile->pipe * cmd->vsc_data2_pitch));

         tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
         tu_cs_emit(cs, 0x0);

         /* use a NOP packet to skip over the 'else' side: */
         tu_cs_emit_pkt7(cs, CP_NOP, 2);
         tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
         tu_cs_emit(cs, 0x1);
      }

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_regs(cs,
                      A6XX_RB_UNKNOWN_8804(0));

      tu_cs_emit_regs(cs,
                      A6XX_SP_TP_UNKNOWN_B304(0));

      tu_cs_emit_regs(cs,
                      A6XX_GRAS_UNKNOWN_80A4(0));
   } else {
      tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
      tu_cs_emit(cs, 0x1);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);
   }
}
static void
tu6_emit_load_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs, uint32_t a)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   const struct tu_image_view *iview = fb->attachments[a].attachment;
   const struct tu_render_pass_attachment *attachment =
      &cmd->state.pass->attachments[a];

   if (attachment->gmem_offset < 0)
      return;

   const uint32_t x1 = tiling->render_area.offset.x;
   const uint32_t y1 = tiling->render_area.offset.y;
   const uint32_t x2 = x1 + tiling->render_area.extent.width;
   const uint32_t y2 = y1 + tiling->render_area.extent.height;
   const uint32_t tile_x2 =
      tiling->tile0.offset.x + tiling->tile0.extent.width * tiling->tile_count.width;
   const uint32_t tile_y2 =
      tiling->tile0.offset.y + tiling->tile0.extent.height * tiling->tile_count.height;
   bool need_load =
      x1 != tiling->tile0.offset.x || x2 != MIN2(fb->width, tile_x2) ||
      y1 != tiling->tile0.offset.y || y2 != MIN2(fb->height, tile_y2);

   if (need_load)
      tu_finishme("improve handling of unaligned render area");

   if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_LOAD)
      need_load = true;

   if (vk_format_has_stencil(iview->vk_format) &&
       attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_LOAD)
      need_load = true;

   if (need_load) {
      tu6_emit_blit_info(cmd, cs, iview, attachment->gmem_offset, false);
      tu6_emit_blit(cmd, cs);
   }
}

static void
tu6_emit_clear_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
                          uint32_t a,
                          const VkRenderPassBeginInfo *info)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   const struct tu_image_view *iview = fb->attachments[a].attachment;
   const struct tu_render_pass_attachment *attachment =
      &cmd->state.pass->attachments[a];
   unsigned clear_mask = 0;

   /* note: this means it isn't used by any subpass and shouldn't be cleared anyway */
   if (attachment->gmem_offset < 0)
      return;

   if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
      clear_mask = 0xf;

   if (vk_format_has_stencil(iview->vk_format)) {
      clear_mask &= 0x1;
      if (attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
         clear_mask |= 0x2;
   }
   if (!clear_mask)
      return;

   const struct tu_native_format *format =
      tu6_get_native_format(iview->vk_format);
   assert(format && format->rb >= 0);

   tu_cs_emit_regs(cs,
                   A6XX_RB_BLIT_DST_INFO(.color_format = format->rb));

   tu_cs_emit_regs(cs,
                   A6XX_RB_BLIT_INFO(.gmem = true,
                                     .clear_mask = clear_mask));

   tu_cs_emit_regs(cs,
                   A6XX_RB_BLIT_BASE_GMEM(attachment->gmem_offset));

   tu_cs_emit_regs(cs,
                   A6XX_RB_UNKNOWN_88D0(0));

   uint32_t clear_vals[4] = { 0 };
   tu_pack_clear_value(&info->pClearValues[a], iview->vk_format, clear_vals);

   tu_cs_emit_regs(cs,
                   A6XX_RB_BLIT_CLEAR_COLOR_DW0(clear_vals[0]),
                   A6XX_RB_BLIT_CLEAR_COLOR_DW1(clear_vals[1]),
                   A6XX_RB_BLIT_CLEAR_COLOR_DW2(clear_vals[2]),
                   A6XX_RB_BLIT_CLEAR_COLOR_DW3(clear_vals[3]));

   tu6_emit_blit(cmd, cs);
}
static void
tu6_emit_store_attachment(struct tu_cmd_buffer *cmd,
                          struct tu_cs *cs,
                          uint32_t a,
                          uint32_t gmem_a)
{
   if (cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE)
      return;

   tu6_emit_blit_info(cmd, cs,
                      cmd->state.framebuffer->attachments[a].attachment,
                      cmd->state.pass->attachments[gmem_a].gmem_offset, true);
   tu6_emit_blit(cmd, cs);
}

static void
tu6_emit_tile_store(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_render_pass *pass = cmd->state.pass;
   const struct tu_subpass *subpass = &pass->subpasses[pass->subpass_count - 1];

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu6_emit_marker(cmd, cs);
   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE) | 0x10);
   tu6_emit_marker(cmd, cs);

   tu6_emit_blit_scissor(cmd, cs, true);

   for (uint32_t a = 0; a < pass->attachment_count; ++a) {
      if (pass->attachments[a].gmem_offset >= 0)
         tu6_emit_store_attachment(cmd, cs, a, a);
   }

   if (subpass->resolve_attachments) {
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a != VK_ATTACHMENT_UNUSED)
            tu6_emit_store_attachment(cmd, cs, a,
                                      subpass->color_attachments[i].attachment);
      }
   }
}

static void
tu6_emit_restart_index(struct tu_cs *cs, uint32_t restart_index)
{
   tu_cs_emit_regs(cs,
                   A6XX_PC_RESTART_INDEX(restart_index));
}
static void
tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_physical_device *phys_dev = cmd->device->physical_device;

   VkResult result = tu_cs_reserve_space(cmd->device, cs, 256);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_cache_flush(cmd, cs);

   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 0xfffff);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_CCU_CNTL, phys_dev->magic.RB_CCU_CNTL_gmem);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE0F, 0x3f);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B605, 0x44);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B600, 0x100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE00, 0x80);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE01, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9600, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8600, 0x880);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE04, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE03, 0x00000410);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_IBO_COUNT, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B182, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BB11, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AB00, 0x5);
   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x1f);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_SRGB_CNTL, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8101, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_SAMPLE_CNTL, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8110, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL0, 0x401);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL1, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_SAMPLE_CNTL, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8818, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8819, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881A, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881C, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881D, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881E, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_88F0, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9101, 0xffff00);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9107, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9236, 1);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_SO_OVERRIDE,
                        A6XX_VPC_SO_OVERRIDE_SO_DISABLE);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9801, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9806, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9980, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9B07, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A81B, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B183, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8099, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_809B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A0, 2);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80AF, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9211, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9602, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9981, 0x3);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9E72, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9108, 0x3);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B304, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B309, 0x000000a2);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8804, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A4, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A5, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A6, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8805, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8806, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8878, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8879, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);

   tu6_emit_marker(cmd, cs);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_MODE_CNTL, 0x00000000);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A008, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x0000001f);

   /* we don't use this yet.. probably best to disable.. */
   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_BUFFER_BASE(0),
                   A6XX_VPC_SO_BUFFER_SIZE(0));

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_FLUSH_BASE(0));

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_BUF_CNTL(0));

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_BUFFER_OFFSET(0, 0));

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_BUFFER_BASE(1, 0),
                   A6XX_VPC_SO_BUFFER_SIZE(1, 0));

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_BUFFER_OFFSET(1, 0),
                   A6XX_VPC_SO_FLUSH_BASE(1, 0),
                   A6XX_VPC_SO_BUFFER_BASE(2, 0),
                   A6XX_VPC_SO_BUFFER_SIZE(2, 0));

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_BUFFER_OFFSET(2, 0),
                   A6XX_VPC_SO_FLUSH_BASE(2, 0),
                   A6XX_VPC_SO_BUFFER_BASE(3, 0),
                   A6XX_VPC_SO_BUFFER_SIZE(3, 0));

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_BUFFER_OFFSET(3, 0),
                   A6XX_VPC_SO_FLUSH_BASE(3, 0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_HS_CTRL_REG0(0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_GS_CTRL_REG0(0));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_CNTL(0));

   tu_cs_emit_regs(cs,
                   A6XX_RB_LRZ_CNTL(0));

   tu_cs_sanity_check(cs);
}
static void
tu6_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   unsigned seqno;

   seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_AND_INV_EVENT, true);

   tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
   tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                  CP_WAIT_REG_MEM_0_POLL_MEMORY);
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
   tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(seqno));
   tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
   tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));

   seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);

   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_GTE, 4);
   tu_cs_emit(cs, CP_WAIT_MEM_GTE_0_RESERVED(0));
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
   tu_cs_emit(cs, CP_WAIT_MEM_GTE_3_REF(seqno));
}
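
/*
 * The pattern above is a CP-side fence: each event write bumps
 * scratch_seqno and writes it to scratch_bo, then CP_WAIT_REG_MEM /
 * CP_WAIT_MEM_GTE stall the CP until the memory location reaches the
 * expected seqno, so the flush has landed before the CP continues.
 */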
static void
update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tu_cs_emit_regs(cs,
                   A6XX_VSC_BIN_SIZE(.width = tiling->tile0.extent.width,
                                     .height = tiling->tile0.extent.height),
                   A6XX_VSC_SIZE_ADDRESS(.bo = &cmd->vsc_data,
                                         .bo_offset = 32 * cmd->vsc_data_pitch));

   tu_cs_emit_regs(cs,
                   A6XX_VSC_BIN_COUNT(.nx = tiling->tile_count.width,
                                      .ny = tiling->tile_count.height));

   tu_cs_emit_pkt4(cs, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
   for (unsigned i = 0; i < 32; i++)
      tu_cs_emit(cs, tiling->pipe_config[i]);

   tu_cs_emit_regs(cs,
                   A6XX_VSC_PIPE_DATA2_ADDRESS(.bo = &cmd->vsc_data2),
                   A6XX_VSC_PIPE_DATA2_PITCH(cmd->vsc_data2_pitch),
                   A6XX_VSC_PIPE_DATA2_ARRAY_PITCH(cmd->vsc_data2.size));

   tu_cs_emit_regs(cs,
                   A6XX_VSC_PIPE_DATA_ADDRESS(.bo = &cmd->vsc_data),
                   A6XX_VSC_PIPE_DATA_PITCH(cmd->vsc_data_pitch),
                   A6XX_VSC_PIPE_DATA_ARRAY_PITCH(cmd->vsc_data.size));
}

static void
emit_vsc_overflow_test(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
   const uint32_t used_pipe_count =
      tiling->pipe_count.width * tiling->pipe_count.height;

   /* Clear vsc_scratch: */
   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
   tu_cs_emit(cs, 0x0);

   /* Check for overflow, write vsc_scratch if detected: */
   for (int i = 0; i < used_pipe_count; i++) {
      tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
      tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
                     CP_COND_WRITE5_0_WRITE_MEMORY);
      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE_REG(i)));
      tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data_pitch));
      tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(1 + cmd->vsc_data_pitch));

      tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
      tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
                     CP_COND_WRITE5_0_WRITE_MEMORY);
      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE2_REG(i)));
      tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data2_pitch));
      tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(3 + cmd->vsc_data2_pitch));
   }

   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);

   tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

   tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
   tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(OVERFLOW_FLAG_REG) |
                  CP_MEM_TO_REG_0_CNT(1 - 1));
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);

   /*
    * This is a bit awkward, we really want a way to invert the
    * CP_REG_TEST/CP_COND_REG_EXEC logic, so that we can conditionally
    * execute cmds to use hwbinning when a bit is *not* set.  This
    * dance is to invert OVERFLOW_FLAG_REG.
    *
    * A CP_NOP packet is used to skip executing the 'else' clause.
    */

   /* b0 will be set if VSC_DATA or VSC_DATA2 overflow: */
   tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
   tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
                  A6XX_CP_REG_TEST_0_BIT(0) |
                  A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

   tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
   tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
   tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(7));

   /* if (b0 set) */ {
      /*
       * On overflow, mirror the value to control->vsc_overflow
       * which CPU is checking to detect overflow (see
       * check_vsc_overflow())
       */
      tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
      tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(OVERFLOW_FLAG_REG) |
                     CP_REG_TO_MEM_0_CNT(0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_OVERFLOW);

      tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_pkt7(cs, CP_NOP, 2); /* skip 'else' when 'if' is taken */
   }

   /* else */ {
      tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
      tu_cs_emit(cs, 0x1);
   }
}
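
/*
 * Net effect of the dance above, in C-like pseudocode (illustrative):
 *
 *    if (vsc_scratch != 0) {            // an overflow was recorded
 *       control->vsc_overflow = flag;   // let the CPU see it
 *       OVERFLOW_FLAG_REG = 0;          // binning results are invalid
 *    } else {
 *       OVERFLOW_FLAG_REG = 1;          // per-tile CP_REG_TEST passes
 *    }
 */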
static void
tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_physical_device *phys_dev = cmd->device->physical_device;
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   uint32_t x1 = tiling->tile0.offset.x;
   uint32_t y1 = tiling->tile0.offset.y;
   uint32_t x2 = tiling->render_area.offset.x + tiling->render_area.extent.width - 1;
   uint32_t y2 = tiling->render_area.offset.y + tiling->render_area.extent.height - 1;

   tu6_emit_window_scissor(cmd, cs, x1, y1, x2, y2);

   tu6_emit_marker(cmd, cs);
   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
   tu6_emit_marker(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_regs(cs,
                   A6XX_VFD_MODE_CNTL(.binning_pass = true));

   update_vsc_pipe(cmd, cs);

   tu_cs_emit_regs(cs,
                   A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));

   tu_cs_emit_regs(cs,
                   A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
   tu_cs_emit(cs, UNK_2C);

   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET(.x = 0, .y = 0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_WINDOW_OFFSET(.x = 0, .y = 0));

   /* emit IB to binning drawcmds: */
   tu_cs_emit_call(cs, &cmd->draw_cs);

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
   tu_cs_emit(cs, UNK_2D);

   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
   tu6_cache_flush(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

   emit_vsc_overflow_test(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_emit_regs(cs,
                   A6XX_RB_CCU_CNTL(.unknown = 0x7c400004));

   cmd->wait_for_idle = false;
}

static void
tu6_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_physical_device *phys_dev = cmd->device->physical_device;

   VkResult result = tu_cs_reserve_space(cmd->device, cs, 1024);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_lrz_flush(cmd, cs);

   tu6_emit_cache_flush(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   /* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
   tu6_emit_wfi(cmd, cs);
   tu_cs_emit_regs(cs,
                   A6XX_RB_CCU_CNTL(0x7c400004));

   if (use_hw_binning(cmd)) {
      tu6_emit_bin_size(cmd, cs, A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);

      tu6_emit_render_cntl(cmd, cs, true);

      tu6_emit_binning_pass(cmd, cs);

      tu6_emit_bin_size(cmd, cs, A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);

      tu_cs_emit_regs(cs,
                      A6XX_VFD_MODE_CNTL(0));

      tu_cs_emit_regs(cs, A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));

      tu_cs_emit_regs(cs, A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));

      tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
      tu_cs_emit(cs, 0x1);
   } else {
      tu6_emit_bin_size(cmd, cs, 0x6000000);
   }

   tu6_emit_render_cntl(cmd, cs, false);

   tu_cs_sanity_check(cs);
}
static void
tu6_render_tile(struct tu_cmd_buffer *cmd,
                struct tu_cs *cs,
                const struct tu_tile *tile)
{
   const uint32_t render_tile_space = 256 + tu_cs_get_call_size(&cmd->draw_cs);
   VkResult result = tu_cs_reserve_space(cmd->device, cs, render_tile_space);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_tile_select(cmd, cs, tile);
   tu_cs_emit_ib(cs, &cmd->state.tile_load_ib);

   tu_cs_emit_call(cs, &cmd->draw_cs);
   cmd->wait_for_idle = true;

   if (use_hw_binning(cmd)) {
      tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
      tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
                     A6XX_CP_REG_TEST_0_BIT(0) |
                     A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
      tu_cs_emit(cs, 0x10000000);
      tu_cs_emit(cs, 2); /* conditionally execute next 2 dwords */

      /* if (no overflow) */ {
         tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
         tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(0x5) | 0x10);
      }
   }

   tu_cs_emit_ib(cs, &cmd->state.tile_store_ib);

   tu_cs_sanity_check(cs);
}

static void
tu6_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const uint32_t space = 16 + tu_cs_get_call_size(&cmd->draw_epilogue_cs);
   VkResult result = tu_cs_reserve_space(cmd->device, cs, space);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_CNTL(0));

   tu6_emit_lrz_flush(cmd, cs);

   tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);

   tu_cs_sanity_check(cs);
}

static void
tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tu6_render_begin(cmd, &cmd->cs);

   for (uint32_t y = 0; y < tiling->tile_count.height; y++) {
      for (uint32_t x = 0; x < tiling->tile_count.width; x++) {
         struct tu_tile tile;
         tu_tiling_config_get_tile(tiling, cmd->device, x, y, &tile);
         tu6_render_tile(cmd, &cmd->cs, &tile);
      }
   }

   tu6_render_end(cmd, &cmd->cs);
}
static void
tu_cmd_prepare_tile_load_ib(struct tu_cmd_buffer *cmd,
                            const VkRenderPassBeginInfo *info)
{
   const uint32_t tile_load_space =
      2 * 3 /* blit_scissor */ +
      (20 /* load */ + 19 /* clear */) * cmd->state.pass->attachment_count +
      2 /* cache invalidate */;

   struct tu_cs sub_cs;

   VkResult result = tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs,
                                            tile_load_space, &sub_cs);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_blit_scissor(cmd, &sub_cs, true);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu6_emit_load_attachment(cmd, &sub_cs, i);

   tu6_emit_blit_scissor(cmd, &sub_cs, false);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu6_emit_clear_attachment(cmd, &sub_cs, i, info);

   /* invalidate because reading input attachments will cache GMEM and
    * the cache isn't updated when GMEM is written
    * TODO: is there a no-cache bit for textures?
    */
   if (cmd->state.subpass->input_count)
      tu6_emit_event_write(cmd, &sub_cs, CACHE_INVALIDATE, false);

   cmd->state.tile_load_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
}

static void
tu_cmd_prepare_tile_store_ib(struct tu_cmd_buffer *cmd)
{
   const uint32_t tile_store_space = 32 + 23 * cmd->state.pass->attachment_count;
   struct tu_cs sub_cs;

   VkResult result = tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs,
                                            tile_store_space, &sub_cs);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   /* emit to tile-store sub_cs */
   tu6_emit_tile_store(cmd, &sub_cs);

   cmd->state.tile_store_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
}

static void
tu_cmd_update_tiling_config(struct tu_cmd_buffer *cmd,
                            const VkRect2D *render_area)
{
   const struct tu_device *dev = cmd->device;
   struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tiling->render_area = *render_area;

   tu_tiling_config_update_tile_layout(tiling, dev, cmd->state.pass->gmem_pixels);
   tu_tiling_config_update_pipe_layout(tiling, dev);
   tu_tiling_config_update_pipes(tiling, dev);
}
const struct tu_dynamic_state default_dynamic_state = {
   .viewport =
      {
         .count = 0,
      },
   .scissor =
      {
         .count = 0,
      },
   .line_width = 1.0f,
   .depth_bias =
      {
         .bias = 0.0f,
         .clamp = 0.0f,
         .slope = 0.0f,
      },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds =
      {
         .min = 0.0f,
         .max = 1.0f,
      },
   .stencil_compare_mask =
      {
         .front = ~0u,
         .back = ~0u,
      },
   .stencil_write_mask =
      {
         .front = ~0u,
         .back = ~0u,
      },
   .stencil_reference =
      {
         .front = 0u,
         .back = 0u,
      },
};
static void UNUSED /* FINISHME */
tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer,
                      const struct tu_dynamic_state *src)
{
   struct tu_dynamic_state *dest = &cmd_buffer->state.dynamic;
   uint32_t copy_mask = src->mask;
   uint32_t dest_mask = 0;

   tu_use_args(cmd_buffer); /* FINISHME */

   /* Make sure to copy the number of viewports/scissors because they can
    * only be specified at pipeline creation time.
    */
   dest->viewport.count = src->viewport.count;
   dest->scissor.count = src->scissor.count;
   dest->discard_rectangle.count = src->discard_rectangle.count;

   if (copy_mask & TU_DYNAMIC_VIEWPORT) {
      if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
                 src->viewport.count * sizeof(VkViewport))) {
         typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                      src->viewport.count);
         dest_mask |= TU_DYNAMIC_VIEWPORT;
      }
   }

   if (copy_mask & TU_DYNAMIC_SCISSOR) {
      if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
                 src->scissor.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                      src->scissor.count);
         dest_mask |= TU_DYNAMIC_SCISSOR;
      }
   }

   if (copy_mask & TU_DYNAMIC_LINE_WIDTH) {
      if (dest->line_width != src->line_width) {
         dest->line_width = src->line_width;
         dest_mask |= TU_DYNAMIC_LINE_WIDTH;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) {
      if (memcmp(&dest->depth_bias, &src->depth_bias,
                 sizeof(src->depth_bias))) {
         dest->depth_bias = src->depth_bias;
         dest_mask |= TU_DYNAMIC_DEPTH_BIAS;
      }
   }

   if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) {
      if (memcmp(&dest->blend_constants, &src->blend_constants,
                 sizeof(src->blend_constants))) {
         typed_memcpy(dest->blend_constants, src->blend_constants, 4);
         dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) {
      if (memcmp(&dest->depth_bounds, &src->depth_bounds,
                 sizeof(src->depth_bounds))) {
         dest->depth_bounds = src->depth_bounds;
         dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) {
      if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
                 sizeof(src->stencil_compare_mask))) {
         dest->stencil_compare_mask = src->stencil_compare_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) {
      if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
                 sizeof(src->stencil_write_mask))) {
         dest->stencil_write_mask = src->stencil_write_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) {
      if (memcmp(&dest->stencil_reference, &src->stencil_reference,
                 sizeof(src->stencil_reference))) {
         dest->stencil_reference = src->stencil_reference;
         dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE;
      }
   }

   if (copy_mask & TU_DYNAMIC_DISCARD_RECTANGLE) {
      if (memcmp(&dest->discard_rectangle.rectangles,
                 &src->discard_rectangle.rectangles,
                 src->discard_rectangle.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->discard_rectangle.rectangles,
                      src->discard_rectangle.rectangles,
                      src->discard_rectangle.count);
         dest_mask |= TU_DYNAMIC_DISCARD_RECTANGLE;
      }
   }
}
static VkResult
tu_create_cmd_buffer(struct tu_device *device,
                     struct tu_cmd_pool *pool,
                     VkCommandBufferLevel level,
                     VkCommandBuffer *pCommandBuffer)
{
   struct tu_cmd_buffer *cmd_buffer;
   cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
      cmd_buffer->queue_family_index = pool->queue_family_index;
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
      cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
   }

   tu_bo_list_init(&cmd_buffer->bo_list);
   tu_cs_init(&cmd_buffer->cs, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->draw_cs, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->draw_epilogue_cs, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->sub_cs, TU_CS_MODE_SUB_STREAM, 2048);

   *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);

   list_inithead(&cmd_buffer->upload.list);

   cmd_buffer->marker_reg = REG_A6XX_CP_SCRATCH_REG(
      cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY ? 7 : 6);

   VkResult result = tu_bo_init_new(device, &cmd_buffer->scratch_bo, 0x1000);
   if (result != VK_SUCCESS)
      goto fail_scratch_bo;

   /* TODO: resize on overflow */
   cmd_buffer->vsc_data_pitch = device->vsc_data_pitch;
   cmd_buffer->vsc_data2_pitch = device->vsc_data2_pitch;
   cmd_buffer->vsc_data = device->vsc_data;
   cmd_buffer->vsc_data2 = device->vsc_data2;

   return VK_SUCCESS;

fail_scratch_bo:
   list_del(&cmd_buffer->pool_link);
   return result;
}
static void
tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
{
   tu_bo_finish(cmd_buffer->device, &cmd_buffer->scratch_bo);

   list_del(&cmd_buffer->pool_link);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
      free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);

   tu_cs_finish(cmd_buffer->device, &cmd_buffer->cs);
   tu_cs_finish(cmd_buffer->device, &cmd_buffer->draw_cs);
   tu_cs_finish(cmd_buffer->device, &cmd_buffer->draw_epilogue_cs);
   tu_cs_finish(cmd_buffer->device, &cmd_buffer->sub_cs);

   tu_bo_list_destroy(&cmd_buffer->bo_list);
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

static VkResult
tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
{
   cmd_buffer->wait_for_idle = true;

   cmd_buffer->record_result = VK_SUCCESS;

   tu_bo_list_reset(&cmd_buffer->bo_list);
   tu_cs_reset(cmd_buffer->device, &cmd_buffer->cs);
   tu_cs_reset(cmd_buffer->device, &cmd_buffer->draw_cs);
   tu_cs_reset(cmd_buffer->device, &cmd_buffer->draw_epilogue_cs);
   tu_cs_reset(cmd_buffer->device, &cmd_buffer->sub_cs);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
      cmd_buffer->descriptors[i].valid = 0;
      cmd_buffer->descriptors[i].push_dirty = false;
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;

   return cmd_buffer->record_result;
}
VkResult
tu_AllocateCommandBuffers(VkDevice _device,
                          const VkCommandBufferAllocateInfo *pAllocateInfo,
                          VkCommandBuffer *pCommandBuffers)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {

      if (!list_is_empty(&pool->free_cmd_buffers)) {
         struct tu_cmd_buffer *cmd_buffer = list_first_entry(
            &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);

         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

         result = tu_reset_cmd_buffer(cmd_buffer);
         cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
         cmd_buffer->level = pAllocateInfo->level;

         pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
      } else {
         result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                       &pCommandBuffers[i]);
      }
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
                            pCommandBuffers);

      /* From the Vulkan 1.0.66 spec:
       *
       * "vkAllocateCommandBuffers can be used to create multiple
       *  command buffers. If the creation of any of those command
       *  buffers fails, the implementation must destroy all
       *  successfully created command buffer objects from this
       *  command, set all entries of the pCommandBuffers array to
       *  NULL and return the error."
       */
      memset(pCommandBuffers, 0,
             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
   }

   return result;
}

void
tu_FreeCommandBuffers(VkDevice device,
                      VkCommandPool commandPool,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (cmd_buffer->pool) {
         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link,
                      &cmd_buffer->pool->free_cmd_buffers);
      } else
         tu_cmd_buffer_destroy(cmd_buffer);
   }
}

VkResult
tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
                      VkCommandBufferResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   return tu_reset_cmd_buffer(cmd_buffer);
}
VkResult
tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
                      const VkCommandBufferBeginInfo *pBeginInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   VkResult result = VK_SUCCESS;

   if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
      /* If the command buffer has already been reset with
       * vkResetCommandBuffer, no need to do it again.
       */
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
   cmd_buffer->usage_flags = pBeginInfo->flags;

   tu_cs_begin(&cmd_buffer->cs);
   tu_cs_begin(&cmd_buffer->draw_cs);
   tu_cs_begin(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->marker_seqno = 0;
   cmd_buffer->scratch_seqno = 0;

   /* setup initial configuration into command buffer */
   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      switch (cmd_buffer->queue_family_index) {
      case TU_QUEUE_GENERAL:
         tu6_init_hw(cmd_buffer, &cmd_buffer->cs);
         break;
      default:
         break;
      }
   } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
              (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
      assert(pBeginInfo->pInheritanceInfo);
      cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
      cmd_buffer->state.subpass = &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;

   return VK_SUCCESS;
}
void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                        uint32_t firstBinding,
                        uint32_t bindingCount,
                        const VkBuffer *pBuffers,
                        const VkDeviceSize *pOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   assert(firstBinding + bindingCount <= MAX_VBS);

   for (uint32_t i = 0; i < bindingCount; i++) {
      cmd->state.vb.buffers[firstBinding + i] =
         tu_buffer_from_handle(pBuffers[i]);
      cmd->state.vb.offsets[firstBinding + i] = pOffsets[i];
   }

   /* VB states depend on VkPipelineVertexInputStateCreateInfo */
   cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
}

void
tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
                      VkBuffer buffer,
                      VkDeviceSize offset,
                      VkIndexType indexType)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buf, buffer);

   /* initialize/update the restart index */
   if (!cmd->state.index_buffer || cmd->state.index_type != indexType) {
      struct tu_cs *draw_cs = &cmd->draw_cs;
      VkResult result = tu_cs_reserve_space(cmd->device, draw_cs, 2);
      if (result != VK_SUCCESS) {
         cmd->record_result = result;
         return;
      }

      tu6_emit_restart_index(
         draw_cs, indexType == VK_INDEX_TYPE_UINT32 ? 0xffffffff : 0xffff);

      tu_cs_sanity_check(draw_cs);
   }

   if (cmd->state.index_buffer != buf)
      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);

   cmd->state.index_buffer = buf;
   cmd->state.index_offset = offset;
   cmd->state.index_type = indexType;
}

void
tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                         VkPipelineBindPoint pipelineBindPoint,
                         VkPipelineLayout _layout,
                         uint32_t firstSet,
                         uint32_t descriptorSetCount,
                         const VkDescriptorSet *pDescriptorSets,
                         uint32_t dynamicOffsetCount,
                         const uint32_t *pDynamicOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
   unsigned dyn_idx = 0;

   struct tu_descriptor_state *descriptors_state =
      tu_get_descriptors_state(cmd_buffer, pipelineBindPoint);

   for (unsigned i = 0; i < descriptorSetCount; ++i) {
      unsigned idx = i + firstSet;
      TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);

      descriptors_state->sets[idx] = set;
      descriptors_state->valid |= (1u << idx);

      for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
         unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
         assert(dyn_idx < dynamicOffsetCount);

         descriptors_state->dynamic_buffers[idx] =
            set->dynamic_descriptors[j].va + pDynamicOffsets[dyn_idx];
      }
   }

   cmd_buffer->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS;
}

void
tu_CmdPushConstants(VkCommandBuffer commandBuffer,
                    VkPipelineLayout layout,
                    VkShaderStageFlags stageFlags,
                    uint32_t offset,
                    uint32_t size,
                    const void *pValues)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   memcpy((void*) cmd->push_constants + offset, pValues, size);
   cmd->state.dirty |= TU_CMD_DIRTY_PUSH_CONSTANTS;
}
VkResult
tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   if (cmd_buffer->scratch_seqno) {
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
                     MSM_SUBMIT_BO_WRITE);
   }

   if (cmd_buffer->use_vsc_data) {
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data2,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
   }

   for (uint32_t i = 0; i < cmd_buffer->draw_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   for (uint32_t i = 0; i < cmd_buffer->draw_epilogue_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_epilogue_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   for (uint32_t i = 0; i < cmd_buffer->sub_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->sub_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   tu_cs_end(&cmd_buffer->cs);
   tu_cs_end(&cmd_buffer->draw_cs);
   tu_cs_end(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;

   return cmd_buffer->record_result;
}

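/* At end-of-recording every BO the command streams reference must be in
 * bo_list: the kernel's MSM_GEM_SUBMIT ioctl only pins buffers named in
 * that list, so a missing entry would let the GPU touch an unmapped
 * address. The MSM_SUBMIT_BO_DUMP flag on the CS buffers is, as far as one
 * can tell, a debugging aid so GPU crash dumps include their contents.
 */
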
void
tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
                   VkPipelineBindPoint pipelineBindPoint,
                   VkPipeline _pipeline)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd->state.pipeline = pipeline;
      cmd->state.dirty |= TU_CMD_DIRTY_PIPELINE;
      break;
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd->state.compute_pipeline = pipeline;
      cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_PIPELINE;
      break;
   default:
      unreachable("unrecognized pipeline bind point");
      break;
   }

   tu_bo_list_add(&cmd->bo_list, &pipeline->program.binary_bo,
                  MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   for (uint32_t i = 0; i < pipeline->cs.bo_count; i++) {
      tu_bo_list_add(&cmd->bo_list, pipeline->cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }
}

void
tu_CmdSetViewport(VkCommandBuffer commandBuffer,
                  uint32_t firstViewport,
                  uint32_t viewportCount,
                  const VkViewport *pViewports)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *draw_cs = &cmd->draw_cs;

   VkResult result = tu_cs_reserve_space(cmd->device, draw_cs, 12);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   assert(firstViewport == 0 && viewportCount == 1);
   tu6_emit_viewport(draw_cs, pViewports);

   tu_cs_sanity_check(draw_cs);
}

void
tu_CmdSetScissor(VkCommandBuffer commandBuffer,
                 uint32_t firstScissor,
                 uint32_t scissorCount,
                 const VkRect2D *pScissors)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *draw_cs = &cmd->draw_cs;

   VkResult result = tu_cs_reserve_space(cmd->device, draw_cs, 3);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   assert(firstScissor == 0 && scissorCount == 1);
   tu6_emit_scissor(draw_cs, pScissors);

   tu_cs_sanity_check(draw_cs);
}

void
tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   cmd->state.dynamic.line_width = lineWidth;

   /* line width depends on VkPipelineRasterizationStateCreateInfo */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}

void
tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
                   float depthBiasConstantFactor,
                   float depthBiasClamp,
                   float depthBiasSlopeFactor)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *draw_cs = &cmd->draw_cs;

   VkResult result = tu_cs_reserve_space(cmd->device, draw_cs, 4);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_depth_bias(draw_cs, depthBiasConstantFactor, depthBiasClamp,
                       depthBiasSlopeFactor);

   tu_cs_sanity_check(draw_cs);
}

void
tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
                        const float blendConstants[4])
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *draw_cs = &cmd->draw_cs;

   VkResult result = tu_cs_reserve_space(cmd->device, draw_cs, 5);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_blend_constants(draw_cs, blendConstants);

   tu_cs_sanity_check(draw_cs);
}

void
tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
                     float minDepthBounds,
                     float maxDepthBounds)
{
}

void
tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
                            VkStencilFaceFlags faceMask,
                            uint32_t compareMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd->state.dynamic.stencil_compare_mask.front = compareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd->state.dynamic.stencil_compare_mask.back = compareMask;

   /* the front/back compare masks must be updated together */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}

void
tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t writeMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd->state.dynamic.stencil_write_mask.front = writeMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd->state.dynamic.stencil_write_mask.back = writeMask;

   /* the front/back write masks must be updated together */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}

void
tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t reference)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd->state.dynamic.stencil_reference.front = reference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd->state.dynamic.stencil_reference.back = reference;

   /* the front/back references must be updated together */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}

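/* All of the vkCmdSet* entry points above follow one of two patterns:
 * either write the value into cmd->state.dynamic and set a
 * TU_CMD_DIRTY_DYNAMIC_* bit, or (viewport/scissor/depth-bias/blend
 * constants) emit straight into draw_cs. A sketch of the consuming side,
 * paraphrasing tu6_bind_draw_states() further down:
 *
 *    if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE) &&
 *        (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_REFERENCE))
 *       tu6_emit_stencil_reference(cs, front, back);
 */
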
void
tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCmdBuffers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   VkResult result;

   assert(commandBufferCount > 0);

   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);

      result = tu_bo_list_merge(&cmd->bo_list, &secondary->bo_list);
      if (result != VK_SUCCESS) {
         cmd->record_result = result;
         break;
      }

      result = tu_cs_add_entries(&cmd->draw_cs, &secondary->draw_cs);
      if (result != VK_SUCCESS) {
         cmd->record_result = result;
         break;
      }

      result = tu_cs_add_entries(&cmd->draw_epilogue_cs,
                                 &secondary->draw_epilogue_cs);
      if (result != VK_SUCCESS) {
         cmd->record_result = result;
         break;
      }
   }
   cmd->state.dirty = ~0u; /* TODO: set dirty only what needs to be */
}

VkResult
tu_CreateCommandPool(VkDevice _device,
                     const VkCommandPoolCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkCommandPool *pCmdPool)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_cmd_pool *pool;

   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);
   list_inithead(&pool->free_cmd_buffers);

   pool->queue_family_index = pCreateInfo->queueFamilyIndex;

   *pCmdPool = tu_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void
tu_DestroyCommandPool(VkDevice _device,
                      VkCommandPool commandPool,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   vk_free2(&device->alloc, pAllocator, pool);
}

VkResult
tu_ResetCommandPool(VkDevice device,
                    VkCommandPool commandPool,
                    VkCommandPoolResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
   VkResult result;

   list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
                       pool_link)
   {
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}

void
tu_TrimCommandPool(VkDevice device,
                   VkCommandPool commandPool,
                   VkCommandPoolTrimFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }
}

void
tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                      const VkRenderPassBeginInfo *pRenderPassBegin,
                      VkSubpassContents contents)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
   TU_FROM_HANDLE(tu_framebuffer, fb, pRenderPassBegin->framebuffer);

   cmd->state.pass = pass;
   cmd->state.subpass = pass->subpasses;
   cmd->state.framebuffer = fb;

   tu_cmd_update_tiling_config(cmd, &pRenderPassBegin->renderArea);
   tu_cmd_prepare_tile_load_ib(cmd, pRenderPassBegin);
   tu_cmd_prepare_tile_store_ib(cmd);

   VkResult result = tu_cs_reserve_space(cmd->device, &cmd->draw_cs, 1024);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_zs(cmd, cmd->state.subpass, &cmd->draw_cs);
   tu6_emit_mrt(cmd, cmd->state.subpass, &cmd->draw_cs);
   tu6_emit_msaa(cmd, cmd->state.subpass, &cmd->draw_cs);

   /* note: use_hw_binning only checks tiling config */
   if (use_hw_binning(cmd))
      cmd->use_vsc_data = true;

   for (uint32_t i = 0; i < fb->attachment_count; ++i) {
      const struct tu_image_view *iview = fb->attachments[i].attachment;
      tu_bo_list_add(&cmd->bo_list, iview->image->bo,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
   }
}

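/* Beginning a render pass is where tiling is decided: the render area is
 * folded into the tiling config, and the tile load/store IBs that move
 * attachments between system memory and GMEM are pre-built. The 1024-dword
 * reservation for draw_cs is presumably sized for the zs/mrt/msaa setup
 * emitted here; the constant is taken at face value from the call above.
 */
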
void
tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
                       const VkRenderPassBeginInfo *pRenderPassBeginInfo,
                       const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
{
   tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
                         pSubpassBeginInfo->contents);
}

void
tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   const struct tu_render_pass *pass = cmd->state.pass;
   struct tu_cs *cs = &cmd->draw_cs;

   VkResult result = tu_cs_reserve_space(cmd->device, cs, 1024);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   const struct tu_subpass *subpass = cmd->state.subpass++;
   /* TODO:
    * if msaa samples change between subpasses,
    * attachment store is broken for some attachments
    */
   if (subpass->resolve_attachments) {
      tu6_emit_blit_scissor(cmd, cs, true);
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a != VK_ATTACHMENT_UNUSED) {
            tu6_emit_store_attachment(cmd, cs, a,
                                      subpass->color_attachments[i].attachment);
         }
      }
   }

   /* invalidate because reading input attachments will cache GMEM and
    * the cache isn't updated when GMEM is written
    * TODO: is there a no-cache bit for textures?
    */
   if (cmd->state.subpass->input_count)
      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);

   /* emit mrt/zs/msaa state for the subpass that is starting */
   tu6_emit_zs(cmd, cmd->state.subpass, cs);
   tu6_emit_mrt(cmd, cmd->state.subpass, cs);
   tu6_emit_msaa(cmd, cmd->state.subpass, cs);

   /* TODO:
    * since we don't know how to do GMEM->GMEM resolve,
    * resolve attachments are resolved to memory then loaded to GMEM again if needed
    */
   if (subpass->resolve_attachments) {
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         const struct tu_image_view *iview =
            cmd->state.framebuffer->attachments[a].attachment;
         if (a != VK_ATTACHMENT_UNUSED && pass->attachments[a].gmem_offset >= 0) {
            tu_finishme("missing GMEM->GMEM resolve, performance will suffer\n");
            tu6_emit_blit_info(cmd, cs, iview, pass->attachments[a].gmem_offset, false);
            tu6_emit_blit(cmd, cs);
         }
      }
   }
}

void
tu_CmdNextSubpass2(VkCommandBuffer commandBuffer,
                   const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
                   const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}

struct tu_draw_info
{
   /**
    * Number of vertices.
    */
   uint32_t count;

   /**
    * Index of the first vertex.
    */
   int32_t vertex_offset;

   /**
    * First instance id.
    */
   uint32_t first_instance;

   /**
    * Number of instances.
    */
   uint32_t instance_count;

   /**
    * First index (indexed draws only).
    */
   uint32_t first_index;

   /**
    * Whether it's an indexed draw.
    */
   bool indexed;

   /**
    * Indirect draw parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
   uint32_t stride;

   /**
    * Draw count parameters resource.
    */
   struct tu_buffer *count_buffer;
   uint64_t count_buffer_offset;
};

#define ENABLE_ALL (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
#define ENABLE_DRAW (CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)

enum tu_draw_state_group_id
{
   TU_DRAW_STATE_PROGRAM,
   TU_DRAW_STATE_PROGRAM_BINNING,
   TU_DRAW_STATE_VI,
   TU_DRAW_STATE_VI_BINNING,
   TU_DRAW_STATE_VP,
   TU_DRAW_STATE_RAST,
   TU_DRAW_STATE_DS,
   TU_DRAW_STATE_BLEND,
   TU_DRAW_STATE_VS_CONST,
   TU_DRAW_STATE_FS_CONST,
   TU_DRAW_STATE_VS_TEX,
   TU_DRAW_STATE_FS_TEX,
   TU_DRAW_STATE_FS_IBO,
   TU_DRAW_STATE_VS_PARAMS,

   TU_DRAW_STATE_COUNT,
};

struct tu_draw_state_group
{
   enum tu_draw_state_group_id id;
   uint32_t enable_mask;
   struct tu_cs_entry ib;
};

static const struct tu_sampler *
sampler_ptr(struct tu_descriptor_state *descriptors_state,
            const struct tu_descriptor_map *map, unsigned i,
            unsigned array_index)
{
   assert(descriptors_state->valid & (1 << map->set[i]));

   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
   assert(map->binding[i] < set->layout->binding_count);

   const struct tu_descriptor_set_binding_layout *layout =
      &set->layout->binding[map->binding[i]];

   if (layout->immutable_samplers_offset) {
      const struct tu_sampler *immutable_samplers =
         tu_immutable_samplers(set->layout, layout);

      return &immutable_samplers[array_index];
   }

   switch (layout->type) {
   case VK_DESCRIPTOR_TYPE_SAMPLER:
      return (struct tu_sampler *) &set->mapped_ptr[layout->offset / 4];
   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      return (struct tu_sampler *) &set->mapped_ptr[layout->offset / 4 + A6XX_TEX_CONST_DWORDS +
                                                    array_index *
                                                    (A6XX_TEX_CONST_DWORDS +
                                                     sizeof(struct tu_sampler) / 4)];
   default:
      unreachable("unimplemented descriptor type");
      break;
   }
}

static void
write_tex_const(struct tu_cmd_buffer *cmd,
                uint32_t *dst,
                struct tu_descriptor_state *descriptors_state,
                const struct tu_descriptor_map *map,
                unsigned i, unsigned array_index)
{
   assert(descriptors_state->valid & (1 << map->set[i]));

   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
   assert(map->binding[i] < set->layout->binding_count);

   const struct tu_descriptor_set_binding_layout *layout =
      &set->layout->binding[map->binding[i]];

   switch (layout->type) {
   case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
   case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
   case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
      memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
                                   array_index * A6XX_TEX_CONST_DWORDS],
             A6XX_TEX_CONST_DWORDS * 4);
      break;
   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
                                   array_index *
                                   (A6XX_TEX_CONST_DWORDS +
                                    sizeof(struct tu_sampler) / 4)],
             A6XX_TEX_CONST_DWORDS * 4);
      break;
   default:
      unreachable("unimplemented descriptor type");
      break;
   }

   if (layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
      const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
      uint32_t a = cmd->state.subpass->input_attachments[map->value[i] +
                                                         array_index].attachment;
      const struct tu_render_pass_attachment *att = &cmd->state.pass->attachments[a];

      assert(att->gmem_offset >= 0);

      dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
      dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
      dst[2] &= ~(A6XX_TEX_CONST_2_TYPE__MASK | A6XX_TEX_CONST_2_PITCH__MASK);
      dst[2] |=
         A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
         A6XX_TEX_CONST_2_PITCH(tiling->tile0.extent.width * att->cpp);
      dst[3] = 0;
      dst[4] = 0x100000 + att->gmem_offset;
      dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
      for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
         dst[i] = 0;

      if (cmd->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
         tu_finishme("patch input attachment pitch for secondary cmd buffer");
   }
}

static void
write_image_ibo(struct tu_cmd_buffer *cmd,
                uint32_t *dst,
                struct tu_descriptor_state *descriptors_state,
                const struct tu_descriptor_map *map,
                unsigned i, unsigned array_index)
{
   assert(descriptors_state->valid & (1 << map->set[i]));

   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
   assert(map->binding[i] < set->layout->binding_count);

   const struct tu_descriptor_set_binding_layout *layout =
      &set->layout->binding[map->binding[i]];

   assert(layout->type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);

   memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
                                (array_index * 2 + 1) * A6XX_TEX_CONST_DWORDS],
          A6XX_TEX_CONST_DWORDS * 4);
}

static uint64_t
buffer_ptr(struct tu_descriptor_state *descriptors_state,
           const struct tu_descriptor_map *map,
           unsigned i, unsigned array_index)
{
   assert(descriptors_state->valid & (1 << map->set[i]));

   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
   assert(map->binding[i] < set->layout->binding_count);

   const struct tu_descriptor_set_binding_layout *layout =
      &set->layout->binding[map->binding[i]];

   switch (layout->type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return descriptors_state->dynamic_buffers[layout->dynamic_offset_offset +
                                                array_index];
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      return (uint64_t) set->mapped_ptr[layout->offset / 4 + array_index * 2 + 1] << 32 |
             set->mapped_ptr[layout->offset / 4 + array_index * 2];
   default:
      unreachable("unimplemented descriptor type");
      break;
   }
}

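/* Buffer descriptors sit in the set's mapped_ptr as two dwords per array
 * element, low dword first, which is why buffer_ptr() reassembles them as
 * (hi << 32) | lo. Dynamic buffers are the exception: their VAs were
 * already patched with the dynamic offset in tu_CmdBindDescriptorSets()
 * and live in descriptors_state->dynamic_buffers instead.
 */
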
static inline uint32_t
tu6_stage2opcode(gl_shader_stage type)
{
   switch (type) {
   case MESA_SHADER_VERTEX:
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_TESS_EVAL:
   case MESA_SHADER_GEOMETRY:
      return CP_LOAD_STATE6_GEOM;
   case MESA_SHADER_FRAGMENT:
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_KERNEL:
      return CP_LOAD_STATE6_FRAG;
   default:
      unreachable("bad shader type");
   }
}

static inline enum a6xx_state_block
tu6_stage2shadersb(gl_shader_stage type)
{
   switch (type) {
   case MESA_SHADER_VERTEX:
      return SB6_VS_SHADER;
   case MESA_SHADER_FRAGMENT:
      return SB6_FS_SHADER;
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_KERNEL:
      return SB6_CS_SHADER;
   default:
      unreachable("bad shader type");
   }
}

static void
tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
                     struct tu_descriptor_state *descriptors_state,
                     gl_shader_stage type,
                     uint32_t *push_constants)
{
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   const struct ir3_ubo_analysis_state *state = &link->ubo_state;

   for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
      if (state->range[i].start < state->range[i].end) {
         uint32_t size = state->range[i].end - state->range[i].start;
         uint32_t offset = state->range[i].start;

         /* and even if the start of the const buffer is before
          * first_immediate, the end may not be:
          */
         size = MIN2(size, (16 * link->constlen) - state->range[i].offset);

         if (size == 0)
            continue;

         /* things should be aligned to vec4: */
         debug_assert((state->range[i].offset % 16) == 0);
         debug_assert((size % 16) == 0);
         debug_assert((offset % 16) == 0);

         if (i == 0) {
            /* push constants */
            tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (size / 4));
            tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
                       CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                       CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                       CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                       CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
            tu_cs_emit(cs, 0);
            tu_cs_emit(cs, 0);
            for (unsigned i = 0; i < size / 4; i++)
               tu_cs_emit(cs, push_constants[i + offset / 4]);
            continue;
         }

         /* Look through the UBO map to find our UBO index, and get the VA
          * for that UBO.
          */
         uint64_t va = 0;
         uint32_t ubo_idx = i - 1;
         uint32_t ubo_map_base = 0;
         for (int j = 0; j < link->ubo_map.num; j++) {
            if (ubo_idx >= ubo_map_base &&
                ubo_idx < ubo_map_base + link->ubo_map.array_size[j]) {
               va = buffer_ptr(descriptors_state, &link->ubo_map, j,
                               ubo_idx - ubo_map_base);
               break;
            }
            ubo_map_base += link->ubo_map.array_size[j];
         }
         assert(va);

         tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
         tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
                    CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                    CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                    CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                    CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
         tu_cs_emit_qw(cs, va + offset);
      }
   }
}

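/* Range 0 of the UBO analysis state is the push-constant block and is
 * uploaded inline (SS6_DIRECT); higher ranges are UBO contents that ir3
 * decided to promote into the constant file and are sourced from the UBO's
 * GPU VA (SS6_INDIRECT). The MIN2 against 16 * constlen clamps uploads to
 * the shader's allocated constant space, since a range may extend past it.
 */
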
static void
tu6_emit_ubos(struct tu_cs *cs, const struct tu_pipeline *pipeline,
              struct tu_descriptor_state *descriptors_state,
              gl_shader_stage type)
{
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];

   uint32_t num = MIN2(link->ubo_map.num_desc, link->const_state.num_ubos);
   uint32_t anum = align(num, 2);

   if (!num)
      return;

   tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (2 * anum));
   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(link->const_state.offsets.ubo) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
              CP_LOAD_STATE6_0_NUM_UNIT(anum / 2));
   tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));

   unsigned emitted = 0;
   for (unsigned i = 0; emitted < num && i < link->ubo_map.num; i++) {
      for (unsigned j = 0; emitted < num && j < link->ubo_map.array_size[i]; j++) {
         tu_cs_emit_qw(cs, buffer_ptr(descriptors_state, &link->ubo_map, i, j));
         emitted++;
      }
   }

   for (; emitted < anum; emitted++) {
      tu_cs_emit(cs, 0xffffffff);
      tu_cs_emit(cs, 0xffffffff);
   }
}

static struct tu_cs_entry
tu6_emit_consts(struct tu_cmd_buffer *cmd,
                const struct tu_pipeline *pipeline,
                struct tu_descriptor_state *descriptors_state,
                gl_shader_stage type)
{
   struct tu_cs cs;
   tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs, 512, &cs); /* TODO: maximum size? */

   tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);
   tu6_emit_ubos(&cs, pipeline, descriptors_state, type);

   return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
}

static VkResult
tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
                   const struct tu_draw_info *draw,
                   struct tu_cs_entry *entry)
{
   /* TODO: fill out more than just base instance */
   const struct tu_program_descriptor_linkage *link =
      &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
   const struct ir3_const_state *const_state = &link->const_state;
   struct tu_cs cs;

   if (const_state->offsets.driver_param >= link->constlen) {
      *entry = (struct tu_cs_entry) {};
      return VK_SUCCESS;
   }

   VkResult result = tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs, 8, &cs);
   if (result != VK_SUCCESS)
      return result;

   tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(const_state->offsets.driver_param) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
              CP_LOAD_STATE6_0_NUM_UNIT(1));
   tu_cs_emit(&cs, 0);
   tu_cs_emit(&cs, 0);

   STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);

   tu_cs_emit(&cs, 0);
   tu_cs_emit(&cs, 0);
   tu_cs_emit(&cs, draw->first_instance);
   tu_cs_emit(&cs, 0);

   *entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
   return VK_SUCCESS;
}

static VkResult
tu6_emit_textures(struct tu_cmd_buffer *cmd,
                  const struct tu_pipeline *pipeline,
                  struct tu_descriptor_state *descriptors_state,
                  gl_shader_stage type,
                  struct tu_cs_entry *entry,
                  bool *needs_border)
{
   struct tu_device *device = cmd->device;
   struct tu_cs *draw_state = &cmd->sub_cs;
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   VkResult result;

   if (link->texture_map.num_desc == 0 && link->sampler_map.num_desc == 0) {
      *entry = (struct tu_cs_entry) {};
      return VK_SUCCESS;
   }

   /* allocate and fill texture state */
   struct ts_cs_memory tex_const;
   result = tu_cs_alloc(device, draw_state, link->texture_map.num_desc,
                        A6XX_TEX_CONST_DWORDS, &tex_const);
   if (result != VK_SUCCESS)
      return result;

   int tex_index = 0;
   for (unsigned i = 0; i < link->texture_map.num; i++) {
      for (int j = 0; j < link->texture_map.array_size[i]; j++) {
         write_tex_const(cmd,
                         &tex_const.map[A6XX_TEX_CONST_DWORDS * tex_index++],
                         descriptors_state, &link->texture_map, i, j);
      }
   }

   /* allocate and fill sampler state */
   struct ts_cs_memory tex_samp = { 0 };
   if (link->sampler_map.num_desc) {
      result = tu_cs_alloc(device, draw_state, link->sampler_map.num_desc,
                           A6XX_TEX_SAMP_DWORDS, &tex_samp);
      if (result != VK_SUCCESS)
         return result;

      int sampler_index = 0;
      for (unsigned i = 0; i < link->sampler_map.num; i++) {
         for (int j = 0; j < link->sampler_map.array_size[i]; j++) {
            const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
                                                           &link->sampler_map,
                                                           i, j);
            memcpy(&tex_samp.map[A6XX_TEX_SAMP_DWORDS * sampler_index++],
                   sampler->state, sizeof(sampler->state));
            *needs_border |= sampler->needs_border;
         }
      }
   }

   unsigned tex_samp_reg, tex_const_reg, tex_count_reg;
   enum a6xx_state_block sb;

   switch (type) {
   case MESA_SHADER_VERTEX:
      sb = SB6_VS_TEX;
      tex_samp_reg = REG_A6XX_SP_VS_TEX_SAMP_LO;
      tex_const_reg = REG_A6XX_SP_VS_TEX_CONST_LO;
      tex_count_reg = REG_A6XX_SP_VS_TEX_COUNT;
      break;
   case MESA_SHADER_FRAGMENT:
      sb = SB6_FS_TEX;
      tex_samp_reg = REG_A6XX_SP_FS_TEX_SAMP_LO;
      tex_const_reg = REG_A6XX_SP_FS_TEX_CONST_LO;
      tex_count_reg = REG_A6XX_SP_FS_TEX_COUNT;
      break;
   case MESA_SHADER_COMPUTE:
      sb = SB6_CS_TEX;
      tex_samp_reg = REG_A6XX_SP_CS_TEX_SAMP_LO;
      tex_const_reg = REG_A6XX_SP_CS_TEX_CONST_LO;
      tex_count_reg = REG_A6XX_SP_CS_TEX_COUNT;
      break;
   default:
      unreachable("bad state block");
   }

   struct tu_cs cs;
   result = tu_cs_begin_sub_stream(device, draw_state, 16, &cs);
   if (result != VK_SUCCESS)
      return result;

   if (link->sampler_map.num_desc) {
      /* output sampler state: */
      tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
      tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
                 CP_LOAD_STATE6_0_NUM_UNIT(link->sampler_map.num_desc));
      tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */

      tu_cs_emit_pkt4(&cs, tex_samp_reg, 2);
      tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
   }

   /* emit texture state: */
   tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
              CP_LOAD_STATE6_0_NUM_UNIT(link->texture_map.num_desc));
   tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */

   tu_cs_emit_pkt4(&cs, tex_const_reg, 2);
   tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */

   tu_cs_emit_pkt4(&cs, tex_count_reg, 1);
   tu_cs_emit(&cs, link->texture_map.num_desc);

   *entry = tu_cs_end_sub_stream(draw_state, &cs);
   return VK_SUCCESS;
}

static VkResult
tu6_emit_ibo(struct tu_cmd_buffer *cmd,
             const struct tu_pipeline *pipeline,
             struct tu_descriptor_state *descriptors_state,
             gl_shader_stage type,
             struct tu_cs_entry *entry)
{
   struct tu_device *device = cmd->device;
   struct tu_cs *draw_state = &cmd->sub_cs;
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   VkResult result;

   unsigned num_desc = link->ssbo_map.num_desc + link->image_map.num_desc;

   if (num_desc == 0) {
      *entry = (struct tu_cs_entry) {};
      return VK_SUCCESS;
   }

   struct ts_cs_memory ibo_const;
   result = tu_cs_alloc(device, draw_state, num_desc,
                        A6XX_TEX_CONST_DWORDS, &ibo_const);
   if (result != VK_SUCCESS)
      return result;

   unsigned ssbo_index = 0;
   for (unsigned i = 0; i < link->ssbo_map.num; i++) {
      for (int j = 0; j < link->ssbo_map.array_size[i]; j++) {
         uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];

         uint64_t va = buffer_ptr(descriptors_state, &link->ssbo_map, i, j);
         /* We don't expose robustBufferAccess, so leave the size unlimited. */
         uint32_t sz = MAX_STORAGE_BUFFER_RANGE / 4;

         dst[0] = A6XX_IBO_0_FMT(FMT6_32_UINT);
         dst[1] = A6XX_IBO_1_WIDTH(sz & MASK(15)) |
                  A6XX_IBO_1_HEIGHT(sz >> 15);
         dst[2] = A6XX_IBO_2_UNK4 |
                  A6XX_IBO_2_UNK31 |
                  A6XX_IBO_2_TYPE(A6XX_TEX_1D);
         dst[3] = 0;
         dst[4] = va;
         dst[5] = va >> 32;
         for (int i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
            dst[i] = 0;

         ssbo_index++;
      }
   }

   for (unsigned i = 0; i < link->image_map.num; i++) {
      for (int j = 0; j < link->image_map.array_size[i]; j++) {
         uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];

         write_image_ibo(cmd, dst,
                         descriptors_state, &link->image_map, i, j);

         ssbo_index++;
      }
   }

   assert(ssbo_index == num_desc);

   struct tu_cs cs;
   result = tu_cs_begin_sub_stream(device, draw_state, 7, &cs);
   if (result != VK_SUCCESS)
      return result;

   uint32_t opcode, ibo_addr_reg;
   enum a6xx_state_block sb;
   enum a6xx_state_type st;

   switch (type) {
   case MESA_SHADER_FRAGMENT:
      opcode = CP_LOAD_STATE6;
      st = ST6_SHADER;
      sb = SB6_IBO;
      ibo_addr_reg = REG_A6XX_SP_IBO_LO;
      break;
   case MESA_SHADER_COMPUTE:
      opcode = CP_LOAD_STATE6_FRAG;
      st = ST6_IBO;
      sb = SB6_CS_SHADER;
      ibo_addr_reg = REG_A6XX_SP_CS_IBO_LO;
      break;
   default:
      unreachable("unsupported stage for ibos");
   }

   /* emit texture state: */
   tu_cs_emit_pkt7(&cs, opcode, 3);
   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
              CP_LOAD_STATE6_0_STATE_TYPE(st) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
              CP_LOAD_STATE6_0_NUM_UNIT(num_desc));
   tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */

   tu_cs_emit_pkt4(&cs, ibo_addr_reg, 2);
   tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */

   *entry = tu_cs_end_sub_stream(draw_state, &cs);
   return VK_SUCCESS;
}

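/* SSBOs are described to the hardware as untyped 1D buffers of
 * FMT6_32_UINT texels, with the element count split across WIDTH (low 15
 * bits) and HEIGHT. Since robustBufferAccess is not exposed, the size is
 * simply pinned to MAX_STORAGE_BUFFER_RANGE rather than the real range of
 * the bound buffer.
 */
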
struct PACKED bcolor_entry
{
   uint32_t fp32[4];
   uint16_t ui16[4];
   int16_t si16[4];
   uint16_t fp16[4];
   uint16_t rgb565;
   uint16_t rgb5a1;
   uint16_t rgba4;
   uint8_t __pad0[2];
   uint8_t ui8[4];
   int8_t si8[4];
   uint32_t rgb10a2;
   uint32_t z24; /* also s8? */
   uint16_t srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
   uint8_t __pad1[56];
} border_color[] = {
   [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
   [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
   [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
      .fp32[3] = 0x3f800000,
      .ui16[3] = 0xffff,
      .si16[3] = 0x7fff,
      .fp16[3] = 0x3c00,
      .rgb5a1 = 0x8000,
      .rgba4 = 0xf000,
      .ui8[3] = 0xff,
      .si8[3] = 0x7f,
      .rgb10a2 = 0xc0000000,
      .srgb[3] = 0x3c00,
   },
   [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
      .fp32[3] = 1,
      .fp16[3] = 1,
   },
   [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
      .fp32[0 ... 3] = 0x3f800000,
      .ui16[0 ... 3] = 0xffff,
      .si16[0 ... 3] = 0x7fff,
      .fp16[0 ... 3] = 0x3c00,
      .rgb565 = 0xffff,
      .rgb5a1 = 0xffff,
      .rgba4 = 0xffff,
      .ui8[0 ... 3] = 0xff,
      .si8[0 ... 3] = 0x7f,
      .rgb10a2 = 0xffffffff,
      .z24 = 0xffffff,
      .srgb[0 ... 3] = 0x3c00,
   },
   [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
      .fp32[0 ... 3] = 1,
      .fp16[0 ... 3] = 1,
   },
};

static VkResult
tu6_emit_border_color(struct tu_cmd_buffer *cmd,
                      struct tu_cs *cs)
{
   STATIC_ASSERT(sizeof(struct bcolor_entry) == 128);

   const struct tu_pipeline *pipeline = cmd->state.pipeline;
   struct tu_descriptor_state *descriptors_state =
      &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];
   const struct tu_descriptor_map *vs_sampler =
      &pipeline->program.link[MESA_SHADER_VERTEX].sampler_map;
   const struct tu_descriptor_map *fs_sampler =
      &pipeline->program.link[MESA_SHADER_FRAGMENT].sampler_map;
   struct ts_cs_memory ptr;

   VkResult result = tu_cs_alloc(cmd->device, &cmd->sub_cs,
                                 vs_sampler->num_desc + fs_sampler->num_desc,
                                 128 / 4, &ptr);
   if (result != VK_SUCCESS)
      return result;

   for (unsigned i = 0; i < vs_sampler->num; i++) {
      for (unsigned j = 0; j < vs_sampler->array_size[i]; j++) {
         const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
                                                        vs_sampler, i, j);
         memcpy(ptr.map, &border_color[sampler->border], 128);
         ptr.map += 128 / 4;
      }
   }

   for (unsigned i = 0; i < fs_sampler->num; i++) {
      for (unsigned j = 0; j < fs_sampler->array_size[i]; j++) {
         const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
                                                        fs_sampler, i, j);
         memcpy(ptr.map, &border_color[sampler->border], 128);
         ptr.map += 128 / 4;
      }
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_LO, 2);
   tu_cs_emit_qw(cs, ptr.iova);
   return VK_SUCCESS;
}

static VkResult
tu6_bind_draw_states(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     const struct tu_draw_info *draw)
{
   const struct tu_pipeline *pipeline = cmd->state.pipeline;
   const struct tu_dynamic_state *dynamic = &cmd->state.dynamic;
   struct tu_draw_state_group draw_state_groups[TU_DRAW_STATE_COUNT];
   uint32_t draw_state_group_count = 0;

   struct tu_descriptor_state *descriptors_state =
      &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];

   VkResult result = tu_cs_reserve_space(cmd->device, cs, 256);
   if (result != VK_SUCCESS)
      return result;

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9806, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9990, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A008, 0);

   tu_cs_emit_regs(cs,
                   A6XX_PC_PRIMITIVE_CNTL_0(.primitive_restart =
                                            pipeline->ia.primitive_restart && draw->indexed));

   if (cmd->state.dirty &
          (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_LINE_WIDTH)) {
      tu6_emit_gras_su_cntl(cs, pipeline->rast.gras_su_cntl,
                            dynamic->line_width);
   }

   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_COMPARE_MASK)) {
      tu6_emit_stencil_compare_mask(cs, dynamic->stencil_compare_mask.front,
                                    dynamic->stencil_compare_mask.back);
   }

   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_WRITE_MASK)) {
      tu6_emit_stencil_write_mask(cs, dynamic->stencil_write_mask.front,
                                  dynamic->stencil_write_mask.back);
   }

   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_REFERENCE)) {
      tu6_emit_stencil_reference(cs, dynamic->stencil_reference.front,
                                 dynamic->stencil_reference.back);
   }

   if (cmd->state.dirty &
       (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_VERTEX_BUFFERS)) {
      for (uint32_t i = 0; i < pipeline->vi.count; i++) {
         const uint32_t binding = pipeline->vi.bindings[i];
         const uint32_t stride = pipeline->vi.strides[i];
         const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
         const VkDeviceSize offset = buf->bo_offset +
                                     cmd->state.vb.offsets[binding] +
                                     pipeline->vi.offsets[i];
         const VkDeviceSize size =
            offset < buf->bo->size ? buf->bo->size - offset : 0;

         tu_cs_emit_regs(cs,
                         A6XX_VFD_FETCH_BASE(i, .bo = buf->bo, .bo_offset = offset),
                         A6XX_VFD_FETCH_SIZE(i, size),
                         A6XX_VFD_FETCH_STRIDE(i, stride));
      }
   }

   if (cmd->state.dirty & TU_CMD_DIRTY_PIPELINE) {
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_PROGRAM,
            .enable_mask = ENABLE_DRAW,
            .ib = pipeline->program.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_PROGRAM_BINNING,
            .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
            .ib = pipeline->program.binning_state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VI,
            .enable_mask = ENABLE_DRAW,
            .ib = pipeline->vi.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VI_BINNING,
            .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
            .ib = pipeline->vi.binning_state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VP,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->vp.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_RAST,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->rast.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_DS,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->ds.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_BLEND,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->blend.state_ib,
         };
   }

   if (cmd->state.dirty &
       (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS | TU_CMD_DIRTY_PUSH_CONSTANTS)) {
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VS_CONST,
            .enable_mask = ENABLE_ALL,
            .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX)
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_FS_CONST,
            .enable_mask = ENABLE_DRAW,
            .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT)
         };
   }

   if (cmd->state.dirty &
       (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS)) {
      bool needs_border = false;
      struct tu_cs_entry vs_tex, fs_tex, fs_ibo;

      result = tu6_emit_textures(cmd, pipeline, descriptors_state,
                                 MESA_SHADER_VERTEX, &vs_tex, &needs_border);
      if (result != VK_SUCCESS)
         return result;

      result = tu6_emit_textures(cmd, pipeline, descriptors_state,
                                 MESA_SHADER_FRAGMENT, &fs_tex, &needs_border);
      if (result != VK_SUCCESS)
         return result;

      result = tu6_emit_ibo(cmd, pipeline, descriptors_state,
                            MESA_SHADER_FRAGMENT, &fs_ibo);
      if (result != VK_SUCCESS)
         return result;

      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VS_TEX,
            .enable_mask = ENABLE_ALL,
            .ib = vs_tex,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_FS_TEX,
            .enable_mask = ENABLE_DRAW,
            .ib = fs_tex,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_FS_IBO,
            .enable_mask = ENABLE_DRAW,
            .ib = fs_ibo,
         };

      if (needs_border) {
         result = tu6_emit_border_color(cmd, cs);
         if (result != VK_SUCCESS)
            return result;
      }
   }

   struct tu_cs_entry vs_params;
   result = tu6_emit_vs_params(cmd, draw, &vs_params);
   if (result != VK_SUCCESS)
      return result;

   draw_state_groups[draw_state_group_count++] =
      (struct tu_draw_state_group) {
         .id = TU_DRAW_STATE_VS_PARAMS,
         .enable_mask = ENABLE_ALL,
         .ib = vs_params,
      };

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_group_count);
   for (uint32_t i = 0; i < draw_state_group_count; i++) {
      const struct tu_draw_state_group *group = &draw_state_groups[i];
      debug_assert((group->enable_mask & ~ENABLE_ALL) == 0);
      uint32_t cp_set_draw_state =
         CP_SET_DRAW_STATE__0_COUNT(group->ib.size / 4) |
         group->enable_mask |
         CP_SET_DRAW_STATE__0_GROUP_ID(group->id);
      uint64_t iova = 0;
      if (group->ib.size) {
         iova = group->ib.bo->iova + group->ib.offset;
      } else {
         cp_set_draw_state |= CP_SET_DRAW_STATE__0_DISABLE;
      }

      tu_cs_emit(cs, cp_set_draw_state);
      tu_cs_emit_qw(cs, iova);
   }

   tu_cs_sanity_check(cs);

   /* track BOs */
   if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) {
      for (uint32_t i = 0; i < MAX_VBS; i++) {
         const struct tu_buffer *buf = cmd->state.vb.buffers[i];
         if (buf)
            tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
      }
   }
   if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
      unsigned i;
      for_each_bit(i, descriptors_state->valid) {
         struct tu_descriptor_set *set = descriptors_state->sets[i];
         for (unsigned j = 0; j < set->layout->buffer_count; ++j)
            if (set->descriptors[j]) {
               tu_bo_list_add(&cmd->bo_list, set->descriptors[j],
                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
            }
      }
   }

   /* Fragment shader state overwrites compute shader state, so flag the
    * compute pipeline for re-emit.
    */
   cmd->state.dirty = TU_CMD_DIRTY_COMPUTE_PIPELINE;
   return VK_SUCCESS;
}

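/* Note the asymmetric dirty handling at the end: rather than tracking which
 * registers graphics and compute share, the draw path simply leaves
 * TU_CMD_DIRTY_COMPUTE_PIPELINE set so the next dispatch re-emits
 * everything, and tu_dispatch() below does the mirror image with
 * TU_CMD_DIRTY_PIPELINE. Coarse, but correct.
 */
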
static void
tu6_emit_draw_direct(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     const struct tu_draw_info *draw)
{
   const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;

   tu_cs_emit_regs(cs,
                   A6XX_VFD_INDEX_OFFSET(draw->vertex_offset),
                   A6XX_VFD_INSTANCE_START_OFFSET(draw->first_instance));

   /* TODO hw binning */
   if (draw->indexed) {
      const enum a4xx_index_size index_size =
         tu6_index_size(cmd->state.index_type);
      const uint32_t index_bytes =
         (cmd->state.index_type == VK_INDEX_TYPE_UINT32) ? 4 : 2;
      const struct tu_buffer *buf = cmd->state.index_buffer;
      const VkDeviceSize offset = buf->bo_offset + cmd->state.index_offset +
                                  index_bytes * draw->first_index;
      const uint32_t size = index_bytes * draw->count;

      const uint32_t cp_draw_indx =
         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
         CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) | 0x2000;

      tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 7);
      tu_cs_emit(cs, cp_draw_indx);
      tu_cs_emit(cs, draw->instance_count);
      tu_cs_emit(cs, draw->count);
      tu_cs_emit(cs, 0x0); /* XXX */
      tu_cs_emit_qw(cs, buf->bo->iova + offset);
      tu_cs_emit(cs, size);
   } else {
      const uint32_t cp_draw_indx =
         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) | 0x2000;

      tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
      tu_cs_emit(cs, cp_draw_indx);
      tu_cs_emit(cs, draw->instance_count);
      tu_cs_emit(cs, draw->count);
   }
}

static void
tu_draw(struct tu_cmd_buffer *cmd, const struct tu_draw_info *draw)
{
   struct tu_cs *cs = &cmd->draw_cs;
   VkResult result;

   result = tu6_bind_draw_states(cmd, cs, draw);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   result = tu_cs_reserve_space(cmd->device, cs, 32);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   if (draw->indirect) {
      tu_finishme("indirect draw");
      return;
   }

   /* TODO tu6_emit_marker should pick different regs depending on cs */

   tu6_emit_marker(cmd, cs);
   tu6_emit_draw_direct(cmd, cs, draw);
   tu6_emit_marker(cmd, cs);

   cmd->wait_for_idle = true;

   tu_cs_sanity_check(cs);
}

void
tu_CmdDraw(VkCommandBuffer commandBuffer,
           uint32_t vertexCount,
           uint32_t instanceCount,
           uint32_t firstVertex,
           uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.count = vertexCount;
   info.instance_count = instanceCount;
   info.first_instance = firstInstance;
   info.vertex_offset = firstVertex;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
                  uint32_t indexCount,
                  uint32_t instanceCount,
                  uint32_t firstIndex,
                  int32_t vertexOffset,
                  uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = indexCount;
   info.instance_count = instanceCount;
   info.first_index = firstIndex;
   info.vertex_offset = vertexOffset;
   info.first_instance = firstInstance;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
                   VkBuffer _buffer,
                   VkDeviceSize offset,
                   uint32_t drawCount,
                   uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
                          VkBuffer _buffer,
                          VkDeviceSize offset,
                          uint32_t drawCount,
                          uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}

struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];

   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};

static void
tu_emit_compute_driver_params(struct tu_cs *cs, struct tu_pipeline *pipeline,
                              const struct tu_dispatch_info *info)
{
   gl_shader_stage type = MESA_SHADER_COMPUTE;
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   const struct ir3_const_state *const_state = &link->const_state;
   uint32_t offset = const_state->offsets.driver_param;

   if (link->constlen <= offset)
      return;

   if (!info->indirect) {
      uint32_t driver_params[IR3_DP_CS_COUNT] = {
         [IR3_DP_NUM_WORK_GROUPS_X] = info->blocks[0],
         [IR3_DP_NUM_WORK_GROUPS_Y] = info->blocks[1],
         [IR3_DP_NUM_WORK_GROUPS_Z] = info->blocks[2],
         [IR3_DP_LOCAL_GROUP_SIZE_X] = pipeline->compute.local_size[0],
         [IR3_DP_LOCAL_GROUP_SIZE_Y] = pipeline->compute.local_size[1],
         [IR3_DP_LOCAL_GROUP_SIZE_Z] = pipeline->compute.local_size[2],
      };

      uint32_t num_consts = MIN2(const_state->num_driver_params,
                                 (link->constlen - offset) * 4);
      /* push constants */
      tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_consts);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                 CP_LOAD_STATE6_0_NUM_UNIT(num_consts / 4));
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, 0);
      uint32_t i;
      for (i = 0; i < num_consts; i++)
         tu_cs_emit(cs, driver_params[i]);
   } else {
      tu_finishme("Indirect driver params");
   }
}

static void
tu_dispatch(struct tu_cmd_buffer *cmd,
            const struct tu_dispatch_info *info)
{
   struct tu_cs *cs = &cmd->cs;
   struct tu_pipeline *pipeline = cmd->state.compute_pipeline;
   struct tu_descriptor_state *descriptors_state =
      &cmd->descriptors[VK_PIPELINE_BIND_POINT_COMPUTE];

   VkResult result = tu_cs_reserve_space(cmd->device, cs, 256);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_PIPELINE)
      tu_cs_emit_ib(cs, &pipeline->program.state_ib);

   struct tu_cs_entry ib;

   ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE);
   if (ib.size)
      tu_cs_emit_ib(cs, &ib);

   tu_emit_compute_driver_params(cs, pipeline, info);

   bool needs_border = false;
   result = tu6_emit_textures(cmd, pipeline, descriptors_state,
                              MESA_SHADER_COMPUTE, &ib, &needs_border);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   if (ib.size)
      tu_cs_emit_ib(cs, &ib);

   if (needs_border)
      tu_finishme("compute border color");

   result = tu6_emit_ibo(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE, &ib);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   if (ib.size)
      tu_cs_emit_ib(cs, &ib);

   /* track BOs */
   if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
      unsigned i;
      for_each_bit(i, descriptors_state->valid) {
         struct tu_descriptor_set *set = descriptors_state->sets[i];
         for (unsigned j = 0; j < set->layout->buffer_count; ++j)
            if (set->descriptors[j]) {
               tu_bo_list_add(&cmd->bo_list, set->descriptors[j],
                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
            }
      }
   }

   /* Compute shader state overwrites fragment shader state, so we flag the
    * graphics pipeline for re-emit.
    */
   cmd->state.dirty = TU_CMD_DIRTY_PIPELINE;

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(0x8));

   const uint32_t *local_size = pipeline->compute.local_size;
   const uint32_t *num_groups = info->blocks;
   tu_cs_emit_regs(cs,
                   A6XX_HLSQ_CS_NDRANGE_0(.kerneldim = 3,
                                          .localsizex = local_size[0] - 1,
                                          .localsizey = local_size[1] - 1,
                                          .localsizez = local_size[2] - 1),
                   A6XX_HLSQ_CS_NDRANGE_1(.globalsize_x = local_size[0] * num_groups[0]),
                   A6XX_HLSQ_CS_NDRANGE_2(.globaloff_x = 0),
                   A6XX_HLSQ_CS_NDRANGE_3(.globalsize_y = local_size[1] * num_groups[1]),
                   A6XX_HLSQ_CS_NDRANGE_4(.globaloff_y = 0),
                   A6XX_HLSQ_CS_NDRANGE_5(.globalsize_z = local_size[2] * num_groups[2]),
                   A6XX_HLSQ_CS_NDRANGE_6(.globaloff_z = 0));

   tu_cs_emit_regs(cs,
                   A6XX_HLSQ_CS_KERNEL_GROUP_X(1),
                   A6XX_HLSQ_CS_KERNEL_GROUP_Y(1),
                   A6XX_HLSQ_CS_KERNEL_GROUP_Z(1));

   if (info->indirect) {
      uint64_t iova = tu_buffer_iova(info->indirect) + info->indirect_offset;

      tu_bo_list_add(&cmd->bo_list, info->indirect->bo,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);

      tu_cs_emit_pkt7(cs, CP_EXEC_CS_INDIRECT, 4);
      tu_cs_emit(cs, 0x00000000);
      tu_cs_emit_qw(cs, iova);
      tu_cs_emit(cs,
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(local_size[0] - 1) |
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(local_size[1] - 1) |
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(local_size[2] - 1));
   } else {
      tu_cs_emit_pkt7(cs, CP_EXEC_CS, 4);
      tu_cs_emit(cs, 0x00000000);
      tu_cs_emit(cs, CP_EXEC_CS_1_NGROUPS_X(info->blocks[0]));
      tu_cs_emit(cs, CP_EXEC_CS_2_NGROUPS_Y(info->blocks[1]));
      tu_cs_emit(cs, CP_EXEC_CS_3_NGROUPS_Z(info->blocks[2]));
   }

   tu_cs_emit_wfi(cs);

   tu6_emit_cache_flush(cmd, cs);
}

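/* The HLSQ_CS_NDRANGE registers take the *global* size in invocations, not
 * workgroups: e.g. a vkCmdDispatch(4, 2, 1) with an 8x8x1 local size
 * programs globalsize_x = 8 * 4 = 32 and globalsize_y = 8 * 2 = 16, while
 * the localsize* fields hold the workgroup dimensions minus one.
 */
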
void
tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
                   uint32_t base_x,
                   uint32_t base_y,
                   uint32_t base_z,
                   uint32_t x,
                   uint32_t y,
                   uint32_t z)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_dispatch_info info = {};

   info.blocks[0] = x;
   info.blocks[1] = y;
   info.blocks[2] = z;

   info.offsets[0] = base_x;
   info.offsets[1] = base_y;
   info.offsets[2] = base_z;
   tu_dispatch(cmd_buffer, &info);
}

void
tu_CmdDispatch(VkCommandBuffer commandBuffer,
               uint32_t x,
               uint32_t y,
               uint32_t z)
{
   tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
}

void
tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
                       VkBuffer _buffer,
                       VkDeviceSize offset)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_dispatch_info info = {};

   info.indirect = buffer;
   info.indirect_offset = offset;

   tu_dispatch(cmd_buffer, &info);
}

void
tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   tu_cs_end(&cmd_buffer->draw_cs);
   tu_cs_end(&cmd_buffer->draw_epilogue_cs);

   tu_cmd_render_tiles(cmd_buffer);

   /* discard draw_cs and draw_epilogue_cs entries now that the tiles are
    * rendered */
   tu_cs_discard_entries(&cmd_buffer->draw_cs);
   tu_cs_begin(&cmd_buffer->draw_cs);
   tu_cs_discard_entries(&cmd_buffer->draw_epilogue_cs);
   tu_cs_begin(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->state.pass = NULL;
   cmd_buffer->state.subpass = NULL;
   cmd_buffer->state.framebuffer = NULL;
}

void
tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
                     const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdEndRenderPass(commandBuffer);
}

struct tu_barrier_info
{
   uint32_t eventCount;
   const VkEvent *pEvents;
   VkPipelineStageFlags srcStageMask;
};

static void
tu_barrier(struct tu_cmd_buffer *cmd_buffer,
           uint32_t memoryBarrierCount,
           const VkMemoryBarrier *pMemoryBarriers,
           uint32_t bufferMemoryBarrierCount,
           const VkBufferMemoryBarrier *pBufferMemoryBarriers,
           uint32_t imageMemoryBarrierCount,
           const VkImageMemoryBarrier *pImageMemoryBarriers,
           const struct tu_barrier_info *info)
{
}

void
tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
                      VkPipelineStageFlags srcStageMask,
                      VkPipelineStageFlags destStageMask,
                      VkDependencyFlags dependencyFlags,
                      uint32_t memoryBarrierCount,
                      const VkMemoryBarrier *pMemoryBarriers,
                      uint32_t bufferMemoryBarrierCount,
                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                      uint32_t imageMemoryBarrierCount,
                      const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = 0;
   info.pEvents = NULL;
   info.srcStageMask = srcStageMask;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}

static void
write_event(struct tu_cmd_buffer *cmd, struct tu_event *event, unsigned value)
{
   struct tu_cs *cs = &cmd->cs;

   VkResult result = tu_cs_reserve_space(cmd->device, cs, 4);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_WRITE);

   /* TODO: any flush required before/after ? */

   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
   tu_cs_emit_qw(cs, event->bo.iova); /* ADDR_LO/HI */
   tu_cs_emit(cs, value);
}

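/* VkEvent is implemented as a single dword in its own BO: set/reset are a
 * CP_MEM_WRITE of 1 or 0, and vkCmdWaitEvents below polls the same dword
 * with CP_WAIT_REG_MEM (function WRITE_EQ, ref 1). Host-side vkSetEvent
 * presumably writes the same memory, though that path is outside this file.
 */
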
void
tu_CmdSetEvent(VkCommandBuffer commandBuffer,
               VkEvent _event,
               VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd, event, 1);
}

void
tu_CmdResetEvent(VkCommandBuffer commandBuffer,
                 VkEvent _event,
                 VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd, event, 0);
}

void
tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
                 uint32_t eventCount,
                 const VkEvent *pEvents,
                 VkPipelineStageFlags srcStageMask,
                 VkPipelineStageFlags dstStageMask,
                 uint32_t memoryBarrierCount,
                 const VkMemoryBarrier *pMemoryBarriers,
                 uint32_t bufferMemoryBarrierCount,
                 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                 uint32_t imageMemoryBarrierCount,
                 const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *cs = &cmd->cs;

   VkResult result = tu_cs_reserve_space(cmd->device, cs, eventCount * 7);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   /* TODO: any flush required before/after? (CP_WAIT_FOR_ME?) */

   for (uint32_t i = 0; i < eventCount; i++) {
      const struct tu_event *event = (const struct tu_event *) pEvents[i];

      tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_READ);

      tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
      tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                     CP_WAIT_REG_MEM_0_POLL_MEMORY);
      tu_cs_emit_qw(cs, event->bo.iova); /* POLL_ADDR_LO/HI */
      tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(1));
      tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0u));
      tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(20));
   }
}

void
tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
{
   /* No-op */
}