/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "tu_private.h"

#include "registers/adreno_pm4.xml.h"
#include "registers/adreno_common.xml.h"

#include "vk_format.h"

#define OVERFLOW_FLAG_REG REG_A6XX_CP_SCRATCH_REG(0)
void
tu_bo_list_init(struct tu_bo_list *list)
{
   list->count = list->capacity = 0;
   list->bo_infos = NULL;
}
void
tu_bo_list_destroy(struct tu_bo_list *list)
{
   free(list->bo_infos);
}

void
tu_bo_list_reset(struct tu_bo_list *list)
{
   list->count = 0;
}
/**
 * \a flags consists of MSM_SUBMIT_BO_FLAGS.
 */
static uint32_t
tu_bo_list_add_info(struct tu_bo_list *list,
                    const struct drm_msm_gem_submit_bo *bo_info)
{
   assert(bo_info->handle != 0);

   for (uint32_t i = 0; i < list->count; ++i) {
      if (list->bo_infos[i].handle == bo_info->handle) {
         assert(list->bo_infos[i].presumed == bo_info->presumed);
         list->bo_infos[i].flags |= bo_info->flags;
         return i;
      }
   }

   /* grow list->bo_infos if needed */
   if (list->count == list->capacity) {
      uint32_t new_capacity = MAX2(2 * list->count, 16);
      struct drm_msm_gem_submit_bo *new_bo_infos = realloc(
         list->bo_infos, new_capacity * sizeof(struct drm_msm_gem_submit_bo));
      if (!new_bo_infos)
         return TU_BO_LIST_FAILED;
      list->bo_infos = new_bo_infos;
      list->capacity = new_capacity;
   }

   list->bo_infos[list->count] = *bo_info;
   return list->count++;
}
uint32_t
tu_bo_list_add(struct tu_bo_list *list,
               const struct tu_bo *bo,
               uint32_t flags)
{
   return tu_bo_list_add_info(list, &(struct drm_msm_gem_submit_bo) {
                                       .flags = flags,
                                       .handle = bo->gem_handle,
                                       .presumed = bo->iova,
                                    });
}

VkResult
tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other)
{
   for (uint32_t i = 0; i < other->count; i++) {
      if (tu_bo_list_add_info(list, other->bo_infos + i) == TU_BO_LIST_FAILED)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   return VK_SUCCESS;
}
static bool
is_linear_mipmapped(const struct tu_image_view *iview)
{
   return iview->image->layout.tile_mode == TILE6_LINEAR &&
          iview->base_mip != iview->image->level_count - 1;
}
static bool
force_sysmem(const struct tu_cmd_buffer *cmd,
             const struct VkRect2D *render_area)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   const struct tu_physical_device *device = cmd->device->physical_device;
   bool has_linear_mipmapped_store = false;
   const struct tu_render_pass *pass = cmd->state.pass;

   /* Layered rendering requires sysmem. */
   if (fb->layers > 1)
      return true;

   /* Iterate over all the places we call tu6_emit_store_attachment() */
   for (unsigned i = 0; i < pass->subpass_count; i++) {
      const struct tu_subpass *subpass = &pass->subpasses[i];
      if (subpass->resolve_attachments) {
         for (unsigned i = 0; i < subpass->color_count; i++) {
            uint32_t a = subpass->resolve_attachments[i].attachment;
            if (a != VK_ATTACHMENT_UNUSED &&
                cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
               const struct tu_image_view *iview = fb->attachments[a].attachment;
               if (is_linear_mipmapped(iview)) {
                  has_linear_mipmapped_store = true;
                  break;
               }
            }
         }
      }
   }

   for (unsigned i = 0; i < pass->attachment_count; i++) {
      if (pass->attachments[i].gmem_offset >= 0 &&
          cmd->state.pass->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
         const struct tu_image_view *iview = fb->attachments[i].attachment;
         if (is_linear_mipmapped(iview)) {
            has_linear_mipmapped_store = true;
            break;
         }
      }
   }

   /* Linear textures cannot have any padding between mipmap levels and their
    * height isn't padded, while at the same time the GMEM->MEM resolve does
    * not have per-pixel granularity, so if the image height isn't aligned to
    * the resolve granularity and the render area is tall enough, we may wind
    * up writing past the bottom of the image into the next miplevel or even
    * past the end of the image. For the last miplevel, the layout code should
    * insert enough padding so that the overdraw writes to the padding. To
    * work around this, we force-enable sysmem rendering.
    */
   const uint32_t y2 = render_area->offset.y + render_area->extent.height;
   const uint32_t aligned_y2 = ALIGN_POT(y2, device->tile_align_h);

   return has_linear_mipmapped_store && aligned_y2 > fb->height;
}
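
/* Illustrative numbers for the check above (tile_align_h is device-specific;
 * 32 here is only an assumed value): a render area ending at y2 = 1000 rounds
 * up to aligned_y2 = ALIGN_POT(1000, 32) = 1024, so a framebuffer of height
 * 1000 with a linear mipmapped store gets forced to sysmem, since the GMEM
 * resolve could otherwise write as far down as row 1023.
 */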
static void
tu_tiling_config_update_tile_layout(struct tu_tiling_config *tiling,
                                    const struct tu_device *dev,
                                    uint32_t pixels)
{
   const uint32_t tile_align_w = dev->physical_device->tile_align_w;
   const uint32_t tile_align_h = dev->physical_device->tile_align_h;
   const uint32_t max_tile_width = 1024; /* A6xx */

   /* note: don't offset the tiling config by render_area.offset,
    * because binning pass can't deal with it
    * this means we might end up with more tiles than necessary,
    * but load/store/etc are still scissored to the render_area
    */
   tiling->tile0.offset = (VkOffset2D) {};

   const uint32_t ra_width =
      tiling->render_area.extent.width +
      (tiling->render_area.offset.x - tiling->tile0.offset.x);
   const uint32_t ra_height =
      tiling->render_area.extent.height +
      (tiling->render_area.offset.y - tiling->tile0.offset.y);

   /* start from 1 tile */
   tiling->tile_count = (VkExtent2D) {
      .width = 1,
      .height = 1,
   };
   tiling->tile0.extent = (VkExtent2D) {
      .width = align(ra_width, tile_align_w),
      .height = align(ra_height, tile_align_h),
   };

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN)) {
      /* start with 2x2 tiles */
      tiling->tile_count.width = 2;
      tiling->tile_count.height = 2;
      tiling->tile0.extent.width = align(DIV_ROUND_UP(ra_width, 2), tile_align_w);
      tiling->tile0.extent.height = align(DIV_ROUND_UP(ra_height, 2), tile_align_h);
   }

   /* do not exceed max tile width */
   while (tiling->tile0.extent.width > max_tile_width) {
      tiling->tile_count.width++;
      tiling->tile0.extent.width =
         align(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
   }

   /* will force to sysmem, don't bother trying to have a valid tile config
    * TODO: just skip all GMEM stuff when sysmem is forced?
    */
   if (!pixels)
      return;

   /* do not exceed gmem size */
   while (tiling->tile0.extent.width * tiling->tile0.extent.height > pixels) {
      if (tiling->tile0.extent.width > MAX2(tile_align_w, tiling->tile0.extent.height)) {
         tiling->tile_count.width++;
         tiling->tile0.extent.width =
            align(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
      } else {
         /* if this assert fails then layout is impossible.. */
         assert(tiling->tile0.extent.height > tile_align_h);
         tiling->tile_count.height++;
         tiling->tile0.extent.height =
            align(DIV_ROUND_UP(ra_height, tiling->tile_count.height), tile_align_h);
      }
   }
}
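
/* Worked example of the sizing loop above (all numbers illustrative, not
 * taken from any particular a6xx part): for a 1920x1080 render area with
 * 32x32 tile alignment and a 96000-pixel budget, the loop alternates width
 * and height splits - 1920x1088 -> 960x1088 -> 960x544 -> ... -> 384x288 ->
 * 320x288 - settling on tile_count = 6x4 with 320*288 = 92160 <= 96000
 * pixels per tile.
 */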
static void
tu_tiling_config_update_pipe_layout(struct tu_tiling_config *tiling,
                                    const struct tu_device *dev)
{
   const uint32_t max_pipe_count = 32; /* A6xx */

   /* start from 1 tile per pipe */
   tiling->pipe0 = (VkExtent2D) {
      .width = 1,
      .height = 1,
   };
   tiling->pipe_count = tiling->tile_count;

   while (tiling->pipe_count.width * tiling->pipe_count.height > max_pipe_count) {
      if (tiling->pipe0.width < tiling->pipe0.height) {
         tiling->pipe0.width += 1;
         tiling->pipe_count.width =
            DIV_ROUND_UP(tiling->tile_count.width, tiling->pipe0.width);
      } else {
         tiling->pipe0.height += 1;
         tiling->pipe_count.height =
            DIV_ROUND_UP(tiling->tile_count.height, tiling->pipe0.height);
      }
   }
}
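
/* Example of the division above (illustrative numbers): with
 * tile_count = 12x8 (96 tiles), pipe0 grows 1x1 -> 1x2 (pipe_count
 * 12x4 = 48) -> 2x2 (pipe_count 6x4 = 24 <= 32), so each VSC pipe ends
 * up covering a 2x2 block of tiles.
 */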
static void
tu_tiling_config_update_pipes(struct tu_tiling_config *tiling,
                              const struct tu_device *dev)
{
   const uint32_t max_pipe_count = 32; /* A6xx */
   const uint32_t used_pipe_count =
      tiling->pipe_count.width * tiling->pipe_count.height;
   const VkExtent2D last_pipe = {
      .width = (tiling->tile_count.width - 1) % tiling->pipe0.width + 1,
      .height = (tiling->tile_count.height - 1) % tiling->pipe0.height + 1,
   };

   assert(used_pipe_count <= max_pipe_count);
   assert(max_pipe_count <= ARRAY_SIZE(tiling->pipe_config));

   for (uint32_t y = 0; y < tiling->pipe_count.height; y++) {
      for (uint32_t x = 0; x < tiling->pipe_count.width; x++) {
         const uint32_t pipe_x = tiling->pipe0.width * x;
         const uint32_t pipe_y = tiling->pipe0.height * y;
         const uint32_t pipe_w = (x == tiling->pipe_count.width - 1)
                                    ? last_pipe.width
                                    : tiling->pipe0.width;
         const uint32_t pipe_h = (y == tiling->pipe_count.height - 1)
                                    ? last_pipe.height
                                    : tiling->pipe0.height;
         const uint32_t n = tiling->pipe_count.width * y + x;

         tiling->pipe_config[n] = A6XX_VSC_PIPE_CONFIG_REG_X(pipe_x) |
                                  A6XX_VSC_PIPE_CONFIG_REG_Y(pipe_y) |
                                  A6XX_VSC_PIPE_CONFIG_REG_W(pipe_w) |
                                  A6XX_VSC_PIPE_CONFIG_REG_H(pipe_h);
         tiling->pipe_sizes[n] = CP_SET_BIN_DATA5_0_VSC_SIZE(pipe_w * pipe_h);
      }
   }

   memset(tiling->pipe_config + used_pipe_count, 0,
          sizeof(uint32_t) * (max_pipe_count - used_pipe_count));
}
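
/* last_pipe above handles the ragged edge: e.g. (illustrative) with
 * tile_count = 7x5 and pipe0 = 2x2, (7 - 1) % 2 + 1 = 1 and
 * (5 - 1) % 2 + 1 = 1, so pipes in the rightmost column and bottom row
 * only cover a single tile in that direction.
 */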
static void
tu_tiling_config_get_tile(const struct tu_tiling_config *tiling,
                          const struct tu_device *dev,
                          uint32_t tx,
                          uint32_t ty,
                          struct tu_tile *tile)
{
   /* find the pipe and the slot for tile (tx, ty) */
   const uint32_t px = tx / tiling->pipe0.width;
   const uint32_t py = ty / tiling->pipe0.height;
   const uint32_t sx = tx - tiling->pipe0.width * px;
   const uint32_t sy = ty - tiling->pipe0.height * py;
   /* last pipe has different width */
   const uint32_t pipe_width =
      MIN2(tiling->pipe0.width,
           tiling->tile_count.width - px * tiling->pipe0.width);

   assert(tx < tiling->tile_count.width && ty < tiling->tile_count.height);
   assert(px < tiling->pipe_count.width && py < tiling->pipe_count.height);
   assert(sx < tiling->pipe0.width && sy < tiling->pipe0.height);

   /* convert to 1D indices */
   tile->pipe = tiling->pipe_count.width * py + px;
   tile->slot = pipe_width * sy + sx;

   /* get the blit area for the tile */
   tile->begin = (VkOffset2D) {
      .x = tiling->tile0.offset.x + tiling->tile0.extent.width * tx,
      .y = tiling->tile0.offset.y + tiling->tile0.extent.height * ty,
   };
   tile->end.x =
      (tx == tiling->tile_count.width - 1)
         ? tiling->render_area.offset.x + tiling->render_area.extent.width
         : tile->begin.x + tiling->tile0.extent.width;
   tile->end.y =
      (ty == tiling->tile_count.height - 1)
         ? tiling->render_area.offset.y + tiling->render_area.extent.height
         : tile->begin.y + tiling->tile0.extent.height;
}
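
/* Example lookup (illustrative numbers): with pipe0 = 2x2,
 * pipe_count = 4x3 and tile_count = 8x6, tile (tx=5, ty=3) maps to pipe
 * (px=2, py=1), i.e. tile->pipe = 4*1 + 2 = 6, at slot (sx=1, sy=1);
 * with pipe_width = 2 that gives tile->slot = 2*1 + 1 = 3.
 */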
enum a3xx_msaa_samples
tu_msaa_samples(uint32_t samples)
{
   switch (samples) {
   case 1:
      return MSAA_ONE;
   case 2:
      return MSAA_TWO;
   case 4:
      return MSAA_FOUR;
   case 8:
      return MSAA_EIGHT;
   default:
      assert(!"invalid sample count");
      return MSAA_ONE;
   }
}

static enum a4xx_index_size
tu6_index_size(VkIndexType type)
{
   switch (type) {
   case VK_INDEX_TYPE_UINT16:
      return INDEX4_SIZE_16_BIT;
   case VK_INDEX_TYPE_UINT32:
      return INDEX4_SIZE_32_BIT;
   default:
      unreachable("invalid VkIndexType");
      return INDEX4_SIZE_8_BIT;
   }
}
unsigned
tu6_emit_event_write(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     enum vgt_event_type event,
                     bool need_seqno)
{
   unsigned seqno = 0;

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
   tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
   if (need_seqno) {
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
      seqno = ++cmd->scratch_seqno;
      tu_cs_emit(cs, seqno);
   }

   return seqno;
}
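
/* Callers that pass need_seqno = true (e.g. the RB_DONE_TS and
 * CACHE_FLUSH_TS writes in tu6_cache_flush() below) get back a fresh
 * scratch_bo sequence number which the GPU writes on event completion,
 * so completion can be waited on with CP_WAIT_REG_MEM or CP_WAIT_MEM_GTE.
 */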
static void
tu6_emit_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu6_emit_event_write(cmd, cs, 0x31, false);
}

static void
tu6_emit_lrz_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu6_emit_event_write(cmd, cs, LRZ_FLUSH, false);
}

static void
tu6_emit_wfi(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   if (cmd->wait_for_idle) {
      tu_cs_emit_wfi(cs);
      cmd->wait_for_idle = false;
   }
}

#define tu_image_view_ubwc_pitches(iview)                               \
   .pitch = tu_image_ubwc_pitch(iview->image, iview->base_mip),         \
   .array_pitch = tu_image_ubwc_size(iview->image, iview->base_mip) >> 2
static void
tu6_emit_zs(struct tu_cmd_buffer *cmd,
            const struct tu_subpass *subpass,
            struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   const uint32_t a = subpass->depth_stencil_attachment.attachment;
   if (a == VK_ATTACHMENT_UNUSED) {
      tu_cs_emit_regs(cs,
                      A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE),
                      A6XX_RB_DEPTH_BUFFER_PITCH(0),
                      A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(0),
                      A6XX_RB_DEPTH_BUFFER_BASE(0),
                      A6XX_RB_DEPTH_BUFFER_BASE_GMEM(0));

      tu_cs_emit_regs(cs,
                      A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE));

      tu_cs_emit_regs(cs,
                      A6XX_GRAS_LRZ_BUFFER_BASE(0),
                      A6XX_GRAS_LRZ_BUFFER_PITCH(0),
                      A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));

      tu_cs_emit_regs(cs, A6XX_RB_STENCIL_INFO(0));

      return;
   }

   const struct tu_image_view *iview = fb->attachments[a].attachment;
   enum a6xx_depth_format fmt = tu6_pipe2depth(iview->vk_format);

   tu_cs_emit_regs(cs,
                   A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt),
                   A6XX_RB_DEPTH_BUFFER_PITCH(tu_image_stride(iview->image, iview->base_mip)),
                   A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(iview->image->layout.layer_size),
                   A6XX_RB_DEPTH_BUFFER_BASE(tu_image_view_base_ref(iview)),
                   A6XX_RB_DEPTH_BUFFER_BASE_GMEM(cmd->state.pass->attachments[a].gmem_offset));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = fmt));

   tu_cs_emit_regs(cs,
                   A6XX_RB_DEPTH_FLAG_BUFFER_BASE(tu_image_view_ubwc_base_ref(iview)),
                   A6XX_RB_DEPTH_FLAG_BUFFER_PITCH(tu_image_view_ubwc_pitches(iview)));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_BUFFER_BASE(0),
                   A6XX_GRAS_LRZ_BUFFER_PITCH(0),
                   A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));

   tu_cs_emit_regs(cs,
                   A6XX_RB_STENCIL_INFO(0));
}
static void
tu6_emit_mrt(struct tu_cmd_buffer *cmd,
             const struct tu_subpass *subpass,
             struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   unsigned char mrt_comp[MAX_RTS] = { 0 };
   unsigned srgb_cntl = 0;

   for (uint32_t i = 0; i < subpass->color_count; ++i) {
      uint32_t a = subpass->color_attachments[i].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      const struct tu_image_view *iview = fb->attachments[a].attachment;
      const enum a6xx_tile_mode tile_mode =
         tu6_get_image_tile_mode(iview->image, iview->base_mip);

      mrt_comp[i] = 0xf;

      if (vk_format_is_srgb(iview->vk_format))
         srgb_cntl |= (1 << i);

      const struct tu_native_format format =
         tu6_format_color(iview->vk_format, iview->image->layout.tile_mode);

      tu_cs_emit_regs(cs,
                      A6XX_RB_MRT_BUF_INFO(i,
                                           .color_tile_mode = tile_mode,
                                           .color_format = format.fmt,
                                           .color_swap = format.swap),
                      A6XX_RB_MRT_PITCH(i, tu_image_stride(iview->image, iview->base_mip)),
                      A6XX_RB_MRT_ARRAY_PITCH(i, iview->image->layout.layer_size),
                      A6XX_RB_MRT_BASE(i, tu_image_view_base_ref(iview)),
                      A6XX_RB_MRT_BASE_GMEM(i, cmd->state.pass->attachments[a].gmem_offset));

      tu_cs_emit_regs(cs,
                      A6XX_SP_FS_MRT_REG(i,
                                         .color_format = format.fmt,
                                         .color_sint = vk_format_is_sint(iview->vk_format),
                                         .color_uint = vk_format_is_uint(iview->vk_format)));

      tu_cs_emit_regs(cs,
                      A6XX_RB_MRT_FLAG_BUFFER_ADDR(i, tu_image_view_ubwc_base_ref(iview)),
                      A6XX_RB_MRT_FLAG_BUFFER_PITCH(i, tu_image_view_ubwc_pitches(iview)));
   }

   tu_cs_emit_regs(cs,
                   A6XX_RB_SRGB_CNTL(.dword = srgb_cntl));

   tu_cs_emit_regs(cs,
                   A6XX_SP_SRGB_CNTL(.dword = srgb_cntl));

   tu_cs_emit_regs(cs,
                   A6XX_RB_RENDER_COMPONENTS(.rt0 = mrt_comp[0],
                                             .rt1 = mrt_comp[1],
                                             .rt2 = mrt_comp[2],
                                             .rt3 = mrt_comp[3],
                                             .rt4 = mrt_comp[4],
                                             .rt5 = mrt_comp[5],
                                             .rt6 = mrt_comp[6],
                                             .rt7 = mrt_comp[7]));

   tu_cs_emit_regs(cs,
                   A6XX_SP_FS_RENDER_COMPONENTS(.rt0 = mrt_comp[0],
                                                .rt1 = mrt_comp[1],
                                                .rt2 = mrt_comp[2],
                                                .rt3 = mrt_comp[3],
                                                .rt4 = mrt_comp[4],
                                                .rt5 = mrt_comp[5],
                                                .rt6 = mrt_comp[6],
                                                .rt7 = mrt_comp[7]));

   // XXX: We probably can't hardcode LAYER_CNTL_TYPE.
   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LAYER_CNTL(.layered = fb->layers > 1,
                                        .type = LAYER_2D_ARRAY));
}
static void
tu6_emit_msaa(struct tu_cmd_buffer *cmd,
              const struct tu_subpass *subpass,
              struct tu_cs *cs)
{
   const enum a3xx_msaa_samples samples = tu_msaa_samples(subpass->samples);
   bool msaa_disable = samples == MSAA_ONE;

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_RAS_MSAA_CNTL(samples),
                   A6XX_SP_TP_DEST_MSAA_CNTL(.samples = samples,
                                             .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_RAS_MSAA_CNTL(samples),
                   A6XX_GRAS_DEST_MSAA_CNTL(.samples = samples,
                                            .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_RB_RAS_MSAA_CNTL(samples),
                   A6XX_RB_DEST_MSAA_CNTL(.samples = samples,
                                          .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_RB_MSAA_CNTL(samples));
}
static void
tu6_emit_bin_size(struct tu_cs *cs,
                  uint32_t bin_w, uint32_t bin_h, uint32_t flags)
{
   tu_cs_emit_regs(cs,
                   A6XX_GRAS_BIN_CONTROL(.binw = bin_w,
                                         .binh = bin_h,
                                         .dword = flags));

   tu_cs_emit_regs(cs,
                   A6XX_RB_BIN_CONTROL(.binw = bin_w,
                                       .binh = bin_h,
                                       .dword = flags));

   /* no flag for RB_BIN_CONTROL2... */
   tu_cs_emit_regs(cs,
                   A6XX_RB_BIN_CONTROL2(.binw = bin_w,
                                        .binh = bin_h));
}
static void
tu6_emit_render_cntl(struct tu_cmd_buffer *cmd,
                     const struct tu_subpass *subpass,
                     struct tu_cs *cs,
                     bool binning)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   uint32_t cntl = 0;
   cntl |= A6XX_RB_RENDER_CNTL_UNK4;
   if (binning) {
      cntl |= A6XX_RB_RENDER_CNTL_BINNING;
   } else {
      uint32_t mrts_ubwc_enable = 0;
      for (uint32_t i = 0; i < subpass->color_count; ++i) {
         uint32_t a = subpass->color_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         const struct tu_image_view *iview = fb->attachments[a].attachment;
         if (iview->image->layout.ubwc_layer_size != 0)
            mrts_ubwc_enable |= 1 << i;
      }

      cntl |= A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable);

      const uint32_t a = subpass->depth_stencil_attachment.attachment;
      if (a != VK_ATTACHMENT_UNUSED) {
         const struct tu_image_view *iview = fb->attachments[a].attachment;
         if (iview->image->layout.ubwc_layer_size != 0)
            cntl |= A6XX_RB_RENDER_CNTL_FLAG_DEPTH;
      }

      /* In the !binning case, we need to set RB_RENDER_CNTL in the draw_cs
       * in order to set it correctly for the different subpasses. However,
       * that means the packets we're emitting also happen during binning. So
       * we need to guard the write on !BINNING at CP execution time.
       */
      tu_cs_reserve(cs, 3 + 4);
      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
      tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
                     CP_COND_REG_EXEC_0_GMEM | CP_COND_REG_EXEC_0_SYSMEM);
      tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(4));
   }

   tu_cs_emit_pkt7(cs, CP_REG_WRITE, 3);
   tu_cs_emit(cs, CP_REG_WRITE_0_TRACKER(TRACK_RENDER_CNTL));
   tu_cs_emit(cs, REG_A6XX_RB_RENDER_CNTL);
   tu_cs_emit(cs, cntl);
}
static void
tu6_emit_blit_scissor(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool align)
{
   const VkRect2D *render_area = &cmd->state.tiling_config.render_area;
   uint32_t x1 = render_area->offset.x;
   uint32_t y1 = render_area->offset.y;
   uint32_t x2 = x1 + render_area->extent.width - 1;
   uint32_t y2 = y1 + render_area->extent.height - 1;

   /* TODO: alignment requirement seems to be less than tile_align_w/h */
   if (align) {
      x1 = x1 & ~cmd->device->physical_device->tile_align_w;
      y1 = y1 & ~cmd->device->physical_device->tile_align_h;
      x2 = ALIGN_POT(x2 + 1, cmd->device->physical_device->tile_align_w) - 1;
      y2 = ALIGN_POT(y2 + 1, cmd->device->physical_device->tile_align_h) - 1;
   }

   tu_cs_emit_regs(cs,
                   A6XX_RB_BLIT_SCISSOR_TL(.x = x1, .y = y1),
                   A6XX_RB_BLIT_SCISSOR_BR(.x = x2, .y = y2));
}
static void
tu6_emit_blit_info(struct tu_cmd_buffer *cmd,
                   struct tu_cs *cs,
                   const struct tu_image_view *iview,
                   uint32_t gmem_offset,
                   bool resolve)
{
   tu_cs_emit_regs(cs,
                   A6XX_RB_BLIT_INFO(.unk0 = !resolve, .gmem = !resolve));

   const struct tu_native_format format =
      tu6_format_color(iview->vk_format, iview->image->layout.tile_mode);

   enum a6xx_tile_mode tile_mode =
      tu6_get_image_tile_mode(iview->image, iview->base_mip);
   tu_cs_emit_regs(cs,
                   A6XX_RB_BLIT_DST_INFO(
                      .tile_mode = tile_mode,
                      .samples = tu_msaa_samples(iview->image->samples),
                      .color_format = format.fmt,
                      .color_swap = format.swap,
                      .flags = iview->image->layout.ubwc_layer_size != 0),
                   A6XX_RB_BLIT_DST(tu_image_view_base_ref(iview)),
                   A6XX_RB_BLIT_DST_PITCH(tu_image_stride(iview->image, iview->base_mip)),
                   A6XX_RB_BLIT_DST_ARRAY_PITCH(iview->image->layout.layer_size));

   if (iview->image->layout.ubwc_layer_size) {
      tu_cs_emit_regs(cs,
                      A6XX_RB_BLIT_FLAG_DST(tu_image_view_ubwc_base_ref(iview)),
                      A6XX_RB_BLIT_FLAG_DST_PITCH(tu_image_view_ubwc_pitches(iview)));
   }

   tu_cs_emit_regs(cs,
                   A6XX_RB_BLIT_BASE_GMEM(gmem_offset));
}

static void
tu6_emit_blit(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu6_emit_event_write(cmd, cs, BLIT, false);
}
static void
tu6_emit_window_scissor(struct tu_cmd_buffer *cmd,
                        struct tu_cs *cs,
                        uint32_t x1,
                        uint32_t y1,
                        uint32_t x2,
                        uint32_t y2)
{
   tu_cs_emit_regs(cs,
                   A6XX_GRAS_SC_WINDOW_SCISSOR_TL(.x = x1, .y = y1),
                   A6XX_GRAS_SC_WINDOW_SCISSOR_BR(.x = x2, .y = y2));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_RESOLVE_CNTL_1(.x = x1, .y = y1),
                   A6XX_GRAS_RESOLVE_CNTL_2(.x = x2, .y = y2));
}

static void
tu6_emit_window_offset(struct tu_cmd_buffer *cmd,
                       struct tu_cs *cs,
                       uint32_t x1,
                       uint32_t y1)
{
   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET2(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_SP_WINDOW_OFFSET(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_WINDOW_OFFSET(.x = x1, .y = y1));
}
static bool
use_hw_binning(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_NOBIN))
      return false;

   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN))
      return true;

   return (tiling->tile_count.width * tiling->tile_count.height) > 2;
}

static bool
use_sysmem_rendering(struct tu_cmd_buffer *cmd)
{
   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_SYSMEM))
      return true;

   /* can't fit attachments into gmem */
   if (!cmd->state.pass->gmem_pixels)
      return true;

   return cmd->state.tiling_config.force_sysmem;
}
static void
tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     const struct tu_tile *tile)
{
   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_YIELD));

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));

   const uint32_t x1 = tile->begin.x;
   const uint32_t y1 = tile->begin.y;
   const uint32_t x2 = tile->end.x - 1;
   const uint32_t y2 = tile->end.y - 1;
   tu6_emit_window_scissor(cmd, cs, x1, y1, x2, y2);
   tu6_emit_window_offset(cmd, cs, x1, y1);

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_OVERRIDE(.so_disable = false));

   if (use_hw_binning(cmd)) {
      tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
      tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
                     A6XX_CP_REG_TEST_0_BIT(0) |
                     A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

      tu_cs_reserve(cs, 3 + 11);
      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
      tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
      tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(11));

      /* if (no overflow) */ {
         tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5, 7);
         tu_cs_emit(cs, cmd->state.tiling_config.pipe_sizes[tile->pipe] |
                        CP_SET_BIN_DATA5_0_VSC_N(tile->slot));
         tu_cs_emit_qw(cs, cmd->vsc_data.iova + tile->pipe * cmd->vsc_data_pitch);
         tu_cs_emit_qw(cs, cmd->vsc_data.iova + (tile->pipe * 4) + (32 * cmd->vsc_data_pitch));
         tu_cs_emit_qw(cs, cmd->vsc_data2.iova + (tile->pipe * cmd->vsc_data2_pitch));

         tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
         tu_cs_emit(cs, 0x0);

         /* use a NOP packet to skip over the 'else' side: */
         tu_cs_emit_pkt7(cs, CP_NOP, 2);
         /* else */ {
            tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
            tu_cs_emit(cs, 0x1);
         }
      }

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_regs(cs,
                      A6XX_RB_UNKNOWN_8804(0));

      tu_cs_emit_regs(cs,
                      A6XX_SP_TP_UNKNOWN_B304(0));

      tu_cs_emit_regs(cs,
                      A6XX_GRAS_UNKNOWN_80A4(0));
   } else {
      tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
      tu_cs_emit(cs, 0x1);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);
   }
}
static void
tu6_emit_load_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs, uint32_t a)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   const struct tu_image_view *iview = fb->attachments[a].attachment;
   const struct tu_render_pass_attachment *attachment =
      &cmd->state.pass->attachments[a];

   if (attachment->gmem_offset < 0)
      return;

   const uint32_t x1 = tiling->render_area.offset.x;
   const uint32_t y1 = tiling->render_area.offset.y;
   const uint32_t x2 = x1 + tiling->render_area.extent.width;
   const uint32_t y2 = y1 + tiling->render_area.extent.height;
   const uint32_t tile_x2 =
      tiling->tile0.offset.x + tiling->tile0.extent.width * tiling->tile_count.width;
   const uint32_t tile_y2 =
      tiling->tile0.offset.y + tiling->tile0.extent.height * tiling->tile_count.height;
   bool need_load =
      x1 != tiling->tile0.offset.x || x2 != MIN2(fb->width, tile_x2) ||
      y1 != tiling->tile0.offset.y || y2 != MIN2(fb->height, tile_y2);

   if (need_load)
      tu_finishme("improve handling of unaligned render area");

   if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_LOAD)
      need_load = true;

   if (vk_format_has_stencil(iview->vk_format) &&
       attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_LOAD)
      need_load = true;

   if (need_load) {
      tu6_emit_blit_info(cmd, cs, iview, attachment->gmem_offset, false);
      tu6_emit_blit(cmd, cs);
   }
}
static void
tu6_emit_clear_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
                          uint32_t a,
                          const VkRenderPassBeginInfo *info)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   const struct tu_image_view *iview = fb->attachments[a].attachment;
   const struct tu_render_pass_attachment *attachment =
      &cmd->state.pass->attachments[a];
   unsigned clear_mask = 0;

   /* note: this means it isn't used by any subpass and shouldn't be cleared anyway */
   if (attachment->gmem_offset < 0)
      return;

   if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
      clear_mask = 0xf;

   if (vk_format_has_stencil(iview->vk_format)) {
      clear_mask &= 0x1;
      if (attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
         clear_mask |= 0x2;
   }
   if (!clear_mask)
      return;

   tu_clear_gmem_attachment(cmd, cs, a, clear_mask,
                            &info->pClearValues[a]);
}
static void
tu6_emit_predicated_blit(struct tu_cmd_buffer *cmd,
                         struct tu_cs *cs,
                         uint32_t a,
                         uint32_t gmem_a,
                         bool resolve)
{
   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);

   tu6_emit_blit_info(cmd, cs,
                      cmd->state.framebuffer->attachments[a].attachment,
                      cmd->state.pass->attachments[gmem_a].gmem_offset, resolve);
   tu6_emit_blit(cmd, cs);

   tu_cond_exec_end(cs);
}

static void
tu6_emit_sysmem_resolve(struct tu_cmd_buffer *cmd,
                        struct tu_cs *cs,
                        uint32_t a,
                        uint32_t gmem_a)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   const struct tu_image_view *dst = fb->attachments[a].attachment;
   const struct tu_image_view *src = fb->attachments[gmem_a].attachment;

   tu_blit(cmd, cs, &(struct tu_blit) {
      .dst = sysmem_attachment_surf(dst, dst->base_layer,
                                    &cmd->state.tiling_config.render_area),
      .src = sysmem_attachment_surf(src, src->base_layer,
                                    &cmd->state.tiling_config.render_area),
      .layers = fb->layers,
   });
}
/* Emit a MSAA resolve operation, with both gmem and sysmem paths. */
static void tu6_emit_resolve(struct tu_cmd_buffer *cmd,
                             struct tu_cs *cs,
                             uint32_t a,
                             uint32_t gmem_a)
{
   if (cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE)
      return;

   tu6_emit_predicated_blit(cmd, cs, a, gmem_a, true);

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
   tu6_emit_sysmem_resolve(cmd, cs, a, gmem_a);
   tu_cond_exec_end(cs);
}

static void
tu6_emit_store_attachment(struct tu_cmd_buffer *cmd,
                          struct tu_cs *cs,
                          uint32_t a,
                          uint32_t gmem_a)
{
   if (cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE)
      return;

   tu6_emit_blit_info(cmd, cs,
                      cmd->state.framebuffer->attachments[a].attachment,
                      cmd->state.pass->attachments[gmem_a].gmem_offset, true);
   tu6_emit_blit(cmd, cs);
}
static void
tu6_emit_tile_store(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_render_pass *pass = cmd->state.pass;
   const struct tu_subpass *subpass = &pass->subpasses[pass->subpass_count - 1];

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE));

   tu6_emit_blit_scissor(cmd, cs, true);

   for (uint32_t a = 0; a < pass->attachment_count; ++a) {
      if (pass->attachments[a].gmem_offset >= 0)
         tu6_emit_store_attachment(cmd, cs, a, a);
   }

   if (subpass->resolve_attachments) {
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a != VK_ATTACHMENT_UNUSED)
            tu6_emit_store_attachment(cmd, cs, a,
                                      subpass->color_attachments[i].attachment);
      }
   }
}
static void
tu6_emit_restart_index(struct tu_cs *cs, uint32_t restart_index)
{
   tu_cs_emit_regs(cs,
                   A6XX_PC_RESTART_INDEX(restart_index));
}
static void
tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu6_emit_cache_flush(cmd, cs);

   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 0xfffff);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_CCU_CNTL, 0x10000000);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE0F, 0x3f);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B605, 0x44);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B600, 0x100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE00, 0x80);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE01, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9600, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8600, 0x880);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE04, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE03, 0x00000410);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_IBO_COUNT, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B182, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BB11, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A982, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A9A8, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AB00, 0x5);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_GS_SIV_CNTL, 0x0000ffff);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x1f);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_SRGB_CNTL, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8110, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL0, 0x401);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL1, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8818, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8819, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881A, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881C, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881D, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881E, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_88F0, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9101, 0xffff00);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9107, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9236, 1);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_SO_OVERRIDE,
                        A6XX_VPC_SO_OVERRIDE_SO_DISABLE);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9801, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9806, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9980, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9990, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9B07, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A81B, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B183, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8099, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_809B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A0, 2);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80AF, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9211, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9602, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9981, 0x3);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9E72, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9108, 0x3);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B304, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B309, 0x000000a2);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8804, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A4, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A5, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A6, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8805, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8806, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8878, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8879, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_MODE_CNTL, 0x00000000);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A008, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x0000001f);

   /* we don't use this yet.. probably best to disable.. */
   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   /* Set not to use streamout by default, */
   tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 4);
   tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
   tu_cs_emit(cs, 0);
   tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
   tu_cs_emit(cs, 0);

   tu_cs_emit_regs(cs,
                   A6XX_SP_HS_CTRL_REG0(0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_GS_CTRL_REG0(0));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_CNTL(0));

   tu_cs_emit_regs(cs,
                   A6XX_RB_LRZ_CNTL(0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(.bo = &cmd->device->border_color));
   tu_cs_emit_regs(cs,
                   A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(.bo = &cmd->device->border_color));

   tu_cs_sanity_check(cs);
}
static void
tu6_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   unsigned seqno;

   seqno = tu6_emit_event_write(cmd, cs, RB_DONE_TS, true);

   tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
   tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                  CP_WAIT_REG_MEM_0_POLL_MEMORY);
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
   tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(seqno));
   tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
   tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));

   seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);

   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_GTE, 4);
   tu_cs_emit(cs, CP_WAIT_MEM_GTE_0_RESERVED(0));
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
   tu_cs_emit(cs, CP_WAIT_MEM_GTE_3_REF(seqno));
}
static void
update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tu_cs_emit_regs(cs,
                   A6XX_VSC_BIN_SIZE(.width = tiling->tile0.extent.width,
                                     .height = tiling->tile0.extent.height),
                   A6XX_VSC_SIZE_ADDRESS(.bo = &cmd->vsc_data,
                                         .bo_offset = 32 * cmd->vsc_data_pitch));

   tu_cs_emit_regs(cs,
                   A6XX_VSC_BIN_COUNT(.nx = tiling->tile_count.width,
                                      .ny = tiling->tile_count.height));

   tu_cs_emit_pkt4(cs, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
   for (unsigned i = 0; i < 32; i++)
      tu_cs_emit(cs, tiling->pipe_config[i]);

   tu_cs_emit_regs(cs,
                   A6XX_VSC_PIPE_DATA2_ADDRESS(.bo = &cmd->vsc_data2),
                   A6XX_VSC_PIPE_DATA2_PITCH(cmd->vsc_data2_pitch),
                   A6XX_VSC_PIPE_DATA2_ARRAY_PITCH(cmd->vsc_data2.size));

   tu_cs_emit_regs(cs,
                   A6XX_VSC_PIPE_DATA_ADDRESS(.bo = &cmd->vsc_data),
                   A6XX_VSC_PIPE_DATA_PITCH(cmd->vsc_data_pitch),
                   A6XX_VSC_PIPE_DATA_ARRAY_PITCH(cmd->vsc_data.size));
}
static void
emit_vsc_overflow_test(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
   const uint32_t used_pipe_count =
      tiling->pipe_count.width * tiling->pipe_count.height;

   /* Clear vsc_scratch: */
   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
   tu_cs_emit(cs, 0x0);

   /* Check for overflow, write vsc_scratch if detected: */
   for (int i = 0; i < used_pipe_count; i++) {
      tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
      tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
                     CP_COND_WRITE5_0_WRITE_MEMORY);
      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE_REG(i)));
      tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data_pitch));
      tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(1 + cmd->vsc_data_pitch));

      tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
      tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
                     CP_COND_WRITE5_0_WRITE_MEMORY);
      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE2_REG(i)));
      tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data2_pitch));
      tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(3 + cmd->vsc_data2_pitch));
   }

   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);

   tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

   tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
   tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(OVERFLOW_FLAG_REG) |
                  CP_MEM_TO_REG_0_CNT(1 - 1));
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));

   /*
    * This is a bit awkward, we really want a way to invert the
    * CP_REG_TEST/CP_COND_REG_EXEC logic, so that we can conditionally
    * execute cmds to use hwbinning when a bit is *not* set. This
    * dance is to invert OVERFLOW_FLAG_REG
    *
    * A CP_NOP packet is used to skip executing the 'else' clause
    * if (b0 set)..
    */

   /* b0 will be set if VSC_DATA or VSC_DATA2 overflow: */
   tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
   tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
                  A6XX_CP_REG_TEST_0_BIT(0) |
                  A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

   tu_cs_reserve(cs, 3 + 7);
   tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
   tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
   tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(7));

   /* if (b0 set) */ {
      /*
       * On overflow, mirror the value to control->vsc_overflow
       * which CPU is checking to detect overflow (see
       * check_vsc_overflow())
       */
      tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
      tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(OVERFLOW_FLAG_REG) |
                     CP_REG_TO_MEM_0_CNT(0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_overflow));

      tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_pkt7(cs, CP_NOP, 2); /* skip 'else' when 'if' is taken */
   } /* else */ {
      tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
      tu_cs_emit(cs, 0x1);
   }
}
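
/* Note on the CP_COND_WRITE5 WRITE_DATA values above: the word written to
 * vsc_scratch on overflow is 1 + vsc_data_pitch or 3 + vsc_data2_pitch, so
 * the low bits identify which buffer overflowed while the rest records the
 * pitch that was exceeded, letting the CPU-side check_vsc_overflow() size
 * the replacement buffers.
 */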
static void
tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_physical_device *phys_dev = cmd->device->physical_device;
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   uint32_t x1 = tiling->tile0.offset.x;
   uint32_t y1 = tiling->tile0.offset.y;
   uint32_t x2 = tiling->render_area.offset.x + tiling->render_area.extent.width - 1;
   uint32_t y2 = tiling->render_area.offset.y + tiling->render_area.extent.height - 1;

   tu6_emit_window_scissor(cmd, cs, x1, y1, x2, y2);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_wfi(cs);

   tu_cs_emit_regs(cs,
                   A6XX_VFD_MODE_CNTL(.binning_pass = true));

   update_vsc_pipe(cmd, cs);

   tu_cs_emit_regs(cs,
                   A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));

   tu_cs_emit_regs(cs,
                   A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
   tu_cs_emit(cs, UNK_2C);

   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET(.x = 0, .y = 0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_WINDOW_OFFSET(.x = 0, .y = 0));

   /* emit IB to binning drawcmds: */
   tu_cs_emit_call(cs, &cmd->draw_cs);

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
   tu_cs_emit(cs, UNK_2D);

   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
   tu6_cache_flush(cmd, cs);

   tu_cs_emit_wfi(cs);

   tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

   emit_vsc_overflow_test(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_emit_wfi(cs);

   tu_cs_emit_regs(cs,
                   A6XX_RB_CCU_CNTL(.unknown = phys_dev->magic.RB_CCU_CNTL_gmem));

   cmd->wait_for_idle = false;
}
static void
tu_emit_sysmem_clear_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
                                uint32_t a,
                                const VkRenderPassBeginInfo *info)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   const struct tu_image_view *iview = fb->attachments[a].attachment;
   const struct tu_render_pass_attachment *attachment =
      &cmd->state.pass->attachments[a];
   unsigned clear_mask = 0;

   /* note: this means it isn't used by any subpass and shouldn't be cleared anyway */
   if (attachment->gmem_offset < 0)
      return;

   if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
      clear_mask = 0xf;
   }

   if (vk_format_has_stencil(iview->vk_format)) {
      clear_mask &= 0x1;
      if (attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
         clear_mask |= 0x2;
      if (clear_mask != 0x3)
         tu_finishme("depth/stencil only load op");
   }

   if (!clear_mask)
      return;

   tu_clear_sysmem_attachment(cmd, cs, a,
                              &info->pClearValues[a], &(struct VkClearRect) {
      .rect = info->renderArea,
      .baseArrayLayer = iview->base_layer,
      .layerCount = iview->layer_count,
   });
}
static void
tu_emit_load_clear(struct tu_cmd_buffer *cmd,
                   const VkRenderPassBeginInfo *info)
{
   struct tu_cs *cs = &cmd->draw_cs;

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);

   tu6_emit_blit_scissor(cmd, cs, true);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu6_emit_load_attachment(cmd, cs, i);

   tu6_emit_blit_scissor(cmd, cs, false);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu6_emit_clear_attachment(cmd, cs, i, info);

   tu_cond_exec_end(cs);

   /* invalidate because reading input attachments will cache GMEM and
    * the cache isn't updated when GMEM is written
    * TODO: is there a no-cache bit for textures?
    */
   if (cmd->state.subpass->input_count)
      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu_emit_sysmem_clear_attachment(cmd, cs, i, info);

   tu_cond_exec_end(cs);
}
static void
tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
                        const struct VkRect2D *renderArea)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   assert(fb->width > 0 && fb->height > 0);
   tu6_emit_window_scissor(cmd, cs, 0, 0, fb->width - 1, fb->height - 1);
   tu6_emit_window_offset(cmd, cs, 0, 0);

   tu6_emit_bin_size(cs, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */

   tu6_emit_lrz_flush(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);
   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);

   tu6_emit_wfi(cmd, cs);
   tu_cs_emit_regs(cs,
                   A6XX_RB_CCU_CNTL(0x10000000));

   /* enable stream-out, with sysmem there is only one pass: */
   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_OVERRIDE(.so_disable = false));

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_sanity_check(cs);
}
static void
tu6_sysmem_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   /* Do any resolves of the last subpass. These are handled in the
    * tile_store_ib in the gmem path.
    */
   const struct tu_subpass *subpass = cmd->state.subpass;
   if (subpass->resolve_attachments) {
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a != VK_ATTACHMENT_UNUSED)
            tu6_emit_sysmem_resolve(cmd, cs, a,
                                    subpass->color_attachments[i].attachment);
      }
   }

   tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu6_emit_lrz_flush(cmd, cs);

   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);

   tu_cs_sanity_check(cs);
}
static void
tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_physical_device *phys_dev = cmd->device->physical_device;

   tu6_emit_lrz_flush(cmd, cs);

   tu6_emit_cache_flush(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   /* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
   tu6_emit_wfi(cmd, cs);
   tu_cs_emit_regs(cs,
                   A6XX_RB_CCU_CNTL(phys_dev->magic.RB_CCU_CNTL_gmem));

   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
   if (use_hw_binning(cmd)) {
      /* enable stream-out during binning pass: */
      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable = false));

      tu6_emit_bin_size(cs,
                        tiling->tile0.extent.width,
                        tiling->tile0.extent.height,
                        A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);

      tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, true);

      tu6_emit_binning_pass(cmd, cs);

      /* and disable stream-out for draw pass: */
      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable = true));

      tu6_emit_bin_size(cs,
                        tiling->tile0.extent.width,
                        tiling->tile0.extent.height,
                        A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);

      tu_cs_emit_regs(cs,
                      A6XX_VFD_MODE_CNTL(0));

      tu_cs_emit_regs(cs, A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));

      tu_cs_emit_regs(cs, A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));

      tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
      tu_cs_emit(cs, 0x1);
   } else {
      /* no binning pass, so enable stream-out for draw pass: */
      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable = false));

      tu6_emit_bin_size(cs,
                        tiling->tile0.extent.width,
                        tiling->tile0.extent.height,
                        0x6000000);
   }

   tu_cs_sanity_check(cs);
}
static void
tu6_render_tile(struct tu_cmd_buffer *cmd,
                struct tu_cs *cs,
                const struct tu_tile *tile)
{
   tu6_emit_tile_select(cmd, cs, tile);

   tu_cs_emit_call(cs, &cmd->draw_cs);
   cmd->wait_for_idle = true;

   if (use_hw_binning(cmd)) {
      tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
      tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
                     A6XX_CP_REG_TEST_0_BIT(0) |
                     A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

      tu_cs_reserve(cs, 3 + 2);
      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
      tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
      tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(2));

      /* if (no overflow) */ {
         tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
         tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
      }
   }

   tu_cs_emit_ib(cs, &cmd->state.tile_store_ib);

   tu_cs_sanity_check(cs);
}

static void
tu6_tile_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_CNTL(0));

   tu6_emit_lrz_flush(cmd, cs);

   tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);

   tu_cs_sanity_check(cs);
}
static void
tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tu6_tile_render_begin(cmd, &cmd->cs);

   for (uint32_t y = 0; y < tiling->tile_count.height; y++) {
      for (uint32_t x = 0; x < tiling->tile_count.width; x++) {
         struct tu_tile tile;
         tu_tiling_config_get_tile(tiling, cmd->device, x, y, &tile);
         tu6_render_tile(cmd, &cmd->cs, &tile);
      }
   }

   tu6_tile_render_end(cmd, &cmd->cs);
}

static void
tu_cmd_render_sysmem(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tu6_sysmem_render_begin(cmd, &cmd->cs, &tiling->render_area);

   tu_cs_emit_call(&cmd->cs, &cmd->draw_cs);
   cmd->wait_for_idle = true;

   tu6_sysmem_render_end(cmd, &cmd->cs);
}
static void
tu_cmd_prepare_tile_store_ib(struct tu_cmd_buffer *cmd)
{
   const uint32_t tile_store_space = 32 + 23 * cmd->state.pass->attachment_count;
   struct tu_cs sub_cs;

   VkResult result =
      tu_cs_begin_sub_stream(&cmd->sub_cs, tile_store_space, &sub_cs);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   /* emit to tile-store sub_cs */
   tu6_emit_tile_store(cmd, &sub_cs);

   cmd->state.tile_store_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
}
static void
tu_cmd_update_tiling_config(struct tu_cmd_buffer *cmd,
                            const VkRect2D *render_area)
{
   const struct tu_device *dev = cmd->device;
   struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tiling->render_area = *render_area;
   tiling->force_sysmem = force_sysmem(cmd, render_area);

   tu_tiling_config_update_tile_layout(tiling, dev, cmd->state.pass->gmem_pixels);
   tu_tiling_config_update_pipe_layout(tiling, dev);
   tu_tiling_config_update_pipes(tiling, dev);
}
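
/* Note the ordering above: tile layout must run first (it sets tile0 and
 * tile_count from the gmem budget), pipe layout second (it derives
 * pipe0/pipe_count from tile_count), and update_pipes last (it fills the
 * per-pipe VSC config registers from both).
 */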
const struct tu_dynamic_state default_dynamic_state = {
   .viewport =
      {
         .count = 0,
      },
   .scissor =
      {
         .count = 0,
      },
   .line_width = 1.0f,
   .depth_bias =
      {
         .bias = 0.0f,
         .clamp = 0.0f,
         .slope = 0.0f,
      },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds =
      {
         .min = 0.0f,
         .max = 1.0f,
      },
   .stencil_compare_mask =
      {
         .front = ~0u,
         .back = ~0u,
      },
   .stencil_write_mask =
      {
         .front = ~0u,
         .back = ~0u,
      },
   .stencil_reference =
      {
         .front = 0u,
         .back = 0u,
      },
};
static void UNUSED /* FINISHME */
tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer,
                      const struct tu_dynamic_state *src)
{
   struct tu_dynamic_state *dest = &cmd_buffer->state.dynamic;
   uint32_t copy_mask = src->mask;
   uint32_t dest_mask = 0;

   tu_use_args(cmd_buffer); /* FINISHME */

   /* Make sure to copy the number of viewports/scissors because they can
    * only be specified at pipeline creation time.
    */
   dest->viewport.count = src->viewport.count;
   dest->scissor.count = src->scissor.count;
   dest->discard_rectangle.count = src->discard_rectangle.count;

   if (copy_mask & TU_DYNAMIC_VIEWPORT) {
      if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
                 src->viewport.count * sizeof(VkViewport))) {
         typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                      src->viewport.count);
         dest_mask |= TU_DYNAMIC_VIEWPORT;
      }
   }

   if (copy_mask & TU_DYNAMIC_SCISSOR) {
      if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
                 src->scissor.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                      src->scissor.count);
         dest_mask |= TU_DYNAMIC_SCISSOR;
      }
   }

   if (copy_mask & TU_DYNAMIC_LINE_WIDTH) {
      if (dest->line_width != src->line_width) {
         dest->line_width = src->line_width;
         dest_mask |= TU_DYNAMIC_LINE_WIDTH;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) {
      if (memcmp(&dest->depth_bias, &src->depth_bias,
                 sizeof(src->depth_bias))) {
         dest->depth_bias = src->depth_bias;
         dest_mask |= TU_DYNAMIC_DEPTH_BIAS;
      }
   }

   if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) {
      if (memcmp(&dest->blend_constants, &src->blend_constants,
                 sizeof(src->blend_constants))) {
         typed_memcpy(dest->blend_constants, src->blend_constants, 4);
         dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) {
      if (memcmp(&dest->depth_bounds, &src->depth_bounds,
                 sizeof(src->depth_bounds))) {
         dest->depth_bounds = src->depth_bounds;
         dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) {
      if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
                 sizeof(src->stencil_compare_mask))) {
         dest->stencil_compare_mask = src->stencil_compare_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) {
      if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
                 sizeof(src->stencil_write_mask))) {
         dest->stencil_write_mask = src->stencil_write_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) {
      if (memcmp(&dest->stencil_reference, &src->stencil_reference,
                 sizeof(src->stencil_reference))) {
         dest->stencil_reference = src->stencil_reference;
         dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE;
      }
   }

   if (copy_mask & TU_DYNAMIC_DISCARD_RECTANGLE) {
      if (memcmp(&dest->discard_rectangle.rectangles,
                 &src->discard_rectangle.rectangles,
                 src->discard_rectangle.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->discard_rectangle.rectangles,
                      src->discard_rectangle.rectangles,
                      src->discard_rectangle.count);
         dest_mask |= TU_DYNAMIC_DISCARD_RECTANGLE;
      }
   }
}
static VkResult
tu_create_cmd_buffer(struct tu_device *device,
                     struct tu_cmd_pool *pool,
                     VkCommandBufferLevel level,
                     VkCommandBuffer *pCommandBuffer)
{
   struct tu_cmd_buffer *cmd_buffer;
   cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
      cmd_buffer->queue_family_index = pool->queue_family_index;
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
      cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
   }

   tu_bo_list_init(&cmd_buffer->bo_list);
   tu_cs_init(&cmd_buffer->cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->draw_cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->draw_epilogue_cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->sub_cs, device, TU_CS_MODE_SUB_STREAM, 2048);

   *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);

   list_inithead(&cmd_buffer->upload.list);

   VkResult result = tu_bo_init_new(device, &cmd_buffer->scratch_bo, 0x1000);
   if (result != VK_SUCCESS)
      goto fail_scratch_bo;

   /* TODO: resize on overflow */
   cmd_buffer->vsc_data_pitch = device->vsc_data_pitch;
   cmd_buffer->vsc_data2_pitch = device->vsc_data2_pitch;
   cmd_buffer->vsc_data = device->vsc_data;
   cmd_buffer->vsc_data2 = device->vsc_data2;

   return VK_SUCCESS;

fail_scratch_bo:
   list_del(&cmd_buffer->pool_link);
   return result;
}

static void
tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
{
   tu_bo_finish(cmd_buffer->device, &cmd_buffer->scratch_bo);

   list_del(&cmd_buffer->pool_link);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
      free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);

   tu_cs_finish(&cmd_buffer->cs);
   tu_cs_finish(&cmd_buffer->draw_cs);
   tu_cs_finish(&cmd_buffer->draw_epilogue_cs);
   tu_cs_finish(&cmd_buffer->sub_cs);

   tu_bo_list_destroy(&cmd_buffer->bo_list);
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
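
/* Return a command buffer to the initial state: the command streams and BO
 * list are reset (their backing storage is kept for reuse) and all
 * per-bind-point descriptor state is invalidated.
 */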
static VkResult
tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
{
   cmd_buffer->wait_for_idle = true;

   cmd_buffer->record_result = VK_SUCCESS;

   tu_bo_list_reset(&cmd_buffer->bo_list);
   tu_cs_reset(&cmd_buffer->cs);
   tu_cs_reset(&cmd_buffer->draw_cs);
   tu_cs_reset(&cmd_buffer->draw_epilogue_cs);
   tu_cs_reset(&cmd_buffer->sub_cs);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
      cmd_buffer->descriptors[i].valid = 0;
      cmd_buffer->descriptors[i].push_dirty = false;
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;

   return cmd_buffer->record_result;
}

VkResult
tu_AllocateCommandBuffers(VkDevice _device,
                          const VkCommandBufferAllocateInfo *pAllocateInfo,
                          VkCommandBuffer *pCommandBuffers)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {

      if (!list_is_empty(&pool->free_cmd_buffers)) {
         struct tu_cmd_buffer *cmd_buffer = list_first_entry(
            &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);

         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

         result = tu_reset_cmd_buffer(cmd_buffer);
         cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
         cmd_buffer->level = pAllocateInfo->level;

         pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
      } else {
         result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                       &pCommandBuffers[i]);
      }
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
                            pCommandBuffers);

      /* From the Vulkan 1.0.66 spec:
       *
       * "vkAllocateCommandBuffers can be used to create multiple
       *  command buffers. If the creation of any of those command
       *  buffers fails, the implementation must destroy all
       *  successfully created command buffer objects from this
       *  command, set all entries of the pCommandBuffers array to
       *  NULL and return the error."
       */
      memset(pCommandBuffers, 0,
             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
   }

   return result;
}

void
tu_FreeCommandBuffers(VkDevice device,
                      VkCommandPool commandPool,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (cmd_buffer) {
         if (cmd_buffer->pool) {
            list_del(&cmd_buffer->pool_link);
            list_addtail(&cmd_buffer->pool_link,
                         &cmd_buffer->pool->free_cmd_buffers);
         } else
            tu_cmd_buffer_destroy(cmd_buffer);
      }
   }
}

VkResult
tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
                      VkCommandBufferResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   return tu_reset_cmd_buffer(cmd_buffer);
}
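
/* vkBeginCommandBuffer: implicitly resets if needed, clears the recorded
 * state, and opens the command streams. Primary buffers on the general queue
 * get the initial hardware setup; secondaries that continue a render pass
 * inherit pass/subpass state from pInheritanceInfo.
 */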
VkResult
tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
                      const VkCommandBufferBeginInfo *pBeginInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   VkResult result = VK_SUCCESS;

   if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
      /* If the command buffer has already been reset with
       * vkResetCommandBuffer, no need to do it again.
       */
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
   cmd_buffer->usage_flags = pBeginInfo->flags;

   tu_cs_begin(&cmd_buffer->cs);
   tu_cs_begin(&cmd_buffer->draw_cs);
   tu_cs_begin(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->scratch_seqno = 0;

   /* setup initial configuration into command buffer */
   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      switch (cmd_buffer->queue_family_index) {
      case TU_QUEUE_GENERAL:
         tu6_init_hw(cmd_buffer, &cmd_buffer->cs);
         break;
      default:
         break;
      }
   } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
              (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
      assert(pBeginInfo->pInheritanceInfo);
      cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
      cmd_buffer->state.subpass = &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;

   return VK_SUCCESS;
}

void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                        uint32_t firstBinding,
                        uint32_t bindingCount,
                        const VkBuffer *pBuffers,
                        const VkDeviceSize *pOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   assert(firstBinding + bindingCount <= MAX_VBS);

   for (uint32_t i = 0; i < bindingCount; i++) {
      cmd->state.vb.buffers[firstBinding + i] =
         tu_buffer_from_handle(pBuffers[i]);
      cmd->state.vb.offsets[firstBinding + i] = pOffsets[i];
   }

   /* VB states depend on VkPipelineVertexInputStateCreateInfo */
   cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
}

void
tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
                      VkBuffer buffer,
                      VkDeviceSize offset,
                      VkIndexType indexType)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buf, buffer);

   /* initialize/update the restart index */
   if (!cmd->state.index_buffer || cmd->state.index_type != indexType) {
      struct tu_cs *draw_cs = &cmd->draw_cs;

      tu6_emit_restart_index(
         draw_cs, indexType == VK_INDEX_TYPE_UINT32 ? 0xffffffff : 0xffff);

      tu_cs_sanity_check(draw_cs);
   }

   if (cmd->state.index_buffer != buf)
      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);

   cmd->state.index_buffer = buf;
   cmd->state.index_offset = offset;
   cmd->state.index_type = indexType;
}

void
tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                         VkPipelineBindPoint pipelineBindPoint,
                         VkPipelineLayout _layout,
                         uint32_t firstSet,
                         uint32_t descriptorSetCount,
                         const VkDescriptorSet *pDescriptorSets,
                         uint32_t dynamicOffsetCount,
                         const uint32_t *pDynamicOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
   unsigned dyn_idx = 0;

   struct tu_descriptor_state *descriptors_state =
      tu_get_descriptors_state(cmd_buffer, pipelineBindPoint);

   for (unsigned i = 0; i < descriptorSetCount; ++i) {
      unsigned idx = i + firstSet;
      TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);

      descriptors_state->sets[idx] = set;
      descriptors_state->valid |= (1u << idx);

      for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
         unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
         assert(dyn_idx < dynamicOffsetCount);

         descriptors_state->dynamic_buffers[idx] =
            set->dynamic_descriptors[j].va + pDynamicOffsets[dyn_idx];
      }
   }

   cmd_buffer->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS;
}

void tu_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer,
                                           uint32_t firstBinding,
                                           uint32_t bindingCount,
                                           const VkBuffer *pBuffers,
                                           const VkDeviceSize *pOffsets,
                                           const VkDeviceSize *pSizes)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   assert(firstBinding + bindingCount <= IR3_MAX_SO_BUFFERS);

   for (uint32_t i = 0; i < bindingCount; i++) {
      uint32_t idx = firstBinding + i;
      TU_FROM_HANDLE(tu_buffer, buf, pBuffers[i]);

      if (pOffsets[i] != 0)
         cmd->state.streamout_reset |= 1 << idx;

      cmd->state.streamout_buf.buffers[idx] = buf;
      cmd->state.streamout_buf.offsets[idx] = pOffsets[i];
      cmd->state.streamout_buf.sizes[idx] = pSizes[i];

      cmd->state.streamout_enabled |= 1 << idx;
   }

   cmd->state.dirty |= TU_CMD_DIRTY_STREAMOUT_BUFFERS;
}

void tu_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer,
                                     uint32_t firstCounterBuffer,
                                     uint32_t counterBufferCount,
                                     const VkBuffer *pCounterBuffers,
                                     const VkDeviceSize *pCounterBufferOffsets)
{
   assert(firstCounterBuffer + counterBufferCount <= IR3_MAX_SO_BUFFERS);
   /* TODO do something with counter buffer? */
}

void tu_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer,
                                   uint32_t firstCounterBuffer,
                                   uint32_t counterBufferCount,
                                   const VkBuffer *pCounterBuffers,
                                   const VkDeviceSize *pCounterBufferOffsets)
{
   assert(firstCounterBuffer + counterBufferCount <= IR3_MAX_SO_BUFFERS);
   /* TODO do something with counter buffer? */

   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   cmd->state.streamout_enabled = 0;
}
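
/* Push constants are only staged into CPU-side storage here; they are
 * uploaded to the shader const file at draw time by tu6_emit_user_consts,
 * which is why this just marks the push-constant state dirty.
 */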
void
tu_CmdPushConstants(VkCommandBuffer commandBuffer,
                    VkPipelineLayout layout,
                    VkShaderStageFlags stageFlags,
                    uint32_t offset,
                    uint32_t size,
                    const void *pValues)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   memcpy((void*) cmd->push_constants + offset, pValues, size);
   cmd->state.dirty |= TU_CMD_DIRTY_PUSH_CONSTANTS;
}

VkResult
tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   if (cmd_buffer->scratch_seqno) {
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
                     MSM_SUBMIT_BO_WRITE);
   }

   if (cmd_buffer->use_vsc_data) {
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data2,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
   }

   tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->device->border_color,
                  MSM_SUBMIT_BO_READ);

   for (uint32_t i = 0; i < cmd_buffer->draw_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   for (uint32_t i = 0; i < cmd_buffer->draw_epilogue_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_epilogue_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   for (uint32_t i = 0; i < cmd_buffer->sub_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->sub_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   tu_cs_end(&cmd_buffer->cs);
   tu_cs_end(&cmd_buffer->draw_cs);
   tu_cs_end(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;

   return cmd_buffer->record_result;
}
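
/* Besides recording the pipeline pointer and flagging the matching dirty
 * bit, the pipeline's shader binary BO and command-stream BOs are added to
 * the submit BO list right away, since later draws will reference them.
 */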
void
tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
                   VkPipelineBindPoint pipelineBindPoint,
                   VkPipeline _pipeline)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd->state.pipeline = pipeline;
      cmd->state.dirty |= TU_CMD_DIRTY_PIPELINE;
      break;
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd->state.compute_pipeline = pipeline;
      cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_PIPELINE;
      break;
   default:
      unreachable("unrecognized pipeline bind point");
      break;
   }

   tu_bo_list_add(&cmd->bo_list, &pipeline->program.binary_bo,
                  MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   for (uint32_t i = 0; i < pipeline->cs.bo_count; i++) {
      tu_bo_list_add(&cmd->bo_list, pipeline->cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }
}

void
tu_CmdSetViewport(VkCommandBuffer commandBuffer,
                  uint32_t firstViewport,
                  uint32_t viewportCount,
                  const VkViewport *pViewports)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *draw_cs = &cmd->draw_cs;

   assert(firstViewport == 0 && viewportCount == 1);
   tu6_emit_viewport(draw_cs, pViewports);

   tu_cs_sanity_check(draw_cs);
}

void
tu_CmdSetScissor(VkCommandBuffer commandBuffer,
                 uint32_t firstScissor,
                 uint32_t scissorCount,
                 const VkRect2D *pScissors)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *draw_cs = &cmd->draw_cs;

   assert(firstScissor == 0 && scissorCount == 1);
   tu6_emit_scissor(draw_cs, pScissors);

   tu_cs_sanity_check(draw_cs);
}

void
tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   cmd->state.dynamic.line_width = lineWidth;

   /* line width depends on VkPipelineRasterizationStateCreateInfo */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}

void
tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
                   float depthBiasConstantFactor,
                   float depthBiasClamp,
                   float depthBiasSlopeFactor)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *draw_cs = &cmd->draw_cs;

   tu6_emit_depth_bias(draw_cs, depthBiasConstantFactor, depthBiasClamp,
                       depthBiasSlopeFactor);

   tu_cs_sanity_check(draw_cs);
}

void
tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
                        const float blendConstants[4])
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *draw_cs = &cmd->draw_cs;

   tu6_emit_blend_constants(draw_cs, blendConstants);

   tu_cs_sanity_check(draw_cs);
}

void
tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
                     float minDepthBounds,
                     float maxDepthBounds)
{
}

void
tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
                            VkStencilFaceFlags faceMask,
                            uint32_t compareMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd->state.dynamic.stencil_compare_mask.front = compareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd->state.dynamic.stencil_compare_mask.back = compareMask;

   /* the front/back compare masks must be updated together */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}

void
tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t writeMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd->state.dynamic.stencil_write_mask.front = writeMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd->state.dynamic.stencil_write_mask.back = writeMask;

   /* the front/back write masks must be updated together */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}

void
tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t reference)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd->state.dynamic.stencil_reference.front = reference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd->state.dynamic.stencil_reference.back = reference;

   /* the front/back references must be updated together */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}

void
tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCmdBuffers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   VkResult result;

   assert(commandBufferCount > 0);

   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);

      result = tu_bo_list_merge(&cmd->bo_list, &secondary->bo_list);
      if (result != VK_SUCCESS) {
         cmd->record_result = result;
         break;
      }

      if (secondary->usage_flags &
          VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
         assert(tu_cs_is_empty(&secondary->cs));

         result = tu_cs_add_entries(&cmd->draw_cs, &secondary->draw_cs);
         if (result != VK_SUCCESS) {
            cmd->record_result = result;
            break;
         }

         result = tu_cs_add_entries(&cmd->draw_epilogue_cs,
                                    &secondary->draw_epilogue_cs);
         if (result != VK_SUCCESS) {
            cmd->record_result = result;
            break;
         }
      } else {
         assert(tu_cs_is_empty(&secondary->draw_cs));
         assert(tu_cs_is_empty(&secondary->draw_epilogue_cs));

         for (uint32_t j = 0; j < secondary->cs.bo_count; j++) {
            tu_bo_list_add(&cmd->bo_list, secondary->cs.bos[j],
                           MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
         }

         tu_cs_emit_call(&cmd->cs, &secondary->cs);
      }
   }
   cmd->state.dirty = ~0u; /* TODO: set dirty only what needs to be */
}

VkResult
tu_CreateCommandPool(VkDevice _device,
                     const VkCommandPoolCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkCommandPool *pCmdPool)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_cmd_pool *pool;

   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);
   list_inithead(&pool->free_cmd_buffers);

   pool->queue_family_index = pCreateInfo->queueFamilyIndex;

   *pCmdPool = tu_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void
tu_DestroyCommandPool(VkDevice _device,
                      VkCommandPool commandPool,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   vk_free2(&device->alloc, pAllocator, pool);
}

VkResult
tu_ResetCommandPool(VkDevice device,
                    VkCommandPool commandPool,
                    VkCommandPoolResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
   VkResult result;

   list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
                       pool_link)
   {
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}

void
tu_TrimCommandPool(VkDevice device,
                   VkCommandPool commandPool,
                   VkCommandPoolTrimFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }
}
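
/* vkCmdBeginRenderPass: computes the tiling configuration for the render
 * area, prepares the tile-store IB, emits load/clear and the first
 * subpass's MRT/ZS/MSAA state into draw_cs, and tracks every framebuffer
 * attachment BO for submission.
 */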
void
tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                      const VkRenderPassBeginInfo *pRenderPassBegin,
                      VkSubpassContents contents)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
   TU_FROM_HANDLE(tu_framebuffer, fb, pRenderPassBegin->framebuffer);

   cmd->state.pass = pass;
   cmd->state.subpass = pass->subpasses;
   cmd->state.framebuffer = fb;

   tu_cmd_update_tiling_config(cmd, &pRenderPassBegin->renderArea);
   tu_cmd_prepare_tile_store_ib(cmd);

   tu_emit_load_clear(cmd, pRenderPassBegin);

   tu6_emit_zs(cmd, cmd->state.subpass, &cmd->draw_cs);
   tu6_emit_mrt(cmd, cmd->state.subpass, &cmd->draw_cs);
   tu6_emit_msaa(cmd, cmd->state.subpass, &cmd->draw_cs);
   tu6_emit_render_cntl(cmd, cmd->state.subpass, &cmd->draw_cs, false);

   /* note: use_hw_binning only checks tiling config */
   if (use_hw_binning(cmd))
      cmd->use_vsc_data = true;

   for (uint32_t i = 0; i < fb->attachment_count; ++i) {
      const struct tu_image_view *iview = fb->attachments[i].attachment;
      tu_bo_list_add(&cmd->bo_list, iview->image->bo,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
   }
}

void
tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
                       const VkRenderPassBeginInfo *pRenderPassBeginInfo,
                       const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
{
   tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
                         pSubpassBeginInfo->contents);
}

void
tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   const struct tu_render_pass *pass = cmd->state.pass;
   struct tu_cs *cs = &cmd->draw_cs;

   const struct tu_subpass *subpass = cmd->state.subpass++;
   /* TODO:
    * if msaa samples change between subpasses,
    * attachment store is broken for some attachments
    */
   if (subpass->resolve_attachments) {
      tu6_emit_blit_scissor(cmd, cs, true);
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a != VK_ATTACHMENT_UNUSED) {
            tu6_emit_resolve(cmd, cs, a,
                             subpass->color_attachments[i].attachment);
         }
      }
   }

   /* invalidate because reading input attachments will cache GMEM and
    * the cache isn't updated when GMEM is written
    * TODO: is there a no-cache bit for textures?
    */
   if (cmd->state.subpass->input_count)
      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);

   /* emit mrt/zs/msaa/ubwc state for the subpass that is starting */
   tu6_emit_zs(cmd, cmd->state.subpass, cs);
   tu6_emit_mrt(cmd, cmd->state.subpass, cs);
   tu6_emit_msaa(cmd, cmd->state.subpass, cs);
   tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, false);

   /* Emit flushes so that input attachments will read the correct value. This
    * is for sysmem only, although it shouldn't do much harm on gmem.
    */
   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);

   /* TODO:
    * since we don't know how to do GMEM->GMEM resolve,
    * resolve attachments are resolved to memory then loaded to GMEM again if needed
    */
   if (subpass->resolve_attachments) {
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a != VK_ATTACHMENT_UNUSED && pass->attachments[a].gmem_offset >= 0) {
            tu_finishme("missing GMEM->GMEM resolve, performance will suffer\n");
            tu6_emit_predicated_blit(cmd, cs, a, a, false);
         }
      }
   }
}

void
tu_CmdNextSubpass2(VkCommandBuffer commandBuffer,
                   const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
                   const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}

struct tu_draw_info
{
   /**
    * Number of vertices.
    */
   uint32_t count;

   /**
    * Index of the first vertex.
    */
   int32_t vertex_offset;

   /**
    * First instance id.
    */
   uint32_t first_instance;

   /**
    * Number of instances.
    */
   uint32_t instance_count;

   /**
    * First index (indexed draws only).
    */
   uint32_t first_index;

   /**
    * Whether it's an indexed draw.
    */
   bool indexed;

   /**
    * Indirect draw parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
   uint32_t stride;

   /**
    * Draw count parameters resource.
    */
   struct tu_buffer *count_buffer;
   uint64_t count_buffer_offset;

   /**
    * Stream output parameters resource.
    */
   struct tu_buffer *streamout_buffer;
   uint64_t streamout_buffer_offset;
};

#define ENABLE_ALL (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
#define ENABLE_DRAW (CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
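
/* Each draw state group below is an IB that CP_SET_DRAW_STATE associates
 * with a group id and an enable mask, so the same recorded state can be
 * replayed selectively for the binning, GMEM and sysmem passes.
 */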
enum tu_draw_state_group_id
{
   TU_DRAW_STATE_PROGRAM,
   TU_DRAW_STATE_PROGRAM_BINNING,
   TU_DRAW_STATE_VI,
   TU_DRAW_STATE_VI_BINNING,
   TU_DRAW_STATE_VP,
   TU_DRAW_STATE_RAST,
   TU_DRAW_STATE_DS,
   TU_DRAW_STATE_BLEND,
   TU_DRAW_STATE_VS_CONST,
   TU_DRAW_STATE_FS_CONST,
   TU_DRAW_STATE_VS_TEX,
   TU_DRAW_STATE_FS_TEX_SYSMEM,
   TU_DRAW_STATE_FS_TEX_GMEM,
   TU_DRAW_STATE_FS_IBO,
   TU_DRAW_STATE_VS_PARAMS,

   TU_DRAW_STATE_COUNT,
};

struct tu_draw_state_group
{
   enum tu_draw_state_group_id id;
   uint32_t enable_mask;
   struct tu_cs_entry ib;
};

static const uint32_t *
sampler_ptr(struct tu_descriptor_state *descriptors_state,
            const struct tu_descriptor_map *map, unsigned i,
            unsigned array_index)
{
   assert(descriptors_state->valid & (1 << map->set[i]));

   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
   assert(map->binding[i] < set->layout->binding_count);

   const struct tu_descriptor_set_binding_layout *layout =
      &set->layout->binding[map->binding[i]];

   if (layout->immutable_samplers_offset) {
      const uint32_t *immutable_samplers =
         tu_immutable_samplers(set->layout, layout);

      return &immutable_samplers[array_index * A6XX_TEX_SAMP_DWORDS];
   }

   switch (layout->type) {
   case VK_DESCRIPTOR_TYPE_SAMPLER:
      return &set->mapped_ptr[layout->offset / 4];
   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      return &set->mapped_ptr[layout->offset / 4 + A6XX_TEX_CONST_DWORDS +
                              array_index * (A6XX_TEX_CONST_DWORDS + A6XX_TEX_SAMP_DWORDS)];
   default:
      unreachable("unimplemented descriptor type");
      break;
   }
}
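
/* Copy a texture descriptor into dst. For input attachments rendered in
 * GMEM mode, the descriptor is patched in place so the shader reads from
 * the attachment's GMEM location with the tile pitch instead of from
 * system memory.
 */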
static void
write_tex_const(struct tu_cmd_buffer *cmd,
                uint32_t *dst,
                struct tu_descriptor_state *descriptors_state,
                const struct tu_descriptor_map *map,
                unsigned i, unsigned array_index, bool is_sysmem)
{
   assert(descriptors_state->valid & (1 << map->set[i]));

   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
   assert(map->binding[i] < set->layout->binding_count);

   const struct tu_descriptor_set_binding_layout *layout =
      &set->layout->binding[map->binding[i]];

   switch (layout->type) {
   case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
   case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
   case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
      memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
                                   array_index * A6XX_TEX_CONST_DWORDS],
             A6XX_TEX_CONST_DWORDS * 4);
      break;
   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
                                   array_index *
                                   (A6XX_TEX_CONST_DWORDS +
                                    A6XX_TEX_SAMP_DWORDS)],
             A6XX_TEX_CONST_DWORDS * 4);
      break;
   default:
      unreachable("unimplemented descriptor type");
      break;
   }

   if (layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT && !is_sysmem) {
      const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
      uint32_t a = cmd->state.subpass->input_attachments[map->value[i] +
                                                         array_index].attachment;
      const struct tu_render_pass_attachment *att = &cmd->state.pass->attachments[a];

      assert(att->gmem_offset >= 0);

      dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
      dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
      dst[2] &= ~(A6XX_TEX_CONST_2_TYPE__MASK | A6XX_TEX_CONST_2_PITCH__MASK);
      dst[2] |=
         A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
         A6XX_TEX_CONST_2_PITCH(tiling->tile0.extent.width * att->cpp);
      dst[3] = 0;
      dst[4] = cmd->device->physical_device->gmem_base + att->gmem_offset;
      dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
      for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
         dst[i] = 0;

      if (cmd->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
         tu_finishme("patch input attachment pitch for secondary cmd buffer");
   }
}

static void
write_image_ibo(struct tu_cmd_buffer *cmd,
                uint32_t *dst,
                struct tu_descriptor_state *descriptors_state,
                const struct tu_descriptor_map *map,
                unsigned i, unsigned array_index)
{
   assert(descriptors_state->valid & (1 << map->set[i]));

   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
   assert(map->binding[i] < set->layout->binding_count);

   const struct tu_descriptor_set_binding_layout *layout =
      &set->layout->binding[map->binding[i]];

   assert(layout->type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);

   memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
                                (array_index * 2 + 1) * A6XX_TEX_CONST_DWORDS],
          A6XX_TEX_CONST_DWORDS * 4);
}

static uint64_t
buffer_ptr(struct tu_descriptor_state *descriptors_state,
           const struct tu_descriptor_map *map,
           unsigned i, unsigned array_index)
{
   assert(descriptors_state->valid & (1 << map->set[i]));

   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
   assert(map->binding[i] < set->layout->binding_count);

   const struct tu_descriptor_set_binding_layout *layout =
      &set->layout->binding[map->binding[i]];

   switch (layout->type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return descriptors_state->dynamic_buffers[layout->dynamic_offset_offset +
                                                array_index];
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      return (uint64_t) set->mapped_ptr[layout->offset / 4 + array_index * 2 + 1] << 32 |
             set->mapped_ptr[layout->offset / 4 + array_index * 2];
   default:
      unreachable("unimplemented descriptor type");
      break;
   }
}

static inline uint32_t
tu6_stage2opcode(gl_shader_stage type)
{
   switch (type) {
   case MESA_SHADER_VERTEX:
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_TESS_EVAL:
   case MESA_SHADER_GEOMETRY:
      return CP_LOAD_STATE6_GEOM;
   case MESA_SHADER_FRAGMENT:
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_KERNEL:
      return CP_LOAD_STATE6_FRAG;
   default:
      unreachable("bad shader type");
   }
}

static inline enum a6xx_state_block
tu6_stage2shadersb(gl_shader_stage type)
{
   switch (type) {
   case MESA_SHADER_VERTEX:
      return SB6_VS_SHADER;
   case MESA_SHADER_FRAGMENT:
      return SB6_FS_SHADER;
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_KERNEL:
      return SB6_CS_SHADER;
   default:
      unreachable("bad shader type");
   }
}
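
/* Upload the const file for one stage: range 0 of the UBO analysis state
 * holds push constants and is emitted inline (SS6_DIRECT); the remaining
 * ranges reference descriptor-set UBOs and are loaded by GPU address
 * (SS6_INDIRECT).
 */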
static void
tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
                     struct tu_descriptor_state *descriptors_state,
                     gl_shader_stage type,
                     uint32_t *push_constants)
{
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   const struct ir3_ubo_analysis_state *state = &link->ubo_state;

   for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
      if (state->range[i].start < state->range[i].end) {
         uint32_t size = state->range[i].end - state->range[i].start;
         uint32_t offset = state->range[i].start;

         /* and even if the start of the const buffer is before
          * first_immediate, the end may not be:
          */
         size = MIN2(size, (16 * link->constlen) - state->range[i].offset);

         if (size == 0)
            continue;

         /* things should be aligned to vec4: */
         debug_assert((state->range[i].offset % 16) == 0);
         debug_assert((size % 16) == 0);
         debug_assert((offset % 16) == 0);

         if (i == 0) {
            /* push constants */
            tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (size / 4));
            tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
                       CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                       CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                       CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                       CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
            tu_cs_emit(cs, 0);
            tu_cs_emit(cs, 0);
            for (unsigned i = 0; i < size / 4; i++)
               tu_cs_emit(cs, push_constants[i + offset / 4]);
            continue;
         }

         /* Look through the UBO map to find our UBO index, and get the VA for
          * that UBO.
          */
         uint64_t va = 0;
         uint32_t ubo_idx = i - 1;
         uint32_t ubo_map_base = 0;
         for (int j = 0; j < link->ubo_map.num; j++) {
            if (ubo_idx >= ubo_map_base &&
                ubo_idx < ubo_map_base + link->ubo_map.array_size[j]) {
               va = buffer_ptr(descriptors_state, &link->ubo_map, j,
                               ubo_idx - ubo_map_base);
               break;
            }
            ubo_map_base += link->ubo_map.array_size[j];
         }
         assert(va);

         tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
         tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
                    CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                    CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                    CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                    CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
         tu_cs_emit_qw(cs, va + offset);
      }
   }
}

static void
tu6_emit_ubos(struct tu_cs *cs, const struct tu_pipeline *pipeline,
              struct tu_descriptor_state *descriptors_state,
              gl_shader_stage type)
{
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];

   uint32_t num = MIN2(link->ubo_map.num_desc, link->const_state.num_ubos);
   uint32_t anum = align(num, 2);

   if (!num)
      return;

   tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (2 * anum));
   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(link->const_state.offsets.ubo) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
              CP_LOAD_STATE6_0_NUM_UNIT(anum/2));
   tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));

   unsigned emitted = 0;
   for (unsigned i = 0; emitted < num && i < link->ubo_map.num; i++) {
      for (unsigned j = 0; emitted < num && j < link->ubo_map.array_size[i]; j++) {
         tu_cs_emit_qw(cs, buffer_ptr(descriptors_state, &link->ubo_map, i, j));
         emitted++;
      }
   }

   for (; emitted < anum; emitted++) {
      tu_cs_emit(cs, 0xffffffff);
      tu_cs_emit(cs, 0xffffffff);
   }
}

static struct tu_cs_entry
tu6_emit_consts(struct tu_cmd_buffer *cmd,
                const struct tu_pipeline *pipeline,
                struct tu_descriptor_state *descriptors_state,
                gl_shader_stage type)
{
   struct tu_cs cs;
   tu_cs_begin_sub_stream(&cmd->sub_cs, 512, &cs); /* TODO: maximum size? */

   tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);
   tu6_emit_ubos(&cs, pipeline, descriptors_state, type);

   return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
}

static VkResult
tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
                   const struct tu_draw_info *draw,
                   struct tu_cs_entry *entry)
{
   /* TODO: fill out more than just base instance */
   const struct tu_program_descriptor_linkage *link =
      &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
   const struct ir3_const_state *const_state = &link->const_state;
   struct tu_cs cs;

   if (const_state->offsets.driver_param >= link->constlen) {
      *entry = (struct tu_cs_entry) {};
      return VK_SUCCESS;
   }

   VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 8, &cs);
   if (result != VK_SUCCESS)
      return result;

   tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(const_state->offsets.driver_param) |
         CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
         CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
         CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
         CP_LOAD_STATE6_0_NUM_UNIT(1));
   tu_cs_emit(&cs, 0);
   tu_cs_emit(&cs, 0);

   STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);

   tu_cs_emit(&cs, 0);
   tu_cs_emit(&cs, 0);
   tu_cs_emit(&cs, draw->first_instance);
   tu_cs_emit(&cs, 0);

   *entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
   return VK_SUCCESS;
}

static VkResult
tu6_emit_textures(struct tu_cmd_buffer *cmd,
                  const struct tu_pipeline *pipeline,
                  struct tu_descriptor_state *descriptors_state,
                  gl_shader_stage type,
                  struct tu_cs_entry *entry,
                  bool is_sysmem)
{
   struct tu_cs *draw_state = &cmd->sub_cs;
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   VkResult result;

   if (link->texture_map.num_desc == 0 && link->sampler_map.num_desc == 0) {
      *entry = (struct tu_cs_entry) {};
      return VK_SUCCESS;
   }

   /* allocate and fill texture state */
   struct ts_cs_memory tex_const;
   result = tu_cs_alloc(draw_state, link->texture_map.num_desc,
                        A6XX_TEX_CONST_DWORDS, &tex_const);
   if (result != VK_SUCCESS)
      return result;

   int tex_index = 0;
   for (unsigned i = 0; i < link->texture_map.num; i++) {
      for (int j = 0; j < link->texture_map.array_size[i]; j++) {
         write_tex_const(cmd,
                         &tex_const.map[A6XX_TEX_CONST_DWORDS * tex_index++],
                         descriptors_state, &link->texture_map, i, j,
                         is_sysmem);
      }
   }

   /* allocate and fill sampler state */
   struct ts_cs_memory tex_samp = { 0 };
   if (link->sampler_map.num_desc) {
      result = tu_cs_alloc(draw_state, link->sampler_map.num_desc,
                           A6XX_TEX_SAMP_DWORDS, &tex_samp);
      if (result != VK_SUCCESS)
         return result;

      int sampler_index = 0;
      for (unsigned i = 0; i < link->sampler_map.num; i++) {
         for (int j = 0; j < link->sampler_map.array_size[i]; j++) {
            const uint32_t *sampler = sampler_ptr(descriptors_state,
                                                  &link->sampler_map,
                                                  i, j);
            memcpy(&tex_samp.map[A6XX_TEX_SAMP_DWORDS * sampler_index++],
                   sampler, A6XX_TEX_SAMP_DWORDS * 4);
         }
      }
   }

   unsigned tex_samp_reg, tex_const_reg, tex_count_reg;
   enum a6xx_state_block sb;

   switch (type) {
   case MESA_SHADER_VERTEX:
      sb = SB6_VS_TEX;
      tex_samp_reg = REG_A6XX_SP_VS_TEX_SAMP_LO;
      tex_const_reg = REG_A6XX_SP_VS_TEX_CONST_LO;
      tex_count_reg = REG_A6XX_SP_VS_TEX_COUNT;
      break;
   case MESA_SHADER_FRAGMENT:
      sb = SB6_FS_TEX;
      tex_samp_reg = REG_A6XX_SP_FS_TEX_SAMP_LO;
      tex_const_reg = REG_A6XX_SP_FS_TEX_CONST_LO;
      tex_count_reg = REG_A6XX_SP_FS_TEX_COUNT;
      break;
   case MESA_SHADER_COMPUTE:
      sb = SB6_CS_TEX;
      tex_samp_reg = REG_A6XX_SP_CS_TEX_SAMP_LO;
      tex_const_reg = REG_A6XX_SP_CS_TEX_CONST_LO;
      tex_count_reg = REG_A6XX_SP_CS_TEX_COUNT;
      break;
   default:
      unreachable("bad state block");
   }

   struct tu_cs cs;
   result = tu_cs_begin_sub_stream(draw_state, 16, &cs);
   if (result != VK_SUCCESS)
      return result;

   if (link->sampler_map.num_desc) {
      /* output sampler state: */
      tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
      tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
                 CP_LOAD_STATE6_0_NUM_UNIT(link->sampler_map.num_desc));
      tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */

      tu_cs_emit_pkt4(&cs, tex_samp_reg, 2);
      tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
   }

   /* emit texture state: */
   tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
              CP_LOAD_STATE6_0_NUM_UNIT(link->texture_map.num_desc));
   tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */

   tu_cs_emit_pkt4(&cs, tex_const_reg, 2);
   tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */

   tu_cs_emit_pkt4(&cs, tex_count_reg, 1);
   tu_cs_emit(&cs, link->texture_map.num_desc);

   *entry = tu_cs_end_sub_stream(draw_state, &cs);
   return VK_SUCCESS;
}

static VkResult
tu6_emit_ibo(struct tu_cmd_buffer *cmd,
             const struct tu_pipeline *pipeline,
             struct tu_descriptor_state *descriptors_state,
             gl_shader_stage type,
             struct tu_cs_entry *entry)
{
   struct tu_cs *draw_state = &cmd->sub_cs;
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   VkResult result;

   unsigned num_desc = link->ssbo_map.num_desc + link->image_map.num_desc;

   if (num_desc == 0) {
      *entry = (struct tu_cs_entry) {};
      return VK_SUCCESS;
   }

   struct ts_cs_memory ibo_const;
   result = tu_cs_alloc(draw_state, num_desc,
                        A6XX_TEX_CONST_DWORDS, &ibo_const);
   if (result != VK_SUCCESS)
      return result;

   int ssbo_index = 0;
   for (unsigned i = 0; i < link->ssbo_map.num; i++) {
      for (int j = 0; j < link->ssbo_map.array_size[i]; j++) {
         uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];

         uint64_t va = buffer_ptr(descriptors_state, &link->ssbo_map, i, j);
         /* We don't expose robustBufferAccess, so leave the size unlimited. */
         uint32_t sz = MAX_STORAGE_BUFFER_RANGE / 4;

         dst[0] = A6XX_IBO_0_FMT(FMT6_32_UINT);
         dst[1] = A6XX_IBO_1_WIDTH(sz & MASK(15)) |
                  A6XX_IBO_1_HEIGHT(sz >> 15);
         dst[2] = A6XX_IBO_2_UNK4 |
                  A6XX_IBO_2_UNK31 |
                  A6XX_IBO_2_TYPE(A6XX_TEX_1D);
         dst[3] = 0;
         dst[4] = va;       /* base address lo */
         dst[5] = va >> 32; /* base address hi */
         for (int i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
            dst[i] = 0;

         ssbo_index++;
      }
   }

   for (unsigned i = 0; i < link->image_map.num; i++) {
      for (int j = 0; j < link->image_map.array_size[i]; j++) {
         uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];

         write_image_ibo(cmd, dst,
                         descriptors_state, &link->image_map, i, j);

         ssbo_index++;
      }
   }

   assert(ssbo_index == num_desc);

   struct tu_cs cs;
   result = tu_cs_begin_sub_stream(draw_state, 7, &cs);
   if (result != VK_SUCCESS)
      return result;

   uint32_t opcode, ibo_addr_reg;
   enum a6xx_state_block sb;
   enum a6xx_state_type st;

   switch (type) {
   case MESA_SHADER_FRAGMENT:
      opcode = CP_LOAD_STATE6;
      st = ST6_SHADER;
      sb = SB6_IBO;
      ibo_addr_reg = REG_A6XX_SP_IBO_LO;
      break;
   case MESA_SHADER_COMPUTE:
      opcode = CP_LOAD_STATE6_FRAG;
      st = ST6_IBO;
      sb = SB6_CS_SHADER;
      ibo_addr_reg = REG_A6XX_SP_CS_IBO_LO;
      break;
   default:
      unreachable("unsupported stage for ibos");
   }

   /* emit texture state: */
   tu_cs_emit_pkt7(&cs, opcode, 3);
   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
              CP_LOAD_STATE6_0_STATE_TYPE(st) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
              CP_LOAD_STATE6_0_NUM_UNIT(num_desc));
   tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */

   tu_cs_emit_pkt4(&cs, ibo_addr_reg, 2);
   tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */

   *entry = tu_cs_end_sub_stream(draw_state, &cs);
   return VK_SUCCESS;
}
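
/* Program the streamout buffers. A buffer that was just (re)bound gets its
 * offset written directly; otherwise the offset is reloaded with
 * CP_MEM_TO_REG from the scratch BO, where the FLUSH_SO events emitted at
 * draw time presumably save it via VPC_SO_FLUSH_BASE.
 */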
static void
tu6_emit_streamout(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_streamout_state *tf = &cmd->state.pipeline->streamout;

   for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
      struct tu_buffer *buf = cmd->state.streamout_buf.buffers[i];
      uint32_t offset;
      if (!buf)
         continue;

      offset = cmd->state.streamout_buf.offsets[i];

      tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_BASE(i, .bo = buf->bo,
                                                     .bo_offset = buf->bo_offset));
      tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_SIZE(i, buf->size));

      if (cmd->state.streamout_reset & (1 << i)) {
         offset *= tf->stride[i];

         tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_OFFSET(i, offset));
         cmd->state.streamout_reset &= ~(1 << i);
      } else {
         tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
         tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(i)) |
                    CP_MEM_TO_REG_0_SHIFT_BY_2 | CP_MEM_TO_REG_0_UNK31 |
                    CP_MEM_TO_REG_0_CNT(0));
         tu_cs_emit_qw(cs, cmd->scratch_bo.iova +
                       ctrl_offset(flush_base[i].offset));
      }

      tu_cs_emit_regs(cs, A6XX_VPC_SO_FLUSH_BASE(i, .bo = &cmd->scratch_bo,
                                                    .bo_offset =
                                                       ctrl_offset(flush_base[i])));
   }

   if (cmd->state.streamout_enabled) {
      tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 12 + (2 * tf->prog_count));
      tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
      tu_cs_emit(cs, tf->vpc_so_buf_cntl);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(0));
      tu_cs_emit(cs, tf->ncomp[0]);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(1));
      tu_cs_emit(cs, tf->ncomp[1]);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(2));
      tu_cs_emit(cs, tf->ncomp[2]);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(3));
      tu_cs_emit(cs, tf->ncomp[3]);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
      tu_cs_emit(cs, A6XX_VPC_SO_CNTL_ENABLE);
      for (unsigned i = 0; i < tf->prog_count; i++) {
         tu_cs_emit(cs, REG_A6XX_VPC_SO_PROG);
         tu_cs_emit(cs, tf->prog[i]);
      }
   } else {
      tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 4);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
      tu_cs_emit(cs, 0);
   }
}
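
/* Emit all draw state for a draw call: dynamic register state directly,
 * then the dirty draw state groups through a single CP_SET_DRAW_STATE
 * packet, and finally track the BOs (vertex, descriptor and streamout
 * buffers) that the draw will reference.
 */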
static VkResult
tu6_bind_draw_states(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     const struct tu_draw_info *draw)
{
   const struct tu_pipeline *pipeline = cmd->state.pipeline;
   const struct tu_dynamic_state *dynamic = &cmd->state.dynamic;
   struct tu_draw_state_group draw_state_groups[TU_DRAW_STATE_COUNT];
   uint32_t draw_state_group_count = 0;
   VkResult result;

   struct tu_descriptor_state *descriptors_state =
      &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];

   tu_cs_emit_regs(cs,
                   A6XX_PC_PRIMITIVE_CNTL_0(.primitive_restart =
                                            pipeline->ia.primitive_restart && draw->indexed));

   if (cmd->state.dirty &
       (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_LINE_WIDTH)) {
      tu6_emit_gras_su_cntl(cs, pipeline->rast.gras_su_cntl,
                            dynamic->line_width);
   }

   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_COMPARE_MASK)) {
      tu6_emit_stencil_compare_mask(cs, dynamic->stencil_compare_mask.front,
                                    dynamic->stencil_compare_mask.back);
   }

   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_WRITE_MASK)) {
      tu6_emit_stencil_write_mask(cs, dynamic->stencil_write_mask.front,
                                  dynamic->stencil_write_mask.back);
   }

   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_REFERENCE)) {
      tu6_emit_stencil_reference(cs, dynamic->stencil_reference.front,
                                 dynamic->stencil_reference.back);
   }

   if (cmd->state.dirty &
       (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_VERTEX_BUFFERS)) {
      for (uint32_t i = 0; i < pipeline->vi.count; i++) {
         const uint32_t binding = pipeline->vi.bindings[i];
         const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
         const VkDeviceSize offset = buf->bo_offset +
                                     cmd->state.vb.offsets[binding];
         const VkDeviceSize size =
            offset < buf->size ? buf->size - offset : 0;

         tu_cs_emit_regs(cs,
                         A6XX_VFD_FETCH_BASE(i, .bo = buf->bo, .bo_offset = offset),
                         A6XX_VFD_FETCH_SIZE(i, size));
      }
   }

   if (cmd->state.dirty & TU_CMD_DIRTY_PIPELINE) {
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_PROGRAM,
            .enable_mask = ENABLE_DRAW,
            .ib = pipeline->program.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_PROGRAM_BINNING,
            .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
            .ib = pipeline->program.binning_state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VI,
            .enable_mask = ENABLE_DRAW,
            .ib = pipeline->vi.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VI_BINNING,
            .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
            .ib = pipeline->vi.binning_state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VP,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->vp.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_RAST,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->rast.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_DS,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->ds.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_BLEND,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->blend.state_ib,
         };
   }

   if (cmd->state.dirty &
       (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS | TU_CMD_DIRTY_PUSH_CONSTANTS)) {
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VS_CONST,
            .enable_mask = ENABLE_ALL,
            .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX)
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_FS_CONST,
            .enable_mask = ENABLE_DRAW,
            .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT)
         };
   }

   if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS)
      tu6_emit_streamout(cmd, cs);

   if (cmd->state.dirty &
       (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS)) {
      struct tu_cs_entry vs_tex, fs_tex_sysmem, fs_tex_gmem, fs_ibo;

      result = tu6_emit_textures(cmd, pipeline, descriptors_state,
                                 MESA_SHADER_VERTEX, &vs_tex, false);
      if (result != VK_SUCCESS)
         return result;

      /* TODO: we could emit just one texture descriptor draw state when there
       * are no input attachments, which is the most common case. We could
       * also split out the sampler state, which doesn't change even for input
       * attachments.
       */
      result = tu6_emit_textures(cmd, pipeline, descriptors_state,
                                 MESA_SHADER_FRAGMENT, &fs_tex_sysmem, true);
      if (result != VK_SUCCESS)
         return result;

      result = tu6_emit_textures(cmd, pipeline, descriptors_state,
                                 MESA_SHADER_FRAGMENT, &fs_tex_gmem, false);
      if (result != VK_SUCCESS)
         return result;

      result = tu6_emit_ibo(cmd, pipeline, descriptors_state,
                            MESA_SHADER_FRAGMENT, &fs_ibo);
      if (result != VK_SUCCESS)
         return result;

      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VS_TEX,
            .enable_mask = ENABLE_ALL,
            .ib = vs_tex,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_FS_TEX_GMEM,
            .enable_mask = CP_SET_DRAW_STATE__0_GMEM,
            .ib = fs_tex_gmem,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_FS_TEX_SYSMEM,
            .enable_mask = CP_SET_DRAW_STATE__0_SYSMEM,
            .ib = fs_tex_sysmem,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_FS_IBO,
            .enable_mask = ENABLE_DRAW,
            .ib = fs_ibo,
         };
   }

   struct tu_cs_entry vs_params;
   result = tu6_emit_vs_params(cmd, draw, &vs_params);
   if (result != VK_SUCCESS)
      return result;

   draw_state_groups[draw_state_group_count++] =
      (struct tu_draw_state_group) {
         .id = TU_DRAW_STATE_VS_PARAMS,
         .enable_mask = ENABLE_ALL,
         .ib = vs_params,
      };

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_group_count);
   for (uint32_t i = 0; i < draw_state_group_count; i++) {
      const struct tu_draw_state_group *group = &draw_state_groups[i];
      debug_assert((group->enable_mask & ~ENABLE_ALL) == 0);
      uint32_t cp_set_draw_state =
         CP_SET_DRAW_STATE__0_COUNT(group->ib.size / 4) |
         group->enable_mask |
         CP_SET_DRAW_STATE__0_GROUP_ID(group->id);
      uint64_t iova = 0;
      if (group->ib.size) {
         iova = group->ib.bo->iova + group->ib.offset;
      } else {
         cp_set_draw_state |= CP_SET_DRAW_STATE__0_DISABLE;
      }

      tu_cs_emit(cs, cp_set_draw_state);
      tu_cs_emit_qw(cs, iova);
   }

   tu_cs_sanity_check(cs);

   if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) {
      for (uint32_t i = 0; i < MAX_VBS; i++) {
         const struct tu_buffer *buf = cmd->state.vb.buffers[i];
         if (buf)
            tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
      }
   }
   if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
      unsigned i;
      for_each_bit(i, descriptors_state->valid) {
         struct tu_descriptor_set *set = descriptors_state->sets[i];
         for (unsigned j = 0; j < set->layout->buffer_count; ++j)
            if (set->descriptors[j]) {
               tu_bo_list_add(&cmd->bo_list, set->descriptors[j],
                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
            }
      }
   }
   if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS) {
      for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
         const struct tu_buffer *buf = cmd->state.streamout_buf.buffers[i];
         if (buf) {
            tu_bo_list_add(&cmd->bo_list, buf->bo,
                           MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
         }
      }
   }

   /* Fragment shader state overwrites compute shader state, so flag the
    * compute pipeline for re-emit.
    */
   cmd->state.dirty = TU_CMD_DIRTY_COMPUTE_PIPELINE;
   return VK_SUCCESS;
}

static void
tu6_emit_draw_indirect(struct tu_cmd_buffer *cmd,
                       struct tu_cs *cs,
                       const struct tu_draw_info *draw)
{
   const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;
   bool has_gs = cmd->state.pipeline->active_stages &
                 VK_SHADER_STAGE_GEOMETRY_BIT;

   tu_cs_emit_regs(cs,
                   A6XX_VFD_INDEX_OFFSET(draw->vertex_offset),
                   A6XX_VFD_INSTANCE_START_OFFSET(draw->first_instance));

   if (draw->indexed) {
      const enum a4xx_index_size index_size =
         tu6_index_size(cmd->state.index_type);
      const uint32_t index_bytes =
         (cmd->state.index_type == VK_INDEX_TYPE_UINT32) ? 4 : 2;
      const struct tu_buffer *index_buf = cmd->state.index_buffer;
      unsigned max_indices =
         (index_buf->size - cmd->state.index_offset) / index_bytes;

      const uint32_t cp_draw_indx =
         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
         CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
         COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;

      tu_cs_emit_pkt7(cs, CP_DRAW_INDX_INDIRECT, 6);
      tu_cs_emit(cs, cp_draw_indx);
      tu_cs_emit_qw(cs, index_buf->bo->iova + cmd->state.index_offset);
      tu_cs_emit(cs, A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(max_indices));
      tu_cs_emit_qw(cs, draw->indirect->bo->iova + draw->indirect_offset);
   } else {
      const uint32_t cp_draw_indx =
         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
         COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;

      tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT, 3);
      tu_cs_emit(cs, cp_draw_indx);
      tu_cs_emit_qw(cs, draw->indirect->bo->iova + draw->indirect_offset);
   }

   tu_bo_list_add(&cmd->bo_list, draw->indirect->bo, MSM_SUBMIT_BO_READ);
}

static void
tu6_emit_draw_direct(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     const struct tu_draw_info *draw)
{
   const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;
   bool has_gs = cmd->state.pipeline->active_stages &
                 VK_SHADER_STAGE_GEOMETRY_BIT;

   tu_cs_emit_regs(cs,
                   A6XX_VFD_INDEX_OFFSET(draw->vertex_offset),
                   A6XX_VFD_INSTANCE_START_OFFSET(draw->first_instance));

   /* TODO hw binning */
   if (draw->indexed) {
      const enum a4xx_index_size index_size =
         tu6_index_size(cmd->state.index_type);
      const uint32_t index_bytes =
         (cmd->state.index_type == VK_INDEX_TYPE_UINT32) ? 4 : 2;
      const struct tu_buffer *buf = cmd->state.index_buffer;
      const VkDeviceSize offset = buf->bo_offset + cmd->state.index_offset +
                                  index_bytes * draw->first_index;
      const uint32_t size = index_bytes * draw->count;

      const uint32_t cp_draw_indx =
         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
         CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
         COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;

      tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 7);
      tu_cs_emit(cs, cp_draw_indx);
      tu_cs_emit(cs, draw->instance_count);
      tu_cs_emit(cs, draw->count);
      tu_cs_emit(cs, 0x0); /* XXX */
      tu_cs_emit_qw(cs, buf->bo->iova + offset);
      tu_cs_emit(cs, size);
   } else {
      const uint32_t cp_draw_indx =
         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
         COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;

      tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
      tu_cs_emit(cs, cp_draw_indx);
      tu_cs_emit(cs, draw->instance_count);
      tu_cs_emit(cs, draw->count);
   }
}
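
/* Common draw path: bind the draw states, then emit either the direct or
 * the indirect draw packet, and flush the streamout counters afterwards.
 */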
static void
tu_draw(struct tu_cmd_buffer *cmd, const struct tu_draw_info *draw)
{
   struct tu_cs *cs = &cmd->draw_cs;
   VkResult result;

   result = tu6_bind_draw_states(cmd, cs, draw);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   if (draw->indirect)
      tu6_emit_draw_indirect(cmd, cs, draw);
   else
      tu6_emit_draw_direct(cmd, cs, draw);

   if (cmd->state.streamout_enabled) {
      for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
         if (cmd->state.streamout_enabled & (1 << i))
            tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i, false);
      }
   }

   cmd->wait_for_idle = true;

   tu_cs_sanity_check(cs);
}
3781 tu_CmdDraw(VkCommandBuffer commandBuffer
,
3782 uint32_t vertexCount
,
3783 uint32_t instanceCount
,
3784 uint32_t firstVertex
,
3785 uint32_t firstInstance
)
3787 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
3788 struct tu_draw_info info
= {};
3790 info
.count
= vertexCount
;
3791 info
.instance_count
= instanceCount
;
3792 info
.first_instance
= firstInstance
;
3793 info
.vertex_offset
= firstVertex
;
3795 tu_draw(cmd_buffer
, &info
);

void
tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
                  uint32_t indexCount,
                  uint32_t instanceCount,
                  uint32_t firstIndex,
                  int32_t vertexOffset,
                  uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = indexCount;
   info.instance_count = instanceCount;
   info.first_index = firstIndex;
   info.vertex_offset = vertexOffset;
   info.first_instance = firstInstance;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
                   VkBuffer _buffer,
                   VkDeviceSize offset,
                   uint32_t drawCount,
                   uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
                          VkBuffer _buffer,
                          VkDeviceSize offset,
                          uint32_t drawCount,
                          uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}
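
/* VK_EXT_transform_feedback draw: the vertex count is not known on the CPU,
 * it is derived from the byte counter previously written to the counter
 * buffer, so only the buffer/offset/stride are recorded here.
 */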
void tu_CmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer,
                                    uint32_t instanceCount,
                                    uint32_t firstInstance,
                                    VkBuffer _counterBuffer,
                                    VkDeviceSize counterBufferOffset,
                                    uint32_t counterOffset,
                                    uint32_t vertexStride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _counterBuffer);

   struct tu_draw_info info = {};

   info.instance_count = instanceCount;
   info.first_instance = firstInstance;
   info.streamout_buffer = buffer;
   info.streamout_buffer_offset = counterBufferOffset;
   info.stride = vertexStride;

   tu_draw(cmd_buffer, &info);
}

struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];

   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};

static void
tu_emit_compute_driver_params(struct tu_cs *cs, struct tu_pipeline *pipeline,
                              const struct tu_dispatch_info *info)
{
   gl_shader_stage type = MESA_SHADER_COMPUTE;
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   const struct ir3_const_state *const_state = &link->const_state;
   uint32_t offset = const_state->offsets.driver_param;

   /* The shader's constant file has no room for driver params: nothing to
    * emit. */
   if (link->constlen <= offset)
      return;
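
   /* Compute driver params (workgroup counts and local size, backing e.g.
    * gl_NumWorkGroups) are uploaded as ordinary constants via CP_LOAD_STATE6.
    * NUM_UNIT is in vec4 units, hence num_consts / 4.
    */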
   if (!info->indirect) {
      uint32_t driver_params[IR3_DP_CS_COUNT] = {
         [IR3_DP_NUM_WORK_GROUPS_X] = info->blocks[0],
         [IR3_DP_NUM_WORK_GROUPS_Y] = info->blocks[1],
         [IR3_DP_NUM_WORK_GROUPS_Z] = info->blocks[2],
         [IR3_DP_LOCAL_GROUP_SIZE_X] = pipeline->compute.local_size[0],
         [IR3_DP_LOCAL_GROUP_SIZE_Y] = pipeline->compute.local_size[1],
         [IR3_DP_LOCAL_GROUP_SIZE_Z] = pipeline->compute.local_size[2],
      };

      uint32_t num_consts = MIN2(const_state->num_driver_params,
                                 (link->constlen - offset) * 4);
      /* push constants */
      tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_consts);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                 CP_LOAD_STATE6_0_NUM_UNIT(num_consts / 4));
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, 0);

      uint32_t i;
      for (i = 0; i < num_consts; i++)
         tu_cs_emit(cs, driver_params[i]);
   } else {
      tu_finishme("Indirect driver params");
   }
}
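
/* Record a compute dispatch into the primary command stream: unlike draws,
 * dispatches go into cmd->cs directly rather than the per-tile draw_cs.
 */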
static void
tu_dispatch(struct tu_cmd_buffer *cmd,
            const struct tu_dispatch_info *info)
{
   struct tu_cs *cs = &cmd->cs;
   struct tu_pipeline *pipeline = cmd->state.compute_pipeline;
   struct tu_descriptor_state *descriptors_state =
      &cmd->descriptors[VK_PIPELINE_BIND_POINT_COMPUTE];
   VkResult result;
   if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_PIPELINE)
      tu_cs_emit_ib(cs, &pipeline->program.state_ib);

   struct tu_cs_entry ib;

   ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE);
   if (ib.size)
      tu_cs_emit_ib(cs, &ib);

   tu_emit_compute_driver_params(cs, pipeline, info);

   result = tu6_emit_textures(cmd, pipeline, descriptors_state,
                              MESA_SHADER_COMPUTE, &ib, false);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   if (ib.size)
      tu_cs_emit_ib(cs, &ib);

   result = tu6_emit_ibo(cmd, pipeline, descriptors_state,
                         MESA_SHADER_COMPUTE, &ib);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   if (ib.size)
      tu_cs_emit_ib(cs, &ib);
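
   /* Every buffer referenced by a bound descriptor set has to end up in the
    * kernel submit's BO list, so walk the valid sets and add their buffers.
    */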
   if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
      unsigned i;
      for_each_bit(i, descriptors_state->valid) {
         struct tu_descriptor_set *set = descriptors_state->sets[i];
         for (unsigned j = 0; j < set->layout->buffer_count; ++j)
            if (set->descriptors[j]) {
               tu_bo_list_add(&cmd->bo_list, set->descriptors[j],
                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
            }
      }
   }

   /* Compute shader state overwrites fragment shader state, so we flag the
    * graphics pipeline for re-emit.
    */
   cmd->state.dirty = TU_CMD_DIRTY_PIPELINE;
   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));

   /* HLSQ_CS_NDRANGE takes the local size minus one and the global size in
    * threads (local size times workgroup count) per dimension. */
   const uint32_t *local_size = pipeline->compute.local_size;
   const uint32_t *num_groups = info->blocks;
   tu_cs_emit_regs(cs,
                   A6XX_HLSQ_CS_NDRANGE_0(.kerneldim = 3,
                                          .localsizex = local_size[0] - 1,
                                          .localsizey = local_size[1] - 1,
                                          .localsizez = local_size[2] - 1),
                   A6XX_HLSQ_CS_NDRANGE_1(.globalsize_x = local_size[0] * num_groups[0]),
                   A6XX_HLSQ_CS_NDRANGE_2(.globaloff_x = 0),
                   A6XX_HLSQ_CS_NDRANGE_3(.globalsize_y = local_size[1] * num_groups[1]),
                   A6XX_HLSQ_CS_NDRANGE_4(.globaloff_y = 0),
                   A6XX_HLSQ_CS_NDRANGE_5(.globalsize_z = local_size[2] * num_groups[2]),
                   A6XX_HLSQ_CS_NDRANGE_6(.globaloff_z = 0));

   tu_cs_emit_regs(cs,
                   A6XX_HLSQ_CS_KERNEL_GROUP_X(1),
                   A6XX_HLSQ_CS_KERNEL_GROUP_Y(1),
                   A6XX_HLSQ_CS_KERNEL_GROUP_Z(1));
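
   /* Two launch paths: CP_EXEC_CS_INDIRECT makes the CP read the workgroup
    * counts from GPU memory (the local size is still supplied inline), while
    * CP_EXEC_CS carries the counts in the packet itself.
    */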
   if (info->indirect) {
      uint64_t iova = tu_buffer_iova(info->indirect) + info->indirect_offset;

      tu_bo_list_add(&cmd->bo_list, info->indirect->bo,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);

      tu_cs_emit_pkt7(cs, CP_EXEC_CS_INDIRECT, 4);
      tu_cs_emit(cs, 0x00000000);
      tu_cs_emit_qw(cs, iova);
      tu_cs_emit(cs,
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(local_size[0] - 1) |
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(local_size[1] - 1) |
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(local_size[2] - 1));
   } else {
      tu_cs_emit_pkt7(cs, CP_EXEC_CS, 4);
      tu_cs_emit(cs, 0x00000000);
      tu_cs_emit(cs, CP_EXEC_CS_1_NGROUPS_X(info->blocks[0]));
      tu_cs_emit(cs, CP_EXEC_CS_2_NGROUPS_Y(info->blocks[1]));
      tu_cs_emit(cs, CP_EXEC_CS_3_NGROUPS_Z(info->blocks[2]));
   }

   tu6_emit_cache_flush(cmd, cs);
}

void
tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
                   uint32_t base_x,
                   uint32_t base_y,
                   uint32_t base_z,
                   uint32_t x,
                   uint32_t y,
                   uint32_t z)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_dispatch_info info = {};

   info.blocks[0] = x;
   info.blocks[1] = y;
   info.blocks[2] = z;

   info.offsets[0] = base_x;
   info.offsets[1] = base_y;
   info.offsets[2] = base_z;

   tu_dispatch(cmd_buffer, &info);
}

void
tu_CmdDispatch(VkCommandBuffer commandBuffer,
               uint32_t x,
               uint32_t y,
               uint32_t z)
{
   tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
}

void
tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
                       VkBuffer _buffer,
                       VkDeviceSize offset)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_dispatch_info info = {};

   info.indirect = buffer;
   info.indirect_offset = offset;

   tu_dispatch(cmd_buffer, &info);
}
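
/* Ending the render pass is where the recorded draw_cs actually runs:
 * either replayed once per tile out of GMEM, or a single time when sysmem
 * rendering was chosen.
 */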
void
tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   tu_cs_end(&cmd_buffer->draw_cs);
   tu_cs_end(&cmd_buffer->draw_epilogue_cs);

   if (use_sysmem_rendering(cmd_buffer))
      tu_cmd_render_sysmem(cmd_buffer);
   else
      tu_cmd_render_tiles(cmd_buffer);

   /* discard draw_cs and draw_epilogue_cs entries now that the tiles are
    * rendered */
   tu_cs_discard_entries(&cmd_buffer->draw_cs);
   tu_cs_begin(&cmd_buffer->draw_cs);
   tu_cs_discard_entries(&cmd_buffer->draw_epilogue_cs);
   tu_cs_begin(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->state.pass = NULL;
   cmd_buffer->state.subpass = NULL;
   cmd_buffer->state.framebuffer = NULL;
}

void
tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
                     const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdEndRenderPass(commandBuffer);
}

struct tu_barrier_info
{
   uint32_t eventCount;
   const VkEvent *pEvents;
   VkPipelineStageFlags srcStageMask;
};

static void
tu_barrier(struct tu_cmd_buffer *cmd_buffer,
           uint32_t memoryBarrierCount,
           const VkMemoryBarrier *pMemoryBarriers,
           uint32_t bufferMemoryBarrierCount,
           const VkBufferMemoryBarrier *pBufferMemoryBarriers,
           uint32_t imageMemoryBarrierCount,
           const VkImageMemoryBarrier *pImageMemoryBarriers,
           const struct tu_barrier_info *info)
{
}

void
tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
                      VkPipelineStageFlags srcStageMask,
                      VkPipelineStageFlags destStageMask,
                      VkDependencyFlags dependencyFlags,
                      uint32_t memoryBarrierCount,
                      const VkMemoryBarrier *pMemoryBarriers,
                      uint32_t bufferMemoryBarrierCount,
                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                      uint32_t imageMemoryBarrierCount,
                      const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = 0;
   info.pEvents = NULL;
   info.srcStageMask = srcStageMask;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}
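
/* A VkEvent is backed by a single dword in its own BO: set/reset write the
 * value with CP_MEM_WRITE, and waiting polls the same location with
 * CP_WAIT_REG_MEM.
 */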
static void
write_event(struct tu_cmd_buffer *cmd, struct tu_event *event, unsigned value)
{
   struct tu_cs *cs = &cmd->cs;

   tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_WRITE);

   /* TODO: any flush required before/after ? */

   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
   tu_cs_emit_qw(cs, event->bo.iova); /* ADDR_LO/HI */
   tu_cs_emit(cs, value);
}

void
tu_CmdSetEvent(VkCommandBuffer commandBuffer,
               VkEvent _event,
               VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd, event, 1);
}

void
tu_CmdResetEvent(VkCommandBuffer commandBuffer,
                 VkEvent _event,
                 VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd, event, 0);
}

void
tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
                 uint32_t eventCount,
                 const VkEvent *pEvents,
                 VkPipelineStageFlags srcStageMask,
                 VkPipelineStageFlags dstStageMask,
                 uint32_t memoryBarrierCount,
                 const VkMemoryBarrier *pMemoryBarriers,
                 uint32_t bufferMemoryBarrierCount,
                 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                 uint32_t imageMemoryBarrierCount,
                 const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *cs = &cmd->cs;

   /* TODO: any flush required before/after? (CP_WAIT_FOR_ME?) */

   for (uint32_t i = 0; i < eventCount; i++) {
      TU_FROM_HANDLE(tu_event, event, pEvents[i]);

      tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_READ);

      /* Stall the CP until the event value equals 1 (set). */
      tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
      tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                     CP_WAIT_REG_MEM_0_POLL_MEMORY);
      tu_cs_emit_qw(cs, event->bo.iova); /* POLL_ADDR_LO/HI */
      tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(1));
      tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0u));
      tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(20));
   }
}

void
tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
{
}