/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "tu_private.h"

#include "registers/adreno_pm4.xml.h"
#include "registers/adreno_common.xml.h"

#include "vk_format.h"

#define OVERFLOW_FLAG_REG REG_A6XX_CP_SCRATCH_REG(0)
void
tu_bo_list_init(struct tu_bo_list *list)
{
   list->count = list->capacity = 0;
   list->bo_infos = NULL;
}
void
tu_bo_list_destroy(struct tu_bo_list *list)
{
   free(list->bo_infos);
}
void
tu_bo_list_reset(struct tu_bo_list *list)
{
   list->count = 0;
}
/**
 * \a flags consists of MSM_SUBMIT_BO_FLAGS.
 */
static uint32_t
tu_bo_list_add_info(struct tu_bo_list *list,
                    const struct drm_msm_gem_submit_bo *bo_info)
{
   assert(bo_info->handle != 0);

   for (uint32_t i = 0; i < list->count; ++i) {
      if (list->bo_infos[i].handle == bo_info->handle) {
         assert(list->bo_infos[i].presumed == bo_info->presumed);
         list->bo_infos[i].flags |= bo_info->flags;
         return i;
      }
   }

   /* grow list->bo_infos if needed */
   if (list->count == list->capacity) {
      uint32_t new_capacity = MAX2(2 * list->count, 16);
      struct drm_msm_gem_submit_bo *new_bo_infos = realloc(
         list->bo_infos, new_capacity * sizeof(struct drm_msm_gem_submit_bo));
      if (!new_bo_infos)
         return TU_BO_LIST_FAILED;
      list->bo_infos = new_bo_infos;
      list->capacity = new_capacity;
   }

   list->bo_infos[list->count] = *bo_info;
   return list->count++;
}
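/* Note: adding a BO whose handle is already in the list does not create a
 * duplicate entry; the submit flags are ORed into the existing entry, so a
 * BO used for both read and write ends up with both flags set.
 */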
uint32_t
tu_bo_list_add(struct tu_bo_list *list,
               const struct tu_bo *bo,
               uint32_t flags)
{
   return tu_bo_list_add_info(list, &(struct drm_msm_gem_submit_bo) {
      .flags = flags,
      .handle = bo->gem_handle,
      .presumed = bo->iova,
   });
}
VkResult
tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other)
{
   for (uint32_t i = 0; i < other->count; i++) {
      if (tu_bo_list_add_info(list, other->bo_infos + i) == TU_BO_LIST_FAILED)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   return VK_SUCCESS;
}
static bool
is_linear_mipmapped(const struct tu_image_view *iview)
{
   return iview->image->layout.tile_mode == TILE6_LINEAR &&
          iview->base_mip != iview->image->level_count - 1;
}
static bool
force_sysmem(const struct tu_cmd_buffer *cmd,
             const struct VkRect2D *render_area)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   const struct tu_physical_device *device = cmd->device->physical_device;
   bool has_linear_mipmapped_store = false;
   const struct tu_render_pass *pass = cmd->state.pass;

   /* Iterate over all the places we call tu6_emit_store_attachment() */
   for (unsigned i = 0; i < pass->subpass_count; i++) {
      const struct tu_subpass *subpass = &pass->subpasses[i];
      if (subpass->resolve_attachments) {
         for (unsigned i = 0; i < subpass->color_count; i++) {
            uint32_t a = subpass->resolve_attachments[i].attachment;
            if (a != VK_ATTACHMENT_UNUSED &&
                cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
               const struct tu_image_view *iview = fb->attachments[a].attachment;
               if (is_linear_mipmapped(iview)) {
                  has_linear_mipmapped_store = true;
                  break;
               }
            }
         }
      }
   }

   for (unsigned i = 0; i < pass->attachment_count; i++) {
      if (pass->attachments[i].gmem_offset >= 0 &&
          cmd->state.pass->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
         const struct tu_image_view *iview = fb->attachments[i].attachment;
         if (is_linear_mipmapped(iview)) {
            has_linear_mipmapped_store = true;
            break;
         }
      }
   }

   /* Linear textures cannot have any padding between mipmap levels and their
    * height isn't padded, while at the same time the GMEM->MEM resolve does
    * not have per-pixel granularity, so if the image height isn't aligned to
    * the resolve granularity and the render area is tall enough, we may wind
    * up writing past the bottom of the image into the next miplevel or even
    * past the end of the image. For the last miplevel, the layout code should
    * insert enough padding so that the overdraw writes to the padding.  To
    * work around this, we force-enable sysmem rendering.
    */
   const uint32_t y2 = render_area->offset.y + render_area->extent.height;
   const uint32_t aligned_y2 = ALIGN_POT(y2, device->tile_align_h);

   return has_linear_mipmapped_store && aligned_y2 > fb->height;
}
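/* Worked example of the check above (illustrative values only): with
 * tile_align_h = 16 and a render area of offset.y = 0, extent.height = 100,
 * y2 = 100 and aligned_y2 = ALIGN_POT(100, 16) = 112.  If the framebuffer
 * is only 100 pixels tall and some attachment does a linear mipmapped
 * store, the GMEM resolve could write up to 12 rows past the image, so we
 * force sysmem rendering.
 */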
static void
tu_tiling_config_update_tile_layout(struct tu_tiling_config *tiling,
                                    const struct tu_device *dev,
                                    uint32_t pixels)
{
   const uint32_t tile_align_w = dev->physical_device->tile_align_w;
   const uint32_t tile_align_h = dev->physical_device->tile_align_h;
   const uint32_t max_tile_width = 1024; /* A6xx */

   tiling->tile0.offset = (VkOffset2D) {
      .x = tiling->render_area.offset.x & ~(tile_align_w - 1),
      .y = tiling->render_area.offset.y & ~(tile_align_h - 1),
   };

   const uint32_t ra_width =
      tiling->render_area.extent.width +
      (tiling->render_area.offset.x - tiling->tile0.offset.x);
   const uint32_t ra_height =
      tiling->render_area.extent.height +
      (tiling->render_area.offset.y - tiling->tile0.offset.y);

   /* start from 1 tile */
   tiling->tile_count = (VkExtent2D) {
      .width = 1,
      .height = 1,
   };
   tiling->tile0.extent = (VkExtent2D) {
      .width = align(ra_width, tile_align_w),
      .height = align(ra_height, tile_align_h),
   };

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN)) {
      /* start with 2x2 tiles */
      tiling->tile_count.width = 2;
      tiling->tile_count.height = 2;
      tiling->tile0.extent.width = align(DIV_ROUND_UP(ra_width, 2), tile_align_w);
      tiling->tile0.extent.height = align(DIV_ROUND_UP(ra_height, 2), tile_align_h);
   }

   /* do not exceed max tile width */
   while (tiling->tile0.extent.width > max_tile_width) {
      tiling->tile_count.width++;
      tiling->tile0.extent.width =
         align(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
   }

   /* do not exceed gmem size */
   while (tiling->tile0.extent.width * tiling->tile0.extent.height > pixels) {
      if (tiling->tile0.extent.width > MAX2(tile_align_w, tiling->tile0.extent.height)) {
         tiling->tile_count.width++;
         tiling->tile0.extent.width =
            align(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
      } else {
         /* if this assert fails then layout is impossible.. */
         assert(tiling->tile0.extent.height > tile_align_h);
         tiling->tile_count.height++;
         tiling->tile0.extent.height =
            align(DIV_ROUND_UP(ra_height, tiling->tile_count.height), tile_align_h);
      }
   }
}
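/* Worked example (illustrative values only): a 1920x1080 render area with
 * tile_align_w = 32, tile_align_h = 16 and pixels = 524288 starts as a
 * single 1920x1088 tile.  The max-width loop splits it into two 960-wide
 * columns, then the GMEM loop adds a second row (960 * 1088 = 1044480 >
 * 524288), giving a 2x2 grid of 960x544 tiles (522240 pixels each).
 */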
static void
tu_tiling_config_update_pipe_layout(struct tu_tiling_config *tiling,
                                    const struct tu_device *dev)
{
   const uint32_t max_pipe_count = 32; /* A6xx */

   /* start from 1 tile per pipe */
   tiling->pipe0 = (VkExtent2D) {
      .width = 1,
      .height = 1,
   };
   tiling->pipe_count = tiling->tile_count;

   /* do not exceed max pipe count vertically */
   while (tiling->pipe_count.height > max_pipe_count) {
      tiling->pipe0.height += 2;
      tiling->pipe_count.height =
         (tiling->tile_count.height + tiling->pipe0.height - 1) /
         tiling->pipe0.height;
   }

   /* do not exceed max pipe count */
   while (tiling->pipe_count.width * tiling->pipe_count.height >
          max_pipe_count) {
      tiling->pipe0.width += 1;
      tiling->pipe_count.width =
         (tiling->tile_count.width + tiling->pipe0.width - 1) /
         tiling->pipe0.width;
   }
}
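/* Worked example (illustrative values only): with a 9x5 tile grid, the
 * initial 1x1-tile pipe gives 45 pipes, exceeding max_pipe_count = 32.
 * The second loop grows pipe0.width to 2, so pipe_count becomes
 * ceil(9/2) x 5 = 5x5 = 25 pipes, each covering up to 2x1 tiles.
 */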
static void
tu_tiling_config_update_pipes(struct tu_tiling_config *tiling,
                              const struct tu_device *dev)
{
   const uint32_t max_pipe_count = 32; /* A6xx */
   const uint32_t used_pipe_count =
      tiling->pipe_count.width * tiling->pipe_count.height;
   const VkExtent2D last_pipe = {
      .width = (tiling->tile_count.width - 1) % tiling->pipe0.width + 1,
      .height = (tiling->tile_count.height - 1) % tiling->pipe0.height + 1,
   };

   assert(used_pipe_count <= max_pipe_count);
   assert(max_pipe_count <= ARRAY_SIZE(tiling->pipe_config));

   for (uint32_t y = 0; y < tiling->pipe_count.height; y++) {
      for (uint32_t x = 0; x < tiling->pipe_count.width; x++) {
         const uint32_t pipe_x = tiling->pipe0.width * x;
         const uint32_t pipe_y = tiling->pipe0.height * y;
         const uint32_t pipe_w = (x == tiling->pipe_count.width - 1)
                                    ? last_pipe.width
                                    : tiling->pipe0.width;
         const uint32_t pipe_h = (y == tiling->pipe_count.height - 1)
                                    ? last_pipe.height
                                    : tiling->pipe0.height;
         const uint32_t n = tiling->pipe_count.width * y + x;

         tiling->pipe_config[n] = A6XX_VSC_PIPE_CONFIG_REG_X(pipe_x) |
                                  A6XX_VSC_PIPE_CONFIG_REG_Y(pipe_y) |
                                  A6XX_VSC_PIPE_CONFIG_REG_W(pipe_w) |
                                  A6XX_VSC_PIPE_CONFIG_REG_H(pipe_h);
         tiling->pipe_sizes[n] = CP_SET_BIN_DATA5_0_VSC_SIZE(pipe_w * pipe_h);
      }
   }

   memset(tiling->pipe_config + used_pipe_count, 0,
          sizeof(uint32_t) * (max_pipe_count - used_pipe_count));
}
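/* The last row/column of pipes may cover fewer tiles than pipe0: with 9
 * tiles across and pipe0.width = 2, last_pipe.width = (9 - 1) % 2 + 1 = 1,
 * which is what the pipe_w/pipe_h selection above accounts for.
 */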
static void
tu_tiling_config_get_tile(const struct tu_tiling_config *tiling,
                          const struct tu_device *dev,
                          uint32_t tx,
                          uint32_t ty,
                          struct tu_tile *tile)
{
   /* find the pipe and the slot for tile (tx, ty) */
   const uint32_t px = tx / tiling->pipe0.width;
   const uint32_t py = ty / tiling->pipe0.height;
   const uint32_t sx = tx - tiling->pipe0.width * px;
   const uint32_t sy = ty - tiling->pipe0.height * py;

   assert(tx < tiling->tile_count.width && ty < tiling->tile_count.height);
   assert(px < tiling->pipe_count.width && py < tiling->pipe_count.height);
   assert(sx < tiling->pipe0.width && sy < tiling->pipe0.height);

   /* convert to 1D indices */
   tile->pipe = tiling->pipe_count.width * py + px;
   tile->slot = tiling->pipe0.width * sy + sx;

   /* get the blit area for the tile */
   tile->begin = (VkOffset2D) {
      .x = tiling->tile0.offset.x + tiling->tile0.extent.width * tx,
      .y = tiling->tile0.offset.y + tiling->tile0.extent.height * ty,
   };
   tile->end.x =
      (tx == tiling->tile_count.width - 1)
         ? tiling->render_area.offset.x + tiling->render_area.extent.width
         : tile->begin.x + tiling->tile0.extent.width;
   tile->end.y =
      (ty == tiling->tile_count.height - 1)
         ? tiling->render_area.offset.y + tiling->render_area.extent.height
         : tile->begin.y + tiling->tile0.extent.height;
}
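/* Example (illustrative values only): with pipe0 = 2x2 and
 * pipe_count.width = 5, tile (tx, ty) = (5, 3) maps to pipe (px, py) =
 * (2, 1), i.e. 1D pipe index 5 * 1 + 2 = 7, and slot (sx, sy) = (1, 1),
 * i.e. slot 2 * 1 + 1 = 3.
 */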
enum a3xx_msaa_samples
tu_msaa_samples(uint32_t samples)
{
   switch (samples) {
   case 1:
      return MSAA_ONE;
   case 2:
      return MSAA_TWO;
   case 4:
      return MSAA_FOUR;
   case 8:
      return MSAA_EIGHT;
   default:
      assert(!"invalid sample count");
      return MSAA_ONE;
   }
}
static enum a4xx_index_size
tu6_index_size(VkIndexType type)
{
   switch (type) {
   case VK_INDEX_TYPE_UINT16:
      return INDEX4_SIZE_16_BIT;
   case VK_INDEX_TYPE_UINT32:
      return INDEX4_SIZE_32_BIT;
   default:
      unreachable("invalid VkIndexType");
      return INDEX4_SIZE_8_BIT;
   }
}
void
tu6_emit_marker(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu_cs_emit_write_reg(cs, cmd->marker_reg, ++cmd->marker_seqno);
}
unsigned
tu6_emit_event_write(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     enum vgt_event_type event,
                     bool need_seqno)
{
   unsigned seqno = 0;

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
   tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
   if (need_seqno) {
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
      seqno = ++cmd->scratch_seqno;
      tu_cs_emit(cs, seqno);
   }

   return seqno;
}
static void
tu6_emit_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu6_emit_event_write(cmd, cs, 0x31, false);
}
static void
tu6_emit_lrz_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu6_emit_event_write(cmd, cs, LRZ_FLUSH, false);
}
static void
tu6_emit_wfi(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   if (cmd->wait_for_idle) {
      tu_cs_emit_wfi(cs);
      cmd->wait_for_idle = false;
   }
}
#define tu_image_view_ubwc_pitches(iview)                                \
   .pitch = tu_image_ubwc_pitch(iview->image, iview->base_mip),          \
   .array_pitch = tu_image_ubwc_size(iview->image, iview->base_mip) >> 2
static void
tu6_emit_zs(struct tu_cmd_buffer *cmd,
            const struct tu_subpass *subpass,
            struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   const uint32_t a = subpass->depth_stencil_attachment.attachment;
   if (a == VK_ATTACHMENT_UNUSED) {
      tu_cs_emit_regs(cs,
                      A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE),
                      A6XX_RB_DEPTH_BUFFER_PITCH(0),
                      A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(0),
                      A6XX_RB_DEPTH_BUFFER_BASE(0),
                      A6XX_RB_DEPTH_BUFFER_BASE_GMEM(0));

      tu_cs_emit_regs(cs,
                      A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE));

      tu_cs_emit_regs(cs,
                      A6XX_GRAS_LRZ_BUFFER_BASE(0),
                      A6XX_GRAS_LRZ_BUFFER_PITCH(0),
                      A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));

      tu_cs_emit_regs(cs, A6XX_RB_STENCIL_INFO(0));

      return;
   }

   const struct tu_image_view *iview = fb->attachments[a].attachment;
   enum a6xx_depth_format fmt = tu6_pipe2depth(iview->vk_format);

   tu_cs_emit_regs(cs,
                   A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt),
                   A6XX_RB_DEPTH_BUFFER_PITCH(tu_image_stride(iview->image, iview->base_mip)),
                   A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(iview->image->layout.layer_size),
                   A6XX_RB_DEPTH_BUFFER_BASE(tu_image_view_base_ref(iview)),
                   A6XX_RB_DEPTH_BUFFER_BASE_GMEM(cmd->state.pass->attachments[a].gmem_offset));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = fmt));

   tu_cs_emit_regs(cs,
                   A6XX_RB_DEPTH_FLAG_BUFFER_BASE(tu_image_view_ubwc_base_ref(iview)),
                   A6XX_RB_DEPTH_FLAG_BUFFER_PITCH(tu_image_view_ubwc_pitches(iview)));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_BUFFER_BASE(0),
                   A6XX_GRAS_LRZ_BUFFER_PITCH(0),
                   A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));

   tu_cs_emit_regs(cs,
                   A6XX_RB_STENCIL_INFO(0));
}
static void
tu6_emit_mrt(struct tu_cmd_buffer *cmd,
             const struct tu_subpass *subpass,
             struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   unsigned char mrt_comp[MAX_RTS] = { 0 };
   unsigned srgb_cntl = 0;

   for (uint32_t i = 0; i < subpass->color_count; ++i) {
      uint32_t a = subpass->color_attachments[i].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      const struct tu_image_view *iview = fb->attachments[a].attachment;
      const enum a6xx_tile_mode tile_mode =
         tu6_get_image_tile_mode(iview->image, iview->base_mip);

      mrt_comp[i] = 0xf;

      if (vk_format_is_srgb(iview->vk_format))
         srgb_cntl |= (1 << i);

      const struct tu_native_format *format =
         tu6_get_native_format(iview->vk_format);
      assert(format && format->rb >= 0);

      tu_cs_emit_regs(cs,
                      A6XX_RB_MRT_BUF_INFO(i,
                                           .color_tile_mode = tile_mode,
                                           .color_format = format->rb,
                                           .color_swap = format->swap),
                      A6XX_RB_MRT_PITCH(i, tu_image_stride(iview->image, iview->base_mip)),
                      A6XX_RB_MRT_ARRAY_PITCH(i, iview->image->layout.layer_size),
                      A6XX_RB_MRT_BASE(i, tu_image_view_base_ref(iview)),
                      A6XX_RB_MRT_BASE_GMEM(i, cmd->state.pass->attachments[a].gmem_offset));

      tu_cs_emit_regs(cs,
                      A6XX_SP_FS_MRT_REG(i,
                                         .color_format = format->rb,
                                         .color_sint = vk_format_is_sint(iview->vk_format),
                                         .color_uint = vk_format_is_uint(iview->vk_format)));

      tu_cs_emit_regs(cs,
                      A6XX_RB_MRT_FLAG_BUFFER_ADDR(i, tu_image_view_ubwc_base_ref(iview)),
                      A6XX_RB_MRT_FLAG_BUFFER_PITCH(i, tu_image_view_ubwc_pitches(iview)));
   }

   tu_cs_emit_regs(cs,
                   A6XX_RB_SRGB_CNTL(srgb_cntl));

   tu_cs_emit_regs(cs,
                   A6XX_SP_SRGB_CNTL(srgb_cntl));

   tu_cs_emit_regs(cs,
                   A6XX_RB_RENDER_COMPONENTS(
                      .rt0 = mrt_comp[0],
                      .rt1 = mrt_comp[1],
                      .rt2 = mrt_comp[2],
                      .rt3 = mrt_comp[3],
                      .rt4 = mrt_comp[4],
                      .rt5 = mrt_comp[5],
                      .rt6 = mrt_comp[6],
                      .rt7 = mrt_comp[7]));

   tu_cs_emit_regs(cs,
                   A6XX_SP_FS_RENDER_COMPONENTS(
                      .rt0 = mrt_comp[0],
                      .rt1 = mrt_comp[1],
                      .rt2 = mrt_comp[2],
                      .rt3 = mrt_comp[3],
                      .rt4 = mrt_comp[4],
                      .rt5 = mrt_comp[5],
                      .rt6 = mrt_comp[6],
                      .rt7 = mrt_comp[7]));
}
static void
tu6_emit_msaa(struct tu_cmd_buffer *cmd,
              const struct tu_subpass *subpass,
              struct tu_cs *cs)
{
   const enum a3xx_msaa_samples samples = tu_msaa_samples(subpass->samples);
   bool msaa_disable = samples == MSAA_ONE;

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_RAS_MSAA_CNTL(samples),
                   A6XX_SP_TP_DEST_MSAA_CNTL(.samples = samples,
                                             .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_RAS_MSAA_CNTL(samples),
                   A6XX_GRAS_DEST_MSAA_CNTL(.samples = samples,
                                            .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_RB_RAS_MSAA_CNTL(samples),
                   A6XX_RB_DEST_MSAA_CNTL(.samples = samples,
                                          .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_RB_MSAA_CNTL(samples));
}
static void
tu6_emit_bin_size(struct tu_cs *cs,
                  uint32_t bin_w, uint32_t bin_h, uint32_t flags)
{
   tu_cs_emit_regs(cs,
                   A6XX_GRAS_BIN_CONTROL(.binw = bin_w,
                                         .binh = bin_h,
                                         .dword = flags));

   tu_cs_emit_regs(cs,
                   A6XX_RB_BIN_CONTROL(.binw = bin_w,
                                       .binh = bin_h,
                                       .dword = flags));

   /* no flag for RB_BIN_CONTROL2... */
   tu_cs_emit_regs(cs,
                   A6XX_RB_BIN_CONTROL2(.binw = bin_w,
                                        .binh = bin_h));
}
static void
tu6_emit_render_cntl(struct tu_cmd_buffer *cmd,
                     const struct tu_subpass *subpass,
                     struct tu_cs *cs,
                     bool binning)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   uint32_t cntl = 0;
   cntl |= A6XX_RB_RENDER_CNTL_UNK4;
   if (binning) {
      cntl |= A6XX_RB_RENDER_CNTL_BINNING;
   } else {
      uint32_t mrts_ubwc_enable = 0;
      for (uint32_t i = 0; i < subpass->color_count; ++i) {
         uint32_t a = subpass->color_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         const struct tu_image_view *iview = fb->attachments[a].attachment;
         if (iview->image->layout.ubwc_layer_size != 0)
            mrts_ubwc_enable |= 1 << i;
      }

      cntl |= A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable);

      const uint32_t a = subpass->depth_stencil_attachment.attachment;
      if (a != VK_ATTACHMENT_UNUSED) {
         const struct tu_image_view *iview = fb->attachments[a].attachment;
         if (iview->image->layout.ubwc_layer_size != 0)
            cntl |= A6XX_RB_RENDER_CNTL_FLAG_DEPTH;
      }

      /* In the !binning case, we need to set RB_RENDER_CNTL in the draw_cs
       * in order to set it correctly for the different subpasses. However,
       * that means the packets we're emitting also happen during binning. So
       * we need to guard the write on !BINNING at CP execution time.
       */
      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
      tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
                     CP_COND_REG_EXEC_0_GMEM | CP_COND_REG_EXEC_0_SYSMEM);
      tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(4));
   }

   tu_cs_emit_pkt7(cs, CP_REG_WRITE, 3);
   tu_cs_emit(cs, CP_REG_WRITE_0_TRACKER(TRACK_RENDER_CNTL));
   tu_cs_emit(cs, REG_A6XX_RB_RENDER_CNTL);
   tu_cs_emit(cs, cntl);
}
static void
tu6_emit_blit_scissor(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool align)
{
   const VkRect2D *render_area = &cmd->state.tiling_config.render_area;
   uint32_t x1 = render_area->offset.x;
   uint32_t y1 = render_area->offset.y;
   uint32_t x2 = x1 + render_area->extent.width - 1;
   uint32_t y2 = y1 + render_area->extent.height - 1;

   /* TODO: alignment requirement seems to be less than tile_align_w/h */
   if (align) {
      x1 = x1 & ~cmd->device->physical_device->tile_align_w;
      y1 = y1 & ~cmd->device->physical_device->tile_align_h;
      x2 = ALIGN_POT(x2 + 1, cmd->device->physical_device->tile_align_w) - 1;
      y2 = ALIGN_POT(y2 + 1, cmd->device->physical_device->tile_align_h) - 1;
   }

   tu_cs_emit_regs(cs,
                   A6XX_RB_BLIT_SCISSOR_TL(.x = x1, .y = y1),
                   A6XX_RB_BLIT_SCISSOR_BR(.x = x2, .y = y2));
}
static void
tu6_emit_blit_info(struct tu_cmd_buffer *cmd,
                   struct tu_cs *cs,
                   const struct tu_image_view *iview,
                   uint32_t gmem_offset,
                   bool resolve)
{
   tu_cs_emit_regs(cs,
                   A6XX_RB_BLIT_INFO(.unk0 = !resolve, .gmem = !resolve));

   const struct tu_native_format *format =
      tu6_get_native_format(iview->vk_format);
   assert(format && format->rb >= 0);

   enum a6xx_tile_mode tile_mode =
      tu6_get_image_tile_mode(iview->image, iview->base_mip);
   tu_cs_emit_regs(cs,
                   A6XX_RB_BLIT_DST_INFO(
                      .tile_mode = tile_mode,
                      .samples = tu_msaa_samples(iview->image->samples),
                      .color_format = format->rb,
                      .color_swap = format->swap,
                      .flags = iview->image->layout.ubwc_layer_size != 0),
                   A6XX_RB_BLIT_DST(tu_image_view_base_ref(iview)),
                   A6XX_RB_BLIT_DST_PITCH(tu_image_stride(iview->image, iview->base_mip)),
                   A6XX_RB_BLIT_DST_ARRAY_PITCH(iview->image->layout.layer_size));

   if (iview->image->layout.ubwc_layer_size) {
      tu_cs_emit_regs(cs,
                      A6XX_RB_BLIT_FLAG_DST(tu_image_view_ubwc_base_ref(iview)),
                      A6XX_RB_BLIT_FLAG_DST_PITCH(tu_image_view_ubwc_pitches(iview)));
   }

   tu_cs_emit_regs(cs,
                   A6XX_RB_BLIT_BASE_GMEM(gmem_offset));
}
static void
tu6_emit_blit(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu6_emit_marker(cmd, cs);
   tu6_emit_event_write(cmd, cs, BLIT, false);
   tu6_emit_marker(cmd, cs);
}
static void
tu6_emit_window_scissor(struct tu_cmd_buffer *cmd,
                        struct tu_cs *cs,
                        uint32_t x1,
                        uint32_t y1,
                        uint32_t x2,
                        uint32_t y2)
{
   tu_cs_emit_regs(cs,
                   A6XX_GRAS_SC_WINDOW_SCISSOR_TL(.x = x1, .y = y1),
                   A6XX_GRAS_SC_WINDOW_SCISSOR_BR(.x = x2, .y = y2));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_RESOLVE_CNTL_1(.x = x1, .y = y1),
                   A6XX_GRAS_RESOLVE_CNTL_2(.x = x2, .y = y2));
}
static void
tu6_emit_window_offset(struct tu_cmd_buffer *cmd,
                       struct tu_cs *cs,
                       uint32_t x1,
                       uint32_t y1)
{
   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET2(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_SP_WINDOW_OFFSET(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_WINDOW_OFFSET(.x = x1, .y = y1));
}
static bool
use_hw_binning(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_NOBIN))
      return false;

   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN))
      return true;

   return (tiling->tile_count.width * tiling->tile_count.height) > 2;
}
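/* Heuristic: the binning pass produces a per-pipe visibility stream that
 * lets each tile skip draws that cannot touch it, but the pass itself has
 * a fixed cost, so it is only enabled when there are more than two tiles.
 */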
static bool
use_sysmem_rendering(struct tu_cmd_buffer *cmd)
{
   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_SYSMEM))
      return true;

   return cmd->state.tiling_config.force_sysmem;
}
static void
tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     const struct tu_tile *tile)
{
   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_YIELD));

   tu6_emit_marker(cmd, cs);
   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));
   tu6_emit_marker(cmd, cs);

   const uint32_t x1 = tile->begin.x;
   const uint32_t y1 = tile->begin.y;
   const uint32_t x2 = tile->end.x - 1;
   const uint32_t y2 = tile->end.y - 1;
   tu6_emit_window_scissor(cmd, cs, x1, y1, x2, y2);
   tu6_emit_window_offset(cmd, cs, x1, y1);

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_OVERRIDE(.so_disable = true));

   if (use_hw_binning(cmd)) {
      tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
      tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
                     A6XX_CP_REG_TEST_0_BIT(0) |
                     A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
      tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
      tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(11));

      /* if (no overflow) */ {
         tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5, 7);
         tu_cs_emit(cs, cmd->state.tiling_config.pipe_sizes[tile->pipe] |
                        CP_SET_BIN_DATA5_0_VSC_N(tile->slot));
         tu_cs_emit_qw(cs, cmd->vsc_data.iova + tile->pipe * cmd->vsc_data_pitch);
         tu_cs_emit_qw(cs, cmd->vsc_data.iova + (tile->pipe * 4) + (32 * cmd->vsc_data_pitch));
         tu_cs_emit_qw(cs, cmd->vsc_data2.iova + (tile->pipe * cmd->vsc_data2_pitch));

         tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
         tu_cs_emit(cs, 0x0);

         /* use a NOP packet to skip over the 'else' side: */
         tu_cs_emit_pkt7(cs, CP_NOP, 2);
      } /* else */ {
         tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
         tu_cs_emit(cs, 0x1);
      }

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_regs(cs,
                      A6XX_RB_UNKNOWN_8804(0));

      tu_cs_emit_regs(cs,
                      A6XX_SP_TP_UNKNOWN_B304(0));

      tu_cs_emit_regs(cs,
                      A6XX_GRAS_UNKNOWN_80A4(0));
   } else {
      tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
      tu_cs_emit(cs, 0x1);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);
   }
}
static void
tu6_emit_load_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs, uint32_t a)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   const struct tu_image_view *iview = fb->attachments[a].attachment;
   const struct tu_render_pass_attachment *attachment =
      &cmd->state.pass->attachments[a];

   if (attachment->gmem_offset < 0)
      return;

   const uint32_t x1 = tiling->render_area.offset.x;
   const uint32_t y1 = tiling->render_area.offset.y;
   const uint32_t x2 = x1 + tiling->render_area.extent.width;
   const uint32_t y2 = y1 + tiling->render_area.extent.height;
   const uint32_t tile_x2 =
      tiling->tile0.offset.x + tiling->tile0.extent.width * tiling->tile_count.width;
   const uint32_t tile_y2 =
      tiling->tile0.offset.y + tiling->tile0.extent.height * tiling->tile_count.height;
   bool need_load =
      x1 != tiling->tile0.offset.x || x2 != MIN2(fb->width, tile_x2) ||
      y1 != tiling->tile0.offset.y || y2 != MIN2(fb->height, tile_y2);

   if (need_load)
      tu_finishme("improve handling of unaligned render area");

   if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_LOAD)
      need_load = true;

   if (vk_format_has_stencil(iview->vk_format) &&
       attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_LOAD)
      need_load = true;

   if (need_load) {
      tu6_emit_blit_info(cmd, cs, iview, attachment->gmem_offset, false);
      tu6_emit_blit(cmd, cs);
   }
}
static void
tu6_emit_clear_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
                          uint32_t a,
                          const VkRenderPassBeginInfo *info)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   const struct tu_image_view *iview = fb->attachments[a].attachment;
   const struct tu_render_pass_attachment *attachment =
      &cmd->state.pass->attachments[a];
   unsigned clear_mask = 0;

   /* note: this means it isn't used by any subpass and shouldn't be cleared anyway */
   if (attachment->gmem_offset < 0)
      return;

   if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
      clear_mask = 0xf;

   if (vk_format_has_stencil(iview->vk_format)) {
      clear_mask &= 0x1;
      if (attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
         clear_mask |= 0x2;
   }

   if (!clear_mask)
      return;

   tu_clear_gmem_attachment(cmd, cs, a, clear_mask,
                            &info->pClearValues[a]);
}
static void
tu6_emit_predicated_blit(struct tu_cmd_buffer *cmd,
                         struct tu_cs *cs,
                         uint32_t a,
                         uint32_t gmem_a,
                         bool resolve)
{
   const uint32_t space = 14 + 6;
   struct tu_cond_exec_state state;

   VkResult result = tu_cond_exec_start(cmd->device, cs, &state,
                                        CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
                                        CP_COND_REG_EXEC_0_GMEM,
                                        space);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_blit_info(cmd, cs,
                      cmd->state.framebuffer->attachments[a].attachment,
                      cmd->state.pass->attachments[gmem_a].gmem_offset, resolve);
   tu6_emit_blit(cmd, cs);

   tu_cond_exec_end(cs, &state);
}
static void
tu6_emit_sysmem_resolve(struct tu_cmd_buffer *cmd,
                        struct tu_cs *cs,
                        uint32_t a,
                        uint32_t gmem_a)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   const struct tu_image_view *dst = fb->attachments[a].attachment;
   const struct tu_image_view *src = fb->attachments[gmem_a].attachment;

   tu_blit(cmd, cs, &(struct tu_blit) {
      .dst = sysmem_attachment_surf(dst, dst->base_layer,
                                    &cmd->state.tiling_config.render_area),
      .src = sysmem_attachment_surf(src, src->base_layer,
                                    &cmd->state.tiling_config.render_area),
      .layers = fb->layers,
   });
}
/* Emit a MSAA resolve operation, with both gmem and sysmem paths. */
static void tu6_emit_resolve(struct tu_cmd_buffer *cmd,
                             struct tu_cs *cs,
                             uint32_t a,
                             uint32_t gmem_a)
{
   if (cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE)
      return;

   tu6_emit_predicated_blit(cmd, cs, a, gmem_a, true);

   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   const uint32_t space = 25 + 66 * fb->layers + 17;
   struct tu_cond_exec_state state;

   VkResult result = tu_cond_exec_start(cmd->device, cs, &state,
                                        CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
                                        CP_COND_REG_EXEC_0_SYSMEM,
                                        space);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_sysmem_resolve(cmd, cs, a, gmem_a);
   tu_cond_exec_end(cs, &state);
}
static void
tu6_emit_store_attachment(struct tu_cmd_buffer *cmd,
                          struct tu_cs *cs,
                          uint32_t a,
                          uint32_t gmem_a)
{
   if (cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE)
      return;

   tu6_emit_blit_info(cmd, cs,
                      cmd->state.framebuffer->attachments[a].attachment,
                      cmd->state.pass->attachments[gmem_a].gmem_offset, true);
   tu6_emit_blit(cmd, cs);
}
static void
tu6_emit_tile_store(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_render_pass *pass = cmd->state.pass;
   const struct tu_subpass *subpass = &pass->subpasses[pass->subpass_count - 1];

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu6_emit_marker(cmd, cs);
   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE));
   tu6_emit_marker(cmd, cs);

   tu6_emit_blit_scissor(cmd, cs, true);

   for (uint32_t a = 0; a < pass->attachment_count; ++a) {
      if (pass->attachments[a].gmem_offset >= 0)
         tu6_emit_store_attachment(cmd, cs, a, a);
   }

   if (subpass->resolve_attachments) {
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a != VK_ATTACHMENT_UNUSED)
            tu6_emit_store_attachment(cmd, cs, a,
                                      subpass->color_attachments[i].attachment);
      }
   }
}
static void
tu6_emit_restart_index(struct tu_cs *cs, uint32_t restart_index)
{
   tu_cs_emit_regs(cs,
                   A6XX_PC_RESTART_INDEX(restart_index));
}
static void
tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   VkResult result = tu_cs_reserve_space(cs, 256);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_cache_flush(cmd, cs);

   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 0xfffff);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_CCU_CNTL, 0x10000000);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE0F, 0x3f);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B605, 0x44);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B600, 0x100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE00, 0x80);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE01, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9600, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8600, 0x880);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE04, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE03, 0x00000410);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_IBO_COUNT, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B182, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BB11, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AB00, 0x5);
   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x1f);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_SRGB_CNTL, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8101, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_SAMPLE_CNTL, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8110, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL0, 0x401);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL1, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_SAMPLE_CNTL, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8818, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8819, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881A, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881C, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881D, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881E, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_88F0, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9101, 0xffff00);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9107, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9236, 1);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_SO_OVERRIDE,
                        A6XX_VPC_SO_OVERRIDE_SO_DISABLE);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9801, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9806, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9980, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9B07, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A81B, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B183, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8099, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_809B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A0, 2);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80AF, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9211, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9602, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9981, 0x3);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9E72, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9108, 0x3);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B304, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B309, 0x000000a2);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8804, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A4, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A5, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A6, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8805, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8806, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8878, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8879, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);

   tu6_emit_marker(cmd, cs);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_MODE_CNTL, 0x00000000);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A008, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x0000001f);

   /* we don't use this yet.. probably best to disable.. */
   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_BUFFER_BASE(0),
                   A6XX_VPC_SO_BUFFER_SIZE(0));

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_FLUSH_BASE(0));

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_BUF_CNTL(0));

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_BUFFER_OFFSET(0, 0));

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_BUFFER_BASE(1, 0),
                   A6XX_VPC_SO_BUFFER_SIZE(1, 0));

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_BUFFER_OFFSET(1, 0),
                   A6XX_VPC_SO_FLUSH_BASE(1, 0),
                   A6XX_VPC_SO_BUFFER_BASE(2, 0),
                   A6XX_VPC_SO_BUFFER_SIZE(2, 0));

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_BUFFER_OFFSET(2, 0),
                   A6XX_VPC_SO_FLUSH_BASE(2, 0),
                   A6XX_VPC_SO_BUFFER_BASE(3, 0),
                   A6XX_VPC_SO_BUFFER_SIZE(3, 0));

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_BUFFER_OFFSET(3, 0),
                   A6XX_VPC_SO_FLUSH_BASE(3, 0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_HS_CTRL_REG0(0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_GS_CTRL_REG0(0));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_CNTL(0));

   tu_cs_emit_regs(cs,
                   A6XX_RB_LRZ_CNTL(0));

   tu_cs_sanity_check(cs);
}
static void
tu6_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   unsigned seqno;

   seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_AND_INV_EVENT, true);

   tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
   tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                  CP_WAIT_REG_MEM_0_POLL_MEMORY);
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
   tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(seqno));
   tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
   tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));

   seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);

   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_GTE, 4);
   tu_cs_emit(cs, CP_WAIT_MEM_GTE_0_RESERVED(0));
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
   tu_cs_emit(cs, CP_WAIT_MEM_GTE_3_REF(seqno));
}
static void
update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tu_cs_emit_regs(cs,
                   A6XX_VSC_BIN_SIZE(.width = tiling->tile0.extent.width,
                                     .height = tiling->tile0.extent.height),
                   A6XX_VSC_SIZE_ADDRESS(.bo = &cmd->vsc_data,
                                         .bo_offset = 32 * cmd->vsc_data_pitch));

   tu_cs_emit_regs(cs,
                   A6XX_VSC_BIN_COUNT(.nx = tiling->tile_count.width,
                                      .ny = tiling->tile_count.height));

   tu_cs_emit_pkt4(cs, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
   for (unsigned i = 0; i < 32; i++)
      tu_cs_emit(cs, tiling->pipe_config[i]);

   tu_cs_emit_regs(cs,
                   A6XX_VSC_PIPE_DATA2_ADDRESS(.bo = &cmd->vsc_data2),
                   A6XX_VSC_PIPE_DATA2_PITCH(cmd->vsc_data2_pitch),
                   A6XX_VSC_PIPE_DATA2_ARRAY_PITCH(cmd->vsc_data2.size));

   tu_cs_emit_regs(cs,
                   A6XX_VSC_PIPE_DATA_ADDRESS(.bo = &cmd->vsc_data),
                   A6XX_VSC_PIPE_DATA_PITCH(cmd->vsc_data_pitch),
                   A6XX_VSC_PIPE_DATA_ARRAY_PITCH(cmd->vsc_data.size));
}
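/* Presumed buffer layout, inferred from the offsets programmed above:
 * vsc_data holds one visibility stream per pipe, vsc_data_pitch bytes
 * apart, and the hardware writes the per-pipe stream sizes starting at
 * offset 32 * vsc_data_pitch (the VSC_SIZE_ADDRESS); vsc_data2 holds the
 * secondary per-pipe streams, vsc_data2_pitch bytes apart.
 */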
static void
emit_vsc_overflow_test(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
   const uint32_t used_pipe_count =
      tiling->pipe_count.width * tiling->pipe_count.height;

   /* Clear vsc_scratch: */
   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
   tu_cs_emit(cs, 0x0);

   /* Check for overflow, write vsc_scratch if detected: */
   for (int i = 0; i < used_pipe_count; i++) {
      tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
      tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
                     CP_COND_WRITE5_0_WRITE_MEMORY);
      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE_REG(i)));
      tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data_pitch));
      tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(1 + cmd->vsc_data_pitch));

      tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
      tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
                     CP_COND_WRITE5_0_WRITE_MEMORY);
      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE2_REG(i)));
      tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data2_pitch));
      tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(3 + cmd->vsc_data2_pitch));
   }

   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);

   tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

   tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
   tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(OVERFLOW_FLAG_REG) |
                  CP_MEM_TO_REG_0_CNT(1 - 1));
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);

   /*
    * This is a bit awkward, we really want a way to invert the
    * CP_REG_TEST/CP_COND_REG_EXEC logic, so that we can conditionally
    * execute cmds to use hwbinning when a bit is *not* set.  This
    * dance is to invert OVERFLOW_FLAG_REG.
    *
    * A CP_NOP packet is used to skip executing the 'else' clause
    * if (b0 set)..
    */

   /* b0 will be set if VSC_DATA or VSC_DATA2 overflow: */
   tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
   tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
                  A6XX_CP_REG_TEST_0_BIT(0) |
                  A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

   tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
   tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
   tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(7));

   /* if (b0 set) */ {
      /*
       * On overflow, mirror the value to control->vsc_overflow
       * which CPU is checking to detect overflow (see
       * check_vsc_overflow())
       */
      tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
      tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(OVERFLOW_FLAG_REG) |
                     CP_REG_TO_MEM_0_CNT(0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_OVERFLOW);

      tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_pkt7(cs, CP_NOP, 2); /* skip 'else' when 'if' is taken */
   } /* else */ {
      tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
      tu_cs_emit(cs, 0x1);
   }
}
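/* Net effect of the sequence above: OVERFLOW_FLAG_REG bit 0 ends up as the
 * inverse of the overflow condition (1 = no overflow), so the later
 * CP_REG_TEST/CP_COND_REG_EXEC pairs in tu6_emit_tile_select() and
 * tu6_render_tile() can predicate the hw-binning commands on "no overflow".
 */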
static void
tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_physical_device *phys_dev = cmd->device->physical_device;
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   uint32_t x1 = tiling->tile0.offset.x;
   uint32_t y1 = tiling->tile0.offset.y;
   uint32_t x2 = tiling->render_area.offset.x + tiling->render_area.extent.width - 1;
   uint32_t y2 = tiling->render_area.offset.y + tiling->render_area.extent.height - 1;

   tu6_emit_window_scissor(cmd, cs, x1, y1, x2, y2);

   tu6_emit_marker(cmd, cs);
   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
   tu6_emit_marker(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_wfi(cs);

   tu_cs_emit_regs(cs,
                   A6XX_VFD_MODE_CNTL(.binning_pass = true));

   update_vsc_pipe(cmd, cs);

   tu_cs_emit_regs(cs,
                   A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));

   tu_cs_emit_regs(cs,
                   A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
   tu_cs_emit(cs, UNK_2C);

   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET(.x = 0, .y = 0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_WINDOW_OFFSET(.x = 0, .y = 0));

   /* emit IB to binning drawcmds: */
   tu_cs_emit_call(cs, &cmd->draw_cs);

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
   tu_cs_emit(cs, UNK_2D);

   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
   tu6_cache_flush(cmd, cs);

   tu_cs_emit_wfi(cs);

   tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

   emit_vsc_overflow_test(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_emit_wfi(cs);

   tu_cs_emit_regs(cs,
                   A6XX_RB_CCU_CNTL(.unknown = phys_dev->magic.RB_CCU_CNTL_gmem));

   cmd->wait_for_idle = false;
}
static void
tu_emit_sysmem_clear_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
                                uint32_t a,
                                const VkRenderPassBeginInfo *info)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   const struct tu_image_view *iview = fb->attachments[a].attachment;
   const struct tu_render_pass_attachment *attachment =
      &cmd->state.pass->attachments[a];
   unsigned clear_mask = 0;

   /* note: this means it isn't used by any subpass and shouldn't be cleared anyway */
   if (attachment->gmem_offset < 0)
      return;

   if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
      clear_mask = 0xf;
   }

   if (vk_format_has_stencil(iview->vk_format)) {
      clear_mask &= 0x1;
      if (attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
         clear_mask |= 0x2;
      if (clear_mask != 0x3)
         tu_finishme("depth/stencil only load op");
   }

   if (!clear_mask)
      return;

   tu_clear_sysmem_attachment(cmd, cs, a,
                              &info->pClearValues[a], &(struct VkClearRect) {
      .rect = info->renderArea,
      .baseArrayLayer = iview->base_layer,
      .layerCount = iview->layer_count,
   });
}
static void
tu_cmd_prepare_sysmem_clear_ib(struct tu_cmd_buffer *cmd,
                               const VkRenderPassBeginInfo *info)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   const uint32_t blit_cmd_space = 25 + 66 * fb->layers + 17;
   const uint32_t clear_space =
      blit_cmd_space * cmd->state.pass->attachment_count + 5;

   struct tu_cs sub_cs;

   VkResult result =
      tu_cs_begin_sub_stream(&cmd->sub_cs, clear_space, &sub_cs);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu_emit_sysmem_clear_attachment(cmd, &sub_cs, i, info);

   /* TODO: We shouldn't need this flush, but without it we'd have an empty IB
    * when nothing clears which we currently can't handle.
    */
   tu_cs_reserve_space(&sub_cs, 5);
   tu6_emit_event_write(cmd, &sub_cs, PC_CCU_FLUSH_COLOR_TS, true);

   cmd->state.sysmem_clear_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
}
static void
tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
                        const struct VkRect2D *renderArea)
{
   VkResult result = tu_cs_reserve_space(cs, 1024);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   if (fb->width > 0 && fb->height > 0) {
      tu6_emit_window_scissor(cmd, cs,
                              0, 0, fb->width - 1, fb->height - 1);
   } else {
      tu6_emit_window_scissor(cmd, cs, 0, 0, 0, 0);
   }

   tu6_emit_window_offset(cmd, cs, 0, 0);

   tu6_emit_bin_size(cs, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */

   tu_cs_emit_ib(cs, &cmd->state.sysmem_clear_ib);

   tu6_emit_lrz_flush(cmd, cs);

   tu6_emit_marker(cmd, cs);
   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
   tu6_emit_marker(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);
   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);

   tu6_emit_wfi(cmd, cs);
   tu_cs_emit_regs(cs,
                   A6XX_RB_CCU_CNTL(0x10000000));

   /* enable stream-out, with sysmem there is only one pass: */
   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_OVERRIDE(.so_disable = false));

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_sanity_check(cs);
}
static void
tu6_sysmem_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   /* Do any resolves of the last subpass. These are handled in the
    * tile_store_ib in the gmem path.
    */
   const struct tu_subpass *subpass = cmd->state.subpass;
   if (subpass->resolve_attachments) {
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a != VK_ATTACHMENT_UNUSED)
            tu6_emit_sysmem_resolve(cmd, cs, a,
                                    subpass->color_attachments[i].attachment);
      }
   }

   const uint32_t space = 14 + tu_cs_get_call_size(&cmd->draw_epilogue_cs);
   VkResult result = tu_cs_reserve_space(cs, space);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu6_emit_lrz_flush(cmd, cs);

   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);

   tu_cs_sanity_check(cs);
}
static void
tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_physical_device *phys_dev = cmd->device->physical_device;

   VkResult result = tu_cs_reserve_space(cs, 1024);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_lrz_flush(cmd, cs);

   tu6_emit_cache_flush(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   /* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
   tu6_emit_wfi(cmd, cs);
   tu_cs_emit_regs(cs,
                   A6XX_RB_CCU_CNTL(phys_dev->magic.RB_CCU_CNTL_gmem));

   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
   if (use_hw_binning(cmd)) {
      tu6_emit_bin_size(cs,
                        tiling->tile0.extent.width,
                        tiling->tile0.extent.height,
                        A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);

      tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, true);

      tu6_emit_binning_pass(cmd, cs);

      tu6_emit_bin_size(cs,
                        tiling->tile0.extent.width,
                        tiling->tile0.extent.height,
                        A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);

      tu_cs_emit_regs(cs,
                      A6XX_VFD_MODE_CNTL(0));

      tu_cs_emit_regs(cs, A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));

      tu_cs_emit_regs(cs, A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));

      tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
      tu_cs_emit(cs, 0x1);
   } else {
      tu6_emit_bin_size(cs,
                        tiling->tile0.extent.width,
                        tiling->tile0.extent.height,
                        0x6000000);
   }

   tu_cs_sanity_check(cs);
}
static void
tu6_render_tile(struct tu_cmd_buffer *cmd,
                struct tu_cs *cs,
                const struct tu_tile *tile)
{
   const uint32_t render_tile_space = 256 + tu_cs_get_call_size(&cmd->draw_cs);
   VkResult result = tu_cs_reserve_space(cs, render_tile_space);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_tile_select(cmd, cs, tile);
   tu_cs_emit_ib(cs, &cmd->state.tile_load_ib);

   tu_cs_emit_call(cs, &cmd->draw_cs);
   cmd->wait_for_idle = true;

   if (use_hw_binning(cmd)) {
      tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
      tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
                     A6XX_CP_REG_TEST_0_BIT(0) |
                     A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
      tu_cs_emit(cs, 0x10000000);
      tu_cs_emit(cs, 2); /* conditionally execute next 2 dwords */

      /* if (no overflow) */ {
         tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
         tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
      }
   }

   tu_cs_emit_ib(cs, &cmd->state.tile_store_ib);

   tu_cs_sanity_check(cs);
}
static void
tu6_tile_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const uint32_t space = 16 + tu_cs_get_call_size(&cmd->draw_epilogue_cs);
   VkResult result = tu_cs_reserve_space(cs, space);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_CNTL(0));

   tu6_emit_lrz_flush(cmd, cs);

   tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);

   tu_cs_sanity_check(cs);
}
static void
tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tu6_tile_render_begin(cmd, &cmd->cs);

   for (uint32_t y = 0; y < tiling->tile_count.height; y++) {
      for (uint32_t x = 0; x < tiling->tile_count.width; x++) {
         struct tu_tile tile;
         tu_tiling_config_get_tile(tiling, cmd->device, x, y, &tile);
         tu6_render_tile(cmd, &cmd->cs, &tile);
      }
   }

   tu6_tile_render_end(cmd, &cmd->cs);
}
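/* Tiles are walked row-major, and each iteration re-emits the per-tile
 * setup and replays the recorded draw_cs, so the command-stream size
 * scales with tile_count.width * tile_count.height.
 */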
static void
tu_cmd_render_sysmem(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tu6_sysmem_render_begin(cmd, &cmd->cs, &tiling->render_area);

   const uint32_t space = tu_cs_get_call_size(&cmd->draw_cs);
   VkResult result = tu_cs_reserve_space(&cmd->cs, space);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu_cs_emit_call(&cmd->cs, &cmd->draw_cs);
   cmd->wait_for_idle = true;

   tu6_sysmem_render_end(cmd, &cmd->cs);
}
static void
tu_cmd_prepare_tile_load_ib(struct tu_cmd_buffer *cmd,
                            const VkRenderPassBeginInfo *info)
{
   const uint32_t tile_load_space =
      2 * 3 /* blit_scissor */ +
      (20 /* load */ + 19 /* clear */) * cmd->state.pass->attachment_count +
      2 /* cache invalidate */;

   struct tu_cs sub_cs;

   VkResult result =
      tu_cs_begin_sub_stream(&cmd->sub_cs, tile_load_space, &sub_cs);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_blit_scissor(cmd, &sub_cs, true);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu6_emit_load_attachment(cmd, &sub_cs, i);

   tu6_emit_blit_scissor(cmd, &sub_cs, false);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu6_emit_clear_attachment(cmd, &sub_cs, i, info);

   /* invalidate because reading input attachments will cache GMEM and
    * the cache isn't updated when GMEM is written
    * TODO: is there a no-cache bit for textures?
    */
   if (cmd->state.subpass->input_count)
      tu6_emit_event_write(cmd, &sub_cs, CACHE_INVALIDATE, false);

   cmd->state.tile_load_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
}
static void
tu_cmd_prepare_tile_store_ib(struct tu_cmd_buffer *cmd)
{
   const uint32_t tile_store_space = 32 + 23 * cmd->state.pass->attachment_count;
   struct tu_cs sub_cs;

   VkResult result =
      tu_cs_begin_sub_stream(&cmd->sub_cs, tile_store_space, &sub_cs);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   /* emit to tile-store sub_cs */
   tu6_emit_tile_store(cmd, &sub_cs);

   cmd->state.tile_store_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
}
static void
tu_cmd_update_tiling_config(struct tu_cmd_buffer *cmd,
                            const VkRect2D *render_area)
{
   const struct tu_device *dev = cmd->device;
   struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tiling->render_area = *render_area;
   tiling->force_sysmem = force_sysmem(cmd, render_area);

   tu_tiling_config_update_tile_layout(tiling, dev, cmd->state.pass->gmem_pixels);
   tu_tiling_config_update_pipe_layout(tiling, dev);
   tu_tiling_config_update_pipes(tiling, dev);
}
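/* The tiling configuration is derived in three steps: the tile layout
 * (tile size, constrained by the GMEM pixel budget and max tile width),
 * the pipe layout (how the tile grid is divided among the VSC pipes), and
 * finally the per-pipe config/size register values.
 */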
const struct tu_dynamic_state default_dynamic_state = {
   .viewport =
      {
         .count = 0,
      },
   .scissor =
      {
         .count = 0,
      },
   .line_width = 1.0f,
   .depth_bias =
      {
         .bias = 0.0f,
         .clamp = 0.0f,
         .slope = 0.0f,
      },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds =
      {
         .min = 0.0f,
         .max = 1.0f,
      },
   .stencil_compare_mask =
      {
         .front = ~0u,
         .back = ~0u,
      },
   .stencil_write_mask =
      {
         .front = ~0u,
         .back = ~0u,
      },
   .stencil_reference =
      {
         .front = 0u,
         .back = 0u,
      },
};
static void UNUSED /* FINISHME */
tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer,
                      const struct tu_dynamic_state *src)
{
   struct tu_dynamic_state *dest = &cmd_buffer->state.dynamic;
   uint32_t copy_mask = src->mask;
   uint32_t dest_mask = 0;

   tu_use_args(cmd_buffer); /* FINISHME */

   /* Make sure to copy the number of viewports/scissors because they can
    * only be specified at pipeline creation time.
    */
   dest->viewport.count = src->viewport.count;
   dest->scissor.count = src->scissor.count;
   dest->discard_rectangle.count = src->discard_rectangle.count;

   if (copy_mask & TU_DYNAMIC_VIEWPORT) {
      if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
                 src->viewport.count * sizeof(VkViewport))) {
         typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                      src->viewport.count);
         dest_mask |= TU_DYNAMIC_VIEWPORT;
      }
   }

   if (copy_mask & TU_DYNAMIC_SCISSOR) {
      if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
                 src->scissor.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                      src->scissor.count);
         dest_mask |= TU_DYNAMIC_SCISSOR;
      }
   }

   if (copy_mask & TU_DYNAMIC_LINE_WIDTH) {
      if (dest->line_width != src->line_width) {
         dest->line_width = src->line_width;
         dest_mask |= TU_DYNAMIC_LINE_WIDTH;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) {
      if (memcmp(&dest->depth_bias, &src->depth_bias,
                 sizeof(src->depth_bias))) {
         dest->depth_bias = src->depth_bias;
         dest_mask |= TU_DYNAMIC_DEPTH_BIAS;
      }
   }

   if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) {
      if (memcmp(&dest->blend_constants, &src->blend_constants,
                 sizeof(src->blend_constants))) {
         typed_memcpy(dest->blend_constants, src->blend_constants, 4);
         dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) {
      if (memcmp(&dest->depth_bounds, &src->depth_bounds,
                 sizeof(src->depth_bounds))) {
         dest->depth_bounds = src->depth_bounds;
         dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) {
      if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
                 sizeof(src->stencil_compare_mask))) {
         dest->stencil_compare_mask = src->stencil_compare_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) {
      if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
                 sizeof(src->stencil_write_mask))) {
         dest->stencil_write_mask = src->stencil_write_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) {
      if (memcmp(&dest->stencil_reference, &src->stencil_reference,
                 sizeof(src->stencil_reference))) {
         dest->stencil_reference = src->stencil_reference;
         dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE;
      }
   }

   if (copy_mask & TU_DYNAMIC_DISCARD_RECTANGLE) {
      if (memcmp(&dest->discard_rectangle.rectangles,
                 &src->discard_rectangle.rectangles,
                 src->discard_rectangle.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->discard_rectangle.rectangles,
                      src->discard_rectangle.rectangles,
                      src->discard_rectangle.count);
         dest_mask |= TU_DYNAMIC_DISCARD_RECTANGLE;
      }
   }
}
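
/* A command buffer owns four command streams: cs (the primary stream),
 * draw_cs and draw_epilogue_cs (render pass contents, replayed per tile in
 * GMEM mode or once for sysmem), and sub_cs (a sub-stream allocator for
 * draw-time state objects). The sizes passed to tu_cs_init() below are
 * initial allocations; TU_CS_MODE_GROW streams expand on demand.
 */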
static VkResult
tu_create_cmd_buffer(struct tu_device *device,
                     struct tu_cmd_pool *pool,
                     VkCommandBufferLevel level,
                     VkCommandBuffer *pCommandBuffer)
{
   struct tu_cmd_buffer *cmd_buffer;
   cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
      cmd_buffer->queue_family_index = pool->queue_family_index;
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
      cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
   }

   tu_bo_list_init(&cmd_buffer->bo_list);
   tu_cs_init(&cmd_buffer->cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->draw_cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->draw_epilogue_cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->sub_cs, device, TU_CS_MODE_SUB_STREAM, 2048);

   *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);

   list_inithead(&cmd_buffer->upload.list);

   cmd_buffer->marker_reg = REG_A6XX_CP_SCRATCH_REG(
      cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY ? 7 : 6);

   VkResult result = tu_bo_init_new(device, &cmd_buffer->scratch_bo, 0x1000);
   if (result != VK_SUCCESS)
      goto fail_scratch_bo;

   /* TODO: resize on overflow */
   cmd_buffer->vsc_data_pitch = device->vsc_data_pitch;
   cmd_buffer->vsc_data2_pitch = device->vsc_data2_pitch;
   cmd_buffer->vsc_data = device->vsc_data;
   cmd_buffer->vsc_data2 = device->vsc_data2;

   return VK_SUCCESS;

fail_scratch_bo:
   list_del(&cmd_buffer->pool_link);
   return result;
}
static void
tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
{
   tu_bo_finish(cmd_buffer->device, &cmd_buffer->scratch_bo);

   list_del(&cmd_buffer->pool_link);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
      free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);

   tu_cs_finish(&cmd_buffer->cs);
   tu_cs_finish(&cmd_buffer->draw_cs);
   tu_cs_finish(&cmd_buffer->draw_epilogue_cs);
   tu_cs_finish(&cmd_buffer->sub_cs);

   tu_bo_list_destroy(&cmd_buffer->bo_list);
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
static VkResult
tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
{
   cmd_buffer->wait_for_idle = true;

   cmd_buffer->record_result = VK_SUCCESS;

   tu_bo_list_reset(&cmd_buffer->bo_list);
   tu_cs_reset(&cmd_buffer->cs);
   tu_cs_reset(&cmd_buffer->draw_cs);
   tu_cs_reset(&cmd_buffer->draw_epilogue_cs);
   tu_cs_reset(&cmd_buffer->sub_cs);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
      cmd_buffer->descriptors[i].valid = 0;
      cmd_buffer->descriptors[i].push_dirty = false;
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;

   return cmd_buffer->record_result;
}
VkResult
tu_AllocateCommandBuffers(VkDevice _device,
                          const VkCommandBufferAllocateInfo *pAllocateInfo,
                          VkCommandBuffer *pCommandBuffers)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {

      if (!list_is_empty(&pool->free_cmd_buffers)) {
         struct tu_cmd_buffer *cmd_buffer = list_first_entry(
            &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);

         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

         result = tu_reset_cmd_buffer(cmd_buffer);
         cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
         cmd_buffer->level = pAllocateInfo->level;

         pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
      } else {
         result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                       &pCommandBuffers[i]);
      }
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
                            pCommandBuffers);

      /* From the Vulkan 1.0.66 spec:
       *
       * "vkAllocateCommandBuffers can be used to create multiple
       *  command buffers. If the creation of any of those command
       *  buffers fails, the implementation must destroy all
       *  successfully created command buffer objects from this
       *  command, set all entries of the pCommandBuffers array to
       *  NULL and return the error."
       */
      memset(pCommandBuffers, 0,
             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
   }

   return result;
}
void
tu_FreeCommandBuffers(VkDevice device,
                      VkCommandPool commandPool,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (cmd_buffer) {
         if (cmd_buffer->pool) {
            list_del(&cmd_buffer->pool_link);
            list_addtail(&cmd_buffer->pool_link,
                         &cmd_buffer->pool->free_cmd_buffers);
         } else
            tu_cmd_buffer_destroy(cmd_buffer);
      }
   }
}
VkResult
tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
                      VkCommandBufferResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   return tu_reset_cmd_buffer(cmd_buffer);
}
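
/* vkBeginCommandBuffer implicitly resets the command buffer when it is not
 * in the initial state, so recording below can rely on a clean slate either
 * way.
 */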
VkResult
tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
                      const VkCommandBufferBeginInfo *pBeginInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   VkResult result = VK_SUCCESS;

   if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
      /* If the command buffer has already been reset with
       * vkResetCommandBuffer, there is no need to do it again.
       */
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
   cmd_buffer->usage_flags = pBeginInfo->flags;

   tu_cs_begin(&cmd_buffer->cs);
   tu_cs_begin(&cmd_buffer->draw_cs);
   tu_cs_begin(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->marker_seqno = 0;
   cmd_buffer->scratch_seqno = 0;

   /* setup initial configuration into command buffer */
   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      switch (cmd_buffer->queue_family_index) {
      case TU_QUEUE_GENERAL:
         tu6_init_hw(cmd_buffer, &cmd_buffer->cs);
         break;
      default:
         break;
      }
   } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
              (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
      assert(pBeginInfo->pInheritanceInfo);
      cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
      cmd_buffer->state.subpass = &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;

   return VK_SUCCESS;
}
void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                        uint32_t firstBinding,
                        uint32_t bindingCount,
                        const VkBuffer *pBuffers,
                        const VkDeviceSize *pOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   assert(firstBinding + bindingCount <= MAX_VBS);

   for (uint32_t i = 0; i < bindingCount; i++) {
      cmd->state.vb.buffers[firstBinding + i] =
         tu_buffer_from_handle(pBuffers[i]);
      cmd->state.vb.offsets[firstBinding + i] = pOffsets[i];
   }

   /* VB states depend on VkPipelineVertexInputStateCreateInfo */
   cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
}
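
/* The primitive restart index is a function of the index type only, so it
 * is (re)emitted here rather than at draw time, and only when the type
 * changes or no index buffer was bound before.
 */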
void
tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
                      VkBuffer buffer,
                      VkDeviceSize offset,
                      VkIndexType indexType)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buf, buffer);

   /* initialize/update the restart index */
   if (!cmd->state.index_buffer || cmd->state.index_type != indexType) {
      struct tu_cs *draw_cs = &cmd->draw_cs;
      VkResult result = tu_cs_reserve_space(draw_cs, 2);
      if (result != VK_SUCCESS) {
         cmd->record_result = result;
         return;
      }

      tu6_emit_restart_index(
         draw_cs, indexType == VK_INDEX_TYPE_UINT32 ? 0xffffffff : 0xffff);

      tu_cs_sanity_check(draw_cs);
   }

   /* track the BO */
   if (cmd->state.index_buffer != buf)
      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);

   cmd->state.index_buffer = buf;
   cmd->state.index_offset = offset;
   cmd->state.index_type = indexType;
}
void
tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                         VkPipelineBindPoint pipelineBindPoint,
                         VkPipelineLayout _layout,
                         uint32_t firstSet,
                         uint32_t descriptorSetCount,
                         const VkDescriptorSet *pDescriptorSets,
                         uint32_t dynamicOffsetCount,
                         const uint32_t *pDynamicOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
   unsigned dyn_idx = 0;

   struct tu_descriptor_state *descriptors_state =
      tu_get_descriptors_state(cmd_buffer, pipelineBindPoint);

   for (unsigned i = 0; i < descriptorSetCount; ++i) {
      unsigned idx = i + firstSet;
      TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);

      descriptors_state->sets[idx] = set;
      descriptors_state->valid |= (1u << idx);

      for (unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
         unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
         assert(dyn_idx < dynamicOffsetCount);

         descriptors_state->dynamic_buffers[idx] =
            set->dynamic_descriptors[j].va + pDynamicOffsets[dyn_idx];
      }
   }

   cmd_buffer->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS;
}
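
/* Push constants are shadowed in cmd->push_constants on the CPU; the actual
 * upload to the const file happens at draw/dispatch time in
 * tu6_emit_user_consts(), driven by the PUSH_CONSTANTS dirty bit set here.
 */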
void
tu_CmdPushConstants(VkCommandBuffer commandBuffer,
                    VkPipelineLayout layout,
                    VkShaderStageFlags stageFlags,
                    uint32_t offset,
                    uint32_t size,
                    const void *pValues)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   memcpy((void *) cmd->push_constants + offset, pValues, size);
   cmd->state.dirty |= TU_CMD_DIRTY_PUSH_CONSTANTS;
}
VkResult
tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   if (cmd_buffer->scratch_seqno) {
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
                     MSM_SUBMIT_BO_WRITE);
   }

   if (cmd_buffer->use_vsc_data) {
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data2,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
   }

   for (uint32_t i = 0; i < cmd_buffer->draw_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   for (uint32_t i = 0; i < cmd_buffer->draw_epilogue_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_epilogue_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   for (uint32_t i = 0; i < cmd_buffer->sub_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->sub_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   tu_cs_end(&cmd_buffer->cs);
   tu_cs_end(&cmd_buffer->draw_cs);
   tu_cs_end(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;

   return cmd_buffer->record_result;
}
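
/* Binding a pipeline only records it and marks it dirty; the heavy lifting
 * (CP_SET_DRAW_STATE groups, consts, textures) is deferred to draw time.
 * The pipeline's BOs are added to the bo_list here so the kernel keeps them
 * resident for this submission.
 */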
void
tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
                   VkPipelineBindPoint pipelineBindPoint,
                   VkPipeline _pipeline)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd->state.pipeline = pipeline;
      cmd->state.dirty |= TU_CMD_DIRTY_PIPELINE;
      break;
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd->state.compute_pipeline = pipeline;
      cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_PIPELINE;
      break;
   default:
      unreachable("unrecognized pipeline bind point");
      break;
   }

   tu_bo_list_add(&cmd->bo_list, &pipeline->program.binary_bo,
                  MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   for (uint32_t i = 0; i < pipeline->cs.bo_count; i++) {
      tu_bo_list_add(&cmd->bo_list, pipeline->cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }
}
void
tu_CmdSetViewport(VkCommandBuffer commandBuffer,
                  uint32_t firstViewport,
                  uint32_t viewportCount,
                  const VkViewport *pViewports)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *draw_cs = &cmd->draw_cs;

   VkResult result = tu_cs_reserve_space(draw_cs, 12);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   assert(firstViewport == 0 && viewportCount == 1);
   tu6_emit_viewport(draw_cs, pViewports);

   tu_cs_sanity_check(draw_cs);
}

void
tu_CmdSetScissor(VkCommandBuffer commandBuffer,
                 uint32_t firstScissor,
                 uint32_t scissorCount,
                 const VkRect2D *pScissors)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *draw_cs = &cmd->draw_cs;

   VkResult result = tu_cs_reserve_space(draw_cs, 3);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   assert(firstScissor == 0 && scissorCount == 1);
   tu6_emit_scissor(draw_cs, pScissors);

   tu_cs_sanity_check(draw_cs);
}

void
tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   cmd->state.dynamic.line_width = lineWidth;

   /* line width depends on VkPipelineRasterizationStateCreateInfo */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}

void
tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
                   float depthBiasConstantFactor,
                   float depthBiasClamp,
                   float depthBiasSlopeFactor)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *draw_cs = &cmd->draw_cs;

   VkResult result = tu_cs_reserve_space(draw_cs, 4);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_depth_bias(draw_cs, depthBiasConstantFactor, depthBiasClamp,
                       depthBiasSlopeFactor);

   tu_cs_sanity_check(draw_cs);
}

void
tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
                        const float blendConstants[4])
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *draw_cs = &cmd->draw_cs;

   VkResult result = tu_cs_reserve_space(draw_cs, 5);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_blend_constants(draw_cs, blendConstants);

   tu_cs_sanity_check(draw_cs);
}
void
tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
                     float minDepthBounds,
                     float maxDepthBounds)
{
}

void
tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
                            VkStencilFaceFlags faceMask,
                            uint32_t compareMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd->state.dynamic.stencil_compare_mask.front = compareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd->state.dynamic.stencil_compare_mask.back = compareMask;

   /* the front/back compare masks must be updated together */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}

void
tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t writeMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd->state.dynamic.stencil_write_mask.front = writeMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd->state.dynamic.stencil_write_mask.back = writeMask;

   /* the front/back write masks must be updated together */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}

void
tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t reference)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd->state.dynamic.stencil_reference.front = reference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd->state.dynamic.stencil_reference.back = reference;

   /* the front/back references must be updated together */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}
void
tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCmdBuffers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   VkResult result;

   assert(commandBufferCount > 0);

   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);

      result = tu_bo_list_merge(&cmd->bo_list, &secondary->bo_list);
      if (result != VK_SUCCESS) {
         cmd->record_result = result;
         break;
      }

      result = tu_cs_add_entries(&cmd->draw_cs, &secondary->draw_cs);
      if (result != VK_SUCCESS) {
         cmd->record_result = result;
         break;
      }

      result = tu_cs_add_entries(&cmd->draw_epilogue_cs,
                                 &secondary->draw_epilogue_cs);
      if (result != VK_SUCCESS) {
         cmd->record_result = result;
         break;
      }
   }
   cmd->state.dirty = ~0u; /* TODO: set dirty only what needs to be */
}
VkResult
tu_CreateCommandPool(VkDevice _device,
                     const VkCommandPoolCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkCommandPool *pCmdPool)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_cmd_pool *pool;

   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);
   list_inithead(&pool->free_cmd_buffers);

   pool->queue_family_index = pCreateInfo->queueFamilyIndex;

   *pCmdPool = tu_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void
tu_DestroyCommandPool(VkDevice _device,
                      VkCommandPool commandPool,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   vk_free2(&device->alloc, pAllocator, pool);
}

VkResult
tu_ResetCommandPool(VkDevice device,
                    VkCommandPool commandPool,
                    VkCommandPoolResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
   VkResult result;

   list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
                       pool_link)
   {
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}

void
tu_TrimCommandPool(VkDevice device,
                   VkCommandPool commandPool,
                   VkCommandPoolTrimFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }
}
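
/* Starting a render pass computes the tiling configuration for the render
 * area and pre-builds the clear/load/store IBs, then emits state for the
 * first subpass into draw_cs. Attachment BOs are tracked with both READ and
 * WRITE since load/store ops touch them in both directions.
 */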
void
tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                      const VkRenderPassBeginInfo *pRenderPassBegin,
                      VkSubpassContents contents)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
   TU_FROM_HANDLE(tu_framebuffer, fb, pRenderPassBegin->framebuffer);

   cmd->state.pass = pass;
   cmd->state.subpass = pass->subpasses;
   cmd->state.framebuffer = fb;

   tu_cmd_update_tiling_config(cmd, &pRenderPassBegin->renderArea);
   tu_cmd_prepare_sysmem_clear_ib(cmd, pRenderPassBegin);
   tu_cmd_prepare_tile_load_ib(cmd, pRenderPassBegin);
   tu_cmd_prepare_tile_store_ib(cmd);

   VkResult result = tu_cs_reserve_space(&cmd->draw_cs, 1024);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_zs(cmd, cmd->state.subpass, &cmd->draw_cs);
   tu6_emit_mrt(cmd, cmd->state.subpass, &cmd->draw_cs);
   tu6_emit_msaa(cmd, cmd->state.subpass, &cmd->draw_cs);
   tu6_emit_render_cntl(cmd, cmd->state.subpass, &cmd->draw_cs, false);

   /* note: use_hw_binning only checks tiling config */
   if (use_hw_binning(cmd))
      cmd->use_vsc_data = true;

   for (uint32_t i = 0; i < fb->attachment_count; ++i) {
      const struct tu_image_view *iview = fb->attachments[i].attachment;
      tu_bo_list_add(&cmd->bo_list, iview->image->bo,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
   }
}

void
tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
                       const VkRenderPassBeginInfo *pRenderPassBeginInfo,
                       const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
{
   tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
                         pSubpassBeginInfo->contents);
}
void
tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   const struct tu_render_pass *pass = cmd->state.pass;
   struct tu_cs *cs = &cmd->draw_cs;

   const struct tu_subpass *subpass = cmd->state.subpass++;
   /* TODO:
    * if msaa samples change between subpasses,
    * attachment store is broken for some attachments
    */
   if (subpass->resolve_attachments) {
      tu6_emit_blit_scissor(cmd, cs, true);
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a != VK_ATTACHMENT_UNUSED) {
            tu6_emit_resolve(cmd, cs, a,
                             subpass->color_attachments[i].attachment);
         }
      }
   }

   VkResult result = tu_cs_reserve_space(&cmd->draw_cs, 1024);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   /* invalidate because reading input attachments will cache GMEM and
    * the cache isn't updated when GMEM is written
    * TODO: is there a no-cache bit for textures?
    */
   if (cmd->state.subpass->input_count)
      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);

   /* emit mrt/zs/msaa/ubwc state for the subpass that is starting */
   tu6_emit_zs(cmd, cmd->state.subpass, cs);
   tu6_emit_mrt(cmd, cmd->state.subpass, cs);
   tu6_emit_msaa(cmd, cmd->state.subpass, cs);
   tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, false);

   /* Emit flushes so that input attachments will read the correct value. This
    * is for sysmem only, although it shouldn't do much harm on gmem.
    */
   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);

   /* TODO:
    * since we don't know how to do GMEM->GMEM resolve,
    * resolve attachments are resolved to memory then loaded to GMEM again if needed
    */
   if (subpass->resolve_attachments) {
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a != VK_ATTACHMENT_UNUSED && pass->attachments[a].gmem_offset >= 0) {
            tu_finishme("missing GMEM->GMEM resolve, performance will suffer\n");
            tu6_emit_predicated_blit(cmd, cs, a, a, false);
         }
      }
   }
}

void
tu_CmdNextSubpass2(VkCommandBuffer commandBuffer,
                   const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
                   const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}
struct tu_draw_info
{
   /**
    * Number of vertices.
    */
   uint32_t count;

   /**
    * Index of the first vertex.
    */
   int32_t vertex_offset;

   /**
    * First instance id.
    */
   uint32_t first_instance;

   /**
    * Number of instances.
    */
   uint32_t instance_count;

   /**
    * First index (indexed draws only).
    */
   uint32_t first_index;

   /**
    * Whether it's an indexed draw.
    */
   bool indexed;

   /**
    * Indirect draw parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
   uint32_t stride;

   /**
    * Draw count parameters resource.
    */
   struct tu_buffer *count_buffer;
   uint64_t count_buffer_offset;
};

#define ENABLE_ALL (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
#define ENABLE_DRAW (CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
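
/* Draw state is split into CP_SET_DRAW_STATE groups so the CP can replay
 * only what applies to the current mode: ENABLE_ALL groups run in the
 * binning, GMEM and sysmem passes, ENABLE_DRAW groups skip the binning
 * pass, and the single-bit masks select exactly one mode.
 */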
enum tu_draw_state_group_id
{
   TU_DRAW_STATE_PROGRAM,
   TU_DRAW_STATE_PROGRAM_BINNING,
   TU_DRAW_STATE_VI,
   TU_DRAW_STATE_VI_BINNING,
   TU_DRAW_STATE_VP,
   TU_DRAW_STATE_RAST,
   TU_DRAW_STATE_DS,
   TU_DRAW_STATE_BLEND,
   TU_DRAW_STATE_VS_CONST,
   TU_DRAW_STATE_FS_CONST,
   TU_DRAW_STATE_VS_TEX,
   TU_DRAW_STATE_FS_TEX_SYSMEM,
   TU_DRAW_STATE_FS_TEX_GMEM,
   TU_DRAW_STATE_FS_IBO,
   TU_DRAW_STATE_VS_PARAMS,

   TU_DRAW_STATE_COUNT,
};

struct tu_draw_state_group
{
   enum tu_draw_state_group_id id;
   uint32_t enable_mask;
   struct tu_cs_entry ib;
};
static const struct tu_sampler *
sampler_ptr(struct tu_descriptor_state *descriptors_state,
            const struct tu_descriptor_map *map, unsigned i,
            unsigned array_index)
{
   assert(descriptors_state->valid & (1 << map->set[i]));

   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
   assert(map->binding[i] < set->layout->binding_count);

   const struct tu_descriptor_set_binding_layout *layout =
      &set->layout->binding[map->binding[i]];

   if (layout->immutable_samplers_offset) {
      const struct tu_sampler *immutable_samplers =
         tu_immutable_samplers(set->layout, layout);

      return &immutable_samplers[array_index];
   }

   switch (layout->type) {
   case VK_DESCRIPTOR_TYPE_SAMPLER:
      return (struct tu_sampler *) &set->mapped_ptr[layout->offset / 4];
   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      return (struct tu_sampler *) &set->mapped_ptr[layout->offset / 4 + A6XX_TEX_CONST_DWORDS +
                                                    array_index *
                                                    (A6XX_TEX_CONST_DWORDS +
                                                     sizeof(struct tu_sampler) / 4)];
   default:
      unreachable("unimplemented descriptor type");
      break;
   }
}
static void
write_tex_const(struct tu_cmd_buffer *cmd,
                uint32_t *dst,
                struct tu_descriptor_state *descriptors_state,
                const struct tu_descriptor_map *map,
                unsigned i, unsigned array_index, bool is_sysmem)
{
   assert(descriptors_state->valid & (1 << map->set[i]));

   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
   assert(map->binding[i] < set->layout->binding_count);

   const struct tu_descriptor_set_binding_layout *layout =
      &set->layout->binding[map->binding[i]];

   switch (layout->type) {
   case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
   case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
   case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
      memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
                                   array_index * A6XX_TEX_CONST_DWORDS],
             A6XX_TEX_CONST_DWORDS * 4);
      break;
   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
                                   array_index *
                                   (A6XX_TEX_CONST_DWORDS +
                                    sizeof(struct tu_sampler) / 4)],
             A6XX_TEX_CONST_DWORDS * 4);
      break;
   default:
      unreachable("unimplemented descriptor type");
      break;
   }

   if (layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT && !is_sysmem) {
      const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
      uint32_t a = cmd->state.subpass->input_attachments[map->value[i] +
                                                         array_index].attachment;
      const struct tu_render_pass_attachment *att = &cmd->state.pass->attachments[a];

      assert(att->gmem_offset >= 0);

      dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
      dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
      dst[2] &= ~(A6XX_TEX_CONST_2_TYPE__MASK | A6XX_TEX_CONST_2_PITCH__MASK);
      dst[2] |=
         A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
         A6XX_TEX_CONST_2_PITCH(tiling->tile0.extent.width * att->cpp);
      dst[3] = 0;
      dst[4] = 0x100000 + att->gmem_offset;
      dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
      for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
         dst[i] = 0;

      if (cmd->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
         tu_finishme("patch input attachment pitch for secondary cmd buffer");
   }
}
static void
write_image_ibo(struct tu_cmd_buffer *cmd,
                uint32_t *dst,
                struct tu_descriptor_state *descriptors_state,
                const struct tu_descriptor_map *map,
                unsigned i, unsigned array_index)
{
   assert(descriptors_state->valid & (1 << map->set[i]));

   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
   assert(map->binding[i] < set->layout->binding_count);

   const struct tu_descriptor_set_binding_layout *layout =
      &set->layout->binding[map->binding[i]];

   assert(layout->type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);

   memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
                                (array_index * 2 + 1) * A6XX_TEX_CONST_DWORDS],
          A6XX_TEX_CONST_DWORDS * 4);
}
static uint64_t
buffer_ptr(struct tu_descriptor_state *descriptors_state,
           const struct tu_descriptor_map *map,
           unsigned i, unsigned array_index)
{
   assert(descriptors_state->valid & (1 << map->set[i]));

   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
   assert(map->binding[i] < set->layout->binding_count);

   const struct tu_descriptor_set_binding_layout *layout =
      &set->layout->binding[map->binding[i]];

   switch (layout->type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return descriptors_state->dynamic_buffers[layout->dynamic_offset_offset +
                                                array_index];
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      return (uint64_t) set->mapped_ptr[layout->offset / 4 + array_index * 2 + 1] << 32 |
             set->mapped_ptr[layout->offset / 4 + array_index * 2];
   default:
      unreachable("unimplemented descriptor type");
      break;
   }
}
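
/* CP_LOAD_STATE6 comes in per-pipe flavors: geometry stages use
 * CP_LOAD_STATE6_GEOM while fragment/compute use CP_LOAD_STATE6_FRAG, each
 * paired with a matching a6xx_state_block for the destination, as the two
 * helpers below encode.
 */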
static inline uint32_t
tu6_stage2opcode(gl_shader_stage type)
{
   switch (type) {
   case MESA_SHADER_VERTEX:
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_TESS_EVAL:
   case MESA_SHADER_GEOMETRY:
      return CP_LOAD_STATE6_GEOM;
   case MESA_SHADER_FRAGMENT:
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_KERNEL:
      return CP_LOAD_STATE6_FRAG;
   default:
      unreachable("bad shader type");
   }
}

static inline enum a6xx_state_block
tu6_stage2shadersb(gl_shader_stage type)
{
   switch (type) {
   case MESA_SHADER_VERTEX:
      return SB6_VS_SHADER;
   case MESA_SHADER_FRAGMENT:
      return SB6_FS_SHADER;
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_KERNEL:
      return SB6_CS_SHADER;
   default:
      unreachable("bad shader type");
   }
}
static void
tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
                     struct tu_descriptor_state *descriptors_state,
                     gl_shader_stage type,
                     uint32_t *push_constants)
{
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   const struct ir3_ubo_analysis_state *state = &link->ubo_state;

   for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
      if (state->range[i].start < state->range[i].end) {
         uint32_t size = state->range[i].end - state->range[i].start;
         uint32_t offset = state->range[i].start;

         /* and even if the start of the const buffer is before
          * first_immediate, the end may not be:
          */
         size = MIN2(size, (16 * link->constlen) - state->range[i].offset);

         if (size == 0)
            continue;

         /* things should be aligned to vec4: */
         debug_assert((state->range[i].offset % 16) == 0);
         debug_assert((size % 16) == 0);
         debug_assert((offset % 16) == 0);

         if (i == 0) {
            /* push constants */
            tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (size / 4));
            tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
                       CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                       CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                       CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                       CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
            tu_cs_emit(cs, 0);
            tu_cs_emit(cs, 0);
            for (unsigned i = 0; i < size / 4; i++)
               tu_cs_emit(cs, push_constants[i + offset / 4]);
            continue;
         }

         /* Look through the UBO map to find our UBO index, and get the VA for
          * that UBO.
          */
         uint64_t va = 0;
         uint32_t ubo_idx = i - 1;
         uint32_t ubo_map_base = 0;
         for (int j = 0; j < link->ubo_map.num; j++) {
            if (ubo_idx >= ubo_map_base &&
                ubo_idx < ubo_map_base + link->ubo_map.array_size[j]) {
               va = buffer_ptr(descriptors_state, &link->ubo_map, j,
                               ubo_idx - ubo_map_base);
               break;
            }
            ubo_map_base += link->ubo_map.array_size[j];
         }
         assert(va);

         tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
         tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
                    CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                    CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                    CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                    CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
         tu_cs_emit_qw(cs, va + offset);
      }
   }
}
static void
tu6_emit_ubos(struct tu_cs *cs, const struct tu_pipeline *pipeline,
              struct tu_descriptor_state *descriptors_state,
              gl_shader_stage type)
{
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];

   uint32_t num = MIN2(link->ubo_map.num_desc, link->const_state.num_ubos);
   uint32_t anum = align(num, 2);

   if (!num)
      return;

   tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (2 * anum));
   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(link->const_state.offsets.ubo) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
              CP_LOAD_STATE6_0_NUM_UNIT(anum / 2));
   tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));

   unsigned emitted = 0;
   for (unsigned i = 0; emitted < num && i < link->ubo_map.num; i++) {
      for (unsigned j = 0; emitted < num && j < link->ubo_map.array_size[i]; j++) {
         tu_cs_emit_qw(cs, buffer_ptr(descriptors_state, &link->ubo_map, i, j));
         emitted++;
      }
   }

   for (; emitted < anum; emitted++) {
      tu_cs_emit(cs, 0xffffffff);
      tu_cs_emit(cs, 0xffffffff);
   }
}
static struct tu_cs_entry
tu6_emit_consts(struct tu_cmd_buffer *cmd,
                const struct tu_pipeline *pipeline,
                struct tu_descriptor_state *descriptors_state,
                gl_shader_stage type)
{
   struct tu_cs cs;
   tu_cs_begin_sub_stream(&cmd->sub_cs, 512, &cs); /* TODO: maximum size? */

   tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);
   tu6_emit_ubos(&cs, pipeline, descriptors_state, type);

   return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
}
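
/* VS driver params (currently just the base instance, per the TODO below)
 * are written as a single vec4 at the driver_param offset that ir3 chose
 * for the shader; when the shader does not use them, constlen ends before
 * that offset and nothing is emitted.
 */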
static VkResult
tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
                   const struct tu_draw_info *draw,
                   struct tu_cs_entry *entry)
{
   /* TODO: fill out more than just base instance */
   const struct tu_program_descriptor_linkage *link =
      &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
   const struct ir3_const_state *const_state = &link->const_state;
   struct tu_cs cs;

   if (const_state->offsets.driver_param >= link->constlen) {
      *entry = (struct tu_cs_entry) {};
      return VK_SUCCESS;
   }

   VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 8, &cs);
   if (result != VK_SUCCESS)
      return result;

   tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(const_state->offsets.driver_param) |
         CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
         CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
         CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
         CP_LOAD_STATE6_0_NUM_UNIT(1));
   tu_cs_emit(&cs, 0);
   tu_cs_emit(&cs, 0);

   STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);

   tu_cs_emit(&cs, 0);
   tu_cs_emit(&cs, 0);
   tu_cs_emit(&cs, draw->first_instance);
   tu_cs_emit(&cs, 0);

   *entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
   return VK_SUCCESS;
}
static VkResult
tu6_emit_textures(struct tu_cmd_buffer *cmd,
                  const struct tu_pipeline *pipeline,
                  struct tu_descriptor_state *descriptors_state,
                  gl_shader_stage type,
                  struct tu_cs_entry *entry,
                  bool *needs_border,
                  bool is_sysmem)
{
   struct tu_cs *draw_state = &cmd->sub_cs;
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   VkResult result;

   if (link->texture_map.num_desc == 0 && link->sampler_map.num_desc == 0) {
      *entry = (struct tu_cs_entry) {};
      return VK_SUCCESS;
   }

   /* allocate and fill texture state */
   struct ts_cs_memory tex_const;
   result = tu_cs_alloc(draw_state, link->texture_map.num_desc,
                        A6XX_TEX_CONST_DWORDS, &tex_const);
   if (result != VK_SUCCESS)
      return result;

   int tex_index = 0;
   for (unsigned i = 0; i < link->texture_map.num; i++) {
      for (int j = 0; j < link->texture_map.array_size[i]; j++) {
         write_tex_const(cmd,
                         &tex_const.map[A6XX_TEX_CONST_DWORDS * tex_index++],
                         descriptors_state, &link->texture_map, i, j,
                         is_sysmem);
      }
   }

   /* allocate and fill sampler state */
   struct ts_cs_memory tex_samp = { 0 };
   if (link->sampler_map.num_desc) {
      result = tu_cs_alloc(draw_state, link->sampler_map.num_desc,
                           A6XX_TEX_SAMP_DWORDS, &tex_samp);
      if (result != VK_SUCCESS)
         return result;

      int sampler_index = 0;
      for (unsigned i = 0; i < link->sampler_map.num; i++) {
         for (int j = 0; j < link->sampler_map.array_size[i]; j++) {
            const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
                                                           &link->sampler_map,
                                                           i, j);
            memcpy(&tex_samp.map[A6XX_TEX_SAMP_DWORDS * sampler_index++],
                   sampler->state, sizeof(sampler->state));
            *needs_border |= sampler->needs_border;
         }
      }
   }

   unsigned tex_samp_reg, tex_const_reg, tex_count_reg;
   enum a6xx_state_block sb;

   switch (type) {
   case MESA_SHADER_VERTEX:
      sb = SB6_VS_TEX;
      tex_samp_reg = REG_A6XX_SP_VS_TEX_SAMP_LO;
      tex_const_reg = REG_A6XX_SP_VS_TEX_CONST_LO;
      tex_count_reg = REG_A6XX_SP_VS_TEX_COUNT;
      break;
   case MESA_SHADER_FRAGMENT:
      sb = SB6_FS_TEX;
      tex_samp_reg = REG_A6XX_SP_FS_TEX_SAMP_LO;
      tex_const_reg = REG_A6XX_SP_FS_TEX_CONST_LO;
      tex_count_reg = REG_A6XX_SP_FS_TEX_COUNT;
      break;
   case MESA_SHADER_COMPUTE:
      sb = SB6_CS_TEX;
      tex_samp_reg = REG_A6XX_SP_CS_TEX_SAMP_LO;
      tex_const_reg = REG_A6XX_SP_CS_TEX_CONST_LO;
      tex_count_reg = REG_A6XX_SP_CS_TEX_COUNT;
      break;
   default:
      unreachable("bad state block");
   }

   struct tu_cs cs;
   result = tu_cs_begin_sub_stream(draw_state, 16, &cs);
   if (result != VK_SUCCESS)
      return result;

   if (link->sampler_map.num_desc) {
      /* output sampler state: */
      tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
      tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
                 CP_LOAD_STATE6_0_NUM_UNIT(link->sampler_map.num_desc));
      tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */

      tu_cs_emit_pkt4(&cs, tex_samp_reg, 2);
      tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
   }

   /* emit texture state: */
   tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
              CP_LOAD_STATE6_0_NUM_UNIT(link->texture_map.num_desc));
   tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */

   tu_cs_emit_pkt4(&cs, tex_const_reg, 2);
   tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */

   tu_cs_emit_pkt4(&cs, tex_count_reg, 1);
   tu_cs_emit(&cs, link->texture_map.num_desc);

   *entry = tu_cs_end_sub_stream(draw_state, &cs);
   return VK_SUCCESS;
}
static VkResult
tu6_emit_ibo(struct tu_cmd_buffer *cmd,
             const struct tu_pipeline *pipeline,
             struct tu_descriptor_state *descriptors_state,
             gl_shader_stage type,
             struct tu_cs_entry *entry)
{
   struct tu_cs *draw_state = &cmd->sub_cs;
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   VkResult result;

   unsigned num_desc = link->ssbo_map.num_desc + link->image_map.num_desc;

   if (num_desc == 0) {
      *entry = (struct tu_cs_entry) {};
      return VK_SUCCESS;
   }

   struct ts_cs_memory ibo_const;
   result = tu_cs_alloc(draw_state, num_desc,
                        A6XX_TEX_CONST_DWORDS, &ibo_const);
   if (result != VK_SUCCESS)
      return result;

   int ssbo_index = 0;
   for (unsigned i = 0; i < link->ssbo_map.num; i++) {
      for (int j = 0; j < link->ssbo_map.array_size[i]; j++) {
         uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];

         uint64_t va = buffer_ptr(descriptors_state, &link->ssbo_map, i, j);
         /* We don't expose robustBufferAccess, so leave the size unlimited. */
         uint32_t sz = MAX_STORAGE_BUFFER_RANGE / 4;

         dst[0] = A6XX_IBO_0_FMT(FMT6_32_UINT);
         dst[1] = A6XX_IBO_1_WIDTH(sz & MASK(15)) |
                  A6XX_IBO_1_HEIGHT(sz >> 15);
         dst[2] = A6XX_IBO_2_UNK4 |
                  A6XX_IBO_2_UNK31 |
                  A6XX_IBO_2_TYPE(A6XX_TEX_1D);
         dst[3] = 0;
         dst[4] = A6XX_IBO_4_BASE_LO(va);
         dst[5] = A6XX_IBO_5_BASE_HI(va >> 32);
         for (int i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
            dst[i] = 0;

         ssbo_index++;
      }
   }

   for (unsigned i = 0; i < link->image_map.num; i++) {
      for (int j = 0; j < link->image_map.array_size[i]; j++) {
         uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];

         write_image_ibo(cmd, dst,
                         descriptors_state, &link->image_map, i, j);

         ssbo_index++;
      }
   }

   assert(ssbo_index == num_desc);

   struct tu_cs cs;
   result = tu_cs_begin_sub_stream(draw_state, 7, &cs);
   if (result != VK_SUCCESS)
      return result;

   uint32_t opcode, ibo_addr_reg;
   enum a6xx_state_block sb;
   enum a6xx_state_type st;

   switch (type) {
   case MESA_SHADER_FRAGMENT:
      opcode = CP_LOAD_STATE6;
      st = ST6_SHADER;
      sb = SB6_IBO;
      ibo_addr_reg = REG_A6XX_SP_IBO_LO;
      break;
   case MESA_SHADER_COMPUTE:
      opcode = CP_LOAD_STATE6_FRAG;
      st = ST6_IBO;
      sb = SB6_CS_SHADER;
      ibo_addr_reg = REG_A6XX_SP_CS_IBO_LO;
      break;
   default:
      unreachable("unsupported stage for ibos");
   }

   /* emit texture state: */
   tu_cs_emit_pkt7(&cs, opcode, 3);
   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
              CP_LOAD_STATE6_0_STATE_TYPE(st) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
              CP_LOAD_STATE6_0_NUM_UNIT(num_desc));
   tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */

   tu_cs_emit_pkt4(&cs, ibo_addr_reg, 2);
   tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */

   *entry = tu_cs_end_sub_stream(draw_state, &cs);
   return VK_SUCCESS;
}
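
/* Border color table: one 128-byte entry per VkBorderColor (checked by the
 * STATIC_ASSERT in tu6_emit_border_color below), with the border value
 * pre-converted for each texel format class a sampler might use; as the
 * field comments note, parts of this layout are still educated guesses.
 */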
struct PACKED bcolor_entry
{
   uint32_t fp32[4];
   uint16_t ui16[4];
   int16_t si16[4];
   uint16_t fp16[4];
   uint16_t rgb565;
   uint16_t rgb5a1;
   uint16_t rgba4;
   uint8_t __pad0[2];
   uint8_t ui8[4];
   int8_t si8[4];
   uint32_t rgb10a2;
   uint32_t z24; /* also s8? */
   uint16_t srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
   uint8_t __pad1[56];
} border_color[] = {
   [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
   [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
   [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
      .fp32[3] = 0x3f800000,
      .ui16[3] = 0xffff,
      .si16[3] = 0x7fff,
      .fp16[3] = 0x3c00,
      .rgb5a1 = 0x8000,
      .rgba4 = 0xf000,
      .ui8[3] = 0xff,
      .si8[3] = 0x7f,
      .rgb10a2 = 0xc0000000,
      .srgb[3] = 0x3c00,
   },
   [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
      .fp32[3] = 1,
      .fp16[3] = 1,
   },
   [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
      .fp32[0 ... 3] = 0x3f800000,
      .ui16[0 ... 3] = 0xffff,
      .si16[0 ... 3] = 0x7fff,
      .fp16[0 ... 3] = 0x3c00,
      .rgb565 = 0xffff,
      .rgb5a1 = 0xffff,
      .rgba4 = 0xffff,
      .ui8[0 ... 3] = 0xff,
      .si8[0 ... 3] = 0x7f,
      .rgb10a2 = 0xffffffff,
      .z24 = 0xffffff,
      .srgb[0 ... 3] = 0x3c00,
   },
   [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
      .fp32[0 ... 3] = ~0,
      .fp16[0 ... 3] = ~0,
   },
};
static VkResult
tu6_emit_border_color(struct tu_cmd_buffer *cmd,
                      struct tu_cs *cs)
{
   STATIC_ASSERT(sizeof(struct bcolor_entry) == 128);

   const struct tu_pipeline *pipeline = cmd->state.pipeline;
   struct tu_descriptor_state *descriptors_state =
      &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];
   const struct tu_descriptor_map *vs_sampler =
      &pipeline->program.link[MESA_SHADER_VERTEX].sampler_map;
   const struct tu_descriptor_map *fs_sampler =
      &pipeline->program.link[MESA_SHADER_FRAGMENT].sampler_map;
   struct ts_cs_memory ptr;

   VkResult result = tu_cs_alloc(&cmd->sub_cs,
                                 vs_sampler->num_desc + fs_sampler->num_desc,
                                 128 / 4,
                                 &ptr);
   if (result != VK_SUCCESS)
      return result;

   for (unsigned i = 0; i < vs_sampler->num; i++) {
      for (unsigned j = 0; j < vs_sampler->array_size[i]; j++) {
         const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
                                                        vs_sampler, i, j);
         memcpy(ptr.map, &border_color[sampler->border], 128);
         ptr.map += 128 / 4;
      }
   }

   for (unsigned i = 0; i < fs_sampler->num; i++) {
      for (unsigned j = 0; j < fs_sampler->array_size[i]; j++) {
         const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
                                                        fs_sampler, i, j);
         memcpy(ptr.map, &border_color[sampler->border], 128);
         ptr.map += 128 / 4;
      }
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_LO, 2);
   tu_cs_emit_qw(cs, ptr.iova);
   return VK_SUCCESS;
}
static VkResult
tu6_bind_draw_states(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     const struct tu_draw_info *draw)
{
   const struct tu_pipeline *pipeline = cmd->state.pipeline;
   const struct tu_dynamic_state *dynamic = &cmd->state.dynamic;
   struct tu_draw_state_group draw_state_groups[TU_DRAW_STATE_COUNT];
   uint32_t draw_state_group_count = 0;

   struct tu_descriptor_state *descriptors_state =
      &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];

   VkResult result = tu_cs_reserve_space(cs, 256);
   if (result != VK_SUCCESS)
      return result;

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9806, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9990, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A008, 0);

   tu_cs_emit_regs(cs,
                   A6XX_PC_PRIMITIVE_CNTL_0(.primitive_restart =
                                            pipeline->ia.primitive_restart && draw->indexed));

   if (cmd->state.dirty &
       (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_LINE_WIDTH)) {
      tu6_emit_gras_su_cntl(cs, pipeline->rast.gras_su_cntl,
                            dynamic->line_width);
   }

   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_COMPARE_MASK)) {
      tu6_emit_stencil_compare_mask(cs, dynamic->stencil_compare_mask.front,
                                    dynamic->stencil_compare_mask.back);
   }

   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_WRITE_MASK)) {
      tu6_emit_stencil_write_mask(cs, dynamic->stencil_write_mask.front,
                                  dynamic->stencil_write_mask.back);
   }

   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_REFERENCE)) {
      tu6_emit_stencil_reference(cs, dynamic->stencil_reference.front,
                                 dynamic->stencil_reference.back);
   }

   if (cmd->state.dirty &
       (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_VERTEX_BUFFERS)) {
      for (uint32_t i = 0; i < pipeline->vi.count; i++) {
         const uint32_t binding = pipeline->vi.bindings[i];
         const uint32_t stride = pipeline->vi.strides[i];
         const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
         const VkDeviceSize offset = buf->bo_offset +
                                     cmd->state.vb.offsets[binding] +
                                     pipeline->vi.offsets[i];
         const VkDeviceSize size =
            offset < buf->bo->size ? buf->bo->size - offset : 0;

         tu_cs_emit_regs(cs,
                         A6XX_VFD_FETCH_BASE(i, .bo = buf->bo, .bo_offset = offset),
                         A6XX_VFD_FETCH_SIZE(i, size),
                         A6XX_VFD_FETCH_STRIDE(i, stride));
      }
   }

   if (cmd->state.dirty & TU_CMD_DIRTY_PIPELINE) {
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_PROGRAM,
            .enable_mask = ENABLE_DRAW,
            .ib = pipeline->program.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_PROGRAM_BINNING,
            .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
            .ib = pipeline->program.binning_state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VI,
            .enable_mask = ENABLE_DRAW,
            .ib = pipeline->vi.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VI_BINNING,
            .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
            .ib = pipeline->vi.binning_state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VP,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->vp.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_RAST,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->rast.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_DS,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->ds.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_BLEND,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->blend.state_ib,
         };
   }

   if (cmd->state.dirty &
       (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS | TU_CMD_DIRTY_PUSH_CONSTANTS)) {
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VS_CONST,
            .enable_mask = ENABLE_ALL,
            .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX)
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_FS_CONST,
            .enable_mask = ENABLE_DRAW,
            .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT)
         };
   }

   if (cmd->state.dirty &
       (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS)) {
      bool needs_border = false;
      struct tu_cs_entry vs_tex, fs_tex_sysmem, fs_tex_gmem, fs_ibo;

      result = tu6_emit_textures(cmd, pipeline, descriptors_state,
                                 MESA_SHADER_VERTEX, &vs_tex, &needs_border,
                                 false);
      if (result != VK_SUCCESS)
         return result;

      /* TODO: we could emit just one texture descriptor draw state when there
       * are no input attachments, which is the most common case. We could
       * also split out the sampler state, which doesn't change even for input
       * attachments.
       */
      result = tu6_emit_textures(cmd, pipeline, descriptors_state,
                                 MESA_SHADER_FRAGMENT, &fs_tex_sysmem,
                                 &needs_border, true);
      if (result != VK_SUCCESS)
         return result;

      result = tu6_emit_textures(cmd, pipeline, descriptors_state,
                                 MESA_SHADER_FRAGMENT, &fs_tex_gmem,
                                 &needs_border, false);
      if (result != VK_SUCCESS)
         return result;

      result = tu6_emit_ibo(cmd, pipeline, descriptors_state,
                            MESA_SHADER_FRAGMENT, &fs_ibo);
      if (result != VK_SUCCESS)
         return result;

      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VS_TEX,
            .enable_mask = ENABLE_ALL,
            .ib = vs_tex,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_FS_TEX_GMEM,
            .enable_mask = CP_SET_DRAW_STATE__0_GMEM,
            .ib = fs_tex_gmem,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_FS_TEX_SYSMEM,
            .enable_mask = CP_SET_DRAW_STATE__0_SYSMEM,
            .ib = fs_tex_sysmem,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_FS_IBO,
            .enable_mask = ENABLE_DRAW,
            .ib = fs_ibo,
         };

      if (needs_border) {
         result = tu6_emit_border_color(cmd, cs);
         if (result != VK_SUCCESS)
            return result;
      }
   }

   struct tu_cs_entry vs_params;
   result = tu6_emit_vs_params(cmd, draw, &vs_params);
   if (result != VK_SUCCESS)
      return result;

   draw_state_groups[draw_state_group_count++] =
      (struct tu_draw_state_group) {
         .id = TU_DRAW_STATE_VS_PARAMS,
         .enable_mask = ENABLE_ALL,
         .ib = vs_params,
      };

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_group_count);
   for (uint32_t i = 0; i < draw_state_group_count; i++) {
      const struct tu_draw_state_group *group = &draw_state_groups[i];
      debug_assert((group->enable_mask & ~ENABLE_ALL) == 0);
      uint32_t cp_set_draw_state =
         CP_SET_DRAW_STATE__0_COUNT(group->ib.size / 4) |
         group->enable_mask |
         CP_SET_DRAW_STATE__0_GROUP_ID(group->id);
      uint64_t iova = 0;
      if (group->ib.size) {
         iova = group->ib.bo->iova + group->ib.offset;
      } else {
         cp_set_draw_state |= CP_SET_DRAW_STATE__0_DISABLE;
      }

      tu_cs_emit(cs, cp_set_draw_state);
      tu_cs_emit_qw(cs, iova);
   }

   tu_cs_sanity_check(cs);

   /* track BOs */
   if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) {
      for (uint32_t i = 0; i < MAX_VBS; i++) {
         const struct tu_buffer *buf = cmd->state.vb.buffers[i];
         if (buf)
            tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
      }
   }
   if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
      unsigned i;
      for_each_bit(i, descriptors_state->valid) {
         struct tu_descriptor_set *set = descriptors_state->sets[i];
         for (unsigned j = 0; j < set->layout->buffer_count; ++j)
            if (set->descriptors[j]) {
               tu_bo_list_add(&cmd->bo_list, set->descriptors[j],
                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
            }
      }
   }

   /* Fragment shader state overwrites compute shader state, so flag the
    * compute pipeline for re-emit.
    */
   cmd->state.dirty = TU_CMD_DIRTY_COMPUTE_PIPELINE;
   return VK_SUCCESS;
}
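
/* Direct draws use CP_DRAW_INDX_OFFSET: 7 dwords for indexed draws (flags,
 * instance count, index count, a reserved dword, the 64-bit index buffer
 * address and its size) and 3 dwords for auto-indexed draws, matching the
 * two packet forms emitted below.
 */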
static void
tu6_emit_draw_direct(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     const struct tu_draw_info *draw)
{
   const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;

   tu_cs_emit_regs(cs,
                   A6XX_VFD_INDEX_OFFSET(draw->vertex_offset),
                   A6XX_VFD_INSTANCE_START_OFFSET(draw->first_instance));

   /* TODO hw binning */
   if (draw->indexed) {
      const enum a4xx_index_size index_size =
         tu6_index_size(cmd->state.index_type);
      const uint32_t index_bytes =
         (cmd->state.index_type == VK_INDEX_TYPE_UINT32) ? 4 : 2;
      const struct tu_buffer *buf = cmd->state.index_buffer;
      const VkDeviceSize offset = buf->bo_offset + cmd->state.index_offset +
                                  index_bytes * draw->first_index;
      const uint32_t size = index_bytes * draw->count;

      const uint32_t cp_draw_indx =
         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
         CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) | 0x2000;

      tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 7);
      tu_cs_emit(cs, cp_draw_indx);
      tu_cs_emit(cs, draw->instance_count);
      tu_cs_emit(cs, draw->count);
      tu_cs_emit(cs, 0x0); /* XXX */
      tu_cs_emit_qw(cs, buf->bo->iova + offset);
      tu_cs_emit(cs, size);
   } else {
      const uint32_t cp_draw_indx =
         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) | 0x2000;

      tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
      tu_cs_emit(cs, cp_draw_indx);
      tu_cs_emit(cs, draw->instance_count);
      tu_cs_emit(cs, draw->count);
   }
}
static void
tu_draw(struct tu_cmd_buffer *cmd, const struct tu_draw_info *draw)
{
   struct tu_cs *cs = &cmd->draw_cs;
   VkResult result;

   result = tu6_bind_draw_states(cmd, cs, draw);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   result = tu_cs_reserve_space(cs, 32);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   if (draw->indirect) {
      tu_finishme("indirect draw");
      return;
   }

   /* TODO tu6_emit_marker should pick different regs depending on cs */
   tu6_emit_marker(cmd, cs);
   tu6_emit_draw_direct(cmd, cs, draw);
   tu6_emit_marker(cmd, cs);

   cmd->wait_for_idle = true;

   tu_cs_sanity_check(cs);
}
void
tu_CmdDraw(VkCommandBuffer commandBuffer,
           uint32_t vertexCount,
           uint32_t instanceCount,
           uint32_t firstVertex,
           uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.count = vertexCount;
   info.instance_count = instanceCount;
   info.first_instance = firstInstance;
   info.vertex_offset = firstVertex;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
                  uint32_t indexCount,
                  uint32_t instanceCount,
                  uint32_t firstIndex,
                  int32_t vertexOffset,
                  uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = indexCount;
   info.instance_count = instanceCount;
   info.first_index = firstIndex;
   info.vertex_offset = vertexOffset;
   info.first_instance = firstInstance;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
                   VkBuffer _buffer,
                   VkDeviceSize offset,
                   uint32_t drawCount,
                   uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
                          VkBuffer _buffer,
                          VkDeviceSize offset,
                          uint32_t drawCount,
                          uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}
struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];

   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};
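
/* Upload the IR3 "driver params" (workgroup counts and local group size) as
 * compute shader constants with CP_LOAD_STATE6, but only if the shader's
 * constlen actually reserves space for them. For indirect dispatches the
 * workgroup counts live in a GPU buffer and are not handled yet.
 */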
static void
tu_emit_compute_driver_params(struct tu_cs *cs, struct tu_pipeline *pipeline,
                              const struct tu_dispatch_info *info)
{
   gl_shader_stage type = MESA_SHADER_COMPUTE;
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   const struct ir3_const_state *const_state = &link->const_state;
   uint32_t offset = const_state->offsets.driver_param;

   if (link->constlen <= offset)
      return;

   if (!info->indirect) {
      uint32_t driver_params[IR3_DP_CS_COUNT] = {
         [IR3_DP_NUM_WORK_GROUPS_X] = info->blocks[0],
         [IR3_DP_NUM_WORK_GROUPS_Y] = info->blocks[1],
         [IR3_DP_NUM_WORK_GROUPS_Z] = info->blocks[2],
         [IR3_DP_LOCAL_GROUP_SIZE_X] = pipeline->compute.local_size[0],
         [IR3_DP_LOCAL_GROUP_SIZE_Y] = pipeline->compute.local_size[1],
         [IR3_DP_LOCAL_GROUP_SIZE_Z] = pipeline->compute.local_size[2],
      };

      uint32_t num_consts = MIN2(const_state->num_driver_params,
                                 (link->constlen - offset) * 4);
      /* push constants */
      tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_consts);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                 CP_LOAD_STATE6_0_NUM_UNIT(num_consts / 4));
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, 0);
      uint32_t i;
      for (i = 0; i < num_consts; i++)
         tu_cs_emit(cs, driver_params[i]);
   } else {
      tu_finishme("Indirect driver params");
   }
}
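
/* Record a compute dispatch: emit the program state, constants, driver
 * params, textures and IBOs as needed, program the HLSQ_CS_NDRANGE registers
 * from the workgroup layout, then launch with CP_EXEC_CS or, for indirect
 * dispatches, CP_EXEC_CS_INDIRECT.
 */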
static void
tu_dispatch(struct tu_cmd_buffer *cmd,
            const struct tu_dispatch_info *info)
{
   struct tu_cs *cs = &cmd->cs;
   struct tu_pipeline *pipeline = cmd->state.compute_pipeline;
   struct tu_descriptor_state *descriptors_state =
      &cmd->descriptors[VK_PIPELINE_BIND_POINT_COMPUTE];

   VkResult result = tu_cs_reserve_space(cs, 256);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_PIPELINE)
      tu_cs_emit_ib(cs, &pipeline->program.state_ib);

   struct tu_cs_entry ib;

   ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE);
   if (ib.size)
      tu_cs_emit_ib(cs, &ib);

   tu_emit_compute_driver_params(cs, pipeline, info);

   bool needs_border;
   result = tu6_emit_textures(cmd, pipeline, descriptors_state,
                              MESA_SHADER_COMPUTE, &ib, &needs_border, false);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   if (ib.size)
      tu_cs_emit_ib(cs, &ib);

   if (needs_border)
      tu_finishme("compute border color");

   result = tu6_emit_ibo(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE, &ib);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   if (ib.size)
      tu_cs_emit_ib(cs, &ib);

   if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
      unsigned i;
      for_each_bit(i, descriptors_state->valid) {
         struct tu_descriptor_set *set = descriptors_state->sets[i];
         for (unsigned j = 0; j < set->layout->buffer_count; ++j)
            if (set->descriptors[j]) {
               tu_bo_list_add(&cmd->bo_list, set->descriptors[j],
                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
            }
      }
   }

   /* Compute shader state overwrites fragment shader state, so we flag the
    * graphics pipeline for re-emit.
    */
   cmd->state.dirty = TU_CMD_DIRTY_PIPELINE;

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));

   const uint32_t *local_size = pipeline->compute.local_size;
   const uint32_t *num_groups = info->blocks;
   tu_cs_emit_regs(cs,
                   A6XX_HLSQ_CS_NDRANGE_0(.kerneldim = 3,
                                          .localsizex = local_size[0] - 1,
                                          .localsizey = local_size[1] - 1,
                                          .localsizez = local_size[2] - 1),
                   A6XX_HLSQ_CS_NDRANGE_1(.globalsize_x = local_size[0] * num_groups[0]),
                   A6XX_HLSQ_CS_NDRANGE_2(.globaloff_x = 0),
                   A6XX_HLSQ_CS_NDRANGE_3(.globalsize_y = local_size[1] * num_groups[1]),
                   A6XX_HLSQ_CS_NDRANGE_4(.globaloff_y = 0),
                   A6XX_HLSQ_CS_NDRANGE_5(.globalsize_z = local_size[2] * num_groups[2]),
                   A6XX_HLSQ_CS_NDRANGE_6(.globaloff_z = 0));

   tu_cs_emit_regs(cs,
                   A6XX_HLSQ_CS_KERNEL_GROUP_X(1),
                   A6XX_HLSQ_CS_KERNEL_GROUP_Y(1),
                   A6XX_HLSQ_CS_KERNEL_GROUP_Z(1));

   if (info->indirect) {
      uint64_t iova = tu_buffer_iova(info->indirect) + info->indirect_offset;

      tu_bo_list_add(&cmd->bo_list, info->indirect->bo,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);

      tu_cs_emit_pkt7(cs, CP_EXEC_CS_INDIRECT, 4);
      tu_cs_emit(cs, 0x00000000);
      tu_cs_emit_qw(cs, iova);
      tu_cs_emit(cs,
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(local_size[0] - 1) |
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(local_size[1] - 1) |
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(local_size[2] - 1));
   } else {
      tu_cs_emit_pkt7(cs, CP_EXEC_CS, 4);
      tu_cs_emit(cs, 0x00000000);
      tu_cs_emit(cs, CP_EXEC_CS_1_NGROUPS_X(info->blocks[0]));
      tu_cs_emit(cs, CP_EXEC_CS_2_NGROUPS_Y(info->blocks[1]));
      tu_cs_emit(cs, CP_EXEC_CS_3_NGROUPS_Z(info->blocks[2]));
   }

   tu_cs_emit_wfi(cs);

   tu6_emit_cache_flush(cmd, cs);
}
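
/* Dispatch entry points: pack the Vulkan parameters into a tu_dispatch_info.
 * Note that the base workgroup offsets are recorded but not yet consumed by
 * tu_dispatch() above.
 */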
void
tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
                   uint32_t base_x,
                   uint32_t base_y,
                   uint32_t base_z,
                   uint32_t x,
                   uint32_t y,
                   uint32_t z)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_dispatch_info info = {};

   info.blocks[0] = x;
   info.blocks[1] = y;
   info.blocks[2] = z;

   info.offsets[0] = base_x;
   info.offsets[1] = base_y;
   info.offsets[2] = base_z;
   tu_dispatch(cmd_buffer, &info);
}
void
tu_CmdDispatch(VkCommandBuffer commandBuffer,
               uint32_t x,
               uint32_t y,
               uint32_t z)
{
   tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
}
void
tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
                       VkBuffer _buffer,
                       VkDeviceSize offset)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_dispatch_info info = {};

   info.indirect = buffer;
   info.indirect_offset = offset;

   tu_dispatch(cmd_buffer, &info);
}
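
/* Ending the render pass is where the deferred draw_cs/draw_epilogue_cs
 * contents are replayed into the primary command stream: once for sysmem
 * rendering, or per tile for GMEM rendering.
 */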
void
tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   tu_cs_end(&cmd_buffer->draw_cs);
   tu_cs_end(&cmd_buffer->draw_epilogue_cs);

   if (use_sysmem_rendering(cmd_buffer))
      tu_cmd_render_sysmem(cmd_buffer);
   else
      tu_cmd_render_tiles(cmd_buffer);

   /* discard draw_cs and draw_epilogue_cs entries now that the tiles are
      rendered */
   tu_cs_discard_entries(&cmd_buffer->draw_cs);
   tu_cs_begin(&cmd_buffer->draw_cs);
   tu_cs_discard_entries(&cmd_buffer->draw_epilogue_cs);
   tu_cs_begin(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->state.pass = NULL;
   cmd_buffer->state.subpass = NULL;
   cmd_buffer->state.framebuffer = NULL;
}
void
tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
                     const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdEndRenderPass(commandBuffer);
}
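
/* Barriers: tu_barrier() is an empty stub for now; the entry points below
 * just collect their parameters into a tu_barrier_info.
 */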
struct tu_barrier_info
{
   uint32_t eventCount;
   const VkEvent *pEvents;
   VkPipelineStageFlags srcStageMask;
};
static void
tu_barrier(struct tu_cmd_buffer *cmd_buffer,
           uint32_t memoryBarrierCount,
           const VkMemoryBarrier *pMemoryBarriers,
           uint32_t bufferMemoryBarrierCount,
           const VkBufferMemoryBarrier *pBufferMemoryBarriers,
           uint32_t imageMemoryBarrierCount,
           const VkImageMemoryBarrier *pImageMemoryBarriers,
           const struct tu_barrier_info *info)
{
}
void
tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
                      VkPipelineStageFlags srcStageMask,
                      VkPipelineStageFlags destStageMask,
                      VkBool32 byRegion,
                      uint32_t memoryBarrierCount,
                      const VkMemoryBarrier *pMemoryBarriers,
                      uint32_t bufferMemoryBarrierCount,
                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                      uint32_t imageMemoryBarrierCount,
                      const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = 0;
   info.pEvents = NULL;
   info.srcStageMask = srcStageMask;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}
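
/* Events are backed by a small BO: set/reset writes 1/0 into it with
 * CP_MEM_WRITE, and vkCmdWaitEvents polls it with CP_WAIT_REG_MEM.
 */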
static void
write_event(struct tu_cmd_buffer *cmd, struct tu_event *event, unsigned value)
{
   struct tu_cs *cs = &cmd->cs;

   VkResult result = tu_cs_reserve_space(cs, 4);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_WRITE);

   /* TODO: any flush required before/after ? */

   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
   tu_cs_emit_qw(cs, event->bo.iova); /* ADDR_LO/HI */
   tu_cs_emit(cs, value);
}
void
tu_CmdSetEvent(VkCommandBuffer commandBuffer,
               VkEvent _event,
               VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd, event, 1);
}
void
tu_CmdResetEvent(VkCommandBuffer commandBuffer,
                 VkEvent _event,
                 VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd, event, 0);
}
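
/* Each CP_WAIT_REG_MEM takes 7 dwords (1 packet header + 6 payload), hence
 * the eventCount * 7 reservation below.
 */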
void
tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
                 uint32_t eventCount,
                 const VkEvent *pEvents,
                 VkPipelineStageFlags srcStageMask,
                 VkPipelineStageFlags dstStageMask,
                 uint32_t memoryBarrierCount,
                 const VkMemoryBarrier *pMemoryBarriers,
                 uint32_t bufferMemoryBarrierCount,
                 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                 uint32_t imageMemoryBarrierCount,
                 const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *cs = &cmd->cs;

   VkResult result = tu_cs_reserve_space(cs, eventCount * 7);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   /* TODO: any flush required before/after? (CP_WAIT_FOR_ME?) */

   for (uint32_t i = 0; i < eventCount; i++) {
      const struct tu_event *event = (const struct tu_event *) pEvents[i];

      tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_READ);

      tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
      tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                     CP_WAIT_REG_MEM_0_POLL_MEMORY);
      tu_cs_emit_qw(cs, event->bo.iova); /* POLL_ADDR_LO/HI */
      tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(1));
      tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0u));
      tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(20));
   }
}
void
tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)