/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "tu_private.h"

#include "registers/adreno_pm4.xml.h"
#include "registers/adreno_common.xml.h"

#include "vk_format.h"
void
tu_bo_list_init(struct tu_bo_list *list)
{
   list->count = list->capacity = 0;
   list->bo_infos = NULL;
}
void
tu_bo_list_destroy(struct tu_bo_list *list)
{
   free(list->bo_infos);
}
void
tu_bo_list_reset(struct tu_bo_list *list)
{
   list->count = 0;
}
/**
 * \a flags consists of MSM_SUBMIT_BO_FLAGS.
 */
static uint32_t
tu_bo_list_add_info(struct tu_bo_list *list,
                    const struct drm_msm_gem_submit_bo *bo_info)
{
   assert(bo_info->handle != 0);

   for (uint32_t i = 0; i < list->count; ++i) {
      if (list->bo_infos[i].handle == bo_info->handle) {
         assert(list->bo_infos[i].presumed == bo_info->presumed);
         list->bo_infos[i].flags |= bo_info->flags;
         return i;
      }
   }

   /* grow list->bo_infos if needed */
   if (list->count == list->capacity) {
      uint32_t new_capacity = MAX2(2 * list->count, 16);
      struct drm_msm_gem_submit_bo *new_bo_infos = realloc(
         list->bo_infos, new_capacity * sizeof(struct drm_msm_gem_submit_bo));
      if (!new_bo_infos)
         return TU_BO_LIST_FAILED;
      list->bo_infos = new_bo_infos;
      list->capacity = new_capacity;
   }

   list->bo_infos[list->count] = *bo_info;
   return list->count++;
}
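
/* Illustrative usage sketch, not part of the original driver code: adding the
 * same GEM handle twice merges the submit flags instead of appending a second
 * entry, so the kernel only ever sees one reference per BO.
 *
 *    struct tu_bo_list list;
 *    tu_bo_list_init(&list);
 *    tu_bo_list_add_info(&list, &(struct drm_msm_gem_submit_bo) {
 *       .handle = 1, .flags = MSM_SUBMIT_BO_READ });
 *    tu_bo_list_add_info(&list, &(struct drm_msm_gem_submit_bo) {
 *       .handle = 1, .flags = MSM_SUBMIT_BO_WRITE });
 *    // list.count == 1, list.bo_infos[0].flags == READ | WRITE
 */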
uint32_t
tu_bo_list_add(struct tu_bo_list *list,
               const struct tu_bo *bo,
               uint32_t flags)
{
   return tu_bo_list_add_info(list, &(struct drm_msm_gem_submit_bo) {
      .flags = flags,
      .handle = bo->gem_handle,
      .presumed = bo->iova,
   });
}
VkResult
tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other)
{
   for (uint32_t i = 0; i < other->count; i++) {
      if (tu_bo_list_add_info(list, other->bo_infos + i) == TU_BO_LIST_FAILED)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   return VK_SUCCESS;
}
static void
tu_tiling_config_update_tile_layout(struct tu_tiling_config *tiling,
                                    const struct tu_device *dev,
                                    const struct tu_render_pass *pass)
{
   const uint32_t tile_align_w = pass->tile_align_w;
   const uint32_t max_tile_width = 1024;

   /* note: don't offset the tiling config by render_area.offset,
    * because binning pass can't deal with it
    * this means we might end up with more tiles than necessary,
    * but load/store/etc are still scissored to the render_area
    */
   tiling->tile0.offset = (VkOffset2D) {};

   const uint32_t ra_width =
      tiling->render_area.extent.width +
      (tiling->render_area.offset.x - tiling->tile0.offset.x);
   const uint32_t ra_height =
      tiling->render_area.extent.height +
      (tiling->render_area.offset.y - tiling->tile0.offset.y);

   /* start from 1 tile */
   tiling->tile_count = (VkExtent2D) {
      .width = 1,
      .height = 1,
   };
   tiling->tile0.extent = (VkExtent2D) {
      .width = util_align_npot(ra_width, tile_align_w),
      .height = align(ra_height, TILE_ALIGN_H),
   };

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN)) {
      /* start with 2x2 tiles */
      tiling->tile_count.width = 2;
      tiling->tile_count.height = 2;
      tiling->tile0.extent.width = util_align_npot(DIV_ROUND_UP(ra_width, 2), tile_align_w);
      tiling->tile0.extent.height = align(DIV_ROUND_UP(ra_height, 2), TILE_ALIGN_H);
   }

   /* do not exceed max tile width */
   while (tiling->tile0.extent.width > max_tile_width) {
      tiling->tile_count.width++;
      tiling->tile0.extent.width =
         util_align_npot(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
   }

   /* will force to sysmem, don't bother trying to have a valid tile config
    * TODO: just skip all GMEM stuff when sysmem is forced?
    */
   if (!pass->gmem_pixels)
      return;

   /* do not exceed gmem size */
   while (tiling->tile0.extent.width * tiling->tile0.extent.height > pass->gmem_pixels) {
      if (tiling->tile0.extent.width > MAX2(tile_align_w, tiling->tile0.extent.height)) {
         tiling->tile_count.width++;
         tiling->tile0.extent.width =
            util_align_npot(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
      } else {
         /* if this assert fails then layout is impossible.. */
         assert(tiling->tile0.extent.height > TILE_ALIGN_H);
         tiling->tile_count.height++;
         tiling->tile0.extent.height =
            align(DIV_ROUND_UP(ra_height, tiling->tile_count.height), TILE_ALIGN_H);
      }
   }
}
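
/* Worked example (illustrative only, with made-up numbers; the real
 * tile_align_w, TILE_ALIGN_H and gmem_pixels come from the render pass and
 * device): for an 800x600 render area at offset (0,0) with tile_align_w = 32,
 * TILE_ALIGN_H = 16 and gmem_pixels = 262144, the loops above behave as:
 *
 *    start:        tile_count = 1x1, tile0 = 800x608   (800*608 > 262144)
 *    split width:  tile_count = 2x1, tile0 = 416x608   (416*608 <= 262144)
 *
 * i.e. DIV_ROUND_UP(800, 2) = 400 is rounded up to the 32-pixel alignment
 * (416) and the result now fits in GMEM, so the render area is processed as
 * two 416x608 tiles.
 */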
static void
tu_tiling_config_update_pipe_layout(struct tu_tiling_config *tiling,
                                    const struct tu_device *dev)
{
   const uint32_t max_pipe_count = 32; /* A6xx */

   /* start from 1 tile per pipe */
   tiling->pipe0 = (VkExtent2D) {
      .width = 1,
      .height = 1,
   };
   tiling->pipe_count = tiling->tile_count;

   while (tiling->pipe_count.width * tiling->pipe_count.height > max_pipe_count) {
      if (tiling->pipe0.width < tiling->pipe0.height) {
         tiling->pipe0.width += 1;
         tiling->pipe_count.width =
            DIV_ROUND_UP(tiling->tile_count.width, tiling->pipe0.width);
      } else {
         tiling->pipe0.height += 1;
         tiling->pipe_count.height =
            DIV_ROUND_UP(tiling->tile_count.height, tiling->pipe0.height);
      }
   }
}
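
/* Illustrative example (numbers invented for the sketch): with a 12x8 tile
 * grid the loop above converges as follows:
 *
 *    pipe0 = 1x1 -> pipe_count = 12x8 = 96 pipes  (> 32)
 *    pipe0 = 1x2 -> pipe_count = 12x4 = 48 pipes  (> 32)
 *    pipe0 = 2x2 -> pipe_count =  6x4 = 24 pipes  (<= 32)
 *
 * so each visibility pipe ends up covering a 2x2 block of tiles.
 */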
static void
tu_tiling_config_update_pipes(struct tu_tiling_config *tiling,
                              const struct tu_device *dev)
{
   const uint32_t max_pipe_count = 32; /* A6xx */
   const uint32_t used_pipe_count =
      tiling->pipe_count.width * tiling->pipe_count.height;
   const VkExtent2D last_pipe = {
      .width = (tiling->tile_count.width - 1) % tiling->pipe0.width + 1,
      .height = (tiling->tile_count.height - 1) % tiling->pipe0.height + 1,
   };

   assert(used_pipe_count <= max_pipe_count);
   assert(max_pipe_count <= ARRAY_SIZE(tiling->pipe_config));

   for (uint32_t y = 0; y < tiling->pipe_count.height; y++) {
      for (uint32_t x = 0; x < tiling->pipe_count.width; x++) {
         const uint32_t pipe_x = tiling->pipe0.width * x;
         const uint32_t pipe_y = tiling->pipe0.height * y;
         const uint32_t pipe_w = (x == tiling->pipe_count.width - 1)
                                    ? last_pipe.width
                                    : tiling->pipe0.width;
         const uint32_t pipe_h = (y == tiling->pipe_count.height - 1)
                                    ? last_pipe.height
                                    : tiling->pipe0.height;
         const uint32_t n = tiling->pipe_count.width * y + x;

         tiling->pipe_config[n] = A6XX_VSC_PIPE_CONFIG_REG_X(pipe_x) |
                                  A6XX_VSC_PIPE_CONFIG_REG_Y(pipe_y) |
                                  A6XX_VSC_PIPE_CONFIG_REG_W(pipe_w) |
                                  A6XX_VSC_PIPE_CONFIG_REG_H(pipe_h);
         tiling->pipe_sizes[n] = CP_SET_BIN_DATA5_0_VSC_SIZE(pipe_w * pipe_h);
      }
   }

   memset(tiling->pipe_config + used_pipe_count, 0,
          sizeof(uint32_t) * (max_pipe_count - used_pipe_count));
}
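
/* Illustrative example (invented numbers): for tile_count = 5x3 and
 * pipe0 = 2x2 (so pipe_count = 3x2), last_pipe = 1x1 and the loop above
 * packs, in tile units:
 *
 *    pipe 0: X=0 Y=0 W=2 H=2   pipe 1: X=2 Y=0 W=2 H=2   pipe 2: X=4 Y=0 W=1 H=2
 *    pipe 3: X=0 Y=2 W=2 H=1   pipe 4: X=2 Y=2 W=2 H=1   pipe 5: X=4 Y=2 W=1 H=1
 *
 * and pipe_sizes[n] holds the per-pipe slot count (W*H) used later by
 * CP_SET_BIN_DATA5. The remaining 32 - 6 entries are zeroed by the memset.
 */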
static void
tu_tiling_config_get_tile(const struct tu_tiling_config *tiling,
                          const struct tu_device *dev,
                          uint32_t tx,
                          uint32_t ty,
                          struct tu_tile *tile)
{
   /* find the pipe and the slot for tile (tx, ty) */
   const uint32_t px = tx / tiling->pipe0.width;
   const uint32_t py = ty / tiling->pipe0.height;
   const uint32_t sx = tx - tiling->pipe0.width * px;
   const uint32_t sy = ty - tiling->pipe0.height * py;
   /* last pipe has different width */
   const uint32_t pipe_width =
      MIN2(tiling->pipe0.width,
           tiling->tile_count.width - px * tiling->pipe0.width);

   assert(tx < tiling->tile_count.width && ty < tiling->tile_count.height);
   assert(px < tiling->pipe_count.width && py < tiling->pipe_count.height);
   assert(sx < tiling->pipe0.width && sy < tiling->pipe0.height);

   /* convert to 1D indices */
   tile->pipe = tiling->pipe_count.width * py + px;
   tile->slot = pipe_width * sy + sx;

   /* get the blit area for the tile */
   tile->begin = (VkOffset2D) {
      .x = tiling->tile0.offset.x + tiling->tile0.extent.width * tx,
      .y = tiling->tile0.offset.y + tiling->tile0.extent.height * ty,
   };
   tile->end.x =
      (tx == tiling->tile_count.width - 1)
         ? tiling->render_area.offset.x + tiling->render_area.extent.width
         : tile->begin.x + tiling->tile0.extent.width;
   tile->end.y =
      (ty == tiling->tile_count.height - 1)
         ? tiling->render_area.offset.y + tiling->render_area.extent.height
         : tile->begin.y + tiling->tile0.extent.height;
}
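
/* Illustrative example, continuing the invented 5x3-tile / 2x2-tiles-per-pipe
 * layout from above: tile (tx=3, ty=2) maps to
 *
 *    px = 3/2 = 1, py = 2/2 = 1, sx = 1, sy = 0, pipe_width = MIN2(2, 5-2) = 2
 *    tile->pipe = 3*1 + 1 = 4,   tile->slot = 2*0 + 1 = 1
 *
 * i.e. the second slot of pipe 4, matching the pipe_config layout computed in
 * tu_tiling_config_update_pipes().
 */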
void
tu6_emit_event_write(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     enum vgt_event_type event)
{
   bool need_seqno = false;
   switch (event) {
   case CACHE_FLUSH_TS:
   case PC_CCU_FLUSH_DEPTH_TS:
   case PC_CCU_FLUSH_COLOR_TS:
   case PC_CCU_RESOLVE_TS:
      need_seqno = true;
      break;
   default:
      break;
   }

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
   tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
   if (need_seqno) {
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
      tu_cs_emit(cs, 0);
   }
}
static void
tu6_emit_flushes(struct tu_cmd_buffer *cmd_buffer,
                 struct tu_cs *cs,
                 enum tu_cmd_flush_bits flushes)
{
   /* Experiments show that invalidating CCU while it still has data in it
    * doesn't work, so make sure to always flush before invalidating in case
    * any data remains that hasn't yet been made available through a barrier.
    * However it does seem to work for UCHE.
    */
   if (flushes & (TU_CMD_FLAG_CCU_FLUSH_COLOR |
                  TU_CMD_FLAG_CCU_INVALIDATE_COLOR))
      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_FLUSH_COLOR_TS);
   if (flushes & (TU_CMD_FLAG_CCU_FLUSH_DEPTH |
                  TU_CMD_FLAG_CCU_INVALIDATE_DEPTH))
      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_FLUSH_DEPTH_TS);
   if (flushes & TU_CMD_FLAG_CCU_INVALIDATE_COLOR)
      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_INVALIDATE_COLOR);
   if (flushes & TU_CMD_FLAG_CCU_INVALIDATE_DEPTH)
      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_INVALIDATE_DEPTH);
   if (flushes & TU_CMD_FLAG_CACHE_FLUSH)
      tu6_emit_event_write(cmd_buffer, cs, CACHE_FLUSH_TS);
   if (flushes & TU_CMD_FLAG_CACHE_INVALIDATE)
      tu6_emit_event_write(cmd_buffer, cs, CACHE_INVALIDATE);
   if (flushes & TU_CMD_FLAG_WFI)
      tu_cs_emit_wfi(cs);
}
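
/* Example of the ordering this gives (not from the original source): a
 * barrier that only asks for TU_CMD_FLAG_CCU_INVALIDATE_COLOR and
 * TU_CMD_FLAG_CACHE_INVALIDATE still emits PC_CCU_FLUSH_COLOR_TS first
 * (because of the flush-before-invalidate rule above), then
 * PC_CCU_INVALIDATE_COLOR, then CACHE_INVALIDATE.
 */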
/* "Normal" cache flushes, that don't require any special handling */

void
tu_emit_cache_flush(struct tu_cmd_buffer *cmd_buffer,
                    struct tu_cs *cs)
{
   tu6_emit_flushes(cmd_buffer, cs, cmd_buffer->state.cache.flush_bits);
   cmd_buffer->state.cache.flush_bits = 0;
}
/* Renderpass cache flushes */

void
tu_emit_cache_flush_renderpass(struct tu_cmd_buffer *cmd_buffer,
                               struct tu_cs *cs)
{
   tu6_emit_flushes(cmd_buffer, cs, cmd_buffer->state.renderpass_cache.flush_bits);
   cmd_buffer->state.renderpass_cache.flush_bits = 0;
}
/* Cache flushes for things that use the color/depth read/write path (i.e.
 * blits and draws). This deals with changing CCU state as well as the usual
 * cache flushing.
 */

void
tu_emit_cache_flush_ccu(struct tu_cmd_buffer *cmd_buffer,
                        struct tu_cs *cs,
                        enum tu_cmd_ccu_state ccu_state)
{
   enum tu_cmd_flush_bits flushes = cmd_buffer->state.cache.flush_bits;

   assert(ccu_state != TU_CMD_CCU_UNKNOWN);

   /* Changing CCU state must involve invalidating the CCU. In sysmem mode,
    * the CCU may also contain data that we haven't flushed out yet, so we
    * also need to flush. Also, in order to program RB_CCU_CNTL, we need to
    * emit a WFI as it isn't pipelined.
    */
   if (ccu_state != cmd_buffer->state.ccu_state) {
      if (cmd_buffer->state.ccu_state != TU_CMD_CCU_GMEM) {
         flushes |=
            TU_CMD_FLAG_CCU_FLUSH_COLOR |
            TU_CMD_FLAG_CCU_FLUSH_DEPTH;
         cmd_buffer->state.cache.pending_flush_bits &= ~(
            TU_CMD_FLAG_CCU_FLUSH_COLOR |
            TU_CMD_FLAG_CCU_FLUSH_DEPTH);
      }
      flushes |=
         TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
         TU_CMD_FLAG_CCU_INVALIDATE_DEPTH |
         TU_CMD_FLAG_WFI;
      cmd_buffer->state.cache.pending_flush_bits &= ~(
         TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
         TU_CMD_FLAG_CCU_INVALIDATE_DEPTH);
   }

   tu6_emit_flushes(cmd_buffer, cs, flushes);
   cmd_buffer->state.cache.flush_bits = 0;

   if (ccu_state != cmd_buffer->state.ccu_state) {
      struct tu_physical_device *phys_dev = cmd_buffer->device->physical_device;
      tu_cs_emit_regs(cs,
                      A6XX_RB_CCU_CNTL(.offset =
                                          ccu_state == TU_CMD_CCU_GMEM ?
                                          phys_dev->ccu_offset_gmem :
                                          phys_dev->ccu_offset_bypass,
                                       .gmem = ccu_state == TU_CMD_CCU_GMEM));
      cmd_buffer->state.ccu_state = ccu_state;
   }
}
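
/* Illustrative walk-through (not part of the original code): switching from
 * TU_CMD_CCU_SYSMEM to TU_CMD_CCU_GMEM adds CCU color/depth flushes (the old
 * state was not GMEM), CCU color/depth invalidates and a WFI to the flushes
 * to emit, emits them, and only then reprograms RB_CCU_CNTL with
 * ccu_offset_gmem and .gmem = true.
 */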
static void
tu6_emit_zs(struct tu_cmd_buffer *cmd,
            const struct tu_subpass *subpass,
            struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   const uint32_t a = subpass->depth_stencil_attachment.attachment;
   if (a == VK_ATTACHMENT_UNUSED) {
      tu_cs_emit_regs(cs,
                      A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE),
                      A6XX_RB_DEPTH_BUFFER_PITCH(0),
                      A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(0),
                      A6XX_RB_DEPTH_BUFFER_BASE(0),
                      A6XX_RB_DEPTH_BUFFER_BASE_GMEM(0));

      tu_cs_emit_regs(cs,
                      A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE));

      tu_cs_emit_regs(cs,
                      A6XX_GRAS_LRZ_BUFFER_BASE(0),
                      A6XX_GRAS_LRZ_BUFFER_PITCH(0),
                      A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));

      tu_cs_emit_regs(cs, A6XX_RB_STENCIL_INFO(0));

      return;
   }

   const struct tu_image_view *iview = fb->attachments[a].attachment;
   const struct tu_render_pass_attachment *attachment =
      &cmd->state.pass->attachments[a];
   enum a6xx_depth_format fmt = tu6_pipe2depth(attachment->format);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
   tu_cs_emit(cs, A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt).value);
   tu_cs_image_ref(cs, iview, 0);
   tu_cs_emit(cs, attachment->gmem_offset);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = fmt));

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
   tu_cs_image_flag_ref(cs, iview, 0);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_BUFFER_BASE(0),
                   A6XX_GRAS_LRZ_BUFFER_PITCH(0),
                   A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));

   if (attachment->format == VK_FORMAT_S8_UINT) {
      tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCIL_INFO, 6);
      tu_cs_emit(cs, A6XX_RB_STENCIL_INFO(.separate_stencil = true).value);
      tu_cs_image_ref(cs, iview, 0);
      tu_cs_emit(cs, attachment->gmem_offset);
   } else {
      tu_cs_emit_regs(cs,
                      A6XX_RB_STENCIL_INFO(0));
   }
}
static void
tu6_emit_mrt(struct tu_cmd_buffer *cmd,
             const struct tu_subpass *subpass,
             struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   for (uint32_t i = 0; i < subpass->color_count; ++i) {
      uint32_t a = subpass->color_attachments[i].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      const struct tu_image_view *iview = fb->attachments[a].attachment;

      tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_BUF_INFO(i), 6);
      tu_cs_emit(cs, iview->RB_MRT_BUF_INFO);
      tu_cs_image_ref(cs, iview, 0);
      tu_cs_emit(cs, cmd->state.pass->attachments[a].gmem_offset);

      tu_cs_emit_regs(cs,
                      A6XX_SP_FS_MRT_REG(i, .dword = iview->SP_FS_MRT_REG));

      tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_LO(i), 3);
      tu_cs_image_flag_ref(cs, iview, 0);
   }

   tu_cs_emit_regs(cs,
                   A6XX_RB_SRGB_CNTL(.dword = subpass->srgb_cntl));
   tu_cs_emit_regs(cs,
                   A6XX_SP_SRGB_CNTL(.dword = subpass->srgb_cntl));

   tu_cs_emit_regs(cs, A6XX_GRAS_MAX_LAYER_INDEX(fb->layers - 1));
}
static void
tu6_emit_msaa(struct tu_cs *cs, VkSampleCountFlagBits vk_samples)
{
   const enum a3xx_msaa_samples samples = tu_msaa_samples(vk_samples);
   bool msaa_disable = samples == MSAA_ONE;

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_RAS_MSAA_CNTL(samples),
                   A6XX_SP_TP_DEST_MSAA_CNTL(.samples = samples,
                                             .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_RAS_MSAA_CNTL(samples),
                   A6XX_GRAS_DEST_MSAA_CNTL(.samples = samples,
                                            .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_RB_RAS_MSAA_CNTL(samples),
                   A6XX_RB_DEST_MSAA_CNTL(.samples = samples,
                                          .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_RB_MSAA_CNTL(samples));
}
static void
tu6_emit_bin_size(struct tu_cs *cs,
                  uint32_t bin_w, uint32_t bin_h, uint32_t flags)
{
   tu_cs_emit_regs(cs,
                   A6XX_GRAS_BIN_CONTROL(.binw = bin_w,
                                         .binh = bin_h,
                                         .dword = flags));

   tu_cs_emit_regs(cs,
                   A6XX_RB_BIN_CONTROL(.binw = bin_w,
                                       .binh = bin_h,
                                       .dword = flags));

   /* no flag for RB_BIN_CONTROL2... */
   tu_cs_emit_regs(cs,
                   A6XX_RB_BIN_CONTROL2(.binw = bin_w,
                                        .binh = bin_h));
}
static void
tu6_emit_render_cntl(struct tu_cmd_buffer *cmd,
                     const struct tu_subpass *subpass,
                     struct tu_cs *cs,
                     bool binning)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   uint32_t cntl = 0;
   cntl |= A6XX_RB_RENDER_CNTL_UNK4;
   if (binning) {
      cntl |= A6XX_RB_RENDER_CNTL_BINNING;
   } else {
      uint32_t mrts_ubwc_enable = 0;
      for (uint32_t i = 0; i < subpass->color_count; ++i) {
         uint32_t a = subpass->color_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         const struct tu_image_view *iview = fb->attachments[a].attachment;
         if (iview->ubwc_enabled)
            mrts_ubwc_enable |= 1 << i;
      }

      cntl |= A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable);

      const uint32_t a = subpass->depth_stencil_attachment.attachment;
      if (a != VK_ATTACHMENT_UNUSED) {
         const struct tu_image_view *iview = fb->attachments[a].attachment;
         if (iview->ubwc_enabled)
            cntl |= A6XX_RB_RENDER_CNTL_FLAG_DEPTH;
      }

      /* In the !binning case, we need to set RB_RENDER_CNTL in the draw_cs
       * in order to set it correctly for the different subpasses. However,
       * that means the packets we're emitting also happen during binning. So
       * we need to guard the write on !BINNING at CP execution time.
       */
      tu_cs_reserve(cs, 3 + 4);
      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
      tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
                     CP_COND_REG_EXEC_0_GMEM | CP_COND_REG_EXEC_0_SYSMEM);
      tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(4));
   }

   tu_cs_emit_pkt7(cs, CP_REG_WRITE, 3);
   tu_cs_emit(cs, CP_REG_WRITE_0_TRACKER(TRACK_RENDER_CNTL));
   tu_cs_emit(cs, REG_A6XX_RB_RENDER_CNTL);
   tu_cs_emit(cs, cntl);
}
static void
tu6_emit_blit_scissor(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool align)
{
   const VkRect2D *render_area = &cmd->state.tiling_config.render_area;
   uint32_t x1 = render_area->offset.x;
   uint32_t y1 = render_area->offset.y;
   uint32_t x2 = x1 + render_area->extent.width - 1;
   uint32_t y2 = y1 + render_area->extent.height - 1;

   if (align) {
      x1 = x1 & ~(GMEM_ALIGN_W - 1);
      y1 = y1 & ~(GMEM_ALIGN_H - 1);
      x2 = ALIGN_POT(x2 + 1, GMEM_ALIGN_W) - 1;
      y2 = ALIGN_POT(y2 + 1, GMEM_ALIGN_H) - 1;
   }

   tu_cs_emit_regs(cs,
                   A6XX_RB_BLIT_SCISSOR_TL(.x = x1, .y = y1),
                   A6XX_RB_BLIT_SCISSOR_BR(.x = x2, .y = y2));
}
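
/* Worked example with assumed alignment values (GMEM_ALIGN_W/H are taken
 * here as 16/4 purely for illustration): a render area at offset (10, 20)
 * with extent 100x50 gives x1=10 y1=20 x2=109 y2=69, and with align=true the
 * scissor is widened to x1=0 y1=20 x2=111 y2=71 so that GMEM loads/stores
 * cover whole aligned blocks.
 */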
void
tu6_emit_window_scissor(struct tu_cs *cs,
                        uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2)
{
   tu_cs_emit_regs(cs,
                   A6XX_GRAS_SC_WINDOW_SCISSOR_TL(.x = x1, .y = y1),
                   A6XX_GRAS_SC_WINDOW_SCISSOR_BR(.x = x2, .y = y2));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_RESOLVE_CNTL_1(.x = x1, .y = y1),
                   A6XX_GRAS_RESOLVE_CNTL_2(.x = x2, .y = y2));
}
void
tu6_emit_window_offset(struct tu_cs *cs, uint32_t x1, uint32_t y1)
{
   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET2(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_SP_WINDOW_OFFSET(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_WINDOW_OFFSET(.x = x1, .y = y1));
}
static void
tu_cs_emit_draw_state(struct tu_cs *cs, uint32_t id, struct tu_draw_state state)
{
   uint32_t enable_mask;
   switch (id) {
   case TU_DRAW_STATE_PROGRAM:
   case TU_DRAW_STATE_VI:
   case TU_DRAW_STATE_FS_CONST:
   /* The blob seems to not enable this (DESC_SETS_LOAD) for binning, even
    * when resources would actually be used in the binning shader.
    * Presumably the overhead of prefetching the resources isn't
    * worth it.
    */
   case TU_DRAW_STATE_DESC_SETS_LOAD:
      enable_mask = CP_SET_DRAW_STATE__0_GMEM |
                    CP_SET_DRAW_STATE__0_SYSMEM;
      break;
   case TU_DRAW_STATE_PROGRAM_BINNING:
   case TU_DRAW_STATE_VI_BINNING:
      enable_mask = CP_SET_DRAW_STATE__0_BINNING;
      break;
   case TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM:
      enable_mask = CP_SET_DRAW_STATE__0_GMEM;
      break;
   case TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM:
      enable_mask = CP_SET_DRAW_STATE__0_SYSMEM;
      break;
   default:
      enable_mask = CP_SET_DRAW_STATE__0_GMEM |
                    CP_SET_DRAW_STATE__0_SYSMEM |
                    CP_SET_DRAW_STATE__0_BINNING;
      break;
   }

   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(state.size) |
                  enable_mask |
                  CP_SET_DRAW_STATE__0_GROUP_ID(id) |
                  COND(!state.size, CP_SET_DRAW_STATE__0_DISABLE));
   tu_cs_emit_qw(cs, state.iova);
}
/* note: get rid of this eventually */
static void
tu_cs_emit_sds_ib(struct tu_cs *cs, uint32_t id, struct tu_cs_entry entry)
{
   tu_cs_emit_draw_state(cs, id, (struct tu_draw_state) {
      .iova = entry.size ? entry.bo->iova + entry.offset : 0,
      .size = entry.size / 4,
   });
}
static bool
use_hw_binning(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   /* XFB commands are emitted for BINNING || SYSMEM, which makes it incompatible
    * with non-hw binning GMEM rendering. this is required because some of the
    * XFB commands need to only be executed once
    */
   if (cmd->state.xfb_used)
      return true;

   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_NOBIN))
      return false;

   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN))
      return true;

   return (tiling->tile_count.width * tiling->tile_count.height) > 2;
}
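
/* In practice (illustrative reading of the checks above): a 2x1 or 1x2 tile
 * layout skips the binning pass, a 2x2 or larger layout uses it, and the
 * TU_DEBUG_NOBIN / TU_DEBUG_FORCEBIN flags or the use of transform feedback
 * override that heuristic.
 */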
static bool
use_sysmem_rendering(struct tu_cmd_buffer *cmd)
{
   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_SYSMEM))
      return true;

   /* can't fit attachments into gmem */
   if (!cmd->state.pass->gmem_pixels)
      return true;

   if (cmd->state.framebuffer->layers > 1)
      return true;

   return cmd->state.tiling_config.force_sysmem;
}
static void
tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     const struct tu_tile *tile)
{
   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_YIELD));

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));

   const uint32_t x1 = tile->begin.x;
   const uint32_t y1 = tile->begin.y;
   const uint32_t x2 = tile->end.x - 1;
   const uint32_t y2 = tile->end.y - 1;
   tu6_emit_window_scissor(cs, x1, y1, x2, y2);
   tu6_emit_window_offset(cs, x1, y1);

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_OVERRIDE(.so_disable = false));

   if (use_hw_binning(cmd)) {
      tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5, 7);
      tu_cs_emit(cs, cmd->state.tiling_config.pipe_sizes[tile->pipe] |
                     CP_SET_BIN_DATA5_0_VSC_N(tile->slot));
      tu_cs_emit_qw(cs, cmd->vsc_draw_strm.iova + tile->pipe * cmd->vsc_draw_strm_pitch);
      tu_cs_emit_qw(cs, cmd->vsc_draw_strm.iova + (tile->pipe * 4) + (32 * cmd->vsc_draw_strm_pitch));
      tu_cs_emit_qw(cs, cmd->vsc_prim_strm.iova + (tile->pipe * cmd->vsc_prim_strm_pitch));

      tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);
   } else {
      tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
      tu_cs_emit(cs, 0x1);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);
   }
}
static void
tu6_emit_sysmem_resolve(struct tu_cmd_buffer *cmd,
                        struct tu_cs *cs,
                        uint32_t a,
                        uint32_t gmem_a)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   struct tu_image_view *dst = fb->attachments[a].attachment;
   struct tu_image_view *src = fb->attachments[gmem_a].attachment;

   tu_resolve_sysmem(cmd, cs, src, dst, fb->layers, &cmd->state.tiling_config.render_area);
}
static void
tu6_emit_sysmem_resolves(struct tu_cmd_buffer *cmd,
                         struct tu_cs *cs,
                         const struct tu_subpass *subpass)
{
   if (subpass->resolve_attachments) {
      /* From the documentation for vkCmdNextSubpass, section 7.4 "Render Pass
       * Commands":
       *
       *    End-of-subpass multisample resolves are treated as color
       *    attachment writes for the purposes of synchronization. That is,
       *    they are considered to execute in the
       *    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT pipeline stage and
       *    their writes are synchronized with
       *    VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT. Synchronization between
       *    rendering within a subpass and any resolve operations at the end
       *    of the subpass occurs automatically, without need for explicit
       *    dependencies or pipeline barriers. However, if the resolve
       *    attachment is also used in a different subpass, an explicit
       *    dependency is needed.
       *
       * We use the CP_BLIT path for sysmem resolves, which is really a
       * transfer command, so we have to manually flush similar to the gmem
       * resolve case. However, a flush afterwards isn't needed because of the
       * last sentence and the fact that we're in sysmem mode.
       */
      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);

      /* Wait for the flushes to land before using the 2D engine */
      tu_cs_emit_wfi(cs);

      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         tu6_emit_sysmem_resolve(cmd, cs, a,
                                 subpass->color_attachments[i].attachment);
      }
   }
}
static void
tu6_emit_tile_store(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_render_pass *pass = cmd->state.pass;
   const struct tu_subpass *subpass = &pass->subpasses[pass->subpass_count-1];

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE));

   tu6_emit_blit_scissor(cmd, cs, true);

   for (uint32_t a = 0; a < pass->attachment_count; ++a) {
      if (pass->attachments[a].gmem_offset >= 0)
         tu_store_gmem_attachment(cmd, cs, a, a);
   }

   if (subpass->resolve_attachments) {
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a != VK_ATTACHMENT_UNUSED)
            tu_store_gmem_attachment(cmd, cs, a,
                                     subpass->color_attachments[i].attachment);
      }
   }
}
static void
tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_physical_device *phys_dev = cmd->device->physical_device;

   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);

   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 0xfffff);

   tu_cs_emit_regs(cs,
                   A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_bypass));
   cmd->state.ccu_state = TU_CMD_CCU_SYSMEM;
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE0F, 0x3f);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B605, 0x44);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B600, 0x100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE00, 0x80);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE01, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9600, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8600, 0x880);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE04, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE03, 0x00000410);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_IBO_COUNT, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B182, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BB11, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A982, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A9A8, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AB00, 0x5);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_GS_SIV_CNTL, 0x0000ffff);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x1f);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_SRGB_CNTL, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8110, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL0, 0x401);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL1, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8818, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8819, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881A, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881C, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881D, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881E, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_88F0, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9101, 0xffff00);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9107, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9236,
                        A6XX_VPC_UNKNOWN_9236_POINT_COORD_INVERT(0));
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_SO_OVERRIDE,
                        A6XX_VPC_SO_OVERRIDE_SO_DISABLE);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9801, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9980, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9990, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9B07, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A81B, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B183, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8099, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_809B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A0, 2);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80AF, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9211, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9602, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9981, 0x3);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9E72, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9108, 0x3);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B309, 0x000000a2);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_Z_BOUNDS_MIN, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_Z_BOUNDS_MAX, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_MODE_CNTL, 0x00000000);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A008, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x0000001f);

   /* we don't use this yet.. probably best to disable.. */
   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_HS_CTRL_REG0(0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_GS_CTRL_REG0(0));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_CNTL(0));

   tu_cs_emit_regs(cs,
                   A6XX_RB_LRZ_CNTL(0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(.bo = &cmd->device->border_color));
   tu_cs_emit_regs(cs,
                   A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(.bo = &cmd->device->border_color));

   tu_cs_sanity_check(cs);
}
static void
update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tu_cs_emit_regs(cs,
                   A6XX_VSC_BIN_SIZE(.width = tiling->tile0.extent.width,
                                     .height = tiling->tile0.extent.height),
                   A6XX_VSC_DRAW_STRM_SIZE_ADDRESS(.bo = &cmd->vsc_draw_strm,
                                                   .bo_offset = 32 * cmd->vsc_draw_strm_pitch));

   tu_cs_emit_regs(cs,
                   A6XX_VSC_BIN_COUNT(.nx = tiling->tile_count.width,
                                      .ny = tiling->tile_count.height));

   tu_cs_emit_pkt4(cs, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
   for (unsigned i = 0; i < 32; i++)
      tu_cs_emit(cs, tiling->pipe_config[i]);

   tu_cs_emit_regs(cs,
                   A6XX_VSC_PRIM_STRM_ADDRESS(.bo = &cmd->vsc_prim_strm),
                   A6XX_VSC_PRIM_STRM_PITCH(cmd->vsc_prim_strm_pitch),
                   A6XX_VSC_PRIM_STRM_LIMIT(cmd->vsc_prim_strm_pitch - 64));

   tu_cs_emit_regs(cs,
                   A6XX_VSC_DRAW_STRM_ADDRESS(.bo = &cmd->vsc_draw_strm),
                   A6XX_VSC_DRAW_STRM_PITCH(cmd->vsc_draw_strm_pitch),
                   A6XX_VSC_DRAW_STRM_LIMIT(cmd->vsc_draw_strm_pitch - 64));
}
static void
emit_vsc_overflow_test(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
   const uint32_t used_pipe_count =
      tiling->pipe_count.width * tiling->pipe_count.height;

   /* Clear vsc_scratch: */
   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
   tu_cs_emit(cs, 0x0);

   /* Check for overflow, write vsc_scratch if detected: */
   for (int i = 0; i < used_pipe_count; i++) {
      tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
      tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
                     CP_COND_WRITE5_0_WRITE_MEMORY);
      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_DRAW_STRM_SIZE_REG(i)));
      tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_draw_strm_pitch - 64));
      tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(1 + cmd->vsc_draw_strm_pitch));

      tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
      tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
                     CP_COND_WRITE5_0_WRITE_MEMORY);
      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_PRIM_STRM_SIZE_REG(i)));
      tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_prim_strm_pitch - 64));
      tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(3 + cmd->vsc_prim_strm_pitch));
   }

   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
}
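
/* Illustrative reading of the packets above (not from the original source):
 * for each pipe the CP compares the stream size register against
 * pitch - 64, and on overflow writes a small tag into vsc_scratch, encoded as
 * 1 + vsc_draw_strm_pitch for the draw stream and 3 + vsc_prim_strm_pitch
 * for the primitive stream, so a later readback can presumably tell which
 * stream overflowed and how large the buffer was when it did.
 */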
static void
tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_physical_device *phys_dev = cmd->device->physical_device;
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   uint32_t x1 = tiling->tile0.offset.x;
   uint32_t y1 = tiling->tile0.offset.y;
   uint32_t x2 = tiling->render_area.offset.x + tiling->render_area.extent.width - 1;
   uint32_t y2 = tiling->render_area.offset.y + tiling->render_area.extent.height - 1;

   tu6_emit_window_scissor(cs, x1, y1, x2, y2);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_wfi(cs);

   tu_cs_emit_regs(cs,
                   A6XX_VFD_MODE_CNTL(.binning_pass = true));

   update_vsc_pipe(cmd, cs);

   tu_cs_emit_regs(cs,
                   A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));

   tu_cs_emit_regs(cs,
                   A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
   tu_cs_emit(cs, UNK_2C);

   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET(.x = 0, .y = 0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_WINDOW_OFFSET(.x = 0, .y = 0));

   /* emit IB to binning drawcmds: */
   tu_cs_emit_call(cs, &cmd->draw_cs);

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
   tu_cs_emit(cs, UNK_2D);

   /* This flush is probably required because the VSC, which produces the
    * visibility stream, is a client of UCHE, whereas the CP needs to read the
    * visibility stream (without caching) to do draw skipping. The
    * WFI+WAIT_FOR_ME combination guarantees that the binning commands
    * submitted are finished before reading the VSC regs (in
    * emit_vsc_overflow_test) or the VSC_DATA buffer directly (implicitly as
    * part of draws).
    */
   tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS);

   tu_cs_emit_wfi(cs);

   tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

   emit_vsc_overflow_test(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x0);
}
static void
tu_emit_input_attachments(struct tu_cmd_buffer *cmd,
                          const struct tu_subpass *subpass,
                          struct tu_cs_entry *ib,
                          bool gmem)
{
   /* note: we can probably emit input attachments just once for the whole
    * renderpass, this would avoid emitting both sysmem/gmem versions
    *
    * emit two texture descriptors for each input, as a workaround for
    * d24s8, which can be sampled as both float (depth) and integer (stencil)
    * tu_shader lowers uint input attachment loads to use the 2nd descriptor
    *
    * TODO: a smarter workaround
    */

   if (!subpass->input_count)
      return;

   struct tu_cs_memory texture;
   VkResult result = tu_cs_alloc(&cmd->sub_cs, subpass->input_count * 2,
                                 A6XX_TEX_CONST_DWORDS, &texture);
   assert(result == VK_SUCCESS);

   for (unsigned i = 0; i < subpass->input_count * 2; i++) {
      uint32_t a = subpass->input_attachments[i / 2].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      struct tu_image_view *iview =
         cmd->state.framebuffer->attachments[a].attachment;
      const struct tu_render_pass_attachment *att =
         &cmd->state.pass->attachments[a];
      uint32_t *dst = &texture.map[A6XX_TEX_CONST_DWORDS * i];

      memcpy(dst, iview->descriptor, A6XX_TEX_CONST_DWORDS * 4);

      if (i % 2 == 1 && att->format == VK_FORMAT_D24_UNORM_S8_UINT) {
         /* note this works because spec says fb and input attachments
          * must use identity swizzle
          */
         dst[0] &= ~(A6XX_TEX_CONST_0_FMT__MASK |
            A6XX_TEX_CONST_0_SWIZ_X__MASK | A6XX_TEX_CONST_0_SWIZ_Y__MASK |
            A6XX_TEX_CONST_0_SWIZ_Z__MASK | A6XX_TEX_CONST_0_SWIZ_W__MASK);
         dst[0] |= A6XX_TEX_CONST_0_FMT(FMT6_S8Z24_UINT) |
            A6XX_TEX_CONST_0_SWIZ_X(A6XX_TEX_Y) |
            A6XX_TEX_CONST_0_SWIZ_Y(A6XX_TEX_ZERO) |
            A6XX_TEX_CONST_0_SWIZ_Z(A6XX_TEX_ZERO) |
            A6XX_TEX_CONST_0_SWIZ_W(A6XX_TEX_ONE);
      }

      if (!gmem)
         continue;

      /* patched for gmem */
      dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
      dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
      dst[2] =
         A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
         A6XX_TEX_CONST_2_PITCH(cmd->state.tiling_config.tile0.extent.width * att->cpp);
      dst[3] = 0;
      dst[4] = cmd->device->physical_device->gmem_base + att->gmem_offset;
      dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
      for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
         dst[i] = 0;
   }

   struct tu_cs cs;
   tu_cs_begin_sub_stream(&cmd->sub_cs, 9, &cs);

   tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_FRAG, 3);
   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
                   CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                   CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                   CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_TEX) |
                   CP_LOAD_STATE6_0_NUM_UNIT(subpass->input_count * 2));
   tu_cs_emit_qw(&cs, texture.iova);

   tu_cs_emit_pkt4(&cs, REG_A6XX_SP_FS_TEX_CONST_LO, 2);
   tu_cs_emit_qw(&cs, texture.iova);

   tu_cs_emit_regs(&cs, A6XX_SP_FS_TEX_COUNT(subpass->input_count * 2));

   *ib = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
}
void
tu_set_input_attachments(struct tu_cmd_buffer *cmd, const struct tu_subpass *subpass)
{
   struct tu_cs *cs = &cmd->draw_cs;

   tu_emit_input_attachments(cmd, subpass, &cmd->state.ia_gmem_ib, true);
   tu_emit_input_attachments(cmd, subpass, &cmd->state.ia_sysmem_ib, false);

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 6);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM, cmd->state.ia_gmem_ib);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM, cmd->state.ia_sysmem_ib);
}
static void
tu_emit_renderpass_begin(struct tu_cmd_buffer *cmd,
                         const VkRenderPassBeginInfo *info)
{
   struct tu_cs *cs = &cmd->draw_cs;

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);

   tu6_emit_blit_scissor(cmd, cs, true);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu_load_gmem_attachment(cmd, cs, i, false);

   tu6_emit_blit_scissor(cmd, cs, false);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu_clear_gmem_attachment(cmd, cs, i, info);

   tu_cond_exec_end(cs);

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu_clear_sysmem_attachment(cmd, cs, i, info);

   tu_cond_exec_end(cs);
}
static void
tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
                        const struct VkRect2D *renderArea)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   assert(fb->width > 0 && fb->height > 0);
   tu6_emit_window_scissor(cs, 0, 0, fb->width - 1, fb->height - 1);
   tu6_emit_window_offset(cs, 0, 0);

   tu6_emit_bin_size(cs, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */

   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_SYSMEM);

   /* enable stream-out, with sysmem there is only one pass: */
   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_OVERRIDE(.so_disable = false));

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_sanity_check(cs);
}
static void
tu6_sysmem_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   /* Do any resolves of the last subpass. These are handled in the
    * tile_store_ib in the gmem path.
    */
   tu6_emit_sysmem_resolves(cmd, cs, cmd->state.subpass);

   tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);

   tu_cs_sanity_check(cs);
}
static void
tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_physical_device *phys_dev = cmd->device->physical_device;

   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_GMEM);

   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
   if (use_hw_binning(cmd)) {
      /* enable stream-out during binning pass: */
      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=false));

      tu6_emit_bin_size(cs,
                        tiling->tile0.extent.width,
                        tiling->tile0.extent.height,
                        A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);

      tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, true);

      tu6_emit_binning_pass(cmd, cs);

      /* and disable stream-out for draw pass: */
      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=true));

      tu6_emit_bin_size(cs,
                        tiling->tile0.extent.width,
                        tiling->tile0.extent.height,
                        A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);

      tu_cs_emit_regs(cs,
                      A6XX_VFD_MODE_CNTL(0));

      tu_cs_emit_regs(cs, A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));

      tu_cs_emit_regs(cs, A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));

      tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
      tu_cs_emit(cs, 0x1);
   } else {
      /* no binning pass, so enable stream-out for draw pass: */
      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=false));

      tu6_emit_bin_size(cs,
                        tiling->tile0.extent.width,
                        tiling->tile0.extent.height,
                        0x6000000);
   }

   tu_cs_sanity_check(cs);
}
static void
tu6_render_tile(struct tu_cmd_buffer *cmd,
                struct tu_cs *cs,
                const struct tu_tile *tile)
{
   tu6_emit_tile_select(cmd, cs, tile);

   tu_cs_emit_call(cs, &cmd->draw_cs);

   if (use_hw_binning(cmd)) {
      tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
      tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
   }

   tu_cs_emit_ib(cs, &cmd->state.tile_store_ib);

   tu_cs_sanity_check(cs);
}
static void
tu6_tile_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_CNTL(0));

   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);

   tu6_emit_event_write(cmd, cs, PC_CCU_RESOLVE_TS);

   tu_cs_sanity_check(cs);
}
static void
tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   if (use_hw_binning(cmd))
      cmd->use_vsc_data = true;

   tu6_tile_render_begin(cmd, &cmd->cs);

   for (uint32_t y = 0; y < tiling->tile_count.height; y++) {
      for (uint32_t x = 0; x < tiling->tile_count.width; x++) {
         struct tu_tile tile;
         tu_tiling_config_get_tile(tiling, cmd->device, x, y, &tile);
         tu6_render_tile(cmd, &cmd->cs, &tile);
      }
   }

   tu6_tile_render_end(cmd, &cmd->cs);
}
static void
tu_cmd_render_sysmem(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tu6_sysmem_render_begin(cmd, &cmd->cs, &tiling->render_area);

   tu_cs_emit_call(&cmd->cs, &cmd->draw_cs);

   tu6_sysmem_render_end(cmd, &cmd->cs);
}
static void
tu_cmd_prepare_tile_store_ib(struct tu_cmd_buffer *cmd)
{
   const uint32_t tile_store_space = 11 + (35 * 2) * cmd->state.pass->attachment_count;
   struct tu_cs sub_cs;

   VkResult result =
      tu_cs_begin_sub_stream(&cmd->sub_cs, tile_store_space, &sub_cs);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   /* emit to tile-store sub_cs */
   tu6_emit_tile_store(cmd, &sub_cs);

   cmd->state.tile_store_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
}
static void
tu_cmd_update_tiling_config(struct tu_cmd_buffer *cmd,
                            const VkRect2D *render_area)
{
   const struct tu_device *dev = cmd->device;
   struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tiling->render_area = *render_area;
   tiling->force_sysmem = false;

   tu_tiling_config_update_tile_layout(tiling, dev, cmd->state.pass);
   tu_tiling_config_update_pipe_layout(tiling, dev);
   tu_tiling_config_update_pipes(tiling, dev);
}
static VkResult
tu_create_cmd_buffer(struct tu_device *device,
                     struct tu_cmd_pool *pool,
                     VkCommandBufferLevel level,
                     VkCommandBuffer *pCommandBuffer)
{
   struct tu_cmd_buffer *cmd_buffer;
   cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
      cmd_buffer->queue_family_index = pool->queue_family_index;
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
      cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
   }

   tu_bo_list_init(&cmd_buffer->bo_list);
   tu_cs_init(&cmd_buffer->cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->draw_cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->draw_epilogue_cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->sub_cs, device, TU_CS_MODE_SUB_STREAM, 2048);

   *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);

   list_inithead(&cmd_buffer->upload.list);

   VkResult result = tu_bo_init_new(device, &cmd_buffer->scratch_bo, 0x1000);
   if (result != VK_SUCCESS)
      goto fail_scratch_bo;

   /* TODO: resize on overflow */
   cmd_buffer->vsc_draw_strm_pitch = device->vsc_draw_strm_pitch;
   cmd_buffer->vsc_prim_strm_pitch = device->vsc_prim_strm_pitch;
   cmd_buffer->vsc_draw_strm = device->vsc_draw_strm;
   cmd_buffer->vsc_prim_strm = device->vsc_prim_strm;

   return VK_SUCCESS;

fail_scratch_bo:
   list_del(&cmd_buffer->pool_link);
   return result;
}
static void
tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
{
   tu_bo_finish(cmd_buffer->device, &cmd_buffer->scratch_bo);

   list_del(&cmd_buffer->pool_link);

   tu_cs_finish(&cmd_buffer->cs);
   tu_cs_finish(&cmd_buffer->draw_cs);
   tu_cs_finish(&cmd_buffer->draw_epilogue_cs);
   tu_cs_finish(&cmd_buffer->sub_cs);

   tu_bo_list_destroy(&cmd_buffer->bo_list);
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
static VkResult
tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
{
   cmd_buffer->record_result = VK_SUCCESS;

   tu_bo_list_reset(&cmd_buffer->bo_list);
   tu_cs_reset(&cmd_buffer->cs);
   tu_cs_reset(&cmd_buffer->draw_cs);
   tu_cs_reset(&cmd_buffer->draw_epilogue_cs);
   tu_cs_reset(&cmd_buffer->sub_cs);

   for (unsigned i = 0; i < MAX_BIND_POINTS; i++)
      memset(&cmd_buffer->descriptors[i].sets, 0, sizeof(cmd_buffer->descriptors[i].sets));

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;

   return cmd_buffer->record_result;
}
VkResult
tu_AllocateCommandBuffers(VkDevice _device,
                          const VkCommandBufferAllocateInfo *pAllocateInfo,
                          VkCommandBuffer *pCommandBuffers)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {

      if (!list_is_empty(&pool->free_cmd_buffers)) {
         struct tu_cmd_buffer *cmd_buffer = list_first_entry(
            &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);

         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

         result = tu_reset_cmd_buffer(cmd_buffer);
         cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
         cmd_buffer->level = pAllocateInfo->level;

         pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
      } else {
         result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                       &pCommandBuffers[i]);
      }
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
                            pCommandBuffers);

      /* From the Vulkan 1.0.66 spec:
       *
       * "vkAllocateCommandBuffers can be used to create multiple
       *  command buffers. If the creation of any of those command
       *  buffers fails, the implementation must destroy all
       *  successfully created command buffer objects from this
       *  command, set all entries of the pCommandBuffers array to
       *  NULL and return the error."
       */
      memset(pCommandBuffers, 0,
             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
   }

   return result;
}
void
tu_FreeCommandBuffers(VkDevice device,
                      VkCommandPool commandPool,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (cmd_buffer) {
         if (cmd_buffer->pool) {
            list_del(&cmd_buffer->pool_link);
            list_addtail(&cmd_buffer->pool_link,
                         &cmd_buffer->pool->free_cmd_buffers);
         } else
            tu_cmd_buffer_destroy(cmd_buffer);
      }
   }
}
VkResult
tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
                      VkCommandBufferResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   return tu_reset_cmd_buffer(cmd_buffer);
}
/* Initialize the cache, assuming all necessary flushes have happened but *not*
 * invalidations.
 */
static void
tu_cache_init(struct tu_cache_state *cache)
{
   cache->flush_bits = 0;
   cache->pending_flush_bits = TU_CMD_FLAG_ALL_INVALIDATE;
}
VkResult
tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
                      const VkCommandBufferBeginInfo *pBeginInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   VkResult result = VK_SUCCESS;

   if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
      /* If the command buffer has already been reset with
       * vkResetCommandBuffer, no need to do it again.
       */
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
   cmd_buffer->state.index_size = 0xff; /* dirty restart index */

   tu_cache_init(&cmd_buffer->state.cache);
   tu_cache_init(&cmd_buffer->state.renderpass_cache);
   cmd_buffer->usage_flags = pBeginInfo->flags;

   tu_cs_begin(&cmd_buffer->cs);
   tu_cs_begin(&cmd_buffer->draw_cs);
   tu_cs_begin(&cmd_buffer->draw_epilogue_cs);

   /* setup initial configuration into command buffer */
   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      switch (cmd_buffer->queue_family_index) {
      case TU_QUEUE_GENERAL:
         tu6_init_hw(cmd_buffer, &cmd_buffer->cs);
         break;
      default:
         break;
      }
   } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
      if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
         assert(pBeginInfo->pInheritanceInfo);
         cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
         cmd_buffer->state.subpass =
            &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
      }

      /* When executing in the middle of another command buffer, the CCU
       * state is unknown.
       */
      cmd_buffer->state.ccu_state = TU_CMD_CCU_UNKNOWN;
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;

   return VK_SUCCESS;
}
/* Sets vertex buffers to HW binding points. We emit VBs in SDS (so that bin
 * rendering can skip over unused state), so we need to collect all the
 * bindings together into a single state emit at draw time.
 */
void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                        uint32_t firstBinding,
                        uint32_t bindingCount,
                        const VkBuffer *pBuffers,
                        const VkDeviceSize *pOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   assert(firstBinding + bindingCount <= MAX_VBS);

   for (uint32_t i = 0; i < bindingCount; i++) {
      struct tu_buffer *buf = tu_buffer_from_handle(pBuffers[i]);

      cmd->state.vb.buffers[firstBinding + i] = buf;
      cmd->state.vb.offsets[firstBinding + i] = pOffsets[i];

      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
   }

   cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
}
void
tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
                      VkBuffer buffer,
                      VkDeviceSize offset,
                      VkIndexType indexType)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buf, buffer);

   uint32_t index_size, index_shift, restart_index;

   switch (indexType) {
   case VK_INDEX_TYPE_UINT16:
      index_size = INDEX4_SIZE_16_BIT;
      index_shift = 1;
      restart_index = 0xffff;
      break;
   case VK_INDEX_TYPE_UINT32:
      index_size = INDEX4_SIZE_32_BIT;
      index_shift = 2;
      restart_index = 0xffffffff;
      break;
   case VK_INDEX_TYPE_UINT8_EXT:
      index_size = INDEX4_SIZE_8_BIT;
      index_shift = 0;
      restart_index = 0xff;
      break;
   default:
      unreachable("invalid VkIndexType");
   }

   /* initialize/update the restart index */
   if (cmd->state.index_size != index_size)
      tu_cs_emit_regs(&cmd->draw_cs, A6XX_PC_RESTART_INDEX(restart_index));

   assert(buf->size >= offset);

   cmd->state.index_va = buf->bo->iova + buf->bo_offset + offset;
   cmd->state.max_index_count = (buf->size - offset) >> index_shift;
   cmd->state.index_size = index_size;
   cmd->state.index_shift = index_shift;

   tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
}
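
/* Worked example (invented values): binding a VK_INDEX_TYPE_UINT16 buffer of
 * size 1000 at offset 16 gives index_shift = 1, so
 * max_index_count = (1000 - 16) >> 1 = 492 16-bit indices, and index_va
 * points 16 bytes past the buffer's BO address.
 */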
void
tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                         VkPipelineBindPoint pipelineBindPoint,
                         VkPipelineLayout _layout,
                         uint32_t firstSet,
                         uint32_t descriptorSetCount,
                         const VkDescriptorSet *pDescriptorSets,
                         uint32_t dynamicOffsetCount,
                         const uint32_t *pDynamicOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
   unsigned dyn_idx = 0;

   struct tu_descriptor_state *descriptors_state =
      tu_get_descriptors_state(cmd, pipelineBindPoint);

   for (unsigned i = 0; i < descriptorSetCount; ++i) {
      unsigned idx = i + firstSet;
      TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);

      descriptors_state->sets[idx] = set;

      for (unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
         /* update the contents of the dynamic descriptor set */
         unsigned src_idx = j;
         unsigned dst_idx = j + layout->set[idx].dynamic_offset_start;
         assert(dyn_idx < dynamicOffsetCount);

         uint32_t *dst =
            &descriptors_state->dynamic_descriptors[dst_idx * A6XX_TEX_CONST_DWORDS];
         uint32_t *src =
            &set->dynamic_descriptors[src_idx * A6XX_TEX_CONST_DWORDS];
         uint32_t offset = pDynamicOffsets[dyn_idx];

         /* Patch the storage/uniform descriptors right away. */
         if (layout->set[idx].layout->dynamic_ubo & (1 << j)) {
            /* Note: we can assume here that the addition won't roll over and
             * change the SIZE field.
             */
            uint64_t va = src[0] | ((uint64_t)src[1] << 32);
            va += offset;
            dst[0] = va;
            dst[1] = va >> 32;
         } else {
            memcpy(dst, src, A6XX_TEX_CONST_DWORDS * 4);
            /* Note: A6XX_IBO_5_DEPTH is always 0 */
            uint64_t va = dst[4] | ((uint64_t)dst[5] << 32);
            va += offset;
            dst[4] = va;
            dst[5] = va >> 32;
         }
      }

      for (unsigned j = 0; j < set->layout->buffer_count; ++j) {
         if (set->buffers[j]) {
            tu_bo_list_add(&cmd->bo_list, set->buffers[j],
                           MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
         }
      }

      if (set->size > 0) {
         tu_bo_list_add(&cmd->bo_list, &set->pool->bo,
                        MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
      }
   }
   assert(dyn_idx == dynamicOffsetCount);

   uint32_t sp_bindless_base_reg, hlsq_bindless_base_reg, hlsq_update_value;
   uint64_t addr[MAX_SETS + 1] = {};

   for (uint32_t i = 0; i < MAX_SETS; i++) {
      struct tu_descriptor_set *set = descriptors_state->sets[i];
      if (set)
         addr[i] = set->va | 3;
   }

   if (layout->dynamic_offset_count) {
      /* allocate and fill out dynamic descriptor set */
      struct tu_cs_memory dynamic_desc_set;
      VkResult result = tu_cs_alloc(&cmd->sub_cs, layout->dynamic_offset_count,
                                    A6XX_TEX_CONST_DWORDS, &dynamic_desc_set);
      assert(result == VK_SUCCESS);

      memcpy(dynamic_desc_set.map, descriptors_state->dynamic_descriptors,
             layout->dynamic_offset_count * A6XX_TEX_CONST_DWORDS * 4);
      addr[MAX_SETS] = dynamic_desc_set.iova | 3;
   }

   if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
      sp_bindless_base_reg = REG_A6XX_SP_BINDLESS_BASE(0);
      hlsq_bindless_base_reg = REG_A6XX_HLSQ_BINDLESS_BASE(0);
      hlsq_update_value = 0x7c000;

      cmd->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS | TU_CMD_DIRTY_SHADER_CONSTS;
   } else {
      assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE);

      sp_bindless_base_reg = REG_A6XX_SP_CS_BINDLESS_BASE(0);
      hlsq_bindless_base_reg = REG_A6XX_HLSQ_CS_BINDLESS_BASE(0);
      hlsq_update_value = 0x3e00;

      cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS;
   }

   struct tu_cs cs;
   tu_cs_begin_sub_stream(&cmd->sub_cs, 24, &cs);

   tu_cs_emit_pkt4(&cs, sp_bindless_base_reg, 10);
   tu_cs_emit_array(&cs, (const uint32_t*) addr, 10);
   tu_cs_emit_pkt4(&cs, hlsq_bindless_base_reg, 10);
   tu_cs_emit_array(&cs, (const uint32_t*) addr, 10);
   tu_cs_emit_regs(&cs, A6XX_HLSQ_UPDATE_CNTL(.dword = hlsq_update_value));

   struct tu_cs_entry ib = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
   if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
      tu_cs_emit_pkt7(&cmd->draw_cs, CP_SET_DRAW_STATE, 3);
      tu_cs_emit_sds_ib(&cmd->draw_cs, TU_DRAW_STATE_DESC_SETS, ib);
      cmd->state.desc_sets_ib = ib;
   } else {
      /* note: for compute we could emit directly, instead of a CP_INDIRECT,
       * however the blob uses draw states for compute
       */
      tu_cs_emit_ib(&cmd->cs, &ib);
   }
}
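
/* Layout of addr[] above, as far as can be inferred from the emit: one
 * 64-bit bindless base per bound descriptor set plus a final slot for the
 * driver-allocated dynamic descriptor set, each tagged with "| 3" in the
 * low bits. The two 10-dword pkt4 writes mirror the same (MAX_SETS + 1)
 * qwords into the SP and HLSQ copies of the bindless base registers, and
 * HLSQ_UPDATE_CNTL presumably tells the HW which bases were updated.
 */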
void tu_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer,
                                           uint32_t firstBinding,
                                           uint32_t bindingCount,
                                           const VkBuffer *pBuffers,
                                           const VkDeviceSize *pOffsets,
                                           const VkDeviceSize *pSizes)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *cs = &cmd->draw_cs;

   /* using COND_REG_EXEC for xfb commands matches the blob behavior;
    * presumably there isn't any benefit to using a draw state when the
    * condition is (SYSMEM | BINNING)
    */
   tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
                          CP_COND_REG_EXEC_0_SYSMEM |
                          CP_COND_REG_EXEC_0_BINNING);

   for (uint32_t i = 0; i < bindingCount; i++) {
      TU_FROM_HANDLE(tu_buffer, buf, pBuffers[i]);
      uint64_t iova = buf->bo->iova + pOffsets[i];
      uint32_t size = buf->bo->size - pOffsets[i];
      uint32_t idx = i + firstBinding;

      if (pSizes && pSizes[i] != VK_WHOLE_SIZE)
         size = pSizes[i];

      /* BUFFER_BASE is 32-byte aligned, add remaining offset to BUFFER_OFFSET */
      uint32_t offset = iova & 0x1f;
      iova &= ~(uint64_t) 0x1f;

      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_BASE(idx), 3);
      tu_cs_emit_qw(cs, iova);
      tu_cs_emit(cs, size + offset);

      cmd->state.streamout_offset[idx] = offset;

      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_WRITE);
   }

   tu_cond_exec_end(cs);
}
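
/* The low 5 bits of the buffer address are remembered in
 * cmd->state.streamout_offset[] rather than programmed into BUFFER_BASE;
 * CmdBeginTransformFeedbackEXT below re-applies them through
 * VPC_SO_BUFFER_OFFSET so that streamout writes start at the requested,
 * possibly unaligned, byte offset.
 */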
void
tu_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer,
                                uint32_t firstCounterBuffer,
                                uint32_t counterBufferCount,
                                const VkBuffer *pCounterBuffers,
                                const VkDeviceSize *pCounterBufferOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *cs = &cmd->draw_cs;

   tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
                          CP_COND_REG_EXEC_0_SYSMEM |
                          CP_COND_REG_EXEC_0_BINNING);

   /* TODO: only update offset for active buffers */
   for (uint32_t i = 0; i < IR3_MAX_SO_BUFFERS; i++)
      tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_OFFSET(i, cmd->state.streamout_offset[i]));

   for (uint32_t i = 0; i < counterBufferCount; i++) {
      uint32_t idx = firstCounterBuffer + i;
      uint32_t offset = cmd->state.streamout_offset[idx];

      if (!pCounterBuffers[i])
         continue;

      TU_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]);

      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);

      tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
      tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(idx)) |
                     CP_MEM_TO_REG_0_UNK31 |
                     CP_MEM_TO_REG_0_CNT(1));
      tu_cs_emit_qw(cs, buf->bo->iova + pCounterBufferOffsets[i]);

      tu_cs_emit_pkt7(cs, CP_REG_RMW, 3);
      tu_cs_emit(cs, CP_REG_RMW_0_DST_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(idx)) |
                     CP_REG_RMW_0_SRC1_ADD);
      tu_cs_emit_qw(cs, 0xffffffff);
      tu_cs_emit_qw(cs, offset);
   }

   tu_cond_exec_end(cs);
}
void tu_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer,
                                   uint32_t firstCounterBuffer,
                                   uint32_t counterBufferCount,
                                   const VkBuffer *pCounterBuffers,
                                   const VkDeviceSize *pCounterBufferOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *cs = &cmd->draw_cs;

   tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
                          CP_COND_REG_EXEC_0_SYSMEM |
                          CP_COND_REG_EXEC_0_BINNING);

   /* TODO: only flush buffers that need to be flushed */
   for (uint32_t i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
      /* note: FLUSH_BASE is always the same, so it could go in init_hw()? */
      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_FLUSH_BASE(i), 2);
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(flush_base[i]));
      tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i);
   }

   for (uint32_t i = 0; i < counterBufferCount; i++) {
      uint32_t idx = firstCounterBuffer + i;
      uint32_t offset = cmd->state.streamout_offset[idx];

      if (!pCounterBuffers[i])
         continue;

      TU_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]);

      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_WRITE);

      /* VPC_SO_FLUSH_BASE has a dwords counter, but the counter should be in bytes */
      tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
      tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
                     CP_MEM_TO_REG_0_SHIFT_BY_2 |
                     CP_MEM_TO_REG_0_UNK31 |
                     CP_MEM_TO_REG_0_CNT(1));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(flush_base[idx]));

      tu_cs_emit_pkt7(cs, CP_REG_RMW, 3);
      tu_cs_emit(cs, CP_REG_RMW_0_DST_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
                     CP_REG_RMW_0_SRC1_ADD);
      tu_cs_emit_qw(cs, 0xffffffff);
      tu_cs_emit_qw(cs, -offset);

      tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
      tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
                     CP_REG_TO_MEM_0_CNT(1));
      tu_cs_emit_qw(cs, buf->bo->iova + pCounterBufferOffsets[i]);
   }

   tu_cond_exec_end(cs);

   cmd->state.xfb_used = true;
}
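
/* Counter math in the loop above, for reference: FLUSH_SO_i dumps the
 * per-buffer write pointer (in dwords) to the scratch BO, CP_MEM_TO_REG
 * with SHIFT_BY_2 loads it back as a byte count, CP_REG_RMW adds -offset so
 * the value is relative to where this transform feedback section started,
 * and CP_REG_TO_MEM stores the result into the application's counter
 * buffer.
 */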
void
tu_CmdPushConstants(VkCommandBuffer commandBuffer,
                    VkPipelineLayout layout,
                    VkShaderStageFlags stageFlags,
                    uint32_t offset,
                    uint32_t size,
                    const void *pValues)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   memcpy((void*) cmd->push_constants + offset, pValues, size);
   cmd->state.dirty |= TU_CMD_DIRTY_SHADER_CONSTS;
}
/* Flush everything which has been made available but we haven't actually
 * flushed yet.
 */
static void
tu_flush_all_pending(struct tu_cache_state *cache)
{
   cache->flush_bits |= cache->pending_flush_bits & TU_CMD_FLAG_ALL_FLUSH;
   cache->pending_flush_bits &= ~TU_CMD_FLAG_ALL_FLUSH;
}
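
/* "Pending" here means a write has been made available (its flush has been
 * noted in pending_flush_bits) but the flush has not yet been emitted into
 * a command stream; tu_flush_for_access() further below decides when
 * pending bits have to be promoted into flush_bits for an actual consumer.
 */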
VkResult
tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   /* We currently flush CCU at the end of the command buffer, like
    * what the blob does. There's implicit synchronization around every
    * vkQueueSubmit, but the kernel only flushes the UCHE, and we don't
    * know yet if this command buffer will be the last in the submit so we
    * have to defensively flush everything else.
    *
    * TODO: We could definitely do better than this, since these flushes
    * aren't required by Vulkan, but we'd need kernel support to do that.
    * Ideally, we'd like the kernel to flush everything afterwards, so that we
    * wouldn't have to do any flushes here, and when submitting multiple
    * command buffers there wouldn't be any unnecessary flushes in between.
    */
   if (cmd_buffer->state.pass) {
      tu_flush_all_pending(&cmd_buffer->state.renderpass_cache);
      tu_emit_cache_flush_renderpass(cmd_buffer, &cmd_buffer->draw_cs);
   } else {
      tu_flush_all_pending(&cmd_buffer->state.cache);
      cmd_buffer->state.cache.flush_bits |=
         TU_CMD_FLAG_CCU_FLUSH_COLOR |
         TU_CMD_FLAG_CCU_FLUSH_DEPTH;
      tu_emit_cache_flush(cmd_buffer, &cmd_buffer->cs);
   }

   tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
                  MSM_SUBMIT_BO_WRITE);

   if (cmd_buffer->use_vsc_data) {
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_draw_strm,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_prim_strm,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
   }

   tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->device->border_color,
                  MSM_SUBMIT_BO_READ);

   for (uint32_t i = 0; i < cmd_buffer->draw_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   for (uint32_t i = 0; i < cmd_buffer->draw_epilogue_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_epilogue_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   for (uint32_t i = 0; i < cmd_buffer->sub_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->sub_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   tu_cs_end(&cmd_buffer->cs);
   tu_cs_end(&cmd_buffer->draw_cs);
   tu_cs_end(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;

   return cmd_buffer->record_result;
}
static struct tu_cs
tu_cmd_dynamic_state(struct tu_cmd_buffer *cmd, uint32_t id, uint32_t size)
{
   struct tu_cs cs;
   struct tu_cs_memory memory;

   /* TODO: share this logic with tu_pipeline_static_state */
   tu_cs_alloc(&cmd->sub_cs, size, 1, &memory);
   tu_cs_init_external(&cs, memory.map, memory.map + size);
   tu_cs_begin(&cs);
   tu_cs_reserve_space(&cs, size);

   assert(id < ARRAY_SIZE(cmd->state.dynamic_state));
   cmd->state.dynamic_state[id].iova = memory.iova;
   cmd->state.dynamic_state[id].size = size;

   tu_cs_emit_pkt7(&cmd->draw_cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit_draw_state(&cmd->draw_cs, TU_DRAW_STATE_DYNAMIC + id, cmd->state.dynamic_state[id]);

   return cs;
}
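
/* Helper used by the vkCmdSet* entry points below: it allocates "size"
 * dwords from sub_cs, points cmd->state.dynamic_state[id] at that memory,
 * registers the group via CP_SET_DRAW_STATE, and returns a tu_cs that the
 * caller fills in with the actual register writes.
 */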
void
tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
                   VkPipelineBindPoint pipelineBindPoint,
                   VkPipeline _pipeline)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);

   for (uint32_t i = 0; i < pipeline->cs.bo_count; i++) {
      tu_bo_list_add(&cmd->bo_list, pipeline->cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) {
      cmd->state.compute_pipeline = pipeline;
      cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_PIPELINE;
      return;
   }

   assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS);

   cmd->state.pipeline = pipeline;
   cmd->state.dirty |= TU_CMD_DIRTY_SHADER_CONSTS;

   struct tu_cs *cs = &cmd->draw_cs;
   uint32_t mask = ~pipeline->dynamic_state_mask & BITFIELD_MASK(TU_DYNAMIC_STATE_COUNT);

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (7 + util_bitcount(mask)));
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state_ib);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state_ib);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI, pipeline->vi.state_ib);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state_ib);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_RAST, pipeline->rast.state_ib);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS, pipeline->ds.state_ib);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_BLEND, pipeline->blend.state_ib);

   for_each_bit(i, mask)
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DYNAMIC + i, pipeline->dynamic_state[i]);

   /* If the new pipeline requires more VBs than we had previously set up, we
    * need to re-emit them in SDS. If it requires the same set or fewer, we
    * can just re-use the old SDS.
    */
   if (pipeline->vi.bindings_used & ~cmd->vertex_bindings_set)
      cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;

   /* If the pipeline needs a dynamic descriptor, re-emit descriptor sets */
   if (pipeline->layout->dynamic_offset_count)
      cmd->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS;

   /* The dynamic line width state depends on the pipeline's gras_su_cntl,
    * so the dynamic state IB must be updated when the pipeline changes.
    */
   if (pipeline->dynamic_state_mask & BIT(VK_DYNAMIC_STATE_LINE_WIDTH)) {
      struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_LINE_WIDTH, 2);

      cmd->state.dynamic_gras_su_cntl &= A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
      cmd->state.dynamic_gras_su_cntl |= pipeline->gras_su_cntl;

      tu_cs_emit_regs(&cs, A6XX_GRAS_SU_CNTL(.dword = cmd->state.dynamic_gras_su_cntl));
   }
}
void
tu_CmdSetViewport(VkCommandBuffer commandBuffer,
                  uint32_t firstViewport,
                  uint32_t viewportCount,
                  const VkViewport *pViewports)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_VIEWPORT, 18);

   assert(firstViewport == 0 && viewportCount == 1);

   tu6_emit_viewport(&cs, pViewports);
}

void
tu_CmdSetScissor(VkCommandBuffer commandBuffer,
                 uint32_t firstScissor,
                 uint32_t scissorCount,
                 const VkRect2D *pScissors)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_SCISSOR, 3);

   assert(firstScissor == 0 && scissorCount == 1);

   tu6_emit_scissor(&cs, pScissors);
}

void
tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_LINE_WIDTH, 2);

   cmd->state.dynamic_gras_su_cntl &= ~A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
   cmd->state.dynamic_gras_su_cntl |= A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(lineWidth / 2.0f);

   tu_cs_emit_regs(&cs, A6XX_GRAS_SU_CNTL(.dword = cmd->state.dynamic_gras_su_cntl));
}

void
tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
                   float depthBiasConstantFactor,
                   float depthBiasClamp,
                   float depthBiasSlopeFactor)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_DEPTH_BIAS, 4);

   tu6_emit_depth_bias(&cs, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
}

void
tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
                        const float blendConstants[4])
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_BLEND_CONSTANTS, 5);

   tu_cs_emit_pkt4(&cs, REG_A6XX_RB_BLEND_RED_F32, 4);
   tu_cs_emit_array(&cs, (const uint32_t *) blendConstants, 4);
}

void
tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
                     float minDepthBounds,
                     float maxDepthBounds)
{
}
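
/* The per-state sizes passed to tu_cmd_dynamic_state() above (18 dwords for
 * viewport, 3 for scissor, 2 for line width, 4 for depth bias, 5 for blend
 * constants) appear to be upper bounds on the number of dwords each
 * tu6_emit_* helper or register write produces, since the allocation is
 * consumed as a small fixed-size external CS.
 */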
static void
update_stencil_mask(uint32_t *value, VkStencilFaceFlags face, uint32_t mask)
{
   if (face & VK_STENCIL_FACE_FRONT_BIT)
      *value |= A6XX_RB_STENCILMASK_MASK(mask);
   if (face & VK_STENCIL_FACE_BACK_BIT)
      *value |= A6XX_RB_STENCILMASK_BFMASK(mask);
}

void
tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
                            VkStencilFaceFlags faceMask,
                            uint32_t compareMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK, 2);

   update_stencil_mask(&cmd->state.dynamic_stencil_mask, faceMask, compareMask);

   tu_cs_emit_regs(&cs, A6XX_RB_STENCILMASK(.dword = cmd->state.dynamic_stencil_mask));
}

void
tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t writeMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, 2);

   update_stencil_mask(&cmd->state.dynamic_stencil_wrmask, faceMask, writeMask);

   tu_cs_emit_regs(&cs, A6XX_RB_STENCILWRMASK(.dword = cmd->state.dynamic_stencil_wrmask));
}

void
tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t reference)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_REFERENCE, 2);

   update_stencil_mask(&cmd->state.dynamic_stencil_ref, faceMask, reference);

   tu_cs_emit_regs(&cs, A6XX_RB_STENCILREF(.dword = cmd->state.dynamic_stencil_ref));
}

void
tu_CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
                            const VkSampleLocationsInfoEXT* pSampleLocationsInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, TU_DYNAMIC_STATE_SAMPLE_LOCATIONS, 9);

   assert(pSampleLocationsInfo);

   tu6_emit_sample_locations(&cs, pSampleLocationsInfo);
}
static void
tu_flush_for_access(struct tu_cache_state *cache,
                    enum tu_cmd_access_mask src_mask,
                    enum tu_cmd_access_mask dst_mask)
{
   enum tu_cmd_flush_bits flush_bits = 0;

   if (src_mask & TU_ACCESS_SYSMEM_WRITE) {
      cache->pending_flush_bits |= TU_CMD_FLAG_ALL_INVALIDATE;
   }

#define SRC_FLUSH(domain, flush, invalidate)                          \
   if (src_mask & TU_ACCESS_##domain##_WRITE) {                       \
      cache->pending_flush_bits |= TU_CMD_FLAG_##flush |              \
         (TU_CMD_FLAG_ALL_INVALIDATE & ~TU_CMD_FLAG_##invalidate);    \
   }

   SRC_FLUSH(UCHE, CACHE_FLUSH, CACHE_INVALIDATE)
   SRC_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
   SRC_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)

#undef SRC_FLUSH

#define SRC_INCOHERENT_FLUSH(domain, flush, invalidate)               \
   if (src_mask & TU_ACCESS_##domain##_INCOHERENT_WRITE) {            \
      flush_bits |= TU_CMD_FLAG_##flush;                              \
      cache->pending_flush_bits |=                                    \
         (TU_CMD_FLAG_ALL_INVALIDATE & ~TU_CMD_FLAG_##invalidate);    \
   }

   SRC_INCOHERENT_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
   SRC_INCOHERENT_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)

#undef SRC_INCOHERENT_FLUSH

   if (dst_mask & (TU_ACCESS_SYSMEM_READ | TU_ACCESS_SYSMEM_WRITE)) {
      flush_bits |= cache->pending_flush_bits & TU_CMD_FLAG_ALL_FLUSH;
   }

#define DST_FLUSH(domain, flush, invalidate)                          \
   if (dst_mask & (TU_ACCESS_##domain##_READ |                        \
                   TU_ACCESS_##domain##_WRITE)) {                     \
      flush_bits |= cache->pending_flush_bits &                       \
         (TU_CMD_FLAG_##invalidate |                                  \
          (TU_CMD_FLAG_ALL_FLUSH & ~TU_CMD_FLAG_##flush));            \
   }

   DST_FLUSH(UCHE, CACHE_FLUSH, CACHE_INVALIDATE)
   DST_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
   DST_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)

#undef DST_FLUSH

#define DST_INCOHERENT_FLUSH(domain, flush, invalidate)               \
   if (dst_mask & (TU_ACCESS_##domain##_INCOHERENT_READ |             \
                   TU_ACCESS_##domain##_INCOHERENT_WRITE)) {          \
      flush_bits |= TU_CMD_FLAG_##invalidate |                        \
         (cache->pending_flush_bits &                                 \
          (TU_CMD_FLAG_ALL_FLUSH & ~TU_CMD_FLAG_##flush));            \
   }

   DST_INCOHERENT_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
   DST_INCOHERENT_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)

#undef DST_INCOHERENT_FLUSH

   if (dst_mask & TU_ACCESS_WFI_READ) {
      flush_bits |= TU_CMD_FLAG_WFI;
   }

   cache->flush_bits |= flush_bits;
   cache->pending_flush_bits &= ~flush_bits;
}
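
/* Worked example of the model above: a sysmem-mode color attachment write
 * sets TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE as a src bit, which immediately
 * queues CCU_FLUSH_COLOR and leaves the "invalidate everything else" bits
 * pending; a later UCHE read (e.g. a sampled image) then pulls
 * CACHE_INVALIDATE out of pending_flush_bits via DST_FLUSH, so only the
 * flushes and invalidates an actual consumer needs are ever emitted.
 */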
2465 static enum tu_cmd_access_mask
2466 vk2tu_access(VkAccessFlags flags
, bool gmem
)
2468 enum tu_cmd_access_mask mask
= 0;
2470 /* If the GPU writes a buffer that is then read by an indirect draw
2471 * command, we theoretically need a WFI + WAIT_FOR_ME combination to
2472 * wait for the writes to complete. The WAIT_FOR_ME is performed as part
2473 * of the draw by the firmware, so we just need to execute a WFI.
2476 (VK_ACCESS_INDIRECT_COMMAND_READ_BIT
|
2477 VK_ACCESS_MEMORY_READ_BIT
)) {
2478 mask
|= TU_ACCESS_WFI_READ
;
2482 (VK_ACCESS_INDIRECT_COMMAND_READ_BIT
| /* Read performed by CP */
2483 VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT
| /* Read performed by CP, I think */
2484 VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT
| /* Read performed by CP */
2485 VK_ACCESS_HOST_READ_BIT
| /* sysmem by definition */
2486 VK_ACCESS_MEMORY_READ_BIT
)) {
2487 mask
|= TU_ACCESS_SYSMEM_READ
;
2491 (VK_ACCESS_HOST_WRITE_BIT
|
2492 VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT
| /* Write performed by CP, I think */
2493 VK_ACCESS_MEMORY_WRITE_BIT
)) {
2494 mask
|= TU_ACCESS_SYSMEM_WRITE
;
2498 (VK_ACCESS_INDEX_READ_BIT
| /* Read performed by PC, I think */
2499 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT
| /* Read performed by VFD */
2500 VK_ACCESS_UNIFORM_READ_BIT
| /* Read performed by SP */
2501 /* TODO: Is there a no-cache bit for textures so that we can ignore
2504 VK_ACCESS_INPUT_ATTACHMENT_READ_BIT
| /* Read performed by TP */
2505 VK_ACCESS_SHADER_READ_BIT
| /* Read performed by SP/TP */
2506 VK_ACCESS_MEMORY_READ_BIT
)) {
2507 mask
|= TU_ACCESS_UCHE_READ
;
2511 (VK_ACCESS_SHADER_WRITE_BIT
| /* Write performed by SP */
2512 VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT
| /* Write performed by VPC */
2513 VK_ACCESS_MEMORY_WRITE_BIT
)) {
2514 mask
|= TU_ACCESS_UCHE_WRITE
;
2517 /* When using GMEM, the CCU is always flushed automatically to GMEM, and
2518 * then GMEM is flushed to sysmem. Furthermore, we already had to flush any
2519 * previous writes in sysmem mode when transitioning to GMEM. Therefore we
2520 * can ignore CCU and pretend that color attachments and transfers use
2525 (VK_ACCESS_COLOR_ATTACHMENT_READ_BIT
|
2526 VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT
|
2527 VK_ACCESS_MEMORY_READ_BIT
)) {
2529 mask
|= TU_ACCESS_SYSMEM_READ
;
2531 mask
|= TU_ACCESS_CCU_COLOR_INCOHERENT_READ
;
2535 (VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
|
2536 VK_ACCESS_MEMORY_READ_BIT
)) {
2538 mask
|= TU_ACCESS_SYSMEM_READ
;
2540 mask
|= TU_ACCESS_CCU_DEPTH_INCOHERENT_READ
;
2544 (VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
|
2545 VK_ACCESS_MEMORY_WRITE_BIT
)) {
2547 mask
|= TU_ACCESS_SYSMEM_WRITE
;
2549 mask
|= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE
;
2554 (VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
|
2555 VK_ACCESS_MEMORY_WRITE_BIT
)) {
2557 mask
|= TU_ACCESS_SYSMEM_WRITE
;
2559 mask
|= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE
;
2563 /* When the dst access is a transfer read/write, it seems we sometimes need
2564 * to insert a WFI after any flushes, to guarantee that the flushes finish
2565 * before the 2D engine starts. However the opposite (i.e. a WFI after
2566 * CP_BLIT and before any subsequent flush) does not seem to be needed, and
2567 * the blob doesn't emit such a WFI.
2571 (VK_ACCESS_TRANSFER_WRITE_BIT
|
2572 VK_ACCESS_MEMORY_WRITE_BIT
)) {
2574 mask
|= TU_ACCESS_SYSMEM_WRITE
;
2576 mask
|= TU_ACCESS_CCU_COLOR_WRITE
;
2578 mask
|= TU_ACCESS_WFI_READ
;
2582 (VK_ACCESS_TRANSFER_READ_BIT
| /* Access performed by TP */
2583 VK_ACCESS_MEMORY_READ_BIT
)) {
2584 mask
|= TU_ACCESS_UCHE_READ
| TU_ACCESS_WFI_READ
;
2592 tu_CmdExecuteCommands(VkCommandBuffer commandBuffer
,
2593 uint32_t commandBufferCount
,
2594 const VkCommandBuffer
*pCmdBuffers
)
2596 TU_FROM_HANDLE(tu_cmd_buffer
, cmd
, commandBuffer
);
2599 assert(commandBufferCount
> 0);
2601 /* Emit any pending flushes. */
2602 if (cmd
->state
.pass
) {
2603 tu_flush_all_pending(&cmd
->state
.renderpass_cache
);
2604 tu_emit_cache_flush_renderpass(cmd
, &cmd
->draw_cs
);
2606 tu_flush_all_pending(&cmd
->state
.cache
);
2607 tu_emit_cache_flush(cmd
, &cmd
->cs
);
2610 for (uint32_t i
= 0; i
< commandBufferCount
; i
++) {
2611 TU_FROM_HANDLE(tu_cmd_buffer
, secondary
, pCmdBuffers
[i
]);
2613 result
= tu_bo_list_merge(&cmd
->bo_list
, &secondary
->bo_list
);
2614 if (result
!= VK_SUCCESS
) {
2615 cmd
->record_result
= result
;
2619 if (secondary
->usage_flags
&
2620 VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT
) {
2621 assert(tu_cs_is_empty(&secondary
->cs
));
2623 result
= tu_cs_add_entries(&cmd
->draw_cs
, &secondary
->draw_cs
);
2624 if (result
!= VK_SUCCESS
) {
2625 cmd
->record_result
= result
;
2629 result
= tu_cs_add_entries(&cmd
->draw_epilogue_cs
,
2630 &secondary
->draw_epilogue_cs
);
2631 if (result
!= VK_SUCCESS
) {
2632 cmd
->record_result
= result
;
2636 if (secondary
->has_tess
)
2637 cmd
->has_tess
= true;
2639 assert(tu_cs_is_empty(&secondary
->draw_cs
));
2640 assert(tu_cs_is_empty(&secondary
->draw_epilogue_cs
));
2642 for (uint32_t j
= 0; j
< secondary
->cs
.bo_count
; j
++) {
2643 tu_bo_list_add(&cmd
->bo_list
, secondary
->cs
.bos
[j
],
2644 MSM_SUBMIT_BO_READ
| MSM_SUBMIT_BO_DUMP
);
2647 tu_cs_add_entries(&cmd
->cs
, &secondary
->cs
);
2650 cmd
->state
.index_size
= secondary
->state
.index_size
; /* for restart index update */
2652 cmd
->state
.dirty
= ~0u; /* TODO: set dirty only what needs to be */
2654 /* After executing secondary command buffers, there may have been arbitrary
2655 * flushes executed, so when we encounter a pipeline barrier with a
2656 * srcMask, we have to assume that we need to invalidate. Therefore we need
2657 * to re-initialize the cache with all pending invalidate bits set.
2659 if (cmd
->state
.pass
) {
2660 tu_cache_init(&cmd
->state
.renderpass_cache
);
2662 tu_cache_init(&cmd
->state
.cache
);
2667 tu_CreateCommandPool(VkDevice _device
,
2668 const VkCommandPoolCreateInfo
*pCreateInfo
,
2669 const VkAllocationCallbacks
*pAllocator
,
2670 VkCommandPool
*pCmdPool
)
2672 TU_FROM_HANDLE(tu_device
, device
, _device
);
2673 struct tu_cmd_pool
*pool
;
2675 pool
= vk_alloc2(&device
->alloc
, pAllocator
, sizeof(*pool
), 8,
2676 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
2678 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
2681 pool
->alloc
= *pAllocator
;
2683 pool
->alloc
= device
->alloc
;
2685 list_inithead(&pool
->cmd_buffers
);
2686 list_inithead(&pool
->free_cmd_buffers
);
2688 pool
->queue_family_index
= pCreateInfo
->queueFamilyIndex
;
2690 *pCmdPool
= tu_cmd_pool_to_handle(pool
);
2696 tu_DestroyCommandPool(VkDevice _device
,
2697 VkCommandPool commandPool
,
2698 const VkAllocationCallbacks
*pAllocator
)
2700 TU_FROM_HANDLE(tu_device
, device
, _device
);
2701 TU_FROM_HANDLE(tu_cmd_pool
, pool
, commandPool
);
2706 list_for_each_entry_safe(struct tu_cmd_buffer
, cmd_buffer
,
2707 &pool
->cmd_buffers
, pool_link
)
2709 tu_cmd_buffer_destroy(cmd_buffer
);
2712 list_for_each_entry_safe(struct tu_cmd_buffer
, cmd_buffer
,
2713 &pool
->free_cmd_buffers
, pool_link
)
2715 tu_cmd_buffer_destroy(cmd_buffer
);
2718 vk_free2(&device
->alloc
, pAllocator
, pool
);
2722 tu_ResetCommandPool(VkDevice device
,
2723 VkCommandPool commandPool
,
2724 VkCommandPoolResetFlags flags
)
2726 TU_FROM_HANDLE(tu_cmd_pool
, pool
, commandPool
);
2729 list_for_each_entry(struct tu_cmd_buffer
, cmd_buffer
, &pool
->cmd_buffers
,
2732 result
= tu_reset_cmd_buffer(cmd_buffer
);
2733 if (result
!= VK_SUCCESS
)
2741 tu_TrimCommandPool(VkDevice device
,
2742 VkCommandPool commandPool
,
2743 VkCommandPoolTrimFlags flags
)
2745 TU_FROM_HANDLE(tu_cmd_pool
, pool
, commandPool
);
2750 list_for_each_entry_safe(struct tu_cmd_buffer
, cmd_buffer
,
2751 &pool
->free_cmd_buffers
, pool_link
)
2753 tu_cmd_buffer_destroy(cmd_buffer
);
2758 tu_subpass_barrier(struct tu_cmd_buffer
*cmd_buffer
,
2759 const struct tu_subpass_barrier
*barrier
,
2762 /* Note: we don't know until the end of the subpass whether we'll use
2763 * sysmem, so assume sysmem here to be safe.
2765 struct tu_cache_state
*cache
=
2766 external
? &cmd_buffer
->state
.cache
: &cmd_buffer
->state
.renderpass_cache
;
2767 enum tu_cmd_access_mask src_flags
=
2768 vk2tu_access(barrier
->src_access_mask
, false);
2769 enum tu_cmd_access_mask dst_flags
=
2770 vk2tu_access(barrier
->dst_access_mask
, false);
2772 if (barrier
->incoherent_ccu_color
)
2773 src_flags
|= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE
;
2774 if (barrier
->incoherent_ccu_depth
)
2775 src_flags
|= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE
;
2777 tu_flush_for_access(cache
, src_flags
, dst_flags
);
2781 tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer
,
2782 const VkRenderPassBeginInfo
*pRenderPassBegin
,
2783 VkSubpassContents contents
)
2785 TU_FROM_HANDLE(tu_cmd_buffer
, cmd
, commandBuffer
);
2786 TU_FROM_HANDLE(tu_render_pass
, pass
, pRenderPassBegin
->renderPass
);
2787 TU_FROM_HANDLE(tu_framebuffer
, fb
, pRenderPassBegin
->framebuffer
);
2789 cmd
->state
.pass
= pass
;
2790 cmd
->state
.subpass
= pass
->subpasses
;
2791 cmd
->state
.framebuffer
= fb
;
2793 tu_cmd_update_tiling_config(cmd
, &pRenderPassBegin
->renderArea
);
2794 tu_cmd_prepare_tile_store_ib(cmd
);
2796 /* Note: because this is external, any flushes will happen before draw_cs
2797 * gets called. However deferred flushes could have to happen later as part
2800 tu_subpass_barrier(cmd
, &pass
->subpasses
[0].start_barrier
, true);
2801 cmd
->state
.renderpass_cache
.pending_flush_bits
=
2802 cmd
->state
.cache
.pending_flush_bits
;
2803 cmd
->state
.renderpass_cache
.flush_bits
= 0;
2805 tu_emit_renderpass_begin(cmd
, pRenderPassBegin
);
2807 tu6_emit_zs(cmd
, cmd
->state
.subpass
, &cmd
->draw_cs
);
2808 tu6_emit_mrt(cmd
, cmd
->state
.subpass
, &cmd
->draw_cs
);
2809 tu6_emit_msaa(&cmd
->draw_cs
, cmd
->state
.subpass
->samples
);
2810 tu6_emit_render_cntl(cmd
, cmd
->state
.subpass
, &cmd
->draw_cs
, false);
2812 tu_set_input_attachments(cmd
, cmd
->state
.subpass
);
2814 for (uint32_t i
= 0; i
< fb
->attachment_count
; ++i
) {
2815 const struct tu_image_view
*iview
= fb
->attachments
[i
].attachment
;
2816 tu_bo_list_add(&cmd
->bo_list
, iview
->image
->bo
,
2817 MSM_SUBMIT_BO_READ
| MSM_SUBMIT_BO_WRITE
);
2820 cmd
->state
.dirty
|= TU_CMD_DIRTY_DRAW_STATE
;
2824 tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer
,
2825 const VkRenderPassBeginInfo
*pRenderPassBeginInfo
,
2826 const VkSubpassBeginInfoKHR
*pSubpassBeginInfo
)
2828 tu_CmdBeginRenderPass(commandBuffer
, pRenderPassBeginInfo
,
2829 pSubpassBeginInfo
->contents
);
2833 tu_CmdNextSubpass(VkCommandBuffer commandBuffer
, VkSubpassContents contents
)
2835 TU_FROM_HANDLE(tu_cmd_buffer
, cmd
, commandBuffer
);
2836 const struct tu_render_pass
*pass
= cmd
->state
.pass
;
2837 struct tu_cs
*cs
= &cmd
->draw_cs
;
2839 const struct tu_subpass
*subpass
= cmd
->state
.subpass
++;
2841 tu_cond_exec_start(cs
, CP_COND_EXEC_0_RENDER_MODE_GMEM
);
2843 if (subpass
->resolve_attachments
) {
2844 tu6_emit_blit_scissor(cmd
, cs
, true);
2846 for (unsigned i
= 0; i
< subpass
->color_count
; i
++) {
2847 uint32_t a
= subpass
->resolve_attachments
[i
].attachment
;
2848 if (a
== VK_ATTACHMENT_UNUSED
)
2851 tu_store_gmem_attachment(cmd
, cs
, a
,
2852 subpass
->color_attachments
[i
].attachment
);
2854 if (pass
->attachments
[a
].gmem_offset
< 0)
2858 * check if the resolved attachment is needed by later subpasses,
2859 * if it is, should be doing a GMEM->GMEM resolve instead of GMEM->MEM->GMEM..
2861 tu_finishme("missing GMEM->GMEM resolve path\n");
2862 tu_load_gmem_attachment(cmd
, cs
, a
, true);
2866 tu_cond_exec_end(cs
);
2868 tu_cond_exec_start(cs
, CP_COND_EXEC_0_RENDER_MODE_SYSMEM
);
2870 tu6_emit_sysmem_resolves(cmd
, cs
, subpass
);
2872 tu_cond_exec_end(cs
);
2874 /* Handle dependencies for the next subpass */
2875 tu_subpass_barrier(cmd
, &cmd
->state
.subpass
->start_barrier
, false);
2877 /* emit mrt/zs/msaa/ubwc state for the subpass that is starting */
2878 tu6_emit_zs(cmd
, cmd
->state
.subpass
, cs
);
2879 tu6_emit_mrt(cmd
, cmd
->state
.subpass
, cs
);
2880 tu6_emit_msaa(cs
, cmd
->state
.subpass
->samples
);
2881 tu6_emit_render_cntl(cmd
, cmd
->state
.subpass
, cs
, false);
2883 tu_set_input_attachments(cmd
, cmd
->state
.subpass
);
2887 tu_CmdNextSubpass2(VkCommandBuffer commandBuffer
,
2888 const VkSubpassBeginInfoKHR
*pSubpassBeginInfo
,
2889 const VkSubpassEndInfoKHR
*pSubpassEndInfo
)
2891 tu_CmdNextSubpass(commandBuffer
, pSubpassBeginInfo
->contents
);
2895 tu6_emit_user_consts(struct tu_cs
*cs
, const struct tu_pipeline
*pipeline
,
2896 struct tu_descriptor_state
*descriptors_state
,
2897 gl_shader_stage type
,
2898 uint32_t *push_constants
)
2900 const struct tu_program_descriptor_linkage
*link
=
2901 &pipeline
->program
.link
[type
];
2902 const struct ir3_ubo_analysis_state
*state
= &link
->const_state
.ubo_state
;
2904 if (link
->push_consts
.count
> 0) {
2905 unsigned num_units
= link
->push_consts
.count
;
2906 unsigned offset
= link
->push_consts
.lo
;
2907 tu_cs_emit_pkt7(cs
, tu6_stage2opcode(type
), 3 + num_units
* 4);
2908 tu_cs_emit(cs
, CP_LOAD_STATE6_0_DST_OFF(offset
) |
2909 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS
) |
2910 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT
) |
2911 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type
)) |
2912 CP_LOAD_STATE6_0_NUM_UNIT(num_units
));
2915 for (unsigned i
= 0; i
< num_units
* 4; i
++)
2916 tu_cs_emit(cs
, push_constants
[i
+ offset
* 4]);
2919 for (uint32_t i
= 0; i
< state
->num_enabled
; i
++) {
2920 uint32_t size
= state
->range
[i
].end
- state
->range
[i
].start
;
2921 uint32_t offset
= state
->range
[i
].start
;
2923 /* and even if the start of the const buffer is before
2924 * first_immediate, the end may not be:
2926 size
= MIN2(size
, (16 * link
->constlen
) - state
->range
[i
].offset
);
2931 /* things should be aligned to vec4: */
2932 debug_assert((state
->range
[i
].offset
% 16) == 0);
2933 debug_assert((size
% 16) == 0);
2934 debug_assert((offset
% 16) == 0);
2936 /* Dig out the descriptor from the descriptor state and read the VA from
2939 assert(state
->range
[i
].ubo
.bindless
);
2940 uint32_t *base
= state
->range
[i
].ubo
.bindless_base
== MAX_SETS
?
2941 descriptors_state
->dynamic_descriptors
:
2942 descriptors_state
->sets
[state
->range
[i
].ubo
.bindless_base
]->mapped_ptr
;
2943 unsigned block
= state
->range
[i
].ubo
.block
;
2944 uint32_t *desc
= base
+ block
* A6XX_TEX_CONST_DWORDS
;
2945 uint64_t va
= desc
[0] | ((uint64_t)(desc
[1] & A6XX_UBO_1_BASE_HI__MASK
) << 32);
2948 tu_cs_emit_pkt7(cs
, tu6_stage2opcode(type
), 3);
2949 tu_cs_emit(cs
, CP_LOAD_STATE6_0_DST_OFF(state
->range
[i
].offset
/ 16) |
2950 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS
) |
2951 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT
) |
2952 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type
)) |
2953 CP_LOAD_STATE6_0_NUM_UNIT(size
/ 16));
2954 tu_cs_emit_qw(cs
, va
+ offset
);
2958 static struct tu_cs_entry
2959 tu6_emit_consts(struct tu_cmd_buffer
*cmd
,
2960 const struct tu_pipeline
*pipeline
,
2961 struct tu_descriptor_state
*descriptors_state
,
2962 gl_shader_stage type
)
2965 tu_cs_begin_sub_stream(&cmd
->sub_cs
, 512, &cs
); /* TODO: maximum size? */
2967 tu6_emit_user_consts(&cs
, pipeline
, descriptors_state
, type
, cmd
->push_constants
);
2969 return tu_cs_end_sub_stream(&cmd
->sub_cs
, &cs
);
2973 tu6_emit_vs_params(struct tu_cmd_buffer
*cmd
,
2974 uint32_t first_instance
,
2975 struct tu_cs_entry
*entry
)
2977 /* TODO: fill out more than just base instance */
2978 const struct tu_program_descriptor_linkage
*link
=
2979 &cmd
->state
.pipeline
->program
.link
[MESA_SHADER_VERTEX
];
2980 const struct ir3_const_state
*const_state
= &link
->const_state
;
2983 if (const_state
->offsets
.driver_param
>= link
->constlen
) {
2984 *entry
= (struct tu_cs_entry
) {};
2988 VkResult result
= tu_cs_begin_sub_stream(&cmd
->sub_cs
, 8, &cs
);
2989 if (result
!= VK_SUCCESS
)
2992 tu_cs_emit_pkt7(&cs
, CP_LOAD_STATE6_GEOM
, 3 + 4);
2993 tu_cs_emit(&cs
, CP_LOAD_STATE6_0_DST_OFF(const_state
->offsets
.driver_param
) |
2994 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS
) |
2995 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT
) |
2996 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER
) |
2997 CP_LOAD_STATE6_0_NUM_UNIT(1));
3001 STATIC_ASSERT(IR3_DP_INSTID_BASE
== 2);
3005 tu_cs_emit(&cs
, first_instance
);
3008 *entry
= tu_cs_end_sub_stream(&cmd
->sub_cs
, &cs
);
3012 static struct tu_cs_entry
3013 tu6_emit_vertex_buffers(struct tu_cmd_buffer
*cmd
,
3014 const struct tu_pipeline
*pipeline
)
3017 tu_cs_begin_sub_stream(&cmd
->sub_cs
, 4 * MAX_VBS
, &cs
);
3020 for_each_bit(binding
, pipeline
->vi
.bindings_used
) {
3021 const struct tu_buffer
*buf
= cmd
->state
.vb
.buffers
[binding
];
3022 const VkDeviceSize offset
= buf
->bo_offset
+
3023 cmd
->state
.vb
.offsets
[binding
];
3025 tu_cs_emit_regs(&cs
,
3026 A6XX_VFD_FETCH_BASE(binding
, .bo
= buf
->bo
, .bo_offset
= offset
),
3027 A6XX_VFD_FETCH_SIZE(binding
, buf
->size
- offset
));
3031 cmd
->vertex_bindings_set
= pipeline
->vi
.bindings_used
;
3033 return tu_cs_end_sub_stream(&cmd
->sub_cs
, &cs
);
3037 get_tess_param_bo_size(const struct tu_pipeline
*pipeline
,
3038 uint32_t draw_count
)
3040 /* TODO: For indirect draws, we can't compute the BO size ahead of time.
3041 * Still not sure what to do here, so just allocate a reasonably large
3042 * BO and hope for the best for now.
3043 * (maxTessellationControlPerVertexOutputComponents * 2048 vertices +
3044 * maxTessellationControlPerPatchOutputComponents * 512 patches) */
3046 return ((128 * 2048) + (128 * 512)) * 4;
3049 /* For each patch, adreno lays out the tess param BO in memory as:
3050 * (v_input[0][0])...(v_input[i][j])(p_input[0])...(p_input[k]).
3051 * where i = # vertices per patch, j = # per-vertex outputs, and
3052 * k = # per-patch outputs.*/
3053 uint32_t verts_per_patch
= pipeline
->ia
.primtype
- DI_PT_PATCHES0
;
3054 uint32_t num_patches
= draw_count
/ verts_per_patch
;
3055 return draw_count
* pipeline
->tess
.per_vertex_output_size
+
3056 pipeline
->tess
.per_patch_output_size
* num_patches
;
3060 get_tess_factor_bo_size(const struct tu_pipeline
*pipeline
,
3061 uint32_t draw_count
)
3063 /* TODO: For indirect draws, we can't compute the BO size ahead of time.
3064 * Still not sure what to do here, so just allocate a reasonably large
3065 * BO and hope for the best for now.
3066 * (quad factor stride * 512 patches) */
3068 return (28 * 512) * 4;
3071 /* Each distinct patch gets its own tess factor output. */
3072 uint32_t verts_per_patch
= pipeline
->ia
.primtype
- DI_PT_PATCHES0
;
3073 uint32_t num_patches
= draw_count
/ verts_per_patch
;
3074 uint32_t factor_stride
;
3075 switch (pipeline
->tess
.patch_type
) {
3076 case IR3_TESS_ISOLINES
:
3079 case IR3_TESS_TRIANGLES
:
3082 case IR3_TESS_QUADS
:
3086 unreachable("bad tessmode");
3088 return factor_stride
* num_patches
;
3092 tu6_emit_tess_consts(struct tu_cmd_buffer
*cmd
,
3093 uint32_t draw_count
,
3094 const struct tu_pipeline
*pipeline
,
3095 struct tu_cs_entry
*entry
)
3098 VkResult result
= tu_cs_begin_sub_stream(&cmd
->sub_cs
, 20, &cs
);
3099 if (result
!= VK_SUCCESS
)
3102 uint64_t tess_factor_size
= get_tess_factor_bo_size(pipeline
, draw_count
);
3103 uint64_t tess_param_size
= get_tess_param_bo_size(pipeline
, draw_count
);
3104 uint64_t tess_bo_size
= tess_factor_size
+ tess_param_size
;
3105 if (tess_bo_size
> 0) {
3106 struct tu_bo
*tess_bo
;
3107 result
= tu_get_scratch_bo(cmd
->device
, tess_bo_size
, &tess_bo
);
3108 if (result
!= VK_SUCCESS
)
3111 tu_bo_list_add(&cmd
->bo_list
, tess_bo
,
3112 MSM_SUBMIT_BO_READ
| MSM_SUBMIT_BO_WRITE
);
3113 uint64_t tess_factor_iova
= tess_bo
->iova
;
3114 uint64_t tess_param_iova
= tess_factor_iova
+ tess_factor_size
;
3116 tu_cs_emit_pkt7(&cs
, CP_LOAD_STATE6_GEOM
, 3 + 4);
3117 tu_cs_emit(&cs
, CP_LOAD_STATE6_0_DST_OFF(pipeline
->tess
.hs_bo_regid
) |
3118 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS
) |
3119 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT
) |
3120 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_HS_SHADER
) |
3121 CP_LOAD_STATE6_0_NUM_UNIT(1));
3122 tu_cs_emit(&cs
, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
3123 tu_cs_emit(&cs
, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
3124 tu_cs_emit_qw(&cs
, tess_param_iova
);
3125 tu_cs_emit_qw(&cs
, tess_factor_iova
);
3127 tu_cs_emit_pkt7(&cs
, CP_LOAD_STATE6_GEOM
, 3 + 4);
3128 tu_cs_emit(&cs
, CP_LOAD_STATE6_0_DST_OFF(pipeline
->tess
.ds_bo_regid
) |
3129 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS
) |
3130 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT
) |
3131 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_DS_SHADER
) |
3132 CP_LOAD_STATE6_0_NUM_UNIT(1));
3133 tu_cs_emit(&cs
, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
3134 tu_cs_emit(&cs
, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
3135 tu_cs_emit_qw(&cs
, tess_param_iova
);
3136 tu_cs_emit_qw(&cs
, tess_factor_iova
);
3138 tu_cs_emit_pkt4(&cs
, REG_A6XX_PC_TESSFACTOR_ADDR_LO
, 2);
3139 tu_cs_emit_qw(&cs
, tess_factor_iova
);
3141 /* TODO: Without this WFI here, the hardware seems unable to read these
3142 * addresses we just emitted. Freedreno emits these consts as part of
3143 * IB1 instead of in a draw state which might make this WFI unnecessary,
3144 * but it requires a bit more indirection (SS6_INDIRECT for consts). */
3145 tu_cs_emit_wfi(&cs
);
3147 *entry
= tu_cs_end_sub_stream(&cmd
->sub_cs
, &cs
);
3152 tu6_draw_common(struct tu_cmd_buffer
*cmd
,
3155 uint32_t vertex_offset
,
3156 uint32_t first_instance
,
3157 /* note: draw_count count is 0 for indirect */
3158 uint32_t draw_count
)
3160 const struct tu_pipeline
*pipeline
= cmd
->state
.pipeline
;
3163 struct tu_descriptor_state
*descriptors_state
=
3164 &cmd
->descriptors
[VK_PIPELINE_BIND_POINT_GRAPHICS
];
3166 tu_emit_cache_flush_renderpass(cmd
, cs
);
3171 A6XX_VFD_INDEX_OFFSET(vertex_offset
),
3172 A6XX_VFD_INSTANCE_START_OFFSET(first_instance
));
3174 tu_cs_emit_regs(cs
, A6XX_PC_PRIMITIVE_CNTL_0(
3175 .primitive_restart
=
3176 pipeline
->ia
.primitive_restart
&& indexed
,
3177 .tess_upper_left_domain_origin
=
3178 pipeline
->tess
.upper_left_domain_origin
));
3180 if (cmd
->state
.dirty
& TU_CMD_DIRTY_SHADER_CONSTS
) {
3181 cmd
->state
.shader_const_ib
[MESA_SHADER_VERTEX
] =
3182 tu6_emit_consts(cmd
, pipeline
, descriptors_state
, MESA_SHADER_VERTEX
);
3183 cmd
->state
.shader_const_ib
[MESA_SHADER_TESS_CTRL
] =
3184 tu6_emit_consts(cmd
, pipeline
, descriptors_state
, MESA_SHADER_TESS_CTRL
);
3185 cmd
->state
.shader_const_ib
[MESA_SHADER_TESS_EVAL
] =
3186 tu6_emit_consts(cmd
, pipeline
, descriptors_state
, MESA_SHADER_TESS_EVAL
);
3187 cmd
->state
.shader_const_ib
[MESA_SHADER_GEOMETRY
] =
3188 tu6_emit_consts(cmd
, pipeline
, descriptors_state
, MESA_SHADER_GEOMETRY
);
3189 cmd
->state
.shader_const_ib
[MESA_SHADER_FRAGMENT
] =
3190 tu6_emit_consts(cmd
, pipeline
, descriptors_state
, MESA_SHADER_FRAGMENT
);
3193 if (cmd
->state
.dirty
& TU_CMD_DIRTY_DESCRIPTOR_SETS
) {
3194 /* We need to reload the descriptors every time the descriptor sets
3195 * change. However, the commands we send only depend on the pipeline
3196 * because the whole point is to cache descriptors which are used by the
3197 * pipeline. There's a problem here, in that the firmware has an
3198 * "optimization" which skips executing groups that are set to the same
3199 * value as the last draw. This means that if the descriptor sets change
3200 * but not the pipeline, we'd try to re-execute the same buffer which
3201 * the firmware would ignore and we wouldn't pre-load the new
3202 * descriptors. The blob seems to re-emit the LOAD_STATE group whenever
3203 * the descriptor sets change, which we emulate here by copying the
3204 * pre-prepared buffer.
3206 const struct tu_cs_entry
*load_entry
= &pipeline
->load_state
.state_ib
;
3207 if (load_entry
->size
> 0) {
3208 struct tu_cs load_cs
;
3209 result
= tu_cs_begin_sub_stream(&cmd
->sub_cs
, load_entry
->size
, &load_cs
);
3210 if (result
!= VK_SUCCESS
)
3212 tu_cs_emit_array(&load_cs
,
3213 (uint32_t *)((char *)load_entry
->bo
->map
+ load_entry
->offset
),
3214 load_entry
->size
/ 4);
3215 cmd
->state
.desc_sets_load_ib
= tu_cs_end_sub_stream(&cmd
->sub_cs
, &load_cs
);
3217 cmd
->state
.desc_sets_load_ib
.size
= 0;
3221 if (cmd
->state
.dirty
& TU_CMD_DIRTY_VERTEX_BUFFERS
)
3222 cmd
->state
.vertex_buffers_ib
= tu6_emit_vertex_buffers(cmd
, pipeline
);
3224 struct tu_cs_entry vs_params
;
3225 result
= tu6_emit_vs_params(cmd
, first_instance
, &vs_params
);
3226 if (result
!= VK_SUCCESS
)
3230 pipeline
->active_stages
& VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT
;
3231 struct tu_cs_entry tess_consts
= {};
3233 cmd
->has_tess
= true;
3234 result
= tu6_emit_tess_consts(cmd
, draw_count
, pipeline
, &tess_consts
);
3235 if (result
!= VK_SUCCESS
)
3239 /* for the first draw in a renderpass, re-emit all the draw states
3241 * and if a draw-state disabling path (CmdClearAttachments 3D fallback) was
3242 * used, then draw states must be re-emitted. note however this only happens
3243 * in the sysmem path, so this can be skipped this for the gmem path (TODO)
3245 * the two input attachment states are excluded because secondary command
3246 * buffer doesn't have a state ib to restore it, and not re-emitting them
3247 * is OK since CmdClearAttachments won't disable/overwrite them
3249 if (cmd
->state
.dirty
& TU_CMD_DIRTY_DRAW_STATE
) {
3250 tu_cs_emit_pkt7(cs
, CP_SET_DRAW_STATE
, 3 * (TU_DRAW_STATE_COUNT
- 2));
3252 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_PROGRAM
, pipeline
->program
.state_ib
);
3253 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_PROGRAM_BINNING
, pipeline
->program
.binning_state_ib
);
3254 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_TESS
, tess_consts
);
3255 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_VI
, pipeline
->vi
.state_ib
);
3256 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_VI_BINNING
, pipeline
->vi
.binning_state_ib
);
3257 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_RAST
, pipeline
->rast
.state_ib
);
3258 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_DS
, pipeline
->ds
.state_ib
);
3259 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_BLEND
, pipeline
->blend
.state_ib
);
3260 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_VS_CONST
, cmd
->state
.shader_const_ib
[MESA_SHADER_VERTEX
]);
3261 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_HS_CONST
, cmd
->state
.shader_const_ib
[MESA_SHADER_TESS_CTRL
]);
3262 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_DS_CONST
, cmd
->state
.shader_const_ib
[MESA_SHADER_TESS_EVAL
]);
3263 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_GS_CONST
, cmd
->state
.shader_const_ib
[MESA_SHADER_GEOMETRY
]);
3264 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_FS_CONST
, cmd
->state
.shader_const_ib
[MESA_SHADER_FRAGMENT
]);
3265 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_DESC_SETS
, cmd
->state
.desc_sets_ib
);
3266 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_DESC_SETS_LOAD
, cmd
->state
.desc_sets_load_ib
);
3267 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_VB
, cmd
->state
.vertex_buffers_ib
);
3268 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_VS_PARAMS
, vs_params
);
3270 for (uint32_t i
= 0; i
< ARRAY_SIZE(cmd
->state
.dynamic_state
); i
++) {
3271 tu_cs_emit_draw_state(cs
, TU_DRAW_STATE_DYNAMIC
+ i
,
3272 ((pipeline
->dynamic_state_mask
& BIT(i
)) ?
3273 cmd
->state
.dynamic_state
[i
] :
3274 pipeline
->dynamic_state
[i
]));
3278 /* emit draw states that were just updated
3279 * note we eventually don't want to have to emit anything here
3281 uint32_t draw_state_count
=
3283 ((cmd
->state
.dirty
& TU_CMD_DIRTY_SHADER_CONSTS
) ? 5 : 0) +
3284 ((cmd
->state
.dirty
& TU_CMD_DIRTY_DESCRIPTOR_SETS
) ? 1 : 0) +
3285 ((cmd
->state
.dirty
& TU_CMD_DIRTY_VERTEX_BUFFERS
) ? 1 : 0) +
3288 tu_cs_emit_pkt7(cs
, CP_SET_DRAW_STATE
, 3 * draw_state_count
);
3290 /* We may need to re-emit tess consts if the current draw call is
3291 * sufficiently larger than the last draw call. */
3293 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_TESS
, tess_consts
);
3294 if (cmd
->state
.dirty
& TU_CMD_DIRTY_SHADER_CONSTS
) {
3295 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_VS_CONST
, cmd
->state
.shader_const_ib
[MESA_SHADER_VERTEX
]);
3296 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_HS_CONST
, cmd
->state
.shader_const_ib
[MESA_SHADER_TESS_CTRL
]);
3297 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_DS_CONST
, cmd
->state
.shader_const_ib
[MESA_SHADER_TESS_EVAL
]);
3298 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_GS_CONST
, cmd
->state
.shader_const_ib
[MESA_SHADER_GEOMETRY
]);
3299 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_FS_CONST
, cmd
->state
.shader_const_ib
[MESA_SHADER_FRAGMENT
]);
3301 if (cmd
->state
.dirty
& TU_CMD_DIRTY_DESCRIPTOR_SETS
)
3302 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_DESC_SETS_LOAD
, cmd
->state
.desc_sets_load_ib
);
3303 if (cmd
->state
.dirty
& TU_CMD_DIRTY_VERTEX_BUFFERS
)
3304 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_VB
, cmd
->state
.vertex_buffers_ib
);
3305 tu_cs_emit_sds_ib(cs
, TU_DRAW_STATE_VS_PARAMS
, vs_params
);
3308 tu_cs_sanity_check(cs
);
3310 /* There are too many graphics dirty bits to list here, so just list the
3311 * bits to preserve instead. The only things not emitted here are
3312 * compute-related state.
3314 cmd
->state
.dirty
&= (TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS
| TU_CMD_DIRTY_COMPUTE_PIPELINE
);
3319 tu_draw_initiator(struct tu_cmd_buffer
*cmd
, enum pc_di_src_sel src_sel
)
3321 const struct tu_pipeline
*pipeline
= cmd
->state
.pipeline
;
3322 uint32_t initiator
=
3323 CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(pipeline
->ia
.primtype
) |
3324 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(src_sel
) |
3325 CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(cmd
->state
.index_size
) |
3326 CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY
);
3328 if (pipeline
->active_stages
& VK_SHADER_STAGE_GEOMETRY_BIT
)
3329 initiator
|= CP_DRAW_INDX_OFFSET_0_GS_ENABLE
;
3331 switch (pipeline
->tess
.patch_type
) {
3332 case IR3_TESS_TRIANGLES
:
3333 initiator
|= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_TRIANGLES
) |
3334 CP_DRAW_INDX_OFFSET_0_TESS_ENABLE
;
3336 case IR3_TESS_ISOLINES
:
3337 initiator
|= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_ISOLINES
) |
3338 CP_DRAW_INDX_OFFSET_0_TESS_ENABLE
;
3341 initiator
|= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_QUADS
);
3343 case IR3_TESS_QUADS
:
3344 initiator
|= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_QUADS
) |
3345 CP_DRAW_INDX_OFFSET_0_TESS_ENABLE
;
3352 tu_CmdDraw(VkCommandBuffer commandBuffer
,
3353 uint32_t vertexCount
,
3354 uint32_t instanceCount
,
3355 uint32_t firstVertex
,
3356 uint32_t firstInstance
)
3358 TU_FROM_HANDLE(tu_cmd_buffer
, cmd
, commandBuffer
);
3359 struct tu_cs
*cs
= &cmd
->draw_cs
;
3361 tu6_draw_common(cmd
, cs
, false, firstVertex
, firstInstance
, vertexCount
);
3363 tu_cs_emit_pkt7(cs
, CP_DRAW_INDX_OFFSET
, 3);
3364 tu_cs_emit(cs
, tu_draw_initiator(cmd
, DI_SRC_SEL_AUTO_INDEX
));
3365 tu_cs_emit(cs
, instanceCount
);
3366 tu_cs_emit(cs
, vertexCount
);
3370 tu_CmdDrawIndexed(VkCommandBuffer commandBuffer
,
3371 uint32_t indexCount
,
3372 uint32_t instanceCount
,
3373 uint32_t firstIndex
,
3374 int32_t vertexOffset
,
3375 uint32_t firstInstance
)
3377 TU_FROM_HANDLE(tu_cmd_buffer
, cmd
, commandBuffer
);
3378 struct tu_cs
*cs
= &cmd
->draw_cs
;
3380 tu6_draw_common(cmd
, cs
, true, vertexOffset
, firstInstance
, indexCount
);
3382 tu_cs_emit_pkt7(cs
, CP_DRAW_INDX_OFFSET
, 7);
3383 tu_cs_emit(cs
, tu_draw_initiator(cmd
, DI_SRC_SEL_DMA
));
3384 tu_cs_emit(cs
, instanceCount
);
3385 tu_cs_emit(cs
, indexCount
);
3386 tu_cs_emit(cs
, 0x0); /* XXX */
3387 tu_cs_emit_qw(cs
, cmd
->state
.index_va
+ (firstIndex
<< cmd
->state
.index_shift
));
3388 tu_cs_emit(cs
, indexCount
<< cmd
->state
.index_shift
);
3392 tu_CmdDrawIndirect(VkCommandBuffer commandBuffer
,
3394 VkDeviceSize offset
,
3398 TU_FROM_HANDLE(tu_cmd_buffer
, cmd
, commandBuffer
);
3399 TU_FROM_HANDLE(tu_buffer
, buf
, _buffer
);
3400 struct tu_cs
*cs
= &cmd
->draw_cs
;
3402 tu6_draw_common(cmd
, cs
, false, 0, 0, 0);
3404 for (uint32_t i
= 0; i
< drawCount
; i
++) {
3405 tu_cs_emit_pkt7(cs
, CP_DRAW_INDIRECT
, 3);
3406 tu_cs_emit(cs
, tu_draw_initiator(cmd
, DI_SRC_SEL_AUTO_INDEX
));
3407 tu_cs_emit_qw(cs
, buf
->bo
->iova
+ buf
->bo_offset
+ offset
+ stride
* i
);
3410 tu_bo_list_add(&cmd
->bo_list
, buf
->bo
, MSM_SUBMIT_BO_READ
);
3414 tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer
,
3416 VkDeviceSize offset
,
3420 TU_FROM_HANDLE(tu_cmd_buffer
, cmd
, commandBuffer
);
3421 TU_FROM_HANDLE(tu_buffer
, buf
, _buffer
);
3422 struct tu_cs
*cs
= &cmd
->draw_cs
;
3424 tu6_draw_common(cmd
, cs
, true, 0, 0, 0);
3426 for (uint32_t i
= 0; i
< drawCount
; i
++) {
3427 tu_cs_emit_pkt7(cs
, CP_DRAW_INDX_INDIRECT
, 6);
3428 tu_cs_emit(cs
, tu_draw_initiator(cmd
, DI_SRC_SEL_DMA
));
3429 tu_cs_emit_qw(cs
, cmd
->state
.index_va
);
3430 tu_cs_emit(cs
, A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(cmd
->state
.max_index_count
));
3431 tu_cs_emit_qw(cs
, buf
->bo
->iova
+ buf
->bo_offset
+ offset
+ stride
* i
);
3434 tu_bo_list_add(&cmd
->bo_list
, buf
->bo
, MSM_SUBMIT_BO_READ
);
3437 void tu_CmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer
,
3438 uint32_t instanceCount
,
3439 uint32_t firstInstance
,
3440 VkBuffer _counterBuffer
,
3441 VkDeviceSize counterBufferOffset
,
3442 uint32_t counterOffset
,
3443 uint32_t vertexStride
)
3445 TU_FROM_HANDLE(tu_cmd_buffer
, cmd
, commandBuffer
);
3446 TU_FROM_HANDLE(tu_buffer
, buf
, _counterBuffer
);
3447 struct tu_cs
*cs
= &cmd
->draw_cs
;
3449 tu6_draw_common(cmd
, cs
, false, 0, firstInstance
, 0);
3451 tu_cs_emit_pkt7(cs
, CP_DRAW_AUTO
, 6);
3452 tu_cs_emit(cs
, tu_draw_initiator(cmd
, DI_SRC_SEL_AUTO_XFB
));
3453 tu_cs_emit(cs
, instanceCount
);
3454 tu_cs_emit_qw(cs
, buf
->bo
->iova
+ buf
->bo_offset
+ counterBufferOffset
);
3455 tu_cs_emit(cs
, counterOffset
);
3456 tu_cs_emit(cs
, vertexStride
);
3458 tu_bo_list_add(&cmd
->bo_list
, buf
->bo
, MSM_SUBMIT_BO_READ
);
3461 struct tu_dispatch_info
3464 * Determine the layout of the grid (in block units) to be used.
3469 * A starting offset for the grid. If unaligned is set, the offset
3470 * must still be aligned.
3472 uint32_t offsets
[3];
3474 * Whether it's an unaligned compute dispatch.
3479 * Indirect compute parameters resource.
3481 struct tu_buffer
*indirect
;
3482 uint64_t indirect_offset
;
3486 tu_emit_compute_driver_params(struct tu_cs
*cs
, struct tu_pipeline
*pipeline
,
3487 const struct tu_dispatch_info
*info
)
3489 gl_shader_stage type
= MESA_SHADER_COMPUTE
;
3490 const struct tu_program_descriptor_linkage
*link
=
3491 &pipeline
->program
.link
[type
];
3492 const struct ir3_const_state
*const_state
= &link
->const_state
;
3493 uint32_t offset
= const_state
->offsets
.driver_param
;
3495 if (link
->constlen
<= offset
)
3498 if (!info
->indirect
) {
3499 uint32_t driver_params
[IR3_DP_CS_COUNT
] = {
3500 [IR3_DP_NUM_WORK_GROUPS_X
] = info
->blocks
[0],
3501 [IR3_DP_NUM_WORK_GROUPS_Y
] = info
->blocks
[1],
3502 [IR3_DP_NUM_WORK_GROUPS_Z
] = info
->blocks
[2],
3503 [IR3_DP_LOCAL_GROUP_SIZE_X
] = pipeline
->compute
.local_size
[0],
3504 [IR3_DP_LOCAL_GROUP_SIZE_Y
] = pipeline
->compute
.local_size
[1],
3505 [IR3_DP_LOCAL_GROUP_SIZE_Z
] = pipeline
->compute
.local_size
[2],
3508 uint32_t num_consts
= MIN2(const_state
->num_driver_params
,
3509 (link
->constlen
- offset
) * 4);
3510 /* push constants */
3511 tu_cs_emit_pkt7(cs
, tu6_stage2opcode(type
), 3 + num_consts
);
3512 tu_cs_emit(cs
, CP_LOAD_STATE6_0_DST_OFF(offset
) |
3513 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS
) |
3514 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT
) |
3515 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type
)) |
3516 CP_LOAD_STATE6_0_NUM_UNIT(num_consts
/ 4));
3520 for (i
= 0; i
< num_consts
; i
++)
3521 tu_cs_emit(cs
, driver_params
[i
]);
3523 tu_finishme("Indirect driver params");
static void
tu_dispatch(struct tu_cmd_buffer *cmd,
            const struct tu_dispatch_info *info)
{
   struct tu_cs *cs = &cmd->cs;
   struct tu_pipeline *pipeline = cmd->state.compute_pipeline;
   struct tu_descriptor_state *descriptors_state =
      &cmd->descriptors[VK_PIPELINE_BIND_POINT_COMPUTE];

   /* TODO: We could probably flush less if we add a compute_flush_bits
    * bitfield.
    */
   tu_emit_cache_flush(cmd, cs);

   if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_PIPELINE)
      tu_cs_emit_ib(cs, &pipeline->program.state_ib);

   struct tu_cs_entry ib;

   ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE);
   tu_cs_emit_ib(cs, &ib);

   tu_emit_compute_driver_params(cs, pipeline, info);

   if ((cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS) &&
       pipeline->load_state.state_ib.size > 0) {
      tu_cs_emit_ib(cs, &pipeline->load_state.state_ib);
   }

   cmd->state.dirty &=
      ~(TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS | TU_CMD_DIRTY_COMPUTE_PIPELINE);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));

   const uint32_t *local_size = pipeline->compute.local_size;
   const uint32_t *num_groups = info->blocks;
   tu_cs_emit_regs(cs,
                   A6XX_HLSQ_CS_NDRANGE_0(.kerneldim = 3,
                                          .localsizex = local_size[0] - 1,
                                          .localsizey = local_size[1] - 1,
                                          .localsizez = local_size[2] - 1),
                   A6XX_HLSQ_CS_NDRANGE_1(.globalsize_x = local_size[0] * num_groups[0]),
                   A6XX_HLSQ_CS_NDRANGE_2(.globaloff_x = 0),
                   A6XX_HLSQ_CS_NDRANGE_3(.globalsize_y = local_size[1] * num_groups[1]),
                   A6XX_HLSQ_CS_NDRANGE_4(.globaloff_y = 0),
                   A6XX_HLSQ_CS_NDRANGE_5(.globalsize_z = local_size[2] * num_groups[2]),
                   A6XX_HLSQ_CS_NDRANGE_6(.globaloff_z = 0));

   tu_cs_emit_regs(cs,
                   A6XX_HLSQ_CS_KERNEL_GROUP_X(1),
                   A6XX_HLSQ_CS_KERNEL_GROUP_Y(1),
                   A6XX_HLSQ_CS_KERNEL_GROUP_Z(1));

   if (info->indirect) {
      uint64_t iova = tu_buffer_iova(info->indirect) + info->indirect_offset;

      tu_bo_list_add(&cmd->bo_list, info->indirect->bo,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);

      tu_cs_emit_pkt7(cs, CP_EXEC_CS_INDIRECT, 4);
      tu_cs_emit(cs, 0x00000000);
      tu_cs_emit_qw(cs, iova);
      tu_cs_emit(cs,
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(local_size[0] - 1) |
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(local_size[1] - 1) |
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(local_size[2] - 1));
   } else {
      tu_cs_emit_pkt7(cs, CP_EXEC_CS, 4);
      tu_cs_emit(cs, 0x00000000);
      tu_cs_emit(cs, CP_EXEC_CS_1_NGROUPS_X(info->blocks[0]));
      tu_cs_emit(cs, CP_EXEC_CS_2_NGROUPS_Y(info->blocks[1]));
      tu_cs_emit(cs, CP_EXEC_CS_3_NGROUPS_Z(info->blocks[2]));
   }
}

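/* vkCmdDispatchBase (VK_KHR_device_group / Vulkan 1.1): x/y/z are the
 * workgroup counts, base_x/y/z the workgroup offsets.
 */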
void
tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
                   uint32_t base_x,
                   uint32_t base_y,
                   uint32_t base_z,
                   uint32_t x,
                   uint32_t y,
                   uint32_t z)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_dispatch_info info = {};

   info.blocks[0] = x;
   info.blocks[1] = y;
   info.blocks[2] = z;

   info.offsets[0] = base_x;
   info.offsets[1] = base_y;
   info.offsets[2] = base_z;
   tu_dispatch(cmd_buffer, &info);
}

void
tu_CmdDispatch(VkCommandBuffer commandBuffer,
               uint32_t x,
               uint32_t y,
               uint32_t z)
{
   tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
}

void
tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
                       VkBuffer _buffer,
                       VkDeviceSize offset)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_dispatch_info info = {};

   info.indirect = buffer;
   info.indirect_offset = offset;

   tu_dispatch(cmd_buffer, &info);
}

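/* Ending the render pass is when the recorded draw_cs is actually executed:
 * once directly for sysmem rendering, or replayed per tile for the GMEM
 * path. Afterwards the draw command streams are reset for the next render
 * pass recorded into this command buffer.
 */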
void
tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   tu_cs_end(&cmd_buffer->draw_cs);
   tu_cs_end(&cmd_buffer->draw_epilogue_cs);

   if (use_sysmem_rendering(cmd_buffer))
      tu_cmd_render_sysmem(cmd_buffer);
   else
      tu_cmd_render_tiles(cmd_buffer);

   /* discard draw_cs and draw_epilogue_cs entries now that the tiles are
    * rendered
    */
   tu_cs_discard_entries(&cmd_buffer->draw_cs);
   tu_cs_begin(&cmd_buffer->draw_cs);
   tu_cs_discard_entries(&cmd_buffer->draw_epilogue_cs);
   tu_cs_begin(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->state.cache.pending_flush_bits |=
      cmd_buffer->state.renderpass_cache.pending_flush_bits;
   tu_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier, true);

   cmd_buffer->state.pass = NULL;
   cmd_buffer->state.subpass = NULL;
   cmd_buffer->state.framebuffer = NULL;
}

void
tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
                     const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdEndRenderPass(commandBuffer);
}

struct tu_barrier_info
{
   uint32_t eventCount;
   const VkEvent *pEvents;
   VkPipelineStageFlags srcStageMask;
};

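/* Shared implementation behind vkCmdPipelineBarrier and vkCmdWaitEvents: OR
 * together all src/dst access masks, translate them into internal cache
 * flush/invalidate flags with vk2tu_access(), and emit CP_WAIT_REG_MEM polls
 * for any events that must have signaled first.
 */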
static void
tu_barrier(struct tu_cmd_buffer *cmd,
           uint32_t memoryBarrierCount,
           const VkMemoryBarrier *pMemoryBarriers,
           uint32_t bufferMemoryBarrierCount,
           const VkBufferMemoryBarrier *pBufferMemoryBarriers,
           uint32_t imageMemoryBarrierCount,
           const VkImageMemoryBarrier *pImageMemoryBarriers,
           const struct tu_barrier_info *info)
{
   struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;
   VkAccessFlags srcAccessMask = 0;
   VkAccessFlags dstAccessMask = 0;

   for (uint32_t i = 0; i < memoryBarrierCount; i++) {
      srcAccessMask |= pMemoryBarriers[i].srcAccessMask;
      dstAccessMask |= pMemoryBarriers[i].dstAccessMask;
   }

   for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
      srcAccessMask |= pBufferMemoryBarriers[i].srcAccessMask;
      dstAccessMask |= pBufferMemoryBarriers[i].dstAccessMask;
   }

   enum tu_cmd_access_mask src_flags = 0;
   enum tu_cmd_access_mask dst_flags = 0;

   for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
      TU_FROM_HANDLE(tu_image, image, pImageMemoryBarriers[i].image);
      VkImageLayout old_layout = pImageMemoryBarriers[i].oldLayout;
      /* For non-linear images, PREINITIALIZED is the same as UNDEFINED */
      if (old_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
          (image->tiling != VK_IMAGE_TILING_LINEAR &&
           old_layout == VK_IMAGE_LAYOUT_PREINITIALIZED)) {
         /* The underlying memory for this image may have been used earlier
          * within the same queue submission for a different image, which
          * means that there may be old, stale cache entries which are in the
          * "wrong" location, which could cause problems later after writing
          * to the image. We don't want these entries being flushed later and
          * overwriting the actual image, so we need to flush the CCU.
          */
         src_flags |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
      }
      srcAccessMask |= pImageMemoryBarriers[i].srcAccessMask;
      dstAccessMask |= pImageMemoryBarriers[i].dstAccessMask;
   }

   /* Inside a renderpass, we don't know yet whether we'll be using sysmem
    * so we have to use the sysmem flushes.
    */
   bool gmem = cmd->state.ccu_state == TU_CMD_CCU_GMEM &&
      !cmd->state.pass;
   src_flags |= vk2tu_access(srcAccessMask, gmem);
   dst_flags |= vk2tu_access(dstAccessMask, gmem);

   struct tu_cache_state *cache =
      cmd->state.pass ? &cmd->state.renderpass_cache : &cmd->state.cache;
   tu_flush_for_access(cache, src_flags, dst_flags);

   for (uint32_t i = 0; i < info->eventCount; i++) {
      TU_FROM_HANDLE(tu_event, event, info->pEvents[i]);

      tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_READ);

      tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
      tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                 CP_WAIT_REG_MEM_0_POLL_MEMORY);
      tu_cs_emit_qw(cs, event->bo.iova); /* POLL_ADDR_LO/HI */
      tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(1));
      tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0u));
      tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(20));
   }
}

void
tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
                      VkPipelineStageFlags srcStageMask,
                      VkPipelineStageFlags dstStageMask,
                      VkDependencyFlags dependencyFlags,
                      uint32_t memoryBarrierCount,
                      const VkMemoryBarrier *pMemoryBarriers,
                      uint32_t bufferMemoryBarrierCount,
                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                      uint32_t imageMemoryBarrierCount,
                      const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = 0;
   info.pEvents = NULL;
   info.srcStageMask = srcStageMask;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}

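/* Write a value into an event BO from the command stream. For stages that
 * are satisfied as soon as the CP has consumed the command (top of pipe,
 * draw indirect) a plain CP_MEM_WRITE is enough; otherwise an RB_DONE_TS
 * event write is used so the value only lands after earlier work completes.
 */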
static void
write_event(struct tu_cmd_buffer *cmd, struct tu_event *event,
            VkPipelineStageFlags stageMask, unsigned value)
{
   struct tu_cs *cs = &cmd->cs;

   /* vkCmdSetEvent/vkCmdResetEvent cannot be called inside a render pass */
   assert(!cmd->state.pass);

   tu_emit_cache_flush(cmd, cs);

   tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_WRITE);

   /* Flags that only require a top-of-pipe event. DrawIndirect parameters are
    * read by the CP, so the draw indirect stage counts as top-of-pipe too.
    */
   VkPipelineStageFlags top_of_pipe_flags =
      VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
      VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;

   if (!(stageMask & ~top_of_pipe_flags)) {
      tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
      tu_cs_emit_qw(cs, event->bo.iova); /* ADDR_LO/HI */
      tu_cs_emit(cs, value);
   } else {
      /* Use a RB_DONE_TS event to wait for everything to complete. */
      tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 4);
      tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(RB_DONE_TS));
      tu_cs_emit_qw(cs, event->bo.iova);
      tu_cs_emit(cs, value);
   }
}

void
tu_CmdSetEvent(VkCommandBuffer commandBuffer,
               VkEvent _event,
               VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd, event, stageMask, 1);
}

void
tu_CmdResetEvent(VkCommandBuffer commandBuffer,
                 VkEvent _event,
                 VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd, event, stageMask, 0);
}

void
tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
                 uint32_t eventCount,
                 const VkEvent *pEvents,
                 VkPipelineStageFlags srcStageMask,
                 VkPipelineStageFlags dstStageMask,
                 uint32_t memoryBarrierCount,
                 const VkMemoryBarrier *pMemoryBarriers,
                 uint32_t bufferMemoryBarrierCount,
                 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                 uint32_t imageMemoryBarrierCount,
                 const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = eventCount;
   info.pEvents = pEvents;
   info.srcStageMask = 0;

   tu_barrier(cmd, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}

void
tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)