/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "tu_private.h"

#include "registers/adreno_pm4.xml.h"
#include "registers/adreno_common.xml.h"

#include "vk_format.h"
void
tu_bo_list_init(struct tu_bo_list *list)
{
   list->count = list->capacity = 0;
   list->bo_infos = NULL;
}
void
tu_bo_list_destroy(struct tu_bo_list *list)
{
   free(list->bo_infos);
}
void
tu_bo_list_reset(struct tu_bo_list *list)
{
   list->count = 0;
}
/**
 * \a flags consists of MSM_SUBMIT_BO_FLAGS.
 */
static uint32_t
tu_bo_list_add_info(struct tu_bo_list *list,
                    const struct drm_msm_gem_submit_bo *bo_info)
{
   assert(bo_info->handle != 0);

   for (uint32_t i = 0; i < list->count; ++i) {
      if (list->bo_infos[i].handle == bo_info->handle) {
         assert(list->bo_infos[i].presumed == bo_info->presumed);
         list->bo_infos[i].flags |= bo_info->flags;
         return i;
      }
   }

   /* grow list->bo_infos if needed */
   if (list->count == list->capacity) {
      uint32_t new_capacity = MAX2(2 * list->count, 16);
      struct drm_msm_gem_submit_bo *new_bo_infos = realloc(
         list->bo_infos, new_capacity * sizeof(struct drm_msm_gem_submit_bo));
      if (!new_bo_infos)
         return TU_BO_LIST_FAILED;
      list->bo_infos = new_bo_infos;
      list->capacity = new_capacity;
   }

   list->bo_infos[list->count] = *bo_info;
   return list->count++;
}
uint32_t
tu_bo_list_add(struct tu_bo_list *list,
               const struct tu_bo *bo,
               uint32_t flags)
{
   return tu_bo_list_add_info(list, &(struct drm_msm_gem_submit_bo) {
      .flags = flags,
      .handle = bo->gem_handle,
      .presumed = bo->iova,
   });
}
VkResult
tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other)
{
   for (uint32_t i = 0; i < other->count; i++) {
      if (tu_bo_list_add_info(list, other->bo_infos + i) == TU_BO_LIST_FAILED)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   return VK_SUCCESS;
}
void
tu6_emit_event_write(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     enum vgt_event_type event)
{
   bool need_seqno = false;
   switch (event) {
   case CACHE_FLUSH_TS:
   case WT_DONE_TS:
   case RB_DONE_TS:
   case PC_CCU_FLUSH_DEPTH_TS:
   case PC_CCU_FLUSH_COLOR_TS:
   case PC_CCU_RESOLVE_TS:
      need_seqno = true;
      break;
   default:
      break;
   }

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
   tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
   if (need_seqno) {
      tu_cs_emit_qw(cs, global_iova(cmd, seqno_dummy));
      tu_cs_emit(cs, 0);
   }
}
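
/* The *_TS events above are "timestamp" events: when one retires, the CP
 * writes the trailing payload dword back to the supplied address. Nothing
 * here consumes that write, so it is pointed at the scratch seqno_dummy
 * slot in the global bo just to give the write somewhere harmless to land.
 */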
static void
tu6_emit_flushes(struct tu_cmd_buffer *cmd_buffer,
                 struct tu_cs *cs,
                 enum tu_cmd_flush_bits flushes)
{
   /* Experiments show that invalidating CCU while it still has data in it
    * doesn't work, so make sure to always flush before invalidating in case
    * any data remains that hasn't yet been made available through a barrier.
    * However it does seem to work for UCHE.
    */
   if (flushes & (TU_CMD_FLAG_CCU_FLUSH_COLOR |
                  TU_CMD_FLAG_CCU_INVALIDATE_COLOR))
      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_FLUSH_COLOR_TS);
   if (flushes & (TU_CMD_FLAG_CCU_FLUSH_DEPTH |
                  TU_CMD_FLAG_CCU_INVALIDATE_DEPTH))
      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_FLUSH_DEPTH_TS);
   if (flushes & TU_CMD_FLAG_CCU_INVALIDATE_COLOR)
      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_INVALIDATE_COLOR);
   if (flushes & TU_CMD_FLAG_CCU_INVALIDATE_DEPTH)
      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_INVALIDATE_DEPTH);
   if (flushes & TU_CMD_FLAG_CACHE_FLUSH)
      tu6_emit_event_write(cmd_buffer, cs, CACHE_FLUSH_TS);
   if (flushes & TU_CMD_FLAG_CACHE_INVALIDATE)
      tu6_emit_event_write(cmd_buffer, cs, CACHE_INVALIDATE);
   if (flushes & TU_CMD_FLAG_WFI)
      tu_cs_emit_wfi(cs);
}
/* "Normal" cache flushes, that don't require any special handling */

void
tu_emit_cache_flush(struct tu_cmd_buffer *cmd_buffer,
                    struct tu_cs *cs)
{
   tu6_emit_flushes(cmd_buffer, cs, cmd_buffer->state.cache.flush_bits);
   cmd_buffer->state.cache.flush_bits = 0;
}
/* Renderpass cache flushes */

void
tu_emit_cache_flush_renderpass(struct tu_cmd_buffer *cmd_buffer,
                               struct tu_cs *cs)
{
   tu6_emit_flushes(cmd_buffer, cs, cmd_buffer->state.renderpass_cache.flush_bits);
   cmd_buffer->state.renderpass_cache.flush_bits = 0;
}
/* Cache flushes for things that use the color/depth read/write path (i.e.
 * blits and draws). This deals with changing CCU state as well as the usual
 * cache flushing.
 */

void
tu_emit_cache_flush_ccu(struct tu_cmd_buffer *cmd_buffer,
                        struct tu_cs *cs,
                        enum tu_cmd_ccu_state ccu_state)
{
   enum tu_cmd_flush_bits flushes = cmd_buffer->state.cache.flush_bits;

   assert(ccu_state != TU_CMD_CCU_UNKNOWN);

   /* Changing CCU state must involve invalidating the CCU. In sysmem mode,
    * the CCU may also contain data that we haven't flushed out yet, so we
    * also need to flush. Also, in order to program RB_CCU_CNTL, we need to
    * emit a WFI as it isn't pipelined.
    */
   if (ccu_state != cmd_buffer->state.ccu_state) {
      if (cmd_buffer->state.ccu_state != TU_CMD_CCU_GMEM) {
         flushes |=
            TU_CMD_FLAG_CCU_FLUSH_COLOR |
            TU_CMD_FLAG_CCU_FLUSH_DEPTH;
         cmd_buffer->state.cache.pending_flush_bits &= ~(
            TU_CMD_FLAG_CCU_FLUSH_COLOR |
            TU_CMD_FLAG_CCU_FLUSH_DEPTH);
      }
      flushes |=
         TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
         TU_CMD_FLAG_CCU_INVALIDATE_DEPTH |
         TU_CMD_FLAG_WFI;
      cmd_buffer->state.cache.pending_flush_bits &= ~(
         TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
         TU_CMD_FLAG_CCU_INVALIDATE_DEPTH);
   }

   tu6_emit_flushes(cmd_buffer, cs, flushes);
   cmd_buffer->state.cache.flush_bits = 0;

   if (ccu_state != cmd_buffer->state.ccu_state) {
      struct tu_physical_device *phys_dev = cmd_buffer->device->physical_device;
      tu_cs_emit_regs(cs,
                      A6XX_RB_CCU_CNTL(.offset =
                                          ccu_state == TU_CMD_CCU_GMEM ?
                                          phys_dev->ccu_offset_gmem :
                                          phys_dev->ccu_offset_bypass,
                                       .gmem = ccu_state == TU_CMD_CCU_GMEM));
      cmd_buffer->state.ccu_state = ccu_state;
   }
}
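
/* Presumably the TU_CMD_CCU_GMEM case above can skip the color/depth flush
 * because, while rendering to GMEM, the CCU contents are written back by the
 * resolve/store path rather than by a cache flush; leaving sysmem mode is
 * the only case where dirty CCU data would otherwise be lost.
 */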
static void
tu6_emit_zs(struct tu_cmd_buffer *cmd,
            const struct tu_subpass *subpass,
            struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   const uint32_t a = subpass->depth_stencil_attachment.attachment;
   if (a == VK_ATTACHMENT_UNUSED) {
      tu_cs_emit_regs(cs,
                      A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE),
                      A6XX_RB_DEPTH_BUFFER_PITCH(0),
                      A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(0),
                      A6XX_RB_DEPTH_BUFFER_BASE(0),
                      A6XX_RB_DEPTH_BUFFER_BASE_GMEM(0));

      tu_cs_emit_regs(cs,
                      A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE));

      tu_cs_emit_regs(cs,
                      A6XX_GRAS_LRZ_BUFFER_BASE(0),
                      A6XX_GRAS_LRZ_BUFFER_PITCH(0),
                      A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));

      tu_cs_emit_regs(cs, A6XX_RB_STENCIL_INFO(0));

      return;
   }

   const struct tu_image_view *iview = fb->attachments[a].attachment;
   const struct tu_render_pass_attachment *attachment =
      &cmd->state.pass->attachments[a];
   enum a6xx_depth_format fmt = tu6_pipe2depth(attachment->format);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
   tu_cs_emit(cs, A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt).value);
   tu_cs_image_ref(cs, iview, 0);
   tu_cs_emit(cs, attachment->gmem_offset);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = fmt));

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
   tu_cs_image_flag_ref(cs, iview, 0);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_BUFFER_BASE(0),
                   A6XX_GRAS_LRZ_BUFFER_PITCH(0),
                   A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));

   if (attachment->format == VK_FORMAT_S8_UINT) {
      tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCIL_INFO, 6);
      tu_cs_emit(cs, A6XX_RB_STENCIL_INFO(.separate_stencil = true).value);
      tu_cs_image_ref(cs, iview, 0);
      tu_cs_emit(cs, attachment->gmem_offset);
   } else {
      tu_cs_emit_regs(cs,
                      A6XX_RB_STENCIL_INFO(0));
   }
}
static void
tu6_emit_mrt(struct tu_cmd_buffer *cmd,
             const struct tu_subpass *subpass,
             struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   for (uint32_t i = 0; i < subpass->color_count; ++i) {
      uint32_t a = subpass->color_attachments[i].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      const struct tu_image_view *iview = fb->attachments[a].attachment;

      tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_BUF_INFO(i), 6);
      tu_cs_emit(cs, iview->RB_MRT_BUF_INFO);
      tu_cs_image_ref(cs, iview, 0);
      tu_cs_emit(cs, cmd->state.pass->attachments[a].gmem_offset);

      tu_cs_emit_regs(cs,
                      A6XX_SP_FS_MRT_REG(i, .dword = iview->SP_FS_MRT_REG));

      tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_LO(i), 3);
      tu_cs_image_flag_ref(cs, iview, 0);
   }

   tu_cs_emit_regs(cs,
                   A6XX_RB_SRGB_CNTL(.dword = subpass->srgb_cntl));
   tu_cs_emit_regs(cs,
                   A6XX_SP_SRGB_CNTL(.dword = subpass->srgb_cntl));

   tu_cs_emit_regs(cs, A6XX_GRAS_MAX_LAYER_INDEX(fb->layers - 1));
}
void
tu6_emit_msaa(struct tu_cs *cs, VkSampleCountFlagBits vk_samples)
{
   const enum a3xx_msaa_samples samples = tu_msaa_samples(vk_samples);
   bool msaa_disable = samples == MSAA_ONE;

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_RAS_MSAA_CNTL(samples),
                   A6XX_SP_TP_DEST_MSAA_CNTL(.samples = samples,
                                             .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_RAS_MSAA_CNTL(samples),
                   A6XX_GRAS_DEST_MSAA_CNTL(.samples = samples,
                                            .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_RB_RAS_MSAA_CNTL(samples),
                   A6XX_RB_DEST_MSAA_CNTL(.samples = samples,
                                          .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_RB_MSAA_CNTL(samples));
}
static void
tu6_emit_bin_size(struct tu_cs *cs,
                  uint32_t bin_w, uint32_t bin_h, uint32_t flags)
{
   tu_cs_emit_regs(cs,
                   A6XX_GRAS_BIN_CONTROL(.binw = bin_w,
                                         .binh = bin_h,
                                         .dword = flags));

   tu_cs_emit_regs(cs,
                   A6XX_RB_BIN_CONTROL(.binw = bin_w,
                                       .binh = bin_h,
                                       .dword = flags));

   /* no flag for RB_BIN_CONTROL2... */
   tu_cs_emit_regs(cs,
                   A6XX_RB_BIN_CONTROL2(.binw = bin_w,
                                        .binh = bin_h));
}
static void
tu6_emit_render_cntl(struct tu_cmd_buffer *cmd,
                     const struct tu_subpass *subpass,
                     struct tu_cs *cs,
                     bool binning)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   uint32_t cntl = 0;
   cntl |= A6XX_RB_RENDER_CNTL_UNK4;
   if (binning) {
      cntl |= A6XX_RB_RENDER_CNTL_BINNING;
   } else {
      uint32_t mrts_ubwc_enable = 0;
      for (uint32_t i = 0; i < subpass->color_count; ++i) {
         uint32_t a = subpass->color_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         const struct tu_image_view *iview = fb->attachments[a].attachment;
         if (iview->ubwc_enabled)
            mrts_ubwc_enable |= 1 << i;
      }

      cntl |= A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable);

      const uint32_t a = subpass->depth_stencil_attachment.attachment;
      if (a != VK_ATTACHMENT_UNUSED) {
         const struct tu_image_view *iview = fb->attachments[a].attachment;
         if (iview->ubwc_enabled)
            cntl |= A6XX_RB_RENDER_CNTL_FLAG_DEPTH;
      }

      /* In the !binning case, we need to set RB_RENDER_CNTL in the draw_cs
       * in order to set it correctly for the different subpasses. However,
       * that means the packets we're emitting also happen during binning. So
       * we need to guard the write on !BINNING at CP execution time.
       */
      tu_cs_reserve(cs, 3 + 4);
      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
      tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
                     CP_COND_REG_EXEC_0_GMEM | CP_COND_REG_EXEC_0_SYSMEM);
      tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(4));
   }

   tu_cs_emit_pkt7(cs, CP_REG_WRITE, 3);
   tu_cs_emit(cs, CP_REG_WRITE_0_TRACKER(TRACK_RENDER_CNTL));
   tu_cs_emit(cs, REG_A6XX_RB_RENDER_CNTL);
   tu_cs_emit(cs, cntl);
}
static void
tu6_emit_blit_scissor(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool align)
{
   const VkRect2D *render_area = &cmd->state.render_area;
   uint32_t x1 = render_area->offset.x;
   uint32_t y1 = render_area->offset.y;
   uint32_t x2 = x1 + render_area->extent.width - 1;
   uint32_t y2 = y1 + render_area->extent.height - 1;

   if (align) {
      x1 = x1 & ~(GMEM_ALIGN_W - 1);
      y1 = y1 & ~(GMEM_ALIGN_H - 1);
      x2 = ALIGN_POT(x2 + 1, GMEM_ALIGN_W) - 1;
      y2 = ALIGN_POT(y2 + 1, GMEM_ALIGN_H) - 1;
   }

   tu_cs_emit_regs(cs,
                   A6XX_RB_BLIT_SCISSOR_TL(.x = x1, .y = y1),
                   A6XX_RB_BLIT_SCISSOR_BR(.x = x2, .y = y2));
}
void
tu6_emit_window_scissor(struct tu_cs *cs,
                        uint32_t x1, uint32_t y1,
                        uint32_t x2, uint32_t y2)
{
   tu_cs_emit_regs(cs,
                   A6XX_GRAS_SC_WINDOW_SCISSOR_TL(.x = x1, .y = y1),
                   A6XX_GRAS_SC_WINDOW_SCISSOR_BR(.x = x2, .y = y2));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_RESOLVE_CNTL_1(.x = x1, .y = y1),
                   A6XX_GRAS_RESOLVE_CNTL_2(.x = x2, .y = y2));
}
void
tu6_emit_window_offset(struct tu_cs *cs, uint32_t x1, uint32_t y1)
{
   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET2(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_SP_WINDOW_OFFSET(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_WINDOW_OFFSET(.x = x1, .y = y1));
}
void
tu_cs_emit_draw_state(struct tu_cs *cs, uint32_t id, struct tu_draw_state state)
{
   uint32_t enable_mask;
   switch (id) {
   case TU_DRAW_STATE_PROGRAM:
   case TU_DRAW_STATE_VI:
   case TU_DRAW_STATE_FS_CONST:
   /* The blob seems to not enable this (DESC_SETS_LOAD) for binning, even
    * when resources would actually be used in the binning shader.
    * Presumably the overhead of prefetching the resources isn't
    * worth it.
    */
   case TU_DRAW_STATE_DESC_SETS_LOAD:
      enable_mask = CP_SET_DRAW_STATE__0_GMEM |
                    CP_SET_DRAW_STATE__0_SYSMEM;
      break;
   case TU_DRAW_STATE_PROGRAM_BINNING:
   case TU_DRAW_STATE_VI_BINNING:
      enable_mask = CP_SET_DRAW_STATE__0_BINNING;
      break;
   case TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM:
      enable_mask = CP_SET_DRAW_STATE__0_GMEM;
      break;
   case TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM:
      enable_mask = CP_SET_DRAW_STATE__0_SYSMEM;
      break;
   default:
      enable_mask = CP_SET_DRAW_STATE__0_GMEM |
                    CP_SET_DRAW_STATE__0_SYSMEM |
                    CP_SET_DRAW_STATE__0_BINNING;
      break;
   }

   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(state.size) |
                  enable_mask |
                  CP_SET_DRAW_STATE__0_GROUP_ID(id) |
                  COND(!state.size, CP_SET_DRAW_STATE__0_DISABLE));
   tu_cs_emit_qw(cs, state.iova);
}
/* note: get rid of this eventually */
void
tu_cs_emit_sds_ib(struct tu_cs *cs, uint32_t id, struct tu_cs_entry entry)
{
   tu_cs_emit_draw_state(cs, id, (struct tu_draw_state) {
      .iova = entry.size ? entry.bo->iova + entry.offset : 0,
      .size = entry.size / 4,
   });
}
static bool
use_hw_binning(struct tu_cmd_buffer *cmd)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   /* XFB commands are emitted for BINNING || SYSMEM, which makes it
    * incompatible with non-hw binning GMEM rendering. this is required
    * because some of the XFB commands need to only be executed once.
    */
   if (cmd->state.xfb_used)
      return true;

   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_NOBIN))
      return false;

   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN))
      return true;

   return (fb->tile_count.width * fb->tile_count.height) > 2;
}
static bool
use_sysmem_rendering(struct tu_cmd_buffer *cmd)
{
   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_SYSMEM))
      return true;

   /* can't fit attachments into gmem */
   if (!cmd->state.pass->gmem_pixels)
      return true;

   if (cmd->state.framebuffer->layers > 1)
      return true;

   return false;
}
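
/* Taken together, the two helpers above pick the rendering mode: sysmem is
 * forced by TU_DEBUG_SYSMEM, by attachments that don't fit in GMEM, or by
 * layered framebuffers; otherwise GMEM is used, with the hw binning pass
 * enabled when the renderpass covers more than two tiles (and always when
 * XFB is used, since the XFB commands are predicated on BINNING || SYSMEM).
 */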
static void
tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     uint32_t tx, uint32_t ty, uint32_t pipe, uint32_t slot)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_YIELD));

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));

   const uint32_t x1 = fb->tile0.width * tx;
   const uint32_t y1 = fb->tile0.height * ty;
   const uint32_t x2 = x1 + fb->tile0.width - 1;
   const uint32_t y2 = y1 + fb->tile0.height - 1;
   tu6_emit_window_scissor(cs, x1, y1, x2, y2);
   tu6_emit_window_offset(cs, x1, y1);

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_OVERRIDE(.so_disable = false));

   if (use_hw_binning(cmd)) {
      tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5_OFFSET, 4);
      tu_cs_emit(cs, fb->pipe_sizes[pipe] |
                     CP_SET_BIN_DATA5_0_VSC_N(slot));
      tu_cs_emit(cs, pipe * cmd->vsc_draw_strm_pitch);
      tu_cs_emit(cs, pipe * 4);
      tu_cs_emit(cs, pipe * cmd->vsc_prim_strm_pitch);

      tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);
   } else {
      tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
      tu_cs_emit(cs, 0x1);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);
   }
}
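
/* In the hw binning path above, CP_SET_BIN_DATA5_OFFSET points the CP at
 * this tile's visibility data: each VSC pipe owns a fixed-pitch slice of the
 * draw and primitive streams, so the offsets are just pipe * pitch (or
 * pipe * 4 for the per-pipe size words), and VSC_N selects the slot, i.e.
 * the tile's index within its pipe.
 */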
static void
tu6_emit_sysmem_resolve(struct tu_cmd_buffer *cmd,
                        struct tu_cs *cs,
                        uint32_t a,
                        uint32_t gmem_a)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   struct tu_image_view *dst = fb->attachments[a].attachment;
   struct tu_image_view *src = fb->attachments[gmem_a].attachment;

   tu_resolve_sysmem(cmd, cs, src, dst, fb->layers, &cmd->state.render_area);
}
static void
tu6_emit_sysmem_resolves(struct tu_cmd_buffer *cmd,
                         struct tu_cs *cs,
                         const struct tu_subpass *subpass)
{
   if (subpass->resolve_attachments) {
      /* From the documentation for vkCmdNextSubpass, section 7.4 "Render Pass
       * Commands":
       *
       *    End-of-subpass multisample resolves are treated as color
       *    attachment writes for the purposes of synchronization. That is,
       *    they are considered to execute in the
       *    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT pipeline stage and
       *    their writes are synchronized with
       *    VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT. Synchronization between
       *    rendering within a subpass and any resolve operations at the end
       *    of the subpass occurs automatically, without need for explicit
       *    dependencies or pipeline barriers. However, if the resolve
       *    attachment is also used in a different subpass, an explicit
       *    dependency is needed.
       *
       * We use the CP_BLIT path for sysmem resolves, which is really a
       * transfer command, so we have to manually flush similar to the gmem
       * resolve case. However, a flush afterwards isn't needed because of the
       * last sentence and the fact that we're in sysmem mode.
       */
      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);

      /* Wait for the flushes to land before using the 2D engine */
      tu_cs_emit_wfi(cs);

      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         tu6_emit_sysmem_resolve(cmd, cs, a,
                                 subpass->color_attachments[i].attachment);
      }
   }
}
static void
tu6_emit_tile_store(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_render_pass *pass = cmd->state.pass;
   const struct tu_subpass *subpass = &pass->subpasses[pass->subpass_count - 1];

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE));

   tu6_emit_blit_scissor(cmd, cs, true);

   for (uint32_t a = 0; a < pass->attachment_count; ++a) {
      if (pass->attachments[a].gmem_offset >= 0)
         tu_store_gmem_attachment(cmd, cs, a, a);
   }

   if (subpass->resolve_attachments) {
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a != VK_ATTACHMENT_UNUSED)
            tu_store_gmem_attachment(cmd, cs, a,
                                     subpass->color_attachments[i].attachment);
      }
   }
}
static void
tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_device *dev = cmd->device;
   const struct tu_physical_device *phys_dev = dev->physical_device;

   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);

   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 0xfffff);

   tu_cs_emit_regs(cs,
                   A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_bypass));
   cmd->state.ccu_state = TU_CMD_CCU_SYSMEM;
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE0F, 0x3f);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B605, 0x44);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B600, 0x100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE00, 0x80);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE01, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9600, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8600, 0x880);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE04, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE03, 0x00000410);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_IBO_COUNT, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B182, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BB11, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A982, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A9A8, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AB00, 0x5);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_GS_SIV_CNTL, 0x0000ffff);

   /* TODO: set A6XX_VFD_ADD_OFFSET_INSTANCE and fix ir3 to avoid adding base instance */
   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x1f);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_SRGB_CNTL, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8110, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL0, 0x401);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL1, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8818, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8819, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881A, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881C, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881D, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881E, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_88F0, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9101, 0xffff00);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9107, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9236,
                        A6XX_VPC_UNKNOWN_9236_POINT_COORD_INVERT(0));
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_SO_OVERRIDE,
                        A6XX_VPC_SO_OVERRIDE_SO_DISABLE);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9801, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9980, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9990, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9B07, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A81B, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B183, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8099, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_809B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A0, 2);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80AF, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9211, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9602, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9E72, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B309, 0x000000a2);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_MODE_CNTL, 0x00000000);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A008, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x0000001f);

   /* we don't use this yet.. probably best to disable.. */
   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_HS_CTRL_REG0(0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_GS_CTRL_REG0(0));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_CNTL(0));

   tu_cs_emit_regs(cs,
                   A6XX_RB_LRZ_CNTL(0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(.bo = &dev->global_bo,
                                                     .bo_offset = gb_offset(border_color)));
   tu_cs_emit_regs(cs,
                   A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(.bo = &dev->global_bo,
                                                        .bo_offset = gb_offset(border_color)));

   /* use vsc pitches from the largest values used so far with this device
    * if there hasn't been overflow, there will already be a scratch bo
    * allocated for these sizes
    *
    * if overflow is detected, the stream size is increased by 2x
    */
   mtx_lock(&dev->vsc_pitch_mtx);

   struct tu6_global *global = dev->global_bo.map;

   uint32_t vsc_draw_overflow = global->vsc_draw_overflow;
   uint32_t vsc_prim_overflow = global->vsc_prim_overflow;

   if (vsc_draw_overflow >= dev->vsc_draw_strm_pitch)
      dev->vsc_draw_strm_pitch = (dev->vsc_draw_strm_pitch - VSC_PAD) * 2 + VSC_PAD;

   if (vsc_prim_overflow >= dev->vsc_prim_strm_pitch)
      dev->vsc_prim_strm_pitch = (dev->vsc_prim_strm_pitch - VSC_PAD) * 2 + VSC_PAD;

   cmd->vsc_prim_strm_pitch = dev->vsc_prim_strm_pitch;
   cmd->vsc_draw_strm_pitch = dev->vsc_draw_strm_pitch;

   mtx_unlock(&dev->vsc_pitch_mtx);

   struct tu_bo *vsc_bo;
   uint32_t size0 = cmd->vsc_prim_strm_pitch * MAX_VSC_PIPES +
                    cmd->vsc_draw_strm_pitch * MAX_VSC_PIPES;

   tu_get_scratch_bo(dev, size0 + MAX_VSC_PIPES * 4, &vsc_bo);

   tu_cs_emit_regs(cs,
                   A6XX_VSC_DRAW_STRM_SIZE_ADDRESS(.bo = vsc_bo, .bo_offset = size0));
   tu_cs_emit_regs(cs,
                   A6XX_VSC_PRIM_STRM_ADDRESS(.bo = vsc_bo));
   tu_cs_emit_regs(cs,
                   A6XX_VSC_DRAW_STRM_ADDRESS(.bo = vsc_bo,
                                              .bo_offset = cmd->vsc_prim_strm_pitch * MAX_VSC_PIPES));

   tu_bo_list_add(&cmd->bo_list, vsc_bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);

   tu_cs_sanity_check(cs);
}
static void
update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   tu_cs_emit_regs(cs,
                   A6XX_VSC_BIN_SIZE(.width = fb->tile0.width,
                                     .height = fb->tile0.height));

   tu_cs_emit_regs(cs,
                   A6XX_VSC_BIN_COUNT(.nx = fb->tile_count.width,
                                      .ny = fb->tile_count.height));

   tu_cs_emit_pkt4(cs, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
   tu_cs_emit_array(cs, fb->pipe_config, 32);

   tu_cs_emit_regs(cs,
                   A6XX_VSC_PRIM_STRM_PITCH(cmd->vsc_prim_strm_pitch),
                   A6XX_VSC_PRIM_STRM_LIMIT(cmd->vsc_prim_strm_pitch - VSC_PAD));

   tu_cs_emit_regs(cs,
                   A6XX_VSC_DRAW_STRM_PITCH(cmd->vsc_draw_strm_pitch),
                   A6XX_VSC_DRAW_STRM_LIMIT(cmd->vsc_draw_strm_pitch - VSC_PAD));
}
static void
emit_vsc_overflow_test(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   const uint32_t used_pipe_count =
      fb->pipe_count.width * fb->pipe_count.height;

   for (int i = 0; i < used_pipe_count; i++) {
      tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
      tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
                     CP_COND_WRITE5_0_WRITE_MEMORY);
      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_DRAW_STRM_SIZE_REG(i)));
      tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_draw_strm_pitch - VSC_PAD));
      tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
      tu_cs_emit_qw(cs, global_iova(cmd, vsc_draw_overflow));
      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(cmd->vsc_draw_strm_pitch));

      tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
      tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
                     CP_COND_WRITE5_0_WRITE_MEMORY);
      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_PRIM_STRM_SIZE_REG(i)));
      tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_prim_strm_pitch - VSC_PAD));
      tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
      tu_cs_emit_qw(cs, global_iova(cmd, vsc_prim_overflow));
      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(cmd->vsc_prim_strm_pitch));
   }

   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
}
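
/* The CP_COND_WRITE5 pairs above are the GPU-side overflow check: if a
 * pipe's recorded stream size reaches the usable pitch (pitch - VSC_PAD),
 * the offending pitch value is written to vsc_draw_overflow /
 * vsc_prim_overflow in the global bo, which tu6_init_hw() later reads back
 * to double the pitch for subsequent command buffers.
 */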
static void
tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_physical_device *phys_dev = cmd->device->physical_device;
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   tu6_emit_window_scissor(cs, 0, 0, fb->width - 1, fb->height - 1);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_wfi(cs);

   tu_cs_emit_regs(cs,
                   A6XX_VFD_MODE_CNTL(.binning_pass = true));

   update_vsc_pipe(cmd, cs);

   tu_cs_emit_regs(cs,
                   A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));

   tu_cs_emit_regs(cs,
                   A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
   tu_cs_emit(cs, UNK_2C);

   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET(.x = 0, .y = 0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_WINDOW_OFFSET(.x = 0, .y = 0));

   /* emit IB to binning drawcmds: */
   tu_cs_emit_call(cs, &cmd->draw_cs);

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
   tu_cs_emit(cs, UNK_2D);

   /* This flush is probably required because the VSC, which produces the
    * visibility stream, is a client of UCHE, whereas the CP needs to read the
    * visibility stream (without caching) to do draw skipping. The
    * WFI+WAIT_FOR_ME combination guarantees that the binning commands
    * submitted are finished before reading the VSC regs (in
    * emit_vsc_overflow_test) or the VSC_DATA buffer directly (implicitly as
    * part of draws).
    */
   tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS);

   tu_cs_emit_wfi(cs);

   tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

   emit_vsc_overflow_test(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x0);
}
static void
tu_emit_input_attachments(struct tu_cmd_buffer *cmd,
                          const struct tu_subpass *subpass,
                          struct tu_cs_entry *ib,
                          bool gmem)
{
   /* note: we can probably emit input attachments just once for the whole
    * renderpass, this would avoid emitting both sysmem/gmem versions
    *
    * emit two texture descriptors for each input, as a workaround for
    * d24s8, which can be sampled as both float (depth) and integer (stencil)
    * tu_shader lowers uint input attachment loads to use the 2nd descriptor
    *
    * TODO: a smarter workaround
    */

   if (!subpass->input_count)
      return;

   struct tu_cs_memory texture;
   VkResult result = tu_cs_alloc(&cmd->sub_cs, subpass->input_count * 2,
                                 A6XX_TEX_CONST_DWORDS, &texture);
   assert(result == VK_SUCCESS);

   for (unsigned i = 0; i < subpass->input_count * 2; i++) {
      uint32_t a = subpass->input_attachments[i / 2].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      struct tu_image_view *iview =
         cmd->state.framebuffer->attachments[a].attachment;
      const struct tu_render_pass_attachment *att =
         &cmd->state.pass->attachments[a];
      uint32_t *dst = &texture.map[A6XX_TEX_CONST_DWORDS * i];

      memcpy(dst, iview->descriptor, A6XX_TEX_CONST_DWORDS * 4);

      if (i % 2 == 1 && att->format == VK_FORMAT_D24_UNORM_S8_UINT) {
         /* note this works because spec says fb and input attachments
          * must use identity swizzle
          */
         dst[0] &= ~(A6XX_TEX_CONST_0_FMT__MASK |
            A6XX_TEX_CONST_0_SWIZ_X__MASK | A6XX_TEX_CONST_0_SWIZ_Y__MASK |
            A6XX_TEX_CONST_0_SWIZ_Z__MASK | A6XX_TEX_CONST_0_SWIZ_W__MASK);
         dst[0] |= A6XX_TEX_CONST_0_FMT(FMT6_S8Z24_UINT) |
            A6XX_TEX_CONST_0_SWIZ_X(A6XX_TEX_Y) |
            A6XX_TEX_CONST_0_SWIZ_Y(A6XX_TEX_ZERO) |
            A6XX_TEX_CONST_0_SWIZ_Z(A6XX_TEX_ZERO) |
            A6XX_TEX_CONST_0_SWIZ_W(A6XX_TEX_ONE);
      }

      if (!gmem)
         continue;

      /* patched for gmem */
      dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
      dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
      dst[2] =
         A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
         A6XX_TEX_CONST_2_PITCH(cmd->state.framebuffer->tile0.width * att->cpp);
      dst[3] = 0;
      dst[4] = cmd->device->physical_device->gmem_base + att->gmem_offset;
      dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
      for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
         dst[i] = 0;
   }

   struct tu_cs cs;
   tu_cs_begin_sub_stream(&cmd->sub_cs, 9, &cs);

   tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_FRAG, 3);
   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
                   CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                   CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                   CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_TEX) |
                   CP_LOAD_STATE6_0_NUM_UNIT(subpass->input_count * 2));
   tu_cs_emit_qw(&cs, texture.iova);

   tu_cs_emit_pkt4(&cs, REG_A6XX_SP_FS_TEX_CONST_LO, 2);
   tu_cs_emit_qw(&cs, texture.iova);

   tu_cs_emit_regs(&cs, A6XX_SP_FS_TEX_COUNT(subpass->input_count * 2));

   *ib = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
}
void
tu_set_input_attachments(struct tu_cmd_buffer *cmd, const struct tu_subpass *subpass)
{
   struct tu_cs *cs = &cmd->draw_cs;

   tu_emit_input_attachments(cmd, subpass, &cmd->state.ia_gmem_ib, true);
   tu_emit_input_attachments(cmd, subpass, &cmd->state.ia_sysmem_ib, false);

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 6);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM, cmd->state.ia_gmem_ib);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM, cmd->state.ia_sysmem_ib);
}
static void
tu_emit_renderpass_begin(struct tu_cmd_buffer *cmd,
                         const VkRenderPassBeginInfo *info)
{
   struct tu_cs *cs = &cmd->draw_cs;

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);

   tu6_emit_blit_scissor(cmd, cs, true);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu_load_gmem_attachment(cmd, cs, i, false);

   tu6_emit_blit_scissor(cmd, cs, false);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu_clear_gmem_attachment(cmd, cs, i, info);

   tu_cond_exec_end(cs);

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu_clear_sysmem_attachment(cmd, cs, i, info);

   tu_cond_exec_end(cs);
}
static void
tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   assert(fb->width > 0 && fb->height > 0);
   tu6_emit_window_scissor(cs, 0, 0, fb->width - 1, fb->height - 1);
   tu6_emit_window_offset(cs, 0, 0);

   tu6_emit_bin_size(cs, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */

   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_SYSMEM);

   /* enable stream-out, with sysmem there is only one pass: */
   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_OVERRIDE(.so_disable = false));

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_sanity_check(cs);
}
static void
tu6_sysmem_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   /* Do any resolves of the last subpass. These are handled in the
    * tile_store_ib in the gmem path.
    */
   tu6_emit_sysmem_resolves(cmd, cs, cmd->state.subpass);

   tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);

   tu_cs_sanity_check(cs);
}
static void
tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_physical_device *phys_dev = cmd->device->physical_device;

   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_GMEM);

   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   if (use_hw_binning(cmd)) {
      /* enable stream-out during binning pass: */
      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=false));

      tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height,
                        A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);

      tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, true);

      tu6_emit_binning_pass(cmd, cs);

      /* and disable stream-out for draw pass: */
      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=true));

      tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height,
                        A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);

      tu_cs_emit_regs(cs,
                      A6XX_VFD_MODE_CNTL(0));

      tu_cs_emit_regs(cs, A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));

      tu_cs_emit_regs(cs, A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));

      tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
      tu_cs_emit(cs, 0x1);
   } else {
      /* no binning pass, so enable stream-out for draw pass: */
      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=false));

      tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height, 0x6000000);
   }

   tu_cs_sanity_check(cs);
}
static void
tu6_render_tile(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu_cs_emit_call(cs, &cmd->draw_cs);

   if (use_hw_binning(cmd)) {
      tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
      tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
   }

   tu_cs_emit_ib(cs, &cmd->state.tile_store_ib);

   tu_cs_sanity_check(cs);
}
static void
tu6_tile_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_CNTL(0));

   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);

   tu6_emit_event_write(cmd, cs, PC_CCU_RESOLVE_TS);

   tu_cs_sanity_check(cs);
}
static void
tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   tu6_tile_render_begin(cmd, &cmd->cs);

   uint32_t pipe = 0;
   for (uint32_t py = 0; py < fb->pipe_count.height; py++) {
      for (uint32_t px = 0; px < fb->pipe_count.width; px++, pipe++) {
         uint32_t tx1 = px * fb->pipe0.width;
         uint32_t ty1 = py * fb->pipe0.height;
         uint32_t tx2 = MIN2(tx1 + fb->pipe0.width, fb->tile_count.width);
         uint32_t ty2 = MIN2(ty1 + fb->pipe0.height, fb->tile_count.height);
         uint32_t slot = 0;
         for (uint32_t ty = ty1; ty < ty2; ty++) {
            for (uint32_t tx = tx1; tx < tx2; tx++, slot++) {
               tu6_emit_tile_select(cmd, &cmd->cs, tx, ty, pipe, slot);
               tu6_render_tile(cmd, &cmd->cs);
            }
         }
      }
   }

   tu6_tile_render_end(cmd, &cmd->cs);
}
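
/* Tile walk order: tiles are visited pipe by pipe, matching the VSC pipe
 * layout programmed by update_vsc_pipe(); within a pipe, `slot` counts tiles
 * in row-major order, which is what CP_SET_BIN_DATA5_0_VSC_N expects in
 * tu6_emit_tile_select().
 */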
static void
tu_cmd_render_sysmem(struct tu_cmd_buffer *cmd)
{
   tu6_sysmem_render_begin(cmd, &cmd->cs);

   tu_cs_emit_call(&cmd->cs, &cmd->draw_cs);

   tu6_sysmem_render_end(cmd, &cmd->cs);
}
static void
tu_cmd_prepare_tile_store_ib(struct tu_cmd_buffer *cmd)
{
   const uint32_t tile_store_space = 11 + (35 * 2) * cmd->state.pass->attachment_count;
   struct tu_cs sub_cs;

   VkResult result =
      tu_cs_begin_sub_stream(&cmd->sub_cs, tile_store_space, &sub_cs);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   /* emit to tile-store sub_cs */
   tu6_emit_tile_store(cmd, &sub_cs);

   cmd->state.tile_store_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
}
static VkResult
tu_create_cmd_buffer(struct tu_device *device,
                     struct tu_cmd_pool *pool,
                     VkCommandBufferLevel level,
                     VkCommandBuffer *pCommandBuffer)
{
   struct tu_cmd_buffer *cmd_buffer;
   cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
      cmd_buffer->queue_family_index = pool->queue_family_index;
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
      cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
   }

   tu_bo_list_init(&cmd_buffer->bo_list);
   tu_cs_init(&cmd_buffer->cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->draw_cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->draw_epilogue_cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->sub_cs, device, TU_CS_MODE_SUB_STREAM, 2048);

   *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);

   list_inithead(&cmd_buffer->upload.list);

   return VK_SUCCESS;
}
static void
tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   tu_cs_finish(&cmd_buffer->cs);
   tu_cs_finish(&cmd_buffer->draw_cs);
   tu_cs_finish(&cmd_buffer->draw_epilogue_cs);
   tu_cs_finish(&cmd_buffer->sub_cs);

   tu_bo_list_destroy(&cmd_buffer->bo_list);
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
static VkResult
tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
{
   cmd_buffer->record_result = VK_SUCCESS;

   tu_bo_list_reset(&cmd_buffer->bo_list);
   tu_cs_reset(&cmd_buffer->cs);
   tu_cs_reset(&cmd_buffer->draw_cs);
   tu_cs_reset(&cmd_buffer->draw_epilogue_cs);
   tu_cs_reset(&cmd_buffer->sub_cs);

   for (unsigned i = 0; i < MAX_BIND_POINTS; i++)
      memset(&cmd_buffer->descriptors[i].sets, 0, sizeof(cmd_buffer->descriptors[i].sets));

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;

   return cmd_buffer->record_result;
}
VkResult
tu_AllocateCommandBuffers(VkDevice _device,
                          const VkCommandBufferAllocateInfo *pAllocateInfo,
                          VkCommandBuffer *pCommandBuffers)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {

      if (!list_is_empty(&pool->free_cmd_buffers)) {
         struct tu_cmd_buffer *cmd_buffer = list_first_entry(
            &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);

         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

         result = tu_reset_cmd_buffer(cmd_buffer);
         cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
         cmd_buffer->level = pAllocateInfo->level;

         pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
      } else {
         result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                       &pCommandBuffers[i]);
      }
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
                            pCommandBuffers);

      /* From the Vulkan 1.0.66 spec:
       *
       * "vkAllocateCommandBuffers can be used to create multiple
       *  command buffers. If the creation of any of those command
       *  buffers fails, the implementation must destroy all
       *  successfully created command buffer objects from this
       *  command, set all entries of the pCommandBuffers array to
       *  NULL and return the error."
       */
      memset(pCommandBuffers, 0,
             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
   }

   return result;
}
void
tu_FreeCommandBuffers(VkDevice device,
                      VkCommandPool commandPool,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (cmd_buffer) {
         if (cmd_buffer->pool) {
            list_del(&cmd_buffer->pool_link);
            list_addtail(&cmd_buffer->pool_link,
                         &cmd_buffer->pool->free_cmd_buffers);
         } else
            tu_cmd_buffer_destroy(cmd_buffer);
      }
   }
}
VkResult
tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
                      VkCommandBufferResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   return tu_reset_cmd_buffer(cmd_buffer);
}
/* Initialize the cache, assuming all necessary flushes have happened but *not*
 * invalidations.
 */
static void
tu_cache_init(struct tu_cache_state *cache)
{
   cache->flush_bits = 0;
   cache->pending_flush_bits = TU_CMD_FLAG_ALL_INVALIDATE;
}
VkResult
tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
                      const VkCommandBufferBeginInfo *pBeginInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   VkResult result = VK_SUCCESS;

   if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
      /* If the command buffer has already been reset with
       * vkResetCommandBuffer, no need to do it again.
       */
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
   cmd_buffer->state.index_size = 0xff; /* dirty restart index */

   tu_cache_init(&cmd_buffer->state.cache);
   tu_cache_init(&cmd_buffer->state.renderpass_cache);
   cmd_buffer->usage_flags = pBeginInfo->flags;

   tu_cs_begin(&cmd_buffer->cs);
   tu_cs_begin(&cmd_buffer->draw_cs);
   tu_cs_begin(&cmd_buffer->draw_epilogue_cs);

   /* setup initial configuration into command buffer */
   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      switch (cmd_buffer->queue_family_index) {
      case TU_QUEUE_GENERAL:
         tu6_init_hw(cmd_buffer, &cmd_buffer->cs);
         break;
      default:
         break;
      }
   } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
      if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
         assert(pBeginInfo->pInheritanceInfo);
         cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
         cmd_buffer->state.subpass =
            &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
      }

      /* When executing in the middle of another command buffer, the CCU
       * state is unknown.
       */
      cmd_buffer->state.ccu_state = TU_CMD_CCU_UNKNOWN;
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;

   return VK_SUCCESS;
}
/* Sets vertex buffers to HW binding points.  We emit VBs in SDS (so that bin
 * rendering can skip over unused state), so we need to collect all the
 * bindings together into a single state emit at draw time.
 */
void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                        uint32_t firstBinding,
                        uint32_t bindingCount,
                        const VkBuffer *pBuffers,
                        const VkDeviceSize *pOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   assert(firstBinding + bindingCount <= MAX_VBS);

   for (uint32_t i = 0; i < bindingCount; i++) {
      struct tu_buffer *buf = tu_buffer_from_handle(pBuffers[i]);

      cmd->state.vb.buffers[firstBinding + i] = buf;
      cmd->state.vb.offsets[firstBinding + i] = pOffsets[i];

      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
   }

   cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
}
void
tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
                      VkBuffer buffer,
                      VkDeviceSize offset,
                      VkIndexType indexType)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buf, buffer);

   uint32_t index_size, index_shift, restart_index;

   switch (indexType) {
   case VK_INDEX_TYPE_UINT16:
      index_size = INDEX4_SIZE_16_BIT;
      index_shift = 1;
      restart_index = 0xffff;
      break;
   case VK_INDEX_TYPE_UINT32:
      index_size = INDEX4_SIZE_32_BIT;
      index_shift = 2;
      restart_index = 0xffffffff;
      break;
   case VK_INDEX_TYPE_UINT8_EXT:
      index_size = INDEX4_SIZE_8_BIT;
      index_shift = 0;
      restart_index = 0xff;
      break;
   default:
      unreachable("invalid VkIndexType");
   }

   /* initialize/update the restart index */
   if (cmd->state.index_size != index_size)
      tu_cs_emit_regs(&cmd->draw_cs, A6XX_PC_RESTART_INDEX(restart_index));

   assert(buf->size >= offset);

   cmd->state.index_va = buf->bo->iova + buf->bo_offset + offset;
   cmd->state.max_index_count = (buf->size - offset) >> index_shift;
   cmd->state.index_size = index_size;

   tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
}
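
/* max_index_count is just the remaining buffer size converted to indices:
 * e.g. binding a 1024-byte buffer at offset 16 with VK_INDEX_TYPE_UINT16
 * gives (1024 - 16) >> 1 = 504 addressable indices.
 */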
void
tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                         VkPipelineBindPoint pipelineBindPoint,
                         VkPipelineLayout _layout,
                         uint32_t firstSet,
                         uint32_t descriptorSetCount,
                         const VkDescriptorSet *pDescriptorSets,
                         uint32_t dynamicOffsetCount,
                         const uint32_t *pDynamicOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
   unsigned dyn_idx = 0;

   struct tu_descriptor_state *descriptors_state =
      tu_get_descriptors_state(cmd, pipelineBindPoint);

   for (unsigned i = 0; i < descriptorSetCount; ++i) {
      unsigned idx = i + firstSet;
      TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);

      descriptors_state->sets[idx] = set;

      for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
         /* update the contents of the dynamic descriptor set */
         unsigned src_idx = j;
         unsigned dst_idx = j + layout->set[idx].dynamic_offset_start;
         assert(dyn_idx < dynamicOffsetCount);

         uint32_t *dst =
            &descriptors_state->dynamic_descriptors[dst_idx * A6XX_TEX_CONST_DWORDS];
         uint32_t *src =
            &set->dynamic_descriptors[src_idx * A6XX_TEX_CONST_DWORDS];
         uint32_t offset = pDynamicOffsets[dyn_idx];

         /* Patch the storage/uniform descriptors right away. */
         if (layout->set[idx].layout->dynamic_ubo & (1 << j)) {
            /* Note: we can assume here that the addition won't roll over and
             * change the SIZE field.
             */
            uint64_t va = src[0] | ((uint64_t)src[1] << 32);
            va += offset;
            dst[0] = va;
            dst[1] = va >> 32;
         } else {
            memcpy(dst, src, A6XX_TEX_CONST_DWORDS * 4);
            /* Note: A6XX_IBO_5_DEPTH is always 0 */
            uint64_t va = dst[4] | ((uint64_t)dst[5] << 32);
            va += offset;
            dst[4] = va;
            dst[5] = va >> 32;
         }
      }

      for (unsigned j = 0; j < set->layout->buffer_count; ++j) {
         if (set->buffers[j]) {
            tu_bo_list_add(&cmd->bo_list, set->buffers[j],
                           MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
         }
      }

      if (set->size > 0) {
         tu_bo_list_add(&cmd->bo_list, &set->pool->bo,
                        MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
      }
   }
   assert(dyn_idx == dynamicOffsetCount);

   uint32_t sp_bindless_base_reg, hlsq_bindless_base_reg, hlsq_update_value;
   uint64_t addr[MAX_SETS + 1] = {};

   for (uint32_t i = 0; i < MAX_SETS; i++) {
      struct tu_descriptor_set *set = descriptors_state->sets[i];
      if (set)
         addr[i] = set->va | 3;
   }

   if (layout->dynamic_offset_count) {
      /* allocate and fill out dynamic descriptor set */
      struct tu_cs_memory dynamic_desc_set;
      VkResult result = tu_cs_alloc(&cmd->sub_cs, layout->dynamic_offset_count,
                                    A6XX_TEX_CONST_DWORDS, &dynamic_desc_set);
      assert(result == VK_SUCCESS);

      memcpy(dynamic_desc_set.map, descriptors_state->dynamic_descriptors,
             layout->dynamic_offset_count * A6XX_TEX_CONST_DWORDS * 4);
      addr[MAX_SETS] = dynamic_desc_set.iova | 3;
   }

   if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
      sp_bindless_base_reg = REG_A6XX_SP_BINDLESS_BASE(0);
      hlsq_bindless_base_reg = REG_A6XX_HLSQ_BINDLESS_BASE(0);
      hlsq_update_value = 0x7c000;

      cmd->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS | TU_CMD_DIRTY_SHADER_CONSTS;
   } else {
      assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE);

      sp_bindless_base_reg = REG_A6XX_SP_CS_BINDLESS_BASE(0);
      hlsq_bindless_base_reg = REG_A6XX_HLSQ_CS_BINDLESS_BASE(0);
      hlsq_update_value = 0x3e00;

      cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS;
   }

   struct tu_cs cs;
   tu_cs_begin_sub_stream(&cmd->sub_cs, 24, &cs);

   tu_cs_emit_pkt4(&cs, sp_bindless_base_reg, 10);
   tu_cs_emit_array(&cs, (const uint32_t*) addr, 10);
   tu_cs_emit_pkt4(&cs, hlsq_bindless_base_reg, 10);
   tu_cs_emit_array(&cs, (const uint32_t*) addr, 10);
   tu_cs_emit_regs(&cs, A6XX_HLSQ_UPDATE_CNTL(.dword = hlsq_update_value));

   struct tu_cs_entry ib = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
   if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
      tu_cs_emit_pkt7(&cmd->draw_cs, CP_SET_DRAW_STATE, 3);
      tu_cs_emit_sds_ib(&cmd->draw_cs, TU_DRAW_STATE_DESC_SETS, ib);
      cmd->state.desc_sets_ib = ib;
   } else {
      /* note: for compute we could emit directly, instead of a CP_INDIRECT
       * however, the blob uses draw states for compute
       */
      tu_cs_emit_ib(&cmd->cs, &ib);
   }
}
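
/* Dynamic offsets never reach the shader: they are folded into the
 * descriptor itself above by adding the offset to the 64-bit buffer address
 * (dwords 0-1 for dynamic UBOs, dwords 4-5 for the IBO-style SSBO
 * descriptors), and the patched copies are uploaded as an extra set at
 * addr[MAX_SETS].
 */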
void tu_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer,
                                           uint32_t firstBinding,
                                           uint32_t bindingCount,
                                           const VkBuffer *pBuffers,
                                           const VkDeviceSize *pOffsets,
                                           const VkDeviceSize *pSizes)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *cs = &cmd->draw_cs;

   /* using COND_REG_EXEC for xfb commands matches the blob behavior
    * presumably there isn't any benefit using a draw state when the
    * condition is (SYSMEM | BINNING)
    */
   tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
                          CP_COND_REG_EXEC_0_SYSMEM |
                          CP_COND_REG_EXEC_0_BINNING);

   for (uint32_t i = 0; i < bindingCount; i++) {
      TU_FROM_HANDLE(tu_buffer, buf, pBuffers[i]);
      uint64_t iova = buf->bo->iova + pOffsets[i];
      uint32_t size = buf->bo->size - pOffsets[i];
      uint32_t idx = i + firstBinding;

      if (pSizes && pSizes[i] != VK_WHOLE_SIZE)
         size = pSizes[i];

      /* BUFFER_BASE is 32-byte aligned, add remaining offset to BUFFER_OFFSET */
      uint32_t offset = iova & 0x1f;
      iova &= ~(uint64_t) 0x1f;

      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_BASE(idx), 3);
      tu_cs_emit_qw(cs, iova);
      tu_cs_emit(cs, size + offset);

      cmd->state.streamout_offset[idx] = offset;

      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_WRITE);
   }

   tu_cond_exec_end(cs);
}
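
/* Since VPC_SO_BUFFER_BASE must be 32-byte aligned, the low bits of the
 * requested address are stashed in state.streamout_offset[] and applied
 * through VPC_SO_BUFFER_OFFSET in tu_CmdBeginTransformFeedbackEXT() instead.
 */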
void
tu_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer,
                                uint32_t firstCounterBuffer,
                                uint32_t counterBufferCount,
                                const VkBuffer *pCounterBuffers,
                                const VkDeviceSize *pCounterBufferOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *cs = &cmd->draw_cs;

   tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
                          CP_COND_REG_EXEC_0_SYSMEM |
                          CP_COND_REG_EXEC_0_BINNING);

   /* TODO: only update offset for active buffers */
   for (uint32_t i = 0; i < IR3_MAX_SO_BUFFERS; i++)
      tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_OFFSET(i, cmd->state.streamout_offset[i]));

   for (uint32_t i = 0; i < counterBufferCount; i++) {
      uint32_t idx = firstCounterBuffer + i;
      uint32_t offset = cmd->state.streamout_offset[idx];

      if (!pCounterBuffers[i])
         continue;

      TU_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]);

      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);

      tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
      tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(idx)) |
                     CP_MEM_TO_REG_0_UNK31 |
                     CP_MEM_TO_REG_0_CNT(1));
      tu_cs_emit_qw(cs, buf->bo->iova + pCounterBufferOffsets[i]);

      if (offset) {
         tu_cs_emit_pkt7(cs, CP_REG_RMW, 3);
         tu_cs_emit(cs, CP_REG_RMW_0_DST_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(idx)) |
                        CP_REG_RMW_0_SRC1_ADD);
         tu_cs_emit_qw(cs, 0xffffffff);
         tu_cs_emit_qw(cs, offset);
      }
   }

   tu_cond_exec_end(cs);
}
void tu_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer,
                                   uint32_t firstCounterBuffer,
                                   uint32_t counterBufferCount,
                                   const VkBuffer *pCounterBuffers,
                                   const VkDeviceSize *pCounterBufferOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *cs = &cmd->draw_cs;

   tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
                          CP_COND_REG_EXEC_0_SYSMEM |
                          CP_COND_REG_EXEC_0_BINNING);

   /* TODO: only flush buffers that need to be flushed */
   for (uint32_t i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
      /* note: FLUSH_BASE is always the same, so it could go in init_hw()? */
      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_FLUSH_BASE(i), 2);
      tu_cs_emit_qw(cs, global_iova(cmd, flush_base[i]));
      tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i);
   }

   for (uint32_t i = 0; i < counterBufferCount; i++) {
      uint32_t idx = firstCounterBuffer + i;
      uint32_t offset = cmd->state.streamout_offset[idx];

      if (!pCounterBuffers[i])
         continue;

      TU_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]);

      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_WRITE);

      /* VPC_SO_FLUSH_BASE has dwords counter, but counter should be in bytes */
      tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
      tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
                     CP_MEM_TO_REG_0_SHIFT_BY_2 |
                     CP_MEM_TO_REG_0_UNK31 |
                     CP_MEM_TO_REG_0_CNT(1));
      tu_cs_emit_qw(cs, global_iova(cmd, flush_base[idx]));

      if (offset) {
         tu_cs_emit_pkt7(cs, CP_REG_RMW, 3);
         tu_cs_emit(cs, CP_REG_RMW_0_DST_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
                        CP_REG_RMW_0_SRC1_ADD);
         tu_cs_emit_qw(cs, 0xffffffff);
         tu_cs_emit_qw(cs, -offset);
      }

      tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
      tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
                     CP_REG_TO_MEM_0_CNT(1));
      tu_cs_emit_qw(cs, buf->bo->iova + pCounterBufferOffsets[i]);
   }

   tu_cond_exec_end(cs);

   cmd->state.xfb_used = true;
}
void
tu_CmdPushConstants(VkCommandBuffer commandBuffer,
                    VkPipelineLayout layout,
                    VkShaderStageFlags stageFlags,
                    uint32_t offset,
                    uint32_t size,
                    const void *pValues)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   memcpy((void*) cmd->push_constants + offset, pValues, size);
   cmd->state.dirty |= TU_CMD_DIRTY_SHADER_CONSTS;
}
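/* The cache tracking below roughly mirrors Vulkan's availability/visibility
 * model: pending_flush_bits holds flushes that a barrier has made
 * "available" but that haven't been emitted yet, while flush_bits holds
 * what will actually be emitted at the next flush point.
 */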
/* Flush everything which has been made available but we haven't actually
 * flushed yet.
 */
static void
tu_flush_all_pending(struct tu_cache_state *cache)
{
   cache->flush_bits |= cache->pending_flush_bits & TU_CMD_FLAG_ALL_FLUSH;
   cache->pending_flush_bits &= ~TU_CMD_FLAG_ALL_FLUSH;
}
VkResult
tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   /* We currently flush CCU at the end of the command buffer, like
    * what the blob does. There's implicit synchronization around every
    * vkQueueSubmit, but the kernel only flushes the UCHE, and we don't
    * know yet if this command buffer will be the last in the submit so we
    * have to defensively flush everything else.
    *
    * TODO: We could definitely do better than this, since these flushes
    * aren't required by Vulkan, but we'd need kernel support to do that.
    * Ideally, we'd like the kernel to flush everything afterwards, so that we
    * wouldn't have to do any flushes here, and when submitting multiple
    * command buffers there wouldn't be any unnecessary flushes in between.
    */
   if (cmd_buffer->state.pass) {
      tu_flush_all_pending(&cmd_buffer->state.renderpass_cache);
      tu_emit_cache_flush_renderpass(cmd_buffer, &cmd_buffer->draw_cs);
   } else {
      tu_flush_all_pending(&cmd_buffer->state.cache);
      cmd_buffer->state.cache.flush_bits |=
         TU_CMD_FLAG_CCU_FLUSH_COLOR |
         TU_CMD_FLAG_CCU_FLUSH_DEPTH;
      tu_emit_cache_flush(cmd_buffer, &cmd_buffer->cs);
   }

   tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->device->global_bo,
                  MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);

   for (uint32_t i = 0; i < cmd_buffer->draw_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   for (uint32_t i = 0; i < cmd_buffer->draw_epilogue_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_epilogue_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   for (uint32_t i = 0; i < cmd_buffer->sub_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->sub_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   tu_cs_end(&cmd_buffer->cs);
   tu_cs_end(&cmd_buffer->draw_cs);
   tu_cs_end(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;

   return cmd_buffer->record_result;
}
static struct tu_cs
tu_cmd_dynamic_state(struct tu_cmd_buffer *cmd, uint32_t id, uint32_t size)
{
   struct tu_cs cs;
   struct tu_cs_memory memory;

   /* TODO: share this logic with tu_pipeline_static_state */
   tu_cs_alloc(&cmd->sub_cs, size, 1, &memory);
   tu_cs_init_external(&cs, memory.map, memory.map + size);
   tu_cs_begin(&cs);
   tu_cs_reserve_space(&cs, size);

   assert(id < ARRAY_SIZE(cmd->state.dynamic_state));
   cmd->state.dynamic_state[id].iova = memory.iova;
   cmd->state.dynamic_state[id].size = size;

   tu_cs_emit_pkt7(&cmd->draw_cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit_draw_state(&cmd->draw_cs, TU_DRAW_STATE_DYNAMIC + id, cmd->state.dynamic_state[id]);

   return cs;
}
void
tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
                   VkPipelineBindPoint pipelineBindPoint,
                   VkPipeline _pipeline)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);

   for (uint32_t i = 0; i < pipeline->cs.bo_count; i++) {
      tu_bo_list_add(&cmd->bo_list, pipeline->cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) {
      cmd->state.compute_pipeline = pipeline;
      cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_PIPELINE;
      return;
   }

   assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS);

   cmd->state.pipeline = pipeline;
   cmd->state.dirty |= TU_CMD_DIRTY_SHADER_CONSTS;

   struct tu_cs *cs = &cmd->draw_cs;
   uint32_t mask = ~pipeline->dynamic_state_mask & BITFIELD_MASK(TU_DYNAMIC_STATE_COUNT);
   uint32_t i;

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (7 + util_bitcount(mask)));
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state_ib);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state_ib);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI, pipeline->vi.state_ib);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state_ib);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_RAST, pipeline->rast.state_ib);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS, pipeline->ds.state_ib);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_BLEND, pipeline->blend.state_ib);

   for_each_bit(i, mask)
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DYNAMIC + i, pipeline->dynamic_state[i]);

   /* If the new pipeline requires more VBs than we had previously set up, we
    * need to re-emit them in SDS. If it requires the same set or fewer, we
    * can just re-use the old SDS.
    */
   if (pipeline->vi.bindings_used & ~cmd->vertex_bindings_set)
      cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;

   /* If the pipeline needs a dynamic descriptor, re-emit descriptor sets */
   if (pipeline->layout->dynamic_offset_count)
      cmd->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS;

   /* dynamic linewidth state depends on the pipeline state's gras_su_cntl,
    * so the dynamic state ib must be updated when the pipeline changes
    */
   if (pipeline->dynamic_state_mask & BIT(VK_DYNAMIC_STATE_LINE_WIDTH)) {
      struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_LINE_WIDTH, 2);

      cmd->state.dynamic_gras_su_cntl &= A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
      cmd->state.dynamic_gras_su_cntl |= pipeline->gras_su_cntl;

      tu_cs_emit_regs(&cs, A6XX_GRAS_SU_CNTL(.dword = cmd->state.dynamic_gras_su_cntl));
   }
}
void
tu_CmdSetViewport(VkCommandBuffer commandBuffer,
                  uint32_t firstViewport,
                  uint32_t viewportCount,
                  const VkViewport *pViewports)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_VIEWPORT, 18);

   assert(firstViewport == 0 && viewportCount == 1);

   tu6_emit_viewport(&cs, pViewports);
}
void
tu_CmdSetScissor(VkCommandBuffer commandBuffer,
                 uint32_t firstScissor,
                 uint32_t scissorCount,
                 const VkRect2D *pScissors)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_SCISSOR, 3);

   assert(firstScissor == 0 && scissorCount == 1);

   tu6_emit_scissor(&cs, pScissors);
}
void
tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_LINE_WIDTH, 2);

   cmd->state.dynamic_gras_su_cntl &= ~A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
   cmd->state.dynamic_gras_su_cntl |= A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(lineWidth / 2.0f);

   tu_cs_emit_regs(&cs, A6XX_GRAS_SU_CNTL(.dword = cmd->state.dynamic_gras_su_cntl));
}
void
tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
                   float depthBiasConstantFactor,
                   float depthBiasClamp,
                   float depthBiasSlopeFactor)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_DEPTH_BIAS, 4);

   tu6_emit_depth_bias(&cs, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
}
void
tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
                        const float blendConstants[4])
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_BLEND_CONSTANTS, 5);

   tu_cs_emit_pkt4(&cs, REG_A6XX_RB_BLEND_RED_F32, 4);
   tu_cs_emit_array(&cs, (const uint32_t *) blendConstants, 4);
}
void
tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
                     float minDepthBounds,
                     float maxDepthBounds)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_DEPTH_BOUNDS, 3);

   tu_cs_emit_regs(&cs,
                   A6XX_RB_Z_BOUNDS_MIN(minDepthBounds),
                   A6XX_RB_Z_BOUNDS_MAX(maxDepthBounds));
}
static void
update_stencil_mask(uint32_t *value, VkStencilFaceFlags face, uint32_t mask)
{
   if (face & VK_STENCIL_FACE_FRONT_BIT)
      *value = (*value & 0xff00) | (mask & 0xff);
   if (face & VK_STENCIL_FACE_BACK_BIT)
      *value = (*value & 0xff) | (mask & 0xff) << 8;
}
void
tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
                            VkStencilFaceFlags faceMask,
                            uint32_t compareMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK, 2);

   update_stencil_mask(&cmd->state.dynamic_stencil_mask, faceMask, compareMask);

   tu_cs_emit_regs(&cs, A6XX_RB_STENCILMASK(.dword = cmd->state.dynamic_stencil_mask));
}
void
tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t writeMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, 2);

   update_stencil_mask(&cmd->state.dynamic_stencil_wrmask, faceMask, writeMask);

   tu_cs_emit_regs(&cs, A6XX_RB_STENCILWRMASK(.dword = cmd->state.dynamic_stencil_wrmask));
}
void
tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t reference)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_REFERENCE, 2);

   update_stencil_mask(&cmd->state.dynamic_stencil_ref, faceMask, reference);

   tu_cs_emit_regs(&cs, A6XX_RB_STENCILREF(.dword = cmd->state.dynamic_stencil_ref));
}
void
tu_CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
                            const VkSampleLocationsInfoEXT *pSampleLocationsInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, TU_DYNAMIC_STATE_SAMPLE_LOCATIONS, 9);

   assert(pSampleLocationsInfo);

   tu6_emit_sample_locations(&cs, pSampleLocationsInfo);
}
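/* Work out which flushes/invalidates a transition from src_mask (domains
 * that may have written) to dst_mask (domains about to access) requires.
 * Writes queue their flushes in pending_flush_bits ("available"); reads
 * promote the relevant pending bits into flush_bits ("visible").
 * Incoherent CCU writes get their flush added immediately rather than
 * deferred.
 */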
static void
tu_flush_for_access(struct tu_cache_state *cache,
                    enum tu_cmd_access_mask src_mask,
                    enum tu_cmd_access_mask dst_mask)
{
   enum tu_cmd_flush_bits flush_bits = 0;

   if (src_mask & TU_ACCESS_SYSMEM_WRITE) {
      cache->pending_flush_bits |= TU_CMD_FLAG_ALL_INVALIDATE;
   }

#define SRC_FLUSH(domain, flush, invalidate) \
   if (src_mask & TU_ACCESS_##domain##_WRITE) { \
      cache->pending_flush_bits |= TU_CMD_FLAG_##flush | \
         (TU_CMD_FLAG_ALL_INVALIDATE & ~TU_CMD_FLAG_##invalidate); \
   }

   SRC_FLUSH(UCHE, CACHE_FLUSH, CACHE_INVALIDATE)
   SRC_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
   SRC_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)

#undef SRC_FLUSH

#define SRC_INCOHERENT_FLUSH(domain, flush, invalidate) \
   if (src_mask & TU_ACCESS_##domain##_INCOHERENT_WRITE) { \
      flush_bits |= TU_CMD_FLAG_##flush; \
      cache->pending_flush_bits |= \
         (TU_CMD_FLAG_ALL_INVALIDATE & ~TU_CMD_FLAG_##invalidate); \
   }

   SRC_INCOHERENT_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
   SRC_INCOHERENT_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)

#undef SRC_INCOHERENT_FLUSH

   if (dst_mask & (TU_ACCESS_SYSMEM_READ | TU_ACCESS_SYSMEM_WRITE)) {
      flush_bits |= cache->pending_flush_bits & TU_CMD_FLAG_ALL_FLUSH;
   }

#define DST_FLUSH(domain, flush, invalidate) \
   if (dst_mask & (TU_ACCESS_##domain##_READ | \
                   TU_ACCESS_##domain##_WRITE)) { \
      flush_bits |= cache->pending_flush_bits & \
         (TU_CMD_FLAG_##invalidate | \
          (TU_CMD_FLAG_ALL_FLUSH & ~TU_CMD_FLAG_##flush)); \
   }

   DST_FLUSH(UCHE, CACHE_FLUSH, CACHE_INVALIDATE)
   DST_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
   DST_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)

#undef DST_FLUSH

#define DST_INCOHERENT_FLUSH(domain, flush, invalidate) \
   if (dst_mask & (TU_ACCESS_##domain##_READ | \
                   TU_ACCESS_##domain##_WRITE)) { \
      flush_bits |= TU_CMD_FLAG_##invalidate | \
         (cache->pending_flush_bits & \
          (TU_CMD_FLAG_ALL_FLUSH & ~TU_CMD_FLAG_##flush)); \
   }

   DST_INCOHERENT_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
   DST_INCOHERENT_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)

#undef DST_INCOHERENT_FLUSH

   if (dst_mask & TU_ACCESS_WFI_READ) {
      flush_bits |= TU_CMD_FLAG_WFI;
   }

   cache->flush_bits |= flush_bits;
   cache->pending_flush_bits &= ~flush_bits;
}
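/* Map a Vulkan access mask onto the cache domains it touches: UCHE for
 * shader/vertex traffic, CCU color/depth for attachment traffic, "sysmem"
 * for host and CP accesses that bypass the GPU caches, plus
 * TU_ACCESS_WFI_READ where the consumer needs a WFI before it starts (see
 * the comments below).
 */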
static enum tu_cmd_access_mask
vk2tu_access(VkAccessFlags flags, bool gmem)
{
   enum tu_cmd_access_mask mask = 0;

   /* If the GPU writes a buffer that is then read by an indirect draw
    * command, we theoretically need a WFI + WAIT_FOR_ME combination to
    * wait for the writes to complete. The WAIT_FOR_ME is performed as part
    * of the draw by the firmware, so we just need to execute a WFI.
    */
   if (flags &
       (VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
        VK_ACCESS_MEMORY_READ_BIT)) {
      mask |= TU_ACCESS_WFI_READ;
   }

   if (flags &
       (VK_ACCESS_INDIRECT_COMMAND_READ_BIT | /* Read performed by CP */
        VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT | /* Read performed by CP, I think */
        VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT | /* Read performed by CP */
        VK_ACCESS_HOST_READ_BIT | /* sysmem by definition */
        VK_ACCESS_MEMORY_READ_BIT)) {
      mask |= TU_ACCESS_SYSMEM_READ;
   }

   if (flags &
       (VK_ACCESS_HOST_WRITE_BIT |
        VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT | /* Write performed by CP, I think */
        VK_ACCESS_MEMORY_WRITE_BIT)) {
      mask |= TU_ACCESS_SYSMEM_WRITE;
   }

   if (flags &
       (VK_ACCESS_INDEX_READ_BIT | /* Read performed by PC, I think */
        VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | /* Read performed by VFD */
        VK_ACCESS_UNIFORM_READ_BIT | /* Read performed by SP */
        /* TODO: Is there a no-cache bit for textures so that we can ignore
         * these?
         */
        VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | /* Read performed by TP */
        VK_ACCESS_SHADER_READ_BIT | /* Read performed by SP/TP */
        VK_ACCESS_MEMORY_READ_BIT)) {
      mask |= TU_ACCESS_UCHE_READ;
   }

   if (flags &
       (VK_ACCESS_SHADER_WRITE_BIT | /* Write performed by SP */
        VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT | /* Write performed by VPC */
        VK_ACCESS_MEMORY_WRITE_BIT)) {
      mask |= TU_ACCESS_UCHE_WRITE;
   }

   /* When using GMEM, the CCU is always flushed automatically to GMEM, and
    * then GMEM is flushed to sysmem. Furthermore, we already had to flush any
    * previous writes in sysmem mode when transitioning to GMEM. Therefore we
    * can ignore CCU and pretend that color attachments and transfers use
    * sysmem directly.
    */

   if (flags &
       (VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
        VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
        VK_ACCESS_MEMORY_READ_BIT)) {
      if (gmem)
         mask |= TU_ACCESS_SYSMEM_READ;
      else
         mask |= TU_ACCESS_CCU_COLOR_INCOHERENT_READ;
   }

   if (flags &
       (VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
        VK_ACCESS_MEMORY_READ_BIT)) {
      if (gmem)
         mask |= TU_ACCESS_SYSMEM_READ;
      else
         mask |= TU_ACCESS_CCU_DEPTH_INCOHERENT_READ;
   }

   if (flags &
       (VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
        VK_ACCESS_MEMORY_WRITE_BIT)) {
      if (gmem)
         mask |= TU_ACCESS_SYSMEM_WRITE;
      else
         mask |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
   }

   if (flags &
       (VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
        VK_ACCESS_MEMORY_WRITE_BIT)) {
      if (gmem)
         mask |= TU_ACCESS_SYSMEM_WRITE;
      else
         mask |= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE;
   }

   /* When the dst access is a transfer read/write, it seems we sometimes need
    * to insert a WFI after any flushes, to guarantee that the flushes finish
    * before the 2D engine starts. However the opposite (i.e. a WFI after
    * CP_BLIT and before any subsequent flush) does not seem to be needed, and
    * the blob doesn't emit such a WFI.
    */
   if (flags &
       (VK_ACCESS_TRANSFER_WRITE_BIT |
        VK_ACCESS_MEMORY_WRITE_BIT)) {
      if (gmem)
         mask |= TU_ACCESS_SYSMEM_WRITE;
      else
         mask |= TU_ACCESS_CCU_COLOR_WRITE;
      mask |= TU_ACCESS_WFI_READ;
   }

   if (flags &
       (VK_ACCESS_TRANSFER_READ_BIT | /* Access performed by TP */
        VK_ACCESS_MEMORY_READ_BIT)) {
      mask |= TU_ACCESS_UCHE_READ | TU_ACCESS_WFI_READ;
   }

   return mask;
}
void
tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCmdBuffers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   VkResult result;

   assert(commandBufferCount > 0);

   /* Emit any pending flushes. */
   if (cmd->state.pass) {
      tu_flush_all_pending(&cmd->state.renderpass_cache);
      tu_emit_cache_flush_renderpass(cmd, &cmd->draw_cs);
   } else {
      tu_flush_all_pending(&cmd->state.cache);
      tu_emit_cache_flush(cmd, &cmd->cs);
   }

   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);

      result = tu_bo_list_merge(&cmd->bo_list, &secondary->bo_list);
      if (result != VK_SUCCESS) {
         cmd->record_result = result;
         break;
      }

      if (secondary->usage_flags &
          VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
         assert(tu_cs_is_empty(&secondary->cs));

         result = tu_cs_add_entries(&cmd->draw_cs, &secondary->draw_cs);
         if (result != VK_SUCCESS) {
            cmd->record_result = result;
            break;
         }

         result = tu_cs_add_entries(&cmd->draw_epilogue_cs,
                                    &secondary->draw_epilogue_cs);
         if (result != VK_SUCCESS) {
            cmd->record_result = result;
            break;
         }

         if (secondary->has_tess)
            cmd->has_tess = true;
      } else {
         assert(tu_cs_is_empty(&secondary->draw_cs));
         assert(tu_cs_is_empty(&secondary->draw_epilogue_cs));

         for (uint32_t j = 0; j < secondary->cs.bo_count; j++) {
            tu_bo_list_add(&cmd->bo_list, secondary->cs.bos[j],
                           MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
         }

         tu_cs_add_entries(&cmd->cs, &secondary->cs);
      }

      cmd->state.index_size = secondary->state.index_size; /* for restart index update */
   }
   cmd->state.dirty = ~0u; /* TODO: set dirty only what needs to be */

   /* After executing secondary command buffers, there may have been arbitrary
    * flushes executed, so when we encounter a pipeline barrier with a
    * srcMask, we have to assume that we need to invalidate. Therefore we need
    * to re-initialize the cache with all pending invalidate bits set.
    */
   if (cmd->state.pass) {
      tu_cache_init(&cmd->state.renderpass_cache);
   } else {
      tu_cache_init(&cmd->state.cache);
   }
}
VkResult
tu_CreateCommandPool(VkDevice _device,
                     const VkCommandPoolCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkCommandPool *pCmdPool)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_cmd_pool *pool;

   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);
   list_inithead(&pool->free_cmd_buffers);

   pool->queue_family_index = pCreateInfo->queueFamilyIndex;

   *pCmdPool = tu_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}
void
tu_DestroyCommandPool(VkDevice _device,
                      VkCommandPool commandPool,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   vk_free2(&device->alloc, pAllocator, pool);
}
VkResult
tu_ResetCommandPool(VkDevice device,
                    VkCommandPool commandPool,
                    VkCommandPoolResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
   VkResult result;

   list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
                       pool_link)
   {
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}
void
tu_TrimCommandPool(VkDevice device,
                   VkCommandPool commandPool,
                   VkCommandPoolTrimFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }
}
static void
tu_subpass_barrier(struct tu_cmd_buffer *cmd_buffer,
                   const struct tu_subpass_barrier *barrier,
                   bool external)
{
   /* Note: we don't know until the end of the subpass whether we'll use
    * sysmem, so assume sysmem here to be safe.
    */
   struct tu_cache_state *cache =
      external ? &cmd_buffer->state.cache : &cmd_buffer->state.renderpass_cache;
   enum tu_cmd_access_mask src_flags =
      vk2tu_access(barrier->src_access_mask, false);
   enum tu_cmd_access_mask dst_flags =
      vk2tu_access(barrier->dst_access_mask, false);

   if (barrier->incoherent_ccu_color)
      src_flags |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
   if (barrier->incoherent_ccu_depth)
      src_flags |= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE;

   tu_flush_for_access(cache, src_flags, dst_flags);
}
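/* Beginning a render pass switches cache tracking over to
 * state.renderpass_cache, whose flushes land in draw_cs. Anything still
 * pending in the outer state.cache is carried over, so a barrier inside
 * the pass still knows what was made available before it started.
 */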
void
tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                      const VkRenderPassBeginInfo *pRenderPassBegin,
                      VkSubpassContents contents)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
   TU_FROM_HANDLE(tu_framebuffer, fb, pRenderPassBegin->framebuffer);

   cmd->state.pass = pass;
   cmd->state.subpass = pass->subpasses;
   cmd->state.framebuffer = fb;
   cmd->state.render_area = pRenderPassBegin->renderArea;

   tu_cmd_prepare_tile_store_ib(cmd);

   /* Note: because this is external, any flushes will happen before draw_cs
    * gets called. However deferred flushes could have to happen later as part
    * of the subpass.
    */
   tu_subpass_barrier(cmd, &pass->subpasses[0].start_barrier, true);
   cmd->state.renderpass_cache.pending_flush_bits =
      cmd->state.cache.pending_flush_bits;
   cmd->state.renderpass_cache.flush_bits = 0;

   tu_emit_renderpass_begin(cmd, pRenderPassBegin);

   tu6_emit_zs(cmd, cmd->state.subpass, &cmd->draw_cs);
   tu6_emit_mrt(cmd, cmd->state.subpass, &cmd->draw_cs);
   tu6_emit_msaa(&cmd->draw_cs, cmd->state.subpass->samples);
   tu6_emit_render_cntl(cmd, cmd->state.subpass, &cmd->draw_cs, false);

   tu_set_input_attachments(cmd, cmd->state.subpass);

   for (uint32_t i = 0; i < fb->attachment_count; ++i) {
      const struct tu_image_view *iview = fb->attachments[i].attachment;
      tu_bo_list_add(&cmd->bo_list, iview->image->bo,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
   }

   cmd->state.dirty |= TU_CMD_DIRTY_DRAW_STATE;
}
void
tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
                       const VkRenderPassBeginInfo *pRenderPassBeginInfo,
                       const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
{
   tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
                         pSubpassBeginInfo->contents);
}
void
tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   const struct tu_render_pass *pass = cmd->state.pass;
   struct tu_cs *cs = &cmd->draw_cs;

   const struct tu_subpass *subpass = cmd->state.subpass++;

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);

   if (subpass->resolve_attachments) {
      tu6_emit_blit_scissor(cmd, cs, true);

      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         tu_store_gmem_attachment(cmd, cs, a,
                                  subpass->color_attachments[i].attachment);

         if (pass->attachments[a].gmem_offset < 0)
            continue;

         /* TODO:
          * check if the resolved attachment is needed by later subpasses,
          * if it is, should be doing a GMEM->GMEM resolve instead of GMEM->MEM->GMEM..
          */
         tu_finishme("missing GMEM->GMEM resolve path\n");
         tu_load_gmem_attachment(cmd, cs, a, true);
      }
   }

   tu_cond_exec_end(cs);

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);

   tu6_emit_sysmem_resolves(cmd, cs, subpass);

   tu_cond_exec_end(cs);

   /* Handle dependencies for the next subpass */
   tu_subpass_barrier(cmd, &cmd->state.subpass->start_barrier, false);

   /* emit mrt/zs/msaa/ubwc state for the subpass that is starting */
   tu6_emit_zs(cmd, cmd->state.subpass, cs);
   tu6_emit_mrt(cmd, cmd->state.subpass, cs);
   tu6_emit_msaa(cs, cmd->state.subpass->samples);
   tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, false);

   tu_set_input_attachments(cmd, cmd->state.subpass);
}
void
tu_CmdNextSubpass2(VkCommandBuffer commandBuffer,
                   const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
                   const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}
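/* Constants are uploaded two ways: push constants are small enough to be
 * embedded in the command stream (SS6_DIRECT), while UBO ranges that ir3
 * promoted to constants are fetched by the CP from the UBO's VA at draw
 * time (SS6_INDIRECT), with the VA read out of the bindless descriptor.
 * CP_LOAD_STATE6 units are vec4s, hence the /16 and *4 conversions.
 */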
static void
tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
                     struct tu_descriptor_state *descriptors_state,
                     gl_shader_stage type,
                     uint32_t *push_constants)
{
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   const struct ir3_ubo_analysis_state *state = &link->const_state.ubo_state;

   if (link->push_consts.count > 0) {
      unsigned num_units = link->push_consts.count;
      unsigned offset = link->push_consts.lo;
      tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_units * 4);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                 CP_LOAD_STATE6_0_NUM_UNIT(num_units));
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, 0);
      for (unsigned i = 0; i < num_units * 4; i++)
         tu_cs_emit(cs, push_constants[i + offset * 4]);
   }

   for (uint32_t i = 0; i < state->num_enabled; i++) {
      uint32_t size = state->range[i].end - state->range[i].start;
      uint32_t offset = state->range[i].start;

      /* and even if the start of the const buffer is before
       * first_immediate, the end may not be:
       */
      size = MIN2(size, (16 * link->constlen) - state->range[i].offset);

      if (size == 0)
         continue;

      /* things should be aligned to vec4: */
      debug_assert((state->range[i].offset % 16) == 0);
      debug_assert((size % 16) == 0);
      debug_assert((offset % 16) == 0);

      /* Dig out the descriptor from the descriptor state and read the VA from
       * it.
       */
      assert(state->range[i].ubo.bindless);
      uint32_t *base = state->range[i].ubo.bindless_base == MAX_SETS ?
         descriptors_state->dynamic_descriptors :
         descriptors_state->sets[state->range[i].ubo.bindless_base]->mapped_ptr;
      unsigned block = state->range[i].ubo.block;
      uint32_t *desc = base + block * A6XX_TEX_CONST_DWORDS;
      uint64_t va = desc[0] | ((uint64_t)(desc[1] & A6XX_UBO_1_BASE_HI__MASK) << 32);

      tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                 CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
      tu_cs_emit_qw(cs, va + offset);
   }
}
static struct tu_cs_entry
tu6_emit_consts(struct tu_cmd_buffer *cmd,
                const struct tu_pipeline *pipeline,
                struct tu_descriptor_state *descriptors_state,
                gl_shader_stage type)
{
   struct tu_cs cs;
   tu_cs_begin_sub_stream(&cmd->sub_cs, 512, &cs); /* TODO: maximum size? */

   tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);

   return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
}
static struct tu_cs_entry
tu6_emit_vertex_buffers(struct tu_cmd_buffer *cmd,
                        const struct tu_pipeline *pipeline)
{
   struct tu_cs cs;
   tu_cs_begin_sub_stream(&cmd->sub_cs, 4 * MAX_VBS, &cs);

   uint32_t binding;
   for_each_bit(binding, pipeline->vi.bindings_used) {
      const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
      const VkDeviceSize offset = buf->bo_offset +
         cmd->state.vb.offsets[binding];

      tu_cs_emit_regs(&cs,
                      A6XX_VFD_FETCH_BASE(binding, .bo = buf->bo, .bo_offset = offset),
                      A6XX_VFD_FETCH_SIZE(binding, buf->size - offset));
   }

   cmd->vertex_bindings_set = pipeline->vi.bindings_used;

   return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
}
static uint64_t
get_tess_param_bo_size(const struct tu_pipeline *pipeline,
                       uint32_t draw_count)
{
   /* TODO: For indirect draws, we can't compute the BO size ahead of time.
    * Still not sure what to do here, so just allocate a reasonably large
    * BO and hope for the best for now.
    * (maxTessellationControlPerVertexOutputComponents * 2048 vertices +
    * maxTessellationControlPerPatchOutputComponents * 512 patches) */
   if (!draw_count)
      return ((128 * 2048) + (128 * 512)) * 4;

   /* For each patch, adreno lays out the tess param BO in memory as:
    * (v_input[0][0])...(v_input[i][j])(p_input[0])...(p_input[k]).
    * where i = # vertices per patch, j = # per-vertex outputs, and
    * k = # per-patch outputs.*/
   uint32_t verts_per_patch = pipeline->ia.primtype - DI_PT_PATCHES0;
   uint32_t num_patches = draw_count / verts_per_patch;
   return draw_count * pipeline->tess.per_vertex_output_size +
          pipeline->tess.per_patch_output_size * num_patches;
}
static uint64_t
get_tess_factor_bo_size(const struct tu_pipeline *pipeline,
                        uint32_t draw_count)
{
   /* TODO: For indirect draws, we can't compute the BO size ahead of time.
    * Still not sure what to do here, so just allocate a reasonably large
    * BO and hope for the best for now.
    * (quad factor stride * 512 patches) */
   if (!draw_count)
      return (28 * 512) * 4;

   /* Each distinct patch gets its own tess factor output. */
   uint32_t verts_per_patch = pipeline->ia.primtype - DI_PT_PATCHES0;
   uint32_t num_patches = draw_count / verts_per_patch;
   uint32_t factor_stride;
   switch (pipeline->tess.patch_type) {
   case IR3_TESS_ISOLINES:
      factor_stride = 12;
      break;
   case IR3_TESS_TRIANGLES:
      factor_stride = 20;
      break;
   case IR3_TESS_QUADS:
      factor_stride = 28;
      break;
   default:
      unreachable("bad tessmode");
   }
   return factor_stride * num_patches;
}
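/* The HS and DS each get one vec4 of consts holding the tess param and
 * tess factor BO addresses. Both regions live in a single scratch BO,
 * factors first and params after, and PC_TESSFACTOR_ADDR points at the
 * factor region.
 */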
static VkResult
tu6_emit_tess_consts(struct tu_cmd_buffer *cmd,
                     uint32_t draw_count,
                     const struct tu_pipeline *pipeline,
                     struct tu_cs_entry *entry)
{
   struct tu_cs cs;
   VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 20, &cs);
   if (result != VK_SUCCESS)
      return result;

   uint64_t tess_factor_size = get_tess_factor_bo_size(pipeline, draw_count);
   uint64_t tess_param_size = get_tess_param_bo_size(pipeline, draw_count);
   uint64_t tess_bo_size = tess_factor_size + tess_param_size;
   if (tess_bo_size > 0) {
      struct tu_bo *tess_bo;
      result = tu_get_scratch_bo(cmd->device, tess_bo_size, &tess_bo);
      if (result != VK_SUCCESS)
         return result;

      tu_bo_list_add(&cmd->bo_list, tess_bo,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
      uint64_t tess_factor_iova = tess_bo->iova;
      uint64_t tess_param_iova = tess_factor_iova + tess_factor_size;

      tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
      tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(pipeline->tess.hs_bo_regid) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_HS_SHADER) |
                 CP_LOAD_STATE6_0_NUM_UNIT(1));
      tu_cs_emit(&cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
      tu_cs_emit(&cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
      tu_cs_emit_qw(&cs, tess_param_iova);
      tu_cs_emit_qw(&cs, tess_factor_iova);

      tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
      tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(pipeline->tess.ds_bo_regid) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_DS_SHADER) |
                 CP_LOAD_STATE6_0_NUM_UNIT(1));
      tu_cs_emit(&cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
      tu_cs_emit(&cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
      tu_cs_emit_qw(&cs, tess_param_iova);
      tu_cs_emit_qw(&cs, tess_factor_iova);

      tu_cs_emit_pkt4(&cs, REG_A6XX_PC_TESSFACTOR_ADDR_LO, 2);
      tu_cs_emit_qw(&cs, tess_factor_iova);

      /* TODO: Without this WFI here, the hardware seems unable to read these
       * addresses we just emitted. Freedreno emits these consts as part of
       * IB1 instead of in a draw state which might make this WFI unnecessary,
       * but it requires a bit more indirection (SS6_INDIRECT for consts). */
      tu_cs_emit_wfi(&cs);
   }

   *entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
   return VK_SUCCESS;
}
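/* State emission common to every draw: refresh whatever the dirty bits say
 * changed (shader consts, the descriptor load group, vertex buffers, tess
 * consts), then either re-emit the full CP_SET_DRAW_STATE group list for
 * the first draw of a render pass or just the groups that changed.
 */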
static VkResult
tu6_draw_common(struct tu_cmd_buffer *cmd,
                struct tu_cs *cs,
                bool indexed,
                /* note: draw_count is 0 for indirect */
                uint32_t draw_count)
{
   const struct tu_pipeline *pipeline = cmd->state.pipeline;
   VkResult result;

   struct tu_descriptor_state *descriptors_state =
      &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];

   tu_emit_cache_flush_renderpass(cmd, cs);

   tu_cs_emit_regs(cs, A6XX_PC_PRIMITIVE_CNTL_0(
         .primitive_restart =
               pipeline->ia.primitive_restart && indexed,
         .tess_upper_left_domain_origin =
               pipeline->tess.upper_left_domain_origin));

   if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
      cmd->state.shader_const_ib[MESA_SHADER_VERTEX] =
         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX);
      cmd->state.shader_const_ib[MESA_SHADER_TESS_CTRL] =
         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_TESS_CTRL);
      cmd->state.shader_const_ib[MESA_SHADER_TESS_EVAL] =
         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_TESS_EVAL);
      cmd->state.shader_const_ib[MESA_SHADER_GEOMETRY] =
         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_GEOMETRY);
      cmd->state.shader_const_ib[MESA_SHADER_FRAGMENT] =
         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT);
   }

   if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
      /* We need to reload the descriptors every time the descriptor sets
       * change. However, the commands we send only depend on the pipeline
       * because the whole point is to cache descriptors which are used by the
       * pipeline. There's a problem here, in that the firmware has an
       * "optimization" which skips executing groups that are set to the same
       * value as the last draw. This means that if the descriptor sets change
       * but not the pipeline, we'd try to re-execute the same buffer which
       * the firmware would ignore and we wouldn't pre-load the new
       * descriptors. The blob seems to re-emit the LOAD_STATE group whenever
       * the descriptor sets change, which we emulate here by copying the
       * pre-prepared buffer.
       */
      const struct tu_cs_entry *load_entry = &pipeline->load_state.state_ib;
      if (load_entry->size > 0) {
         struct tu_cs load_cs;
         result = tu_cs_begin_sub_stream(&cmd->sub_cs, load_entry->size, &load_cs);
         if (result != VK_SUCCESS)
            return result;
         tu_cs_emit_array(&load_cs,
                          (uint32_t *)((char *)load_entry->bo->map + load_entry->offset),
                          load_entry->size / 4);
         cmd->state.desc_sets_load_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &load_cs);
      } else {
         cmd->state.desc_sets_load_ib.size = 0;
      }
   }

   if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS)
      cmd->state.vertex_buffers_ib = tu6_emit_vertex_buffers(cmd, pipeline);

   bool has_tess =
         pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
   struct tu_cs_entry tess_consts = {};
   if (has_tess) {
      cmd->has_tess = true;
      result = tu6_emit_tess_consts(cmd, draw_count, pipeline, &tess_consts);
      if (result != VK_SUCCESS)
         return result;
   }

   /* for the first draw in a renderpass, re-emit all the draw states
    *
    * and if a draw-state disabling path (CmdClearAttachments 3D fallback) was
    * used, then draw states must be re-emitted. note however this only happens
    * in the sysmem path, so this can be skipped for the gmem path (TODO)
    *
    * the two input attachment states are excluded because secondary command
    * buffer doesn't have a state ib to restore it, and not re-emitting them
    * is OK since CmdClearAttachments won't disable/overwrite them
    */
   if (cmd->state.dirty & TU_CMD_DIRTY_DRAW_STATE) {
      tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (TU_DRAW_STATE_COUNT - 2));

      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state_ib);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state_ib);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_TESS, tess_consts);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI, pipeline->vi.state_ib);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state_ib);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_RAST, pipeline->rast.state_ib);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS, pipeline->ds.state_ib);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_BLEND, pipeline->blend.state_ib);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const_ib[MESA_SHADER_VERTEX]);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_HS_CONST, cmd->state.shader_const_ib[MESA_SHADER_TESS_CTRL]);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS_CONST, cmd->state.shader_const_ib[MESA_SHADER_TESS_EVAL]);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const_ib[MESA_SHADER_GEOMETRY]);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const_ib[MESA_SHADER_FRAGMENT]);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DESC_SETS, cmd->state.desc_sets_ib);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DESC_SETS_LOAD, cmd->state.desc_sets_load_ib);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers_ib);
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_PARAMS, cmd->state.vs_params);

      for (uint32_t i = 0; i < ARRAY_SIZE(cmd->state.dynamic_state); i++) {
         tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DYNAMIC + i,
                               ((pipeline->dynamic_state_mask & BIT(i)) ?
                                cmd->state.dynamic_state[i] :
                                pipeline->dynamic_state[i]));
      }
   } else {
      /* emit draw states that were just updated
       * note we eventually don't want to have to emit anything here
       */
      uint32_t draw_state_count =
         has_tess +
         ((cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) ? 5 : 0) +
         ((cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) ? 1 : 0) +
         ((cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) ? 1 : 0) +
         1; /* vs_params */

      tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_count);

      /* We may need to re-emit tess consts if the current draw call is
       * sufficiently larger than the last draw call. */
      if (has_tess)
         tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_TESS, tess_consts);
      if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
         tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const_ib[MESA_SHADER_VERTEX]);
         tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_HS_CONST, cmd->state.shader_const_ib[MESA_SHADER_TESS_CTRL]);
         tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS_CONST, cmd->state.shader_const_ib[MESA_SHADER_TESS_EVAL]);
         tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const_ib[MESA_SHADER_GEOMETRY]);
         tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const_ib[MESA_SHADER_FRAGMENT]);
      }
      if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS)
         tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DESC_SETS_LOAD, cmd->state.desc_sets_load_ib);
      if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS)
         tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers_ib);
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_PARAMS, cmd->state.vs_params);
   }

   tu_cs_sanity_check(cs);

   /* There are too many graphics dirty bits to list here, so just list the
    * bits to preserve instead. The only things not emitted here are
    * compute-related state.
    */
   cmd->state.dirty &= (TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS | TU_CMD_DIRTY_COMPUTE_PIPELINE);

   return VK_SUCCESS;
}
static uint32_t
tu_draw_initiator(struct tu_cmd_buffer *cmd, enum pc_di_src_sel src_sel)
{
   const struct tu_pipeline *pipeline = cmd->state.pipeline;
   uint32_t initiator =
      CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(pipeline->ia.primtype) |
      CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(src_sel) |
      CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(cmd->state.index_size) |
      CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY);

   if (pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT)
      initiator |= CP_DRAW_INDX_OFFSET_0_GS_ENABLE;

   switch (pipeline->tess.patch_type) {
   case IR3_TESS_TRIANGLES:
      initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_TRIANGLES) |
                   CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
      break;
   case IR3_TESS_ISOLINES:
      initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_ISOLINES) |
                   CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
      break;
   case IR3_TESS_NONE:
      initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_QUADS);
      break;
   case IR3_TESS_QUADS:
      initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_QUADS) |
                   CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
      break;
   }
   return initiator;
}
static uint32_t
vs_params_offset(struct tu_cmd_buffer *cmd)
{
   const struct tu_program_descriptor_linkage *link =
      &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
   const struct ir3_const_state *const_state = &link->const_state;

   if (const_state->offsets.driver_param >= link->constlen)
      return 0;

   /* this layout is required by CP_DRAW_INDIRECT_MULTI */
   STATIC_ASSERT(IR3_DP_DRAWID == 0);
   STATIC_ASSERT(IR3_DP_VTXID_BASE == 1);
   STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);

   /* 0 means disabled for CP_DRAW_INDIRECT_MULTI */
   assert(const_state->offsets.driver_param != 0);

   return const_state->offsets.driver_param;
}
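/* Emit VFD_INDEX_OFFSET/VFD_INSTANCE_START_OFFSET and, when the VS uses
 * driver params, a vec4 of consts in the (draw id, vertex offset, first
 * instance, 0) layout that CP_DRAW_INDIRECT_MULTI expects; the draw id is
 * always 0 for direct draws.
 */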
static struct tu_draw_state
tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
                   uint32_t vertex_offset,
                   uint32_t first_instance)
{
   uint32_t offset = vs_params_offset(cmd);

   struct tu_cs cs;
   VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 3 + (offset ? 8 : 0), &cs);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return (struct tu_draw_state) {};
   }

   /* TODO: don't make a new draw state when it doesn't change */

   tu_cs_emit_regs(&cs,
                   A6XX_VFD_INDEX_OFFSET(vertex_offset),
                   A6XX_VFD_INSTANCE_START_OFFSET(first_instance));

   if (offset) {
      tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
      tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
                 CP_LOAD_STATE6_0_NUM_UNIT(1));
      tu_cs_emit(&cs, 0);
      tu_cs_emit(&cs, 0);

      tu_cs_emit(&cs, 0);
      tu_cs_emit(&cs, vertex_offset);
      tu_cs_emit(&cs, first_instance);
      tu_cs_emit(&cs, 0);
   }

   struct tu_cs_entry entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
   return (struct tu_draw_state) {entry.bo->iova + entry.offset, entry.size / 4};
}
void
tu_CmdDraw(VkCommandBuffer commandBuffer,
           uint32_t vertexCount,
           uint32_t instanceCount,
           uint32_t firstVertex,
           uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *cs = &cmd->draw_cs;

   cmd->state.vs_params = tu6_emit_vs_params(cmd, firstVertex, firstInstance);

   tu6_draw_common(cmd, cs, false, vertexCount);

   tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
   tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_INDEX));
   tu_cs_emit(cs, instanceCount);
   tu_cs_emit(cs, vertexCount);
}
void
tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
                  uint32_t indexCount,
                  uint32_t instanceCount,
                  uint32_t firstIndex,
                  int32_t vertexOffset,
                  uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *cs = &cmd->draw_cs;

   cmd->state.vs_params = tu6_emit_vs_params(cmd, vertexOffset, firstInstance);

   tu6_draw_common(cmd, cs, true, indexCount);

   tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 7);
   tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_DMA));
   tu_cs_emit(cs, instanceCount);
   tu_cs_emit(cs, indexCount);
   tu_cs_emit(cs, firstIndex);
   tu_cs_emit_qw(cs, cmd->state.index_va);
   tu_cs_emit(cs, cmd->state.max_index_count);
}
void
tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
                   VkBuffer _buffer,
                   VkDeviceSize offset,
                   uint32_t drawCount,
                   uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buf, _buffer);
   struct tu_cs *cs = &cmd->draw_cs;

   cmd->state.vs_params = (struct tu_draw_state) {};

   tu6_draw_common(cmd, cs, false, 0);

   /* workaround for a firmware bug with CP_DRAW_INDIRECT_MULTI, where it
    * doesn't wait for WFIs to be completed and leads to GPU fault/hang
    * TODO: this could be worked around in a more performant way,
    * or there may exist newer firmware that has been fixed
    */
   if (cmd->device->physical_device->gpu_id != 650)
      tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

   tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 6);
   tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_INDEX));
   tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_NORMAL) |
                  A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
   tu_cs_emit(cs, drawCount);
   tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
   tu_cs_emit(cs, stride);

   tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
}
void
tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
                          VkBuffer _buffer,
                          VkDeviceSize offset,
                          uint32_t drawCount,
                          uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buf, _buffer);
   struct tu_cs *cs = &cmd->draw_cs;

   cmd->state.vs_params = (struct tu_draw_state) {};

   tu6_draw_common(cmd, cs, true, 0);

   /* workaround for a firmware bug with CP_DRAW_INDIRECT_MULTI, where it
    * doesn't wait for WFIs to be completed and leads to GPU fault/hang
    * TODO: this could be worked around in a more performant way,
    * or there may exist newer firmware that has been fixed
    */
   if (cmd->device->physical_device->gpu_id != 650)
      tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

   tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 9);
   tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_DMA));
   tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_INDEXED) |
                  A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
   tu_cs_emit(cs, drawCount);
   tu_cs_emit_qw(cs, cmd->state.index_va);
   tu_cs_emit(cs, cmd->state.max_index_count);
   tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
   tu_cs_emit(cs, stride);

   tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
}
void tu_CmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer,
                                    uint32_t instanceCount,
                                    uint32_t firstInstance,
                                    VkBuffer _counterBuffer,
                                    VkDeviceSize counterBufferOffset,
                                    uint32_t counterOffset,
                                    uint32_t vertexStride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buf, _counterBuffer);
   struct tu_cs *cs = &cmd->draw_cs;

   cmd->state.vs_params = tu6_emit_vs_params(cmd, 0, firstInstance);

   tu6_draw_common(cmd, cs, false, 0);

   tu_cs_emit_pkt7(cs, CP_DRAW_AUTO, 6);
   tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_XFB));
   tu_cs_emit(cs, instanceCount);
   tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + counterBufferOffset);
   tu_cs_emit(cs, counterOffset);
   tu_cs_emit(cs, vertexStride);

   tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
}
struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];

   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};
static void
tu_emit_compute_driver_params(struct tu_cs *cs, struct tu_pipeline *pipeline,
                              const struct tu_dispatch_info *info)
{
   gl_shader_stage type = MESA_SHADER_COMPUTE;
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   const struct ir3_const_state *const_state = &link->const_state;
   uint32_t offset = const_state->offsets.driver_param;

   if (link->constlen <= offset)
      return;

   if (!info->indirect) {
      uint32_t driver_params[IR3_DP_CS_COUNT] = {
         [IR3_DP_NUM_WORK_GROUPS_X] = info->blocks[0],
         [IR3_DP_NUM_WORK_GROUPS_Y] = info->blocks[1],
         [IR3_DP_NUM_WORK_GROUPS_Z] = info->blocks[2],
         [IR3_DP_LOCAL_GROUP_SIZE_X] = pipeline->compute.local_size[0],
         [IR3_DP_LOCAL_GROUP_SIZE_Y] = pipeline->compute.local_size[1],
         [IR3_DP_LOCAL_GROUP_SIZE_Z] = pipeline->compute.local_size[2],
      };

      uint32_t num_consts = MIN2(const_state->num_driver_params,
                                 (link->constlen - offset) * 4);
      /* push constants */
      tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_consts);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                 CP_LOAD_STATE6_0_NUM_UNIT(num_consts / 4));
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, 0);
      uint32_t i;
      for (i = 0; i < num_consts; i++)
         tu_cs_emit(cs, driver_params[i]);
   } else {
      tu_finishme("Indirect driver params");
   }
}
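/* HLSQ_CS_NDRANGE is programmed with the global size in threads (local
 * size times workgroup count), then the dispatch itself is either
 * CP_EXEC_CS with the workgroup counts inline or CP_EXEC_CS_INDIRECT
 * reading them from the indirect buffer. The local size is repeated in the
 * indirect packet, presumably because the buffer only supplies workgroup
 * counts.
 */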
static void
tu_dispatch(struct tu_cmd_buffer *cmd,
            const struct tu_dispatch_info *info)
{
   struct tu_cs *cs = &cmd->cs;
   struct tu_pipeline *pipeline = cmd->state.compute_pipeline;
   struct tu_descriptor_state *descriptors_state =
      &cmd->descriptors[VK_PIPELINE_BIND_POINT_COMPUTE];

   /* TODO: We could probably flush less if we add a compute_flush_bits
    * bitfield.
    */
   tu_emit_cache_flush(cmd, cs);

   if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_PIPELINE)
      tu_cs_emit_ib(cs, &pipeline->program.state_ib);

   struct tu_cs_entry ib;

   ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE);
   if (ib.size)
      tu_cs_emit_ib(cs, &ib);

   tu_emit_compute_driver_params(cs, pipeline, info);

   if ((cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS) &&
       pipeline->load_state.state_ib.size > 0) {
      tu_cs_emit_ib(cs, &pipeline->load_state.state_ib);
   }

   cmd->state.dirty &=
      ~(TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS | TU_CMD_DIRTY_COMPUTE_PIPELINE);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));

   const uint32_t *local_size = pipeline->compute.local_size;
   const uint32_t *num_groups = info->blocks;
   tu_cs_emit_regs(cs,
                   A6XX_HLSQ_CS_NDRANGE_0(.kerneldim = 3,
                                          .localsizex = local_size[0] - 1,
                                          .localsizey = local_size[1] - 1,
                                          .localsizez = local_size[2] - 1),
                   A6XX_HLSQ_CS_NDRANGE_1(.globalsize_x = local_size[0] * num_groups[0]),
                   A6XX_HLSQ_CS_NDRANGE_2(.globaloff_x = 0),
                   A6XX_HLSQ_CS_NDRANGE_3(.globalsize_y = local_size[1] * num_groups[1]),
                   A6XX_HLSQ_CS_NDRANGE_4(.globaloff_y = 0),
                   A6XX_HLSQ_CS_NDRANGE_5(.globalsize_z = local_size[2] * num_groups[2]),
                   A6XX_HLSQ_CS_NDRANGE_6(.globaloff_z = 0));

   tu_cs_emit_regs(cs,
                   A6XX_HLSQ_CS_KERNEL_GROUP_X(1),
                   A6XX_HLSQ_CS_KERNEL_GROUP_Y(1),
                   A6XX_HLSQ_CS_KERNEL_GROUP_Z(1));

   if (info->indirect) {
      uint64_t iova = tu_buffer_iova(info->indirect) + info->indirect_offset;

      tu_bo_list_add(&cmd->bo_list, info->indirect->bo,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);

      tu_cs_emit_pkt7(cs, CP_EXEC_CS_INDIRECT, 4);
      tu_cs_emit(cs, 0x00000000);
      tu_cs_emit_qw(cs, iova);
      tu_cs_emit(cs,
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(local_size[0] - 1) |
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(local_size[1] - 1) |
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(local_size[2] - 1));
   } else {
      tu_cs_emit_pkt7(cs, CP_EXEC_CS, 4);
      tu_cs_emit(cs, 0x00000000);
      tu_cs_emit(cs, CP_EXEC_CS_1_NGROUPS_X(info->blocks[0]));
      tu_cs_emit(cs, CP_EXEC_CS_2_NGROUPS_Y(info->blocks[1]));
      tu_cs_emit(cs, CP_EXEC_CS_3_NGROUPS_Z(info->blocks[2]));
   }

   tu_cs_emit_wfi(cs);
}
void
tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
                   uint32_t base_x,
                   uint32_t base_y,
                   uint32_t base_z,
                   uint32_t x,
                   uint32_t y,
                   uint32_t z)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_dispatch_info info = {};

   info.blocks[0] = x;
   info.blocks[1] = y;
   info.blocks[2] = z;

   info.offsets[0] = base_x;
   info.offsets[1] = base_y;
   info.offsets[2] = base_z;
   tu_dispatch(cmd_buffer, &info);
}
void
tu_CmdDispatch(VkCommandBuffer commandBuffer,
               uint32_t x,
               uint32_t y,
               uint32_t z)
{
   tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
}
void
tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
                       VkBuffer _buffer,
                       VkDeviceSize offset)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_dispatch_info info = {};

   info.indirect = buffer;
   info.indirect_offset = offset;

   tu_dispatch(cmd_buffer, &info);
}
void
tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   tu_cs_end(&cmd_buffer->draw_cs);
   tu_cs_end(&cmd_buffer->draw_epilogue_cs);

   if (use_sysmem_rendering(cmd_buffer))
      tu_cmd_render_sysmem(cmd_buffer);
   else
      tu_cmd_render_tiles(cmd_buffer);

   /* discard draw_cs and draw_epilogue_cs entries now that the tiles are
    * rendered */
   tu_cs_discard_entries(&cmd_buffer->draw_cs);
   tu_cs_begin(&cmd_buffer->draw_cs);
   tu_cs_discard_entries(&cmd_buffer->draw_epilogue_cs);
   tu_cs_begin(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->state.cache.pending_flush_bits |=
      cmd_buffer->state.renderpass_cache.pending_flush_bits;
   tu_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier, true);

   cmd_buffer->state.pass = NULL;
   cmd_buffer->state.subpass = NULL;
   cmd_buffer->state.framebuffer = NULL;
}
void
tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
                     const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdEndRenderPass(commandBuffer);
}
struct tu_barrier_info
{
   uint32_t eventCount;
   const VkEvent *pEvents;
   VkPipelineStageFlags srcStageMask;
};
3546 tu_barrier(struct tu_cmd_buffer
*cmd
,
3547 uint32_t memoryBarrierCount
,
3548 const VkMemoryBarrier
*pMemoryBarriers
,
3549 uint32_t bufferMemoryBarrierCount
,
3550 const VkBufferMemoryBarrier
*pBufferMemoryBarriers
,
3551 uint32_t imageMemoryBarrierCount
,
3552 const VkImageMemoryBarrier
*pImageMemoryBarriers
,
3553 const struct tu_barrier_info
*info
)
3555 struct tu_cs
*cs
= cmd
->state
.pass
? &cmd
->draw_cs
: &cmd
->cs
;
3556 VkAccessFlags srcAccessMask
= 0;
3557 VkAccessFlags dstAccessMask
= 0;
3559 for (uint32_t i
= 0; i
< memoryBarrierCount
; i
++) {
3560 srcAccessMask
|= pMemoryBarriers
[i
].srcAccessMask
;
3561 dstAccessMask
|= pMemoryBarriers
[i
].dstAccessMask
;
3564 for (uint32_t i
= 0; i
< bufferMemoryBarrierCount
; i
++) {
3565 srcAccessMask
|= pBufferMemoryBarriers
[i
].srcAccessMask
;
3566 dstAccessMask
|= pBufferMemoryBarriers
[i
].dstAccessMask
;

   enum tu_cmd_access_mask src_flags = 0;
   enum tu_cmd_access_mask dst_flags = 0;

   for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
      TU_FROM_HANDLE(tu_image, image, pImageMemoryBarriers[i].image);
      VkImageLayout old_layout = pImageMemoryBarriers[i].oldLayout;
      /* For non-linear images, PREINITIALIZED is the same as UNDEFINED */
      if (old_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
          (image->tiling != VK_IMAGE_TILING_LINEAR &&
           old_layout == VK_IMAGE_LAYOUT_PREINITIALIZED)) {
         /* The underlying memory for this image may have been used earlier
          * within the same queue submission for a different image, which
          * means that there may be old, stale cache entries which are in the
          * "wrong" location, which could cause problems later after writing
          * to the image. We don't want these entries being flushed later and
          * overwriting the actual image, so we need to flush the CCU.
          */
         src_flags |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
      }
      srcAccessMask |= pImageMemoryBarriers[i].srcAccessMask;
      dstAccessMask |= pImageMemoryBarriers[i].dstAccessMask;
   }

   /* Inside a renderpass we don't know yet whether we'll be using sysmem
    * rendering or GMEM rendering, so we have to assume the worst case and
    * use the sysmem flushes.
    */
   bool gmem = cmd->state.ccu_state == TU_CMD_CCU_GMEM &&
      !cmd->state.pass;
   src_flags |= vk2tu_access(srcAccessMask, gmem);
   dst_flags |= vk2tu_access(dstAccessMask, gmem);

   struct tu_cache_state *cache =
      cmd->state.pass ? &cmd->state.renderpass_cache : &cmd->state.cache;
   tu_flush_for_access(cache, src_flags, dst_flags);
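
   /* Wait for each event GPU-side: CP_WAIT_REG_MEM stalls the command
    * processor until the event's BO reads back the signaled value (1),
    * with a small delay loop between polls.
    */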
   for (uint32_t i = 0; i < info->eventCount; i++) {
      TU_FROM_HANDLE(tu_event, event, info->pEvents[i]);

      tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_READ);

      tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
      tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                     CP_WAIT_REG_MEM_0_POLL_MEMORY);
      tu_cs_emit_qw(cs, event->bo.iova); /* POLL_ADDR_LO/HI */
      tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(1));
      tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0u));
      tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(20));
   }
}
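
/* vkCmdPipelineBarrier has no events to wait on, so it calls tu_barrier()
 * with an empty event list.
 */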
void
tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
                      VkPipelineStageFlags srcStageMask,
                      VkPipelineStageFlags dstStageMask,
                      VkDependencyFlags dependencyFlags,
                      uint32_t memoryBarrierCount,
                      const VkMemoryBarrier *pMemoryBarriers,
                      uint32_t bufferMemoryBarrierCount,
                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                      uint32_t imageMemoryBarrierCount,
                      const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = 0;
   info.pEvents = NULL;
   info.srcStageMask = srcStageMask;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}
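
/* Shared helper for vkCmdSetEvent (value = 1) and vkCmdResetEvent
 * (value = 0): write the value into the event's BO, either immediately
 * from the CP when only top-of-pipe work is involved, or from a timestamp
 * event that waits for prior rendering to finish.
 */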
static void
write_event(struct tu_cmd_buffer *cmd, struct tu_event *event,
            VkPipelineStageFlags stageMask, unsigned value)
{
   struct tu_cs *cs = &cmd->cs;

   /* vkCmdSetEvent/vkCmdResetEvent cannot be called inside a render pass */
   assert(!cmd->state.pass);

   tu_emit_cache_flush(cmd, cs);

   tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_WRITE);

   /* Flags that only require a top-of-pipe event. DrawIndirect parameters are
    * read by the CP, so the draw indirect stage counts as top-of-pipe too.
    */
   VkPipelineStageFlags top_of_pipe_flags =
      VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
      VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;

   if (!(stageMask & ~top_of_pipe_flags)) {
      tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
      tu_cs_emit_qw(cs, event->bo.iova); /* ADDR_LO/HI */
      tu_cs_emit(cs, value);
   } else {
      /* Use a RB_DONE_TS event to wait for everything to complete. */
      tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 4);
      tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(RB_DONE_TS));
      tu_cs_emit_qw(cs, event->bo.iova);
      tu_cs_emit(cs, value);
   }
}

void
tu_CmdSetEvent(VkCommandBuffer commandBuffer,
               VkEvent _event,
               VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd, event, stageMask, 1);
}

void
tu_CmdResetEvent(VkCommandBuffer commandBuffer,
                 VkEvent _event,
                 VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd, event, stageMask, 0);
}
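
/* vkCmdWaitEvents is a barrier plus a list of events to wait on; the
 * actual CP_WAIT_REG_MEM packets are emitted by tu_barrier().
 */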
void
tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
                 uint32_t eventCount,
                 const VkEvent *pEvents,
                 VkPipelineStageFlags srcStageMask,
                 VkPipelineStageFlags dstStageMask,
                 uint32_t memoryBarrierCount,
                 const VkMemoryBarrier *pMemoryBarriers,
                 uint32_t bufferMemoryBarrierCount,
                 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                 uint32_t imageMemoryBarrierCount,
                 const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = eventCount;
   info.pEvents = pEvents;
   info.srcStageMask = 0;

   tu_barrier(cmd, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}

void
tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)