/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include "adreno_pm4.xml.h"
#include "adreno_common.xml.h"

#include "vk_format.h"

void
tu_bo_list_init(struct tu_bo_list *list)
{
   list->count = list->capacity = 0;
   list->bo_infos = NULL;
}

void
tu_bo_list_destroy(struct tu_bo_list *list)
{
   free(list->bo_infos);
}

void
tu_bo_list_reset(struct tu_bo_list *list)
{
   list->count = 0;
}

/**
 * \a flags consists of MSM_SUBMIT_BO_FLAGS.
 */
static uint32_t
tu_bo_list_add_info(struct tu_bo_list *list,
                    const struct drm_msm_gem_submit_bo *bo_info)
{
   assert(bo_info->handle != 0);

   for (uint32_t i = 0; i < list->count; ++i) {
      if (list->bo_infos[i].handle == bo_info->handle) {
         assert(list->bo_infos[i].presumed == bo_info->presumed);
         list->bo_infos[i].flags |= bo_info->flags;
         return i;
      }
   }

   /* grow list->bo_infos if needed */
   if (list->count == list->capacity) {
      uint32_t new_capacity = MAX2(2 * list->count, 16);
      struct drm_msm_gem_submit_bo *new_bo_infos = realloc(
         list->bo_infos, new_capacity * sizeof(struct drm_msm_gem_submit_bo));
      if (!new_bo_infos)
         return TU_BO_LIST_FAILED;
      list->bo_infos = new_bo_infos;
      list->capacity = new_capacity;
   }

   list->bo_infos[list->count] = *bo_info;
   return list->count++;
}

uint32_t
tu_bo_list_add(struct tu_bo_list *list,
               const struct tu_bo *bo,
               uint32_t flags)
{
   return tu_bo_list_add_info(list, &(struct drm_msm_gem_submit_bo) {
      .flags = flags,
      .handle = bo->gem_handle,
      .presumed = bo->iova,
   });
}

VkResult
tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other)
{
   for (uint32_t i = 0; i < other->count; i++) {
      if (tu_bo_list_add_info(list, other->bo_infos + i) == TU_BO_LIST_FAILED)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   return VK_SUCCESS;
}

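/* Events with a "_TS" suffix below are presumably timestamp-style events that
 * write a sequence number back to memory when they retire, which is why they
 * take the full 4-dword CP_EVENT_WRITE payload (event id plus a 64-bit
 * scratch address) instead of just the event id.
 */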
void
tu6_emit_event_write(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     enum vgt_event_type event)
{
   bool need_seqno = false;
   switch (event) {
   case PC_CCU_FLUSH_DEPTH_TS:
   case PC_CCU_FLUSH_COLOR_TS:
   case PC_CCU_RESOLVE_TS:
      need_seqno = true;
      break;
   default:
      break;
   }

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
   tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
   if (need_seqno) {
      tu_cs_emit_qw(cs, global_iova(cmd, seqno_dummy));
      tu_cs_emit(cs, 0);
   }
}

static void
tu6_emit_flushes(struct tu_cmd_buffer *cmd_buffer,
                 struct tu_cs *cs,
                 enum tu_cmd_flush_bits flushes)
{
   /* Experiments show that invalidating CCU while it still has data in it
    * doesn't work, so make sure to always flush before invalidating in case
    * any data remains that hasn't yet been made available through a barrier.
    * However it does seem to work for UCHE.
    */
   if (flushes & (TU_CMD_FLAG_CCU_FLUSH_COLOR |
                  TU_CMD_FLAG_CCU_INVALIDATE_COLOR))
      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_FLUSH_COLOR_TS);
   if (flushes & (TU_CMD_FLAG_CCU_FLUSH_DEPTH |
                  TU_CMD_FLAG_CCU_INVALIDATE_DEPTH))
      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_FLUSH_DEPTH_TS);
   if (flushes & TU_CMD_FLAG_CCU_INVALIDATE_COLOR)
      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_INVALIDATE_COLOR);
   if (flushes & TU_CMD_FLAG_CCU_INVALIDATE_DEPTH)
      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_INVALIDATE_DEPTH);
   if (flushes & TU_CMD_FLAG_CACHE_FLUSH)
      tu6_emit_event_write(cmd_buffer, cs, CACHE_FLUSH_TS);
   if (flushes & TU_CMD_FLAG_CACHE_INVALIDATE)
      tu6_emit_event_write(cmd_buffer, cs, CACHE_INVALIDATE);
   if (flushes & TU_CMD_FLAG_WAIT_MEM_WRITES)
      tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
   if (flushes & TU_CMD_FLAG_WAIT_FOR_IDLE)
      tu_cs_emit_wfi(cs);
   if (flushes & TU_CMD_FLAG_WAIT_FOR_ME)
      tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
}

/* "Normal" cache flushes, that don't require any special handling */

void
tu_emit_cache_flush(struct tu_cmd_buffer *cmd_buffer,
                    struct tu_cs *cs)
{
   tu6_emit_flushes(cmd_buffer, cs, cmd_buffer->state.cache.flush_bits);
   cmd_buffer->state.cache.flush_bits = 0;
}

/* Renderpass cache flushes */

void
tu_emit_cache_flush_renderpass(struct tu_cmd_buffer *cmd_buffer,
                               struct tu_cs *cs)
{
   tu6_emit_flushes(cmd_buffer, cs,
                    cmd_buffer->state.renderpass_cache.flush_bits);
   cmd_buffer->state.renderpass_cache.flush_bits = 0;
}

/* Cache flushes for things that use the color/depth read/write path (i.e.
 * blits and draws). This deals with changing CCU state as well as the usual
 * cache flushing.
 */

void
tu_emit_cache_flush_ccu(struct tu_cmd_buffer *cmd_buffer,
                        struct tu_cs *cs,
                        enum tu_cmd_ccu_state ccu_state)
{
   enum tu_cmd_flush_bits flushes = cmd_buffer->state.cache.flush_bits;

   assert(ccu_state != TU_CMD_CCU_UNKNOWN);

   /* Changing CCU state must involve invalidating the CCU. In sysmem mode,
    * the CCU may also contain data that we haven't flushed out yet, so we
    * also need to flush. Also, in order to program RB_CCU_CNTL, we need to
    * emit a WFI as it isn't pipelined.
    */
   if (ccu_state != cmd_buffer->state.ccu_state) {
      if (cmd_buffer->state.ccu_state != TU_CMD_CCU_GMEM) {
         flushes |=
            TU_CMD_FLAG_CCU_FLUSH_COLOR |
            TU_CMD_FLAG_CCU_FLUSH_DEPTH;
         cmd_buffer->state.cache.pending_flush_bits &= ~(
            TU_CMD_FLAG_CCU_FLUSH_COLOR |
            TU_CMD_FLAG_CCU_FLUSH_DEPTH);
      }
      flushes |=
         TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
         TU_CMD_FLAG_CCU_INVALIDATE_DEPTH |
         TU_CMD_FLAG_WAIT_FOR_IDLE;
      cmd_buffer->state.cache.pending_flush_bits &= ~(
         TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
         TU_CMD_FLAG_CCU_INVALIDATE_DEPTH |
         TU_CMD_FLAG_WAIT_FOR_IDLE);
   }

   tu6_emit_flushes(cmd_buffer, cs, flushes);
   cmd_buffer->state.cache.flush_bits = 0;

   if (ccu_state != cmd_buffer->state.ccu_state) {
      struct tu_physical_device *phys_dev = cmd_buffer->device->physical_device;
      tu_cs_emit_regs(cs,
                      A6XX_RB_CCU_CNTL(.offset =
                                          ccu_state == TU_CMD_CCU_GMEM ?
                                          phys_dev->ccu_offset_gmem :
                                          phys_dev->ccu_offset_bypass,
                                       .gmem = ccu_state == TU_CMD_CCU_GMEM));
      cmd_buffer->state.ccu_state = ccu_state;
   }
}

static void
tu6_emit_zs(struct tu_cmd_buffer *cmd,
            const struct tu_subpass *subpass,
            struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   const uint32_t a = subpass->depth_stencil_attachment.attachment;
   if (a == VK_ATTACHMENT_UNUSED) {
      tu_cs_emit_regs(cs,
                      A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE),
                      A6XX_RB_DEPTH_BUFFER_PITCH(0),
                      A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(0),
                      A6XX_RB_DEPTH_BUFFER_BASE(0),
                      A6XX_RB_DEPTH_BUFFER_BASE_GMEM(0));

      tu_cs_emit_regs(cs,
                      A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE));

      tu_cs_emit_regs(cs,
                      A6XX_GRAS_LRZ_BUFFER_BASE(0),
                      A6XX_GRAS_LRZ_BUFFER_PITCH(0),
                      A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));

      tu_cs_emit_regs(cs, A6XX_RB_STENCIL_INFO(0));

      return;
   }

   const struct tu_image_view *iview = fb->attachments[a].attachment;
   const struct tu_render_pass_attachment *attachment =
      &cmd->state.pass->attachments[a];
   enum a6xx_depth_format fmt = tu6_pipe2depth(attachment->format);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
   tu_cs_emit(cs, A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt).value);
   tu_cs_image_ref(cs, iview, 0);
   tu_cs_emit(cs, attachment->gmem_offset);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = fmt));

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
   tu_cs_image_flag_ref(cs, iview, 0);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_BUFFER_BASE(0),
                   A6XX_GRAS_LRZ_BUFFER_PITCH(0),
                   A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));

   if (attachment->format == VK_FORMAT_D32_SFLOAT_S8_UINT ||
       attachment->format == VK_FORMAT_S8_UINT) {
      tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCIL_INFO, 6);
      tu_cs_emit(cs, A6XX_RB_STENCIL_INFO(.separate_stencil = true).value);
      if (attachment->format == VK_FORMAT_D32_SFLOAT_S8_UINT) {
         tu_cs_image_stencil_ref(cs, iview, 0);
         tu_cs_emit(cs, attachment->gmem_offset_stencil);
      } else {
         tu_cs_image_ref(cs, iview, 0);
         tu_cs_emit(cs, attachment->gmem_offset);
      }
   } else {
      tu_cs_emit_regs(cs,
                      A6XX_RB_STENCIL_INFO(0));
   }
}

static void
tu6_emit_mrt(struct tu_cmd_buffer *cmd,
             const struct tu_subpass *subpass,
             struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   for (uint32_t i = 0; i < subpass->color_count; ++i) {
      uint32_t a = subpass->color_attachments[i].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      const struct tu_image_view *iview = fb->attachments[a].attachment;

      tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_BUF_INFO(i), 6);
      tu_cs_emit(cs, iview->RB_MRT_BUF_INFO);
      tu_cs_image_ref(cs, iview, 0);
      tu_cs_emit(cs, cmd->state.pass->attachments[a].gmem_offset);

      tu_cs_emit_regs(cs,
                      A6XX_SP_FS_MRT_REG(i, .dword = iview->SP_FS_MRT_REG));

      tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_LO(i), 3);
      tu_cs_image_flag_ref(cs, iview, 0);
   }

   tu_cs_emit_regs(cs,
                   A6XX_RB_SRGB_CNTL(.dword = subpass->srgb_cntl));
   tu_cs_emit_regs(cs,
                   A6XX_SP_SRGB_CNTL(.dword = subpass->srgb_cntl));

   unsigned layers = MAX2(fb->layers, util_logbase2(subpass->multiview_mask) + 1);
   tu_cs_emit_regs(cs, A6XX_GRAS_MAX_LAYER_INDEX(layers - 1));
}

static void
tu6_emit_msaa(struct tu_cs *cs, VkSampleCountFlagBits vk_samples)
{
   const enum a3xx_msaa_samples samples = tu_msaa_samples(vk_samples);
   bool msaa_disable = samples == MSAA_ONE;

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_RAS_MSAA_CNTL(samples),
                   A6XX_SP_TP_DEST_MSAA_CNTL(.samples = samples,
                                             .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_RAS_MSAA_CNTL(samples),
                   A6XX_GRAS_DEST_MSAA_CNTL(.samples = samples,
                                            .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_RB_RAS_MSAA_CNTL(samples),
                   A6XX_RB_DEST_MSAA_CNTL(.samples = samples,
                                          .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_RB_MSAA_CNTL(samples));
}

static void
tu6_emit_bin_size(struct tu_cs *cs,
                  uint32_t bin_w, uint32_t bin_h, uint32_t flags)
{
   tu_cs_emit_regs(cs,
                   A6XX_GRAS_BIN_CONTROL(.binw = bin_w,
                                         .binh = bin_h,
                                         .dword = flags));

   tu_cs_emit_regs(cs,
                   A6XX_RB_BIN_CONTROL(.binw = bin_w,
                                       .binh = bin_h,
                                       .dword = flags));

   /* no flag for RB_BIN_CONTROL2... */
   tu_cs_emit_regs(cs,
                   A6XX_RB_BIN_CONTROL2(.binw = bin_w,
                                        .binh = bin_h));
}

static void
tu6_emit_render_cntl(struct tu_cmd_buffer *cmd,
                     const struct tu_subpass *subpass,
                     struct tu_cs *cs,
                     bool binning)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   uint32_t cntl = 0;
   cntl |= A6XX_RB_RENDER_CNTL_UNK4;
   if (binning) {
      cntl |= A6XX_RB_RENDER_CNTL_BINNING;
   } else {
      uint32_t mrts_ubwc_enable = 0;
      for (uint32_t i = 0; i < subpass->color_count; ++i) {
         uint32_t a = subpass->color_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         const struct tu_image_view *iview = fb->attachments[a].attachment;
         if (iview->ubwc_enabled)
            mrts_ubwc_enable |= 1 << i;
      }

      cntl |= A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable);

      const uint32_t a = subpass->depth_stencil_attachment.attachment;
      if (a != VK_ATTACHMENT_UNUSED) {
         const struct tu_image_view *iview = fb->attachments[a].attachment;
         if (iview->ubwc_enabled)
            cntl |= A6XX_RB_RENDER_CNTL_FLAG_DEPTH;
      }

      /* In the !binning case, we need to set RB_RENDER_CNTL in the draw_cs
       * in order to set it correctly for the different subpasses. However,
       * that means the packets we're emitting also happen during binning. So
       * we need to guard the write on !BINNING at CP execution time.
       */
      tu_cs_reserve(cs, 3 + 4);
      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
      tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
                     CP_COND_REG_EXEC_0_GMEM | CP_COND_REG_EXEC_0_SYSMEM);
      tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(4));
   }

   tu_cs_emit_pkt7(cs, CP_REG_WRITE, 3);
   tu_cs_emit(cs, CP_REG_WRITE_0_TRACKER(TRACK_RENDER_CNTL));
   tu_cs_emit(cs, REG_A6XX_RB_RENDER_CNTL);
   tu_cs_emit(cs, cntl);
}

static void
tu6_emit_blit_scissor(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool align)
{
   const VkRect2D *render_area = &cmd->state.render_area;

   /* Avoid assertion fails with an empty render area at (0, 0) where the
    * subtraction below wraps around. Empty render areas should be forced to
    * the sysmem path by use_sysmem_rendering(). It's not even clear whether
    * an empty scissor here works, and the blob seems to force sysmem too as
    * it sets something wrong (non-empty) for the scissor.
    */
   if (render_area->extent.width == 0 ||
       render_area->extent.height == 0)
      return;

   uint32_t x1 = render_area->offset.x;
   uint32_t y1 = render_area->offset.y;
   uint32_t x2 = x1 + render_area->extent.width - 1;
   uint32_t y2 = y1 + render_area->extent.height - 1;

   if (align) {
      x1 = x1 & ~(GMEM_ALIGN_W - 1);
      y1 = y1 & ~(GMEM_ALIGN_H - 1);
      x2 = ALIGN_POT(x2 + 1, GMEM_ALIGN_W) - 1;
      y2 = ALIGN_POT(y2 + 1, GMEM_ALIGN_H) - 1;
   }

   tu_cs_emit_regs(cs,
                   A6XX_RB_BLIT_SCISSOR_TL(.x = x1, .y = y1),
                   A6XX_RB_BLIT_SCISSOR_BR(.x = x2, .y = y2));
}

void
tu6_emit_window_scissor(struct tu_cs *cs,
                        uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2)
{
   tu_cs_emit_regs(cs,
                   A6XX_GRAS_SC_WINDOW_SCISSOR_TL(.x = x1, .y = y1),
                   A6XX_GRAS_SC_WINDOW_SCISSOR_BR(.x = x2, .y = y2));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_2D_RESOLVE_CNTL_1(.x = x1, .y = y1),
                   A6XX_GRAS_2D_RESOLVE_CNTL_2(.x = x2, .y = y2));
}

void
tu6_emit_window_offset(struct tu_cs *cs, uint32_t x1, uint32_t y1)
{
   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET2(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_SP_WINDOW_OFFSET(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_WINDOW_OFFSET(.x = x1, .y = y1));
}

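/* Emit one CP_SET_DRAW_STATE group: the enable mask chosen below controls in
 * which passes (binning, GMEM draw, sysmem draw) the CP replays the group,
 * and a zero-sized state disables the group entirely.
 */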
void
tu_cs_emit_draw_state(struct tu_cs *cs, uint32_t id, struct tu_draw_state state)
{
   uint32_t enable_mask;
   switch (id) {
   case TU_DRAW_STATE_PROGRAM:
   case TU_DRAW_STATE_VI:
   case TU_DRAW_STATE_FS_CONST:
   /* The blob seems to not enable this (DESC_SETS_LOAD) for binning, even
    * when resources would actually be used in the binning shader.
    * Presumably the overhead of prefetching the resources isn't
    * worth it.
    */
   case TU_DRAW_STATE_DESC_SETS_LOAD:
      enable_mask = CP_SET_DRAW_STATE__0_GMEM |
                    CP_SET_DRAW_STATE__0_SYSMEM;
      break;
   case TU_DRAW_STATE_PROGRAM_BINNING:
   case TU_DRAW_STATE_VI_BINNING:
      enable_mask = CP_SET_DRAW_STATE__0_BINNING;
      break;
   case TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM:
      enable_mask = CP_SET_DRAW_STATE__0_GMEM;
      break;
   case TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM:
      enable_mask = CP_SET_DRAW_STATE__0_SYSMEM;
      break;
   default:
      enable_mask = CP_SET_DRAW_STATE__0_GMEM |
                    CP_SET_DRAW_STATE__0_SYSMEM |
                    CP_SET_DRAW_STATE__0_BINNING;
      break;
   }

   /* We need to reload the descriptors every time the descriptor sets
    * change. However, the commands we send only depend on the pipeline
    * because the whole point is to cache descriptors which are used by the
    * pipeline. There's a problem here, in that the firmware has an
    * "optimization" which skips executing groups that are set to the same
    * value as the last draw. This means that if the descriptor sets change
    * but not the pipeline, we'd try to re-execute the same buffer which
    * the firmware would ignore and we wouldn't pre-load the new
    * descriptors. Set the DIRTY bit to avoid this optimization
    */
   if (id == TU_DRAW_STATE_DESC_SETS_LOAD)
      enable_mask |= CP_SET_DRAW_STATE__0_DIRTY;

   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(state.size) |
                  enable_mask |
                  CP_SET_DRAW_STATE__0_GROUP_ID(id) |
                  COND(!state.size, CP_SET_DRAW_STATE__0_DISABLE));
   tu_cs_emit_qw(cs, state.iova);
}

static bool
use_hw_binning(struct tu_cmd_buffer *cmd)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   /* XFB commands are emitted for BINNING || SYSMEM, which makes it incompatible
    * with non-hw binning GMEM rendering. this is required because some of the
    * XFB commands need to only be executed once
    */
   if (cmd->state.xfb_used)
      return true;

   /* Some devices have a newer a630_sqe.fw in which, only in CP_DRAW_INDX and
    * CP_DRAW_INDX_OFFSET, visibility-based skipping happens *before*
    * predication-based skipping. It seems this breaks predication, because
    * draws skipped by predication will not be executed in the binning phase,
    * and therefore won't have an entry in the draw stream, but the
    * visibility-based skipping will expect it to have an entry. The result is
    * a GPU hang when actually executing the first non-predicated draw.
    * However, it seems that things still work if the whole renderpass is
    * predicated. Affected tests are
    * dEQP-VK.conditional_rendering.draw_clear.draw.case_2 as well as a few
    * others.
    *
    * Broken FW version: 016ee181
    * linux-firmware (working) FW version: 016ee176
    *
    * All known a650_sqe.fw versions don't have this bug.
    *
    * TODO: we should do version detection of the FW so that devices using the
    * linux-firmware version of a630_sqe.fw don't need this workaround.
    */
   if (cmd->state.has_subpass_predication && cmd->device->physical_device->gpu_id != 650)
      return false;

   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_NOBIN))
      return false;

   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN))
      return true;

   return (fb->tile_count.width * fb->tile_count.height) > 2;
}

static bool
use_sysmem_rendering(struct tu_cmd_buffer *cmd)
{
   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_SYSMEM))
      return true;

   /* If hw binning is required because of XFB but doesn't work because of the
    * conditional rendering bug, fallback to sysmem.
    */
   if (cmd->state.xfb_used && cmd->state.has_subpass_predication &&
       cmd->device->physical_device->gpu_id != 650)
      return true;

   /* can't fit attachments into gmem */
   if (!cmd->state.pass->gmem_pixels)
      return true;

   if (cmd->state.framebuffer->layers > 1)
      return true;

   /* Use sysmem for empty render areas */
   if (cmd->state.render_area.extent.width == 0 ||
       cmd->state.render_area.extent.height == 0)
      return true;

   if (cmd->state.has_tess)
      return true;

   return false;
}

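/* Select the tile within the current visibility pipe: program the window
 * scissor/offset for this tile and, when hw binning is used, point the CP at
 * this slot's portion of the per-pipe visibility streams so it can skip
 * draws that don't touch the tile.
 */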
static void
tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     uint32_t tx, uint32_t ty, uint32_t pipe, uint32_t slot)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_YIELD));

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));

   const uint32_t x1 = fb->tile0.width * tx;
   const uint32_t y1 = fb->tile0.height * ty;
   const uint32_t x2 = x1 + fb->tile0.width - 1;
   const uint32_t y2 = y1 + fb->tile0.height - 1;
   tu6_emit_window_scissor(cs, x1, y1, x2, y2);
   tu6_emit_window_offset(cs, x1, y1);

   tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));

   if (use_hw_binning(cmd)) {
      tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);

      tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5_OFFSET, 4);
      tu_cs_emit(cs, fb->pipe_sizes[pipe] |
                     CP_SET_BIN_DATA5_0_VSC_N(slot));
      tu_cs_emit(cs, pipe * cmd->vsc_draw_strm_pitch);
      tu_cs_emit(cs, pipe * 4);
      tu_cs_emit(cs, pipe * cmd->vsc_prim_strm_pitch);

      tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   } else {
      tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   }
}

static void
tu6_emit_sysmem_resolve(struct tu_cmd_buffer *cmd,
                        struct tu_cs *cs,
                        uint32_t layer_mask,
                        uint32_t a,
                        uint32_t gmem_a)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   struct tu_image_view *dst = fb->attachments[a].attachment;
   struct tu_image_view *src = fb->attachments[gmem_a].attachment;

   tu_resolve_sysmem(cmd, cs, src, dst, layer_mask, fb->layers, &cmd->state.render_area);
}

static void
tu6_emit_sysmem_resolves(struct tu_cmd_buffer *cmd,
                         struct tu_cs *cs,
                         const struct tu_subpass *subpass)
{
   if (subpass->resolve_attachments) {
      /* From the documentation for vkCmdNextSubpass, section 7.4 "Render Pass
       * Commands":
       *
       *    End-of-subpass multisample resolves are treated as color
       *    attachment writes for the purposes of synchronization. That is,
       *    they are considered to execute in the
       *    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT pipeline stage and
       *    their writes are synchronized with
       *    VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT. Synchronization between
       *    rendering within a subpass and any resolve operations at the end
       *    of the subpass occurs automatically, without need for explicit
       *    dependencies or pipeline barriers. However, if the resolve
       *    attachment is also used in a different subpass, an explicit
       *    dependency is needed.
       *
       * We use the CP_BLIT path for sysmem resolves, which is really a
       * transfer command, so we have to manually flush similar to the gmem
       * resolve case. However, a flush afterwards isn't needed because of the
       * last sentence and the fact that we're in sysmem mode.
       */
      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);

      /* Wait for the flushes to land before using the 2D engine */
      tu_cs_emit_wfi(cs);

      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         tu6_emit_sysmem_resolve(cmd, cs, subpass->multiview_mask, a,
                                 subpass->color_attachments[i].attachment);
      }
   }
}

static void
tu6_emit_tile_store(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_render_pass *pass = cmd->state.pass;
   const struct tu_subpass *subpass = &pass->subpasses[pass->subpass_count-1];

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE));

   tu6_emit_blit_scissor(cmd, cs, true);

   for (uint32_t a = 0; a < pass->attachment_count; ++a) {
      if (pass->attachments[a].gmem_offset >= 0)
         tu_store_gmem_attachment(cmd, cs, a, a);
   }

   if (subpass->resolve_attachments) {
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a != VK_ATTACHMENT_UNUSED)
            tu_store_gmem_attachment(cmd, cs, a,
                                     subpass->color_attachments[i].attachment);
      }
   }
}

static void
tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_device *dev = cmd->device;
   const struct tu_physical_device *phys_dev = dev->physical_device;

   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);

   tu_cs_emit_regs(cs, A6XX_HLSQ_INVALIDATE_CMD(
         .gfx_shared_const = true,
         .cs_shared_const = true,
         .gfx_bindless = 0x1f,
         .cs_bindless = 0x1f));

   cmd->state.cache.pending_flush_bits &=
      ~(TU_CMD_FLAG_WAIT_FOR_IDLE | TU_CMD_FLAG_CACHE_INVALIDATE);

   tu_cs_emit_regs(cs,
                   A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_bypass));
   cmd->state.ccu_state = TU_CMD_CCU_SYSMEM;
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE0F, 0x3f);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B605, 0x44);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B600, 0x100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE00, 0x80);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE01, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9600, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8600, 0x880);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE04, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE03, 0x00000410);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_IBO_COUNT, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B182, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_SHARED_CONSTS, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A982, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A9A8, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_MODE_CONTROL,
                        A6XX_SP_MODE_CONTROL_CONSTANT_DEMOTION_ENABLE | 4);

   /* TODO: set A6XX_VFD_ADD_OFFSET_INSTANCE and fix ir3 to avoid adding base instance */
   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x1f);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_SRGB_CNTL, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8110, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL0, 0x401);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL1, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8818, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8819, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881A, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881C, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881D, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881E, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_88F0, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9107, 0);

   tu_cs_emit_regs(cs, A6XX_VPC_POINT_COORD_INVERT(false));
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);

   tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(true));

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9980, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A81B, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B183, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8099, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A0, 2);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80AF, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9211, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9602, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9E72, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B309, 0x000000a2);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_MODE_CNTL, 0x00000000);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x0000001f);

   /* we don't use this yet.. probably best to disable.. */
   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_HS_CTRL_REG0(0));
   tu_cs_emit_regs(cs,
                   A6XX_SP_GS_CTRL_REG0(0));
   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_CNTL(0));
   tu_cs_emit_regs(cs,
                   A6XX_RB_LRZ_CNTL(0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(.bo = &dev->global_bo,
                                                     .bo_offset = gb_offset(bcolor_builtin)));
   tu_cs_emit_regs(cs,
                   A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(.bo = &dev->global_bo,
                                                        .bo_offset = gb_offset(bcolor_builtin)));

   /* use vsc pitches from the largest values used so far with this device
    * if there hasn't been overflow, there will already be a scratch bo
    * allocated for these sizes
    *
    * if overflow is detected, the stream size is increased by 2x
    */
   mtx_lock(&dev->mutex);

   struct tu6_global *global = dev->global_bo.map;

   uint32_t vsc_draw_overflow = global->vsc_draw_overflow;
   uint32_t vsc_prim_overflow = global->vsc_prim_overflow;

   if (vsc_draw_overflow >= dev->vsc_draw_strm_pitch)
      dev->vsc_draw_strm_pitch = (dev->vsc_draw_strm_pitch - VSC_PAD) * 2 + VSC_PAD;

   if (vsc_prim_overflow >= dev->vsc_prim_strm_pitch)
      dev->vsc_prim_strm_pitch = (dev->vsc_prim_strm_pitch - VSC_PAD) * 2 + VSC_PAD;

   cmd->vsc_prim_strm_pitch = dev->vsc_prim_strm_pitch;
   cmd->vsc_draw_strm_pitch = dev->vsc_draw_strm_pitch;

   mtx_unlock(&dev->mutex);

   struct tu_bo *vsc_bo;
   uint32_t size0 = cmd->vsc_prim_strm_pitch * MAX_VSC_PIPES +
                    cmd->vsc_draw_strm_pitch * MAX_VSC_PIPES;

   tu_get_scratch_bo(dev, size0 + MAX_VSC_PIPES * 4, &vsc_bo);

   tu_cs_emit_regs(cs,
                   A6XX_VSC_DRAW_STRM_SIZE_ADDRESS(.bo = vsc_bo, .bo_offset = size0));
   tu_cs_emit_regs(cs,
                   A6XX_VSC_PRIM_STRM_ADDRESS(.bo = vsc_bo));
   tu_cs_emit_regs(cs,
                   A6XX_VSC_DRAW_STRM_ADDRESS(.bo = vsc_bo,
                                              .bo_offset = cmd->vsc_prim_strm_pitch * MAX_VSC_PIPES));

   tu_bo_list_add(&cmd->bo_list, vsc_bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);

   tu_cs_sanity_check(cs);
}

static void
update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   tu_cs_emit_regs(cs,
                   A6XX_VSC_BIN_SIZE(.width = fb->tile0.width,
                                     .height = fb->tile0.height));

   tu_cs_emit_regs(cs,
                   A6XX_VSC_BIN_COUNT(.nx = fb->tile_count.width,
                                      .ny = fb->tile_count.height));

   tu_cs_emit_pkt4(cs, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
   tu_cs_emit_array(cs, fb->pipe_config, 32);

   tu_cs_emit_regs(cs,
                   A6XX_VSC_PRIM_STRM_PITCH(cmd->vsc_prim_strm_pitch),
                   A6XX_VSC_PRIM_STRM_LIMIT(cmd->vsc_prim_strm_pitch - VSC_PAD));

   tu_cs_emit_regs(cs,
                   A6XX_VSC_DRAW_STRM_PITCH(cmd->vsc_draw_strm_pitch),
                   A6XX_VSC_DRAW_STRM_LIMIT(cmd->vsc_draw_strm_pitch - VSC_PAD));
}

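/* Read back the per-pipe stream sizes the VSC reported for the binning pass
 * and, if any of them reached the configured limit, record the current pitch
 * in the global buffer. tu6_init_hw() reads those overflow values back on
 * the next command buffer and doubles the stream pitches, so overflow heals
 * itself over time.
 */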
static void
emit_vsc_overflow_test(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   const uint32_t used_pipe_count =
      fb->pipe_count.width * fb->pipe_count.height;

   for (int i = 0; i < used_pipe_count; i++) {
      tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
      tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
                     CP_COND_WRITE5_0_WRITE_MEMORY);
      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_DRAW_STRM_SIZE_REG(i)));
      tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_draw_strm_pitch - VSC_PAD));
      tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
      tu_cs_emit_qw(cs, global_iova(cmd, vsc_draw_overflow));
      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(cmd->vsc_draw_strm_pitch));

      tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
      tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
                     CP_COND_WRITE5_0_WRITE_MEMORY);
      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_PRIM_STRM_SIZE_REG(i)));
      tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_prim_strm_pitch - VSC_PAD));
      tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
      tu_cs_emit_qw(cs, global_iova(cmd, vsc_prim_overflow));
      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(cmd->vsc_prim_strm_pitch));
   }

   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
}

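/* Run the binning pass: replay the draw_cs with the VFD in binning mode so
 * the VSC fills the per-pipe visibility streams that the per-tile rendering
 * passes will later use for draw skipping.
 */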
static void
tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_physical_device *phys_dev = cmd->device->physical_device;
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   tu6_emit_window_scissor(cs, 0, 0, fb->width - 1, fb->height - 1);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_regs(cs,
                   A6XX_VFD_MODE_CNTL(.binning_pass = true));

   update_vsc_pipe(cmd, cs);

   tu_cs_emit_regs(cs,
                   A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));

   tu_cs_emit_regs(cs,
                   A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
   tu_cs_emit(cs, UNK_2C);

   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET(.x = 0, .y = 0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_WINDOW_OFFSET(.x = 0, .y = 0));

   /* emit IB to binning drawcmds: */
   tu_cs_emit_call(cs, &cmd->draw_cs);

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
   tu_cs_emit(cs, UNK_2D);

   /* This flush is probably required because the VSC, which produces the
    * visibility stream, is a client of UCHE, whereas the CP needs to read the
    * visibility stream (without caching) to do draw skipping. The
    * WFI+WAIT_FOR_ME combination guarantees that the binning commands
    * submitted are finished before reading the VSC regs (in
    * emit_vsc_overflow_test) or the VSC_DATA buffer directly (implicitly as
    * part of the draws).
    */
   tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS);

   tu_cs_emit_wfi(cs);

   tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

   emit_vsc_overflow_test(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x0);
}

static struct tu_draw_state
tu_emit_input_attachments(struct tu_cmd_buffer *cmd,
                          const struct tu_subpass *subpass,
                          bool gmem)
{
   /* note: we can probably emit input attachments just once for the whole
    * renderpass, this would avoid emitting both sysmem/gmem versions
    *
    * emit two texture descriptors for each input, as a workaround for
    * d24s8/d32s8, which can be sampled as both float (depth) and integer (stencil)
    * tu_shader lowers uint input attachment loads to use the 2nd descriptor
    *
    * TODO: a smarter workaround
    */

   if (!subpass->input_count)
      return (struct tu_draw_state) {};

   struct tu_cs_memory texture;
   VkResult result = tu_cs_alloc(&cmd->sub_cs, subpass->input_count * 2,
                                 A6XX_TEX_CONST_DWORDS, &texture);
   assert(result == VK_SUCCESS);

   for (unsigned i = 0; i < subpass->input_count * 2; i++) {
      uint32_t a = subpass->input_attachments[i / 2].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      struct tu_image_view *iview =
         cmd->state.framebuffer->attachments[a].attachment;
      const struct tu_render_pass_attachment *att =
         &cmd->state.pass->attachments[a];
      uint32_t *dst = &texture.map[A6XX_TEX_CONST_DWORDS * i];
      uint32_t gmem_offset = att->gmem_offset;
      uint32_t cpp = att->cpp;

      memcpy(dst, iview->descriptor, A6XX_TEX_CONST_DWORDS * 4);

      if (i % 2 == 1 && att->format == VK_FORMAT_D24_UNORM_S8_UINT) {
         /* note this works because spec says fb and input attachments
          * must use identity swizzle
          */
         dst[0] &= ~(A6XX_TEX_CONST_0_FMT__MASK |
            A6XX_TEX_CONST_0_SWIZ_X__MASK | A6XX_TEX_CONST_0_SWIZ_Y__MASK |
            A6XX_TEX_CONST_0_SWIZ_Z__MASK | A6XX_TEX_CONST_0_SWIZ_W__MASK);
         if (cmd->device->physical_device->limited_z24s8) {
            dst[0] |= A6XX_TEX_CONST_0_FMT(FMT6_8_8_8_8_UINT) |
               A6XX_TEX_CONST_0_SWIZ_X(A6XX_TEX_W) |
               A6XX_TEX_CONST_0_SWIZ_Y(A6XX_TEX_ZERO) |
               A6XX_TEX_CONST_0_SWIZ_Z(A6XX_TEX_ZERO) |
               A6XX_TEX_CONST_0_SWIZ_W(A6XX_TEX_ONE);
         } else {
            dst[0] |= A6XX_TEX_CONST_0_FMT(FMT6_Z24_UINT_S8_UINT) |
               A6XX_TEX_CONST_0_SWIZ_X(A6XX_TEX_Y) |
               A6XX_TEX_CONST_0_SWIZ_Y(A6XX_TEX_ZERO) |
               A6XX_TEX_CONST_0_SWIZ_Z(A6XX_TEX_ZERO) |
               A6XX_TEX_CONST_0_SWIZ_W(A6XX_TEX_ONE);
         }
      }

      if (i % 2 == 1 && att->format == VK_FORMAT_D32_SFLOAT_S8_UINT) {
         dst[0] &= ~A6XX_TEX_CONST_0_FMT__MASK;
         dst[0] |= A6XX_TEX_CONST_0_FMT(FMT6_8_UINT);
         dst[2] &= ~(A6XX_TEX_CONST_2_PITCHALIGN__MASK | A6XX_TEX_CONST_2_PITCH__MASK);
         dst[2] |= A6XX_TEX_CONST_2_PITCH(iview->stencil_PITCH << 6);
         dst[4] = iview->stencil_base_addr;
         dst[5] = (dst[5] & 0xffff) | iview->stencil_base_addr >> 32;

         gmem_offset = att->gmem_offset_stencil;
      }

      if (!gmem)
         continue;

      /* patched for gmem */
      dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
      dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
      dst[2] =
         A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
         A6XX_TEX_CONST_2_PITCH(cmd->state.framebuffer->tile0.width * cpp);
      dst[4] = cmd->device->physical_device->gmem_base + gmem_offset;
      dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
      for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
         dst[i] = 0;
   }

   struct tu_cs cs;
   struct tu_draw_state ds = tu_cs_draw_state(&cmd->sub_cs, &cs, 9);

   tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_FRAG, 3);
   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
                   CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                   CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                   CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_TEX) |
                   CP_LOAD_STATE6_0_NUM_UNIT(subpass->input_count * 2));
   tu_cs_emit_qw(&cs, texture.iova);

   tu_cs_emit_pkt4(&cs, REG_A6XX_SP_FS_TEX_CONST_LO, 2);
   tu_cs_emit_qw(&cs, texture.iova);

   tu_cs_emit_regs(&cs, A6XX_SP_FS_TEX_COUNT(subpass->input_count * 2));

   assert(cs.cur == cs.end); /* validate draw state size */

   return ds;
}

void
tu_set_input_attachments(struct tu_cmd_buffer *cmd, const struct tu_subpass *subpass)
{
   struct tu_cs *cs = &cmd->draw_cs;

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 6);
   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM,
                         tu_emit_input_attachments(cmd, subpass, true));
   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM,
                         tu_emit_input_attachments(cmd, subpass, false));
}

static void
tu_emit_renderpass_begin(struct tu_cmd_buffer *cmd,
                         const VkRenderPassBeginInfo *info)
{
   struct tu_cs *cs = &cmd->draw_cs;

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);

   tu6_emit_blit_scissor(cmd, cs, true);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu_load_gmem_attachment(cmd, cs, i, false);

   tu6_emit_blit_scissor(cmd, cs, false);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu_clear_gmem_attachment(cmd, cs, i, info);

   tu_cond_exec_end(cs);

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu_clear_sysmem_attachment(cmd, cs, i, info);

   tu_cond_exec_end(cs);
}

static void
tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   assert(fb->width > 0 && fb->height > 0);
   tu6_emit_window_scissor(cs, 0, 0, fb->width - 1, fb->height - 1);
   tu6_emit_window_offset(cs, 0, 0);

   tu6_emit_bin_size(cs, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */

   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_SYSMEM);

   /* enable stream-out, with sysmem there is only one pass: */
   tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_sanity_check(cs);
}

static void
tu6_sysmem_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   /* Do any resolves of the last subpass. These are handled in the
    * tile_store_ib in the gmem path.
    */
   tu6_emit_sysmem_resolves(cmd, cs, cmd->state.subpass);

   tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);

   tu_cs_sanity_check(cs);
}

static void
tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_physical_device *phys_dev = cmd->device->physical_device;

   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_GMEM);

   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   if (use_hw_binning(cmd)) {
      /* enable stream-out during binning pass: */
      tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));

      tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height,
                        A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);

      tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, true);

      tu6_emit_binning_pass(cmd, cs);

      /* and disable stream-out for draw pass: */
      tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(true));

      tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height,
                        A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);

      tu_cs_emit_regs(cs,
                      A6XX_VFD_MODE_CNTL(0));

      tu_cs_emit_regs(cs, A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));

      tu_cs_emit_regs(cs, A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));

      tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
      tu_cs_emit(cs, 0x1);
   } else {
      /* no binning pass, so enable stream-out for draw pass: */
      tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));

      tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height, 0x6000000);
   }

   tu_cs_sanity_check(cs);
}

);
1338 tu6_render_tile(struct tu_cmd_buffer
*cmd
, struct tu_cs
*cs
)
1340 tu_cs_emit_call(cs
, &cmd
->draw_cs
);
1342 if (use_hw_binning(cmd
)) {
1343 tu_cs_emit_pkt7(cs
, CP_SET_MARKER
, 1);
1344 tu_cs_emit(cs
, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS
));
1347 tu_cs_emit_ib(cs
, &cmd
->state
.tile_store_ib
);
1349 tu_cs_sanity_check(cs
);
static void
tu6_tile_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_CNTL(0));

   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);

   tu6_emit_event_write(cmd, cs, PC_CCU_RESOLVE_TS);

   tu_cs_sanity_check(cs);
}

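/* Walk the visibility pipes in row-major order and, within each pipe, the
 * tiles it covers; "slot" is the tile's index within its pipe, which is what
 * CP_SET_BIN_DATA5_OFFSET uses to index the per-pipe visibility stream.
 */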
static void
tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   tu6_tile_render_begin(cmd, &cmd->cs);

   uint32_t pipe = 0;
   for (uint32_t py = 0; py < fb->pipe_count.height; py++) {
      for (uint32_t px = 0; px < fb->pipe_count.width; px++, pipe++) {
         uint32_t tx1 = px * fb->pipe0.width;
         uint32_t ty1 = py * fb->pipe0.height;
         uint32_t tx2 = MIN2(tx1 + fb->pipe0.width, fb->tile_count.width);
         uint32_t ty2 = MIN2(ty1 + fb->pipe0.height, fb->tile_count.height);
         uint32_t slot = 0;
         for (uint32_t ty = ty1; ty < ty2; ty++) {
            for (uint32_t tx = tx1; tx < tx2; tx++, slot++) {
               tu6_emit_tile_select(cmd, &cmd->cs, tx, ty, pipe, slot);
               tu6_render_tile(cmd, &cmd->cs);
            }
         }
      }
   }

   tu6_tile_render_end(cmd, &cmd->cs);
}

static void
tu_cmd_render_sysmem(struct tu_cmd_buffer *cmd)
{
   tu6_sysmem_render_begin(cmd, &cmd->cs);

   tu_cs_emit_call(&cmd->cs, &cmd->draw_cs);

   tu6_sysmem_render_end(cmd, &cmd->cs);
}

static void
tu_cmd_prepare_tile_store_ib(struct tu_cmd_buffer *cmd)
{
   const uint32_t tile_store_space = 11 + (35 * 2) * cmd->state.pass->attachment_count;
   struct tu_cs sub_cs;

   VkResult result =
      tu_cs_begin_sub_stream(&cmd->sub_cs, tile_store_space, &sub_cs);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   /* emit to tile-store sub_cs */
   tu6_emit_tile_store(cmd, &sub_cs);

   cmd->state.tile_store_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
}

static VkResult
tu_create_cmd_buffer(struct tu_device *device,
                     struct tu_cmd_pool *pool,
                     VkCommandBufferLevel level,
                     VkCommandBuffer *pCommandBuffer)
{
   struct tu_cmd_buffer *cmd_buffer;

   cmd_buffer = vk_object_zalloc(&device->vk, NULL, sizeof(*cmd_buffer),
                                 VK_OBJECT_TYPE_COMMAND_BUFFER);
   if (cmd_buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
      cmd_buffer->queue_family_index = pool->queue_family_index;
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
      cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
   }

   tu_bo_list_init(&cmd_buffer->bo_list);
   tu_cs_init(&cmd_buffer->cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->draw_cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->draw_epilogue_cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->sub_cs, device, TU_CS_MODE_SUB_STREAM, 2048);

   *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);

   list_inithead(&cmd_buffer->upload.list);

   return VK_SUCCESS;
}

static void
tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   tu_cs_finish(&cmd_buffer->cs);
   tu_cs_finish(&cmd_buffer->draw_cs);
   tu_cs_finish(&cmd_buffer->draw_epilogue_cs);
   tu_cs_finish(&cmd_buffer->sub_cs);

   tu_bo_list_destroy(&cmd_buffer->bo_list);
   vk_object_free(&cmd_buffer->device->vk, &cmd_buffer->pool->alloc, cmd_buffer);
}

static VkResult
tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
{
   cmd_buffer->record_result = VK_SUCCESS;

   tu_bo_list_reset(&cmd_buffer->bo_list);
   tu_cs_reset(&cmd_buffer->cs);
   tu_cs_reset(&cmd_buffer->draw_cs);
   tu_cs_reset(&cmd_buffer->draw_epilogue_cs);
   tu_cs_reset(&cmd_buffer->sub_cs);

   for (unsigned i = 0; i < MAX_BIND_POINTS; i++)
      memset(&cmd_buffer->descriptors[i].sets, 0, sizeof(cmd_buffer->descriptors[i].sets));

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;

   return cmd_buffer->record_result;
}

VkResult
tu_AllocateCommandBuffers(VkDevice _device,
                          const VkCommandBufferAllocateInfo *pAllocateInfo,
                          VkCommandBuffer *pCommandBuffers)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {

      if (!list_is_empty(&pool->free_cmd_buffers)) {
         struct tu_cmd_buffer *cmd_buffer = list_first_entry(
            &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);

         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

         result = tu_reset_cmd_buffer(cmd_buffer);
         cmd_buffer->level = pAllocateInfo->level;

         pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
      } else {
         result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                       &pCommandBuffers[i]);
      }
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
                            pCommandBuffers);

      /* From the Vulkan 1.0.66 spec:
       *
       * "vkAllocateCommandBuffers can be used to create multiple
       *  command buffers. If the creation of any of those command
       *  buffers fails, the implementation must destroy all
       *  successfully created command buffer objects from this
       *  command, set all entries of the pCommandBuffers array to
       *  NULL and return the error."
       */
      memset(pCommandBuffers, 0,
             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
   }

   return result;
}

void
tu_FreeCommandBuffers(VkDevice device,
                      VkCommandPool commandPool,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (cmd_buffer->pool) {
         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link,
                      &cmd_buffer->pool->free_cmd_buffers);
      } else {
         tu_cmd_buffer_destroy(cmd_buffer);
      }
   }
}

VkResult
tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
                      VkCommandBufferResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   return tu_reset_cmd_buffer(cmd_buffer);
}

/* Initialize the cache, assuming all necessary flushes have happened but *not*
 * invalidations.
 */
static void
tu_cache_init(struct tu_cache_state *cache)
{
   cache->flush_bits = 0;
   cache->pending_flush_bits = TU_CMD_FLAG_ALL_INVALIDATE;
}

VkResult
tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
                      const VkCommandBufferBeginInfo *pBeginInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   VkResult result = VK_SUCCESS;

   if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
      /* If the command buffer has already been resetted with
       * vkResetCommandBuffer, no need to do it again.
       */
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
   cmd_buffer->state.index_size = 0xff; /* dirty restart index */

   tu_cache_init(&cmd_buffer->state.cache);
   tu_cache_init(&cmd_buffer->state.renderpass_cache);
   cmd_buffer->usage_flags = pBeginInfo->flags;

   tu_cs_begin(&cmd_buffer->cs);
   tu_cs_begin(&cmd_buffer->draw_cs);
   tu_cs_begin(&cmd_buffer->draw_epilogue_cs);

   /* setup initial configuration into command buffer */
   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      switch (cmd_buffer->queue_family_index) {
      case TU_QUEUE_GENERAL:
         tu6_init_hw(cmd_buffer, &cmd_buffer->cs);
         break;
      default:
         break;
      }
   } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
      assert(pBeginInfo->pInheritanceInfo);

      vk_foreach_struct(ext, pBeginInfo->pInheritanceInfo) {
         switch (ext->sType) {
         case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT: {
            const VkCommandBufferInheritanceConditionalRenderingInfoEXT *cond_rend = (void *) ext;
            cmd_buffer->state.predication_active = cond_rend->conditionalRenderingEnable;
            break;
         }
         default:
            break;
         }
      }

      if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
         cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
         cmd_buffer->state.subpass =
            &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
      } else {
         /* When executing in the middle of another command buffer, the CCU
          * state is unknown.
          */
         cmd_buffer->state.ccu_state = TU_CMD_CCU_UNKNOWN;
      }
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;

   return VK_SUCCESS;
}

/* Sets vertex buffers to HW binding points. We emit VBs in SDS (so that bin
 * rendering can skip over unused state), so we need to collect all the
 * bindings together into a single state emit at draw time.
 */
void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                        uint32_t firstBinding,
                        uint32_t bindingCount,
                        const VkBuffer *pBuffers,
                        const VkDeviceSize *pOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   assert(firstBinding + bindingCount <= MAX_VBS);

   for (uint32_t i = 0; i < bindingCount; i++) {
      struct tu_buffer *buf = tu_buffer_from_handle(pBuffers[i]);

      cmd->state.vb.buffers[firstBinding + i] = buf;
      cmd->state.vb.offsets[firstBinding + i] = pOffsets[i];

      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
   }

   cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
}

void
tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
                      VkBuffer buffer,
                      VkDeviceSize offset,
                      VkIndexType indexType)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buf, buffer);

   uint32_t index_size, index_shift, restart_index;

   switch (indexType) {
   case VK_INDEX_TYPE_UINT16:
      index_size = INDEX4_SIZE_16_BIT;
      index_shift = 1;
      restart_index = 0xffff;
      break;
   case VK_INDEX_TYPE_UINT32:
      index_size = INDEX4_SIZE_32_BIT;
      index_shift = 2;
      restart_index = 0xffffffff;
      break;
   case VK_INDEX_TYPE_UINT8_EXT:
      index_size = INDEX4_SIZE_8_BIT;
      index_shift = 0;
      restart_index = 0xff;
      break;
   default:
      unreachable("invalid VkIndexType");
   }

   /* initialize/update the restart index */
   if (cmd->state.index_size != index_size)
      tu_cs_emit_regs(&cmd->draw_cs, A6XX_PC_RESTART_INDEX(restart_index));

   assert(buf->size >= offset);

   cmd->state.index_va = buf->bo->iova + buf->bo_offset + offset;
   cmd->state.max_index_count = (buf->size - offset) >> index_shift;
   cmd->state.index_size = index_size;

   tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
}

1730 tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer
,
1731 VkPipelineBindPoint pipelineBindPoint
,
1732 VkPipelineLayout _layout
,
1734 uint32_t descriptorSetCount
,
1735 const VkDescriptorSet
*pDescriptorSets
,
1736 uint32_t dynamicOffsetCount
,
1737 const uint32_t *pDynamicOffsets
)
1739 TU_FROM_HANDLE(tu_cmd_buffer
, cmd
, commandBuffer
);
1740 TU_FROM_HANDLE(tu_pipeline_layout
, layout
, _layout
);
1741 unsigned dyn_idx
= 0;
1743 struct tu_descriptor_state
*descriptors_state
=
1744 tu_get_descriptors_state(cmd
, pipelineBindPoint
);
1746 for (unsigned i
= 0; i
< descriptorSetCount
; ++i
) {
      unsigned idx = i + firstSet;
      TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);

      descriptors_state->sets[idx] = set;

      for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
         /* update the contents of the dynamic descriptor set */
         unsigned src_idx = j;
         unsigned dst_idx = j + layout->set[idx].dynamic_offset_start;
         assert(dyn_idx < dynamicOffsetCount);

         uint32_t *dst =
            &descriptors_state->dynamic_descriptors[dst_idx * A6XX_TEX_CONST_DWORDS];
         uint32_t *src =
            &set->dynamic_descriptors[src_idx * A6XX_TEX_CONST_DWORDS];
         uint32_t offset = pDynamicOffsets[dyn_idx];

         /* Patch the storage/uniform descriptors right away. */
         if (layout->set[idx].layout->dynamic_ubo & (1 << j)) {
            /* Note: we can assume here that the addition won't roll over and
             * change the SIZE field.
             */
            uint64_t va = src[0] | ((uint64_t)src[1] << 32);
            va += offset;
            dst[0] = va;
            dst[1] = va >> 32;
         } else {
            memcpy(dst, src, A6XX_TEX_CONST_DWORDS * 4);
            /* Note: A6XX_IBO_5_DEPTH is always 0 */
            uint64_t va = dst[4] | ((uint64_t)dst[5] << 32);
            va += offset;
            dst[4] = va;
            dst[5] = va >> 32;
         }
      }

      for (unsigned j = 0; j < set->layout->buffer_count; ++j) {
         if (set->buffers[j]) {
            tu_bo_list_add(&cmd->bo_list, set->buffers[j],
                           MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
         }
      }

      if (set->size > 0) {
         tu_bo_list_add(&cmd->bo_list, &set->pool->bo,
                        MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
      }
   }
   assert(dyn_idx == dynamicOffsetCount);

   uint32_t sp_bindless_base_reg, hlsq_bindless_base_reg, hlsq_invalidate_value;
   uint64_t addr[MAX_SETS + 1] = {};
   struct tu_cs *cs, state_cs;

   for (uint32_t i = 0; i < MAX_SETS; i++) {
      struct tu_descriptor_set *set = descriptors_state->sets[i];
      if (set)
         addr[i] = set->va | 3;
   }

   if (layout->dynamic_offset_count) {
      /* allocate and fill out dynamic descriptor set */
      struct tu_cs_memory dynamic_desc_set;
      VkResult result = tu_cs_alloc(&cmd->sub_cs, layout->dynamic_offset_count,
                                    A6XX_TEX_CONST_DWORDS, &dynamic_desc_set);
      assert(result == VK_SUCCESS);

      memcpy(dynamic_desc_set.map, descriptors_state->dynamic_descriptors,
             layout->dynamic_offset_count * A6XX_TEX_CONST_DWORDS * 4);
      addr[MAX_SETS] = dynamic_desc_set.iova | 3;
   }

   if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
      sp_bindless_base_reg = REG_A6XX_SP_BINDLESS_BASE(0);
      hlsq_bindless_base_reg = REG_A6XX_HLSQ_BINDLESS_BASE(0);
      hlsq_invalidate_value = A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS(0x1f);

      cmd->state.desc_sets = tu_cs_draw_state(&cmd->sub_cs, &state_cs, 24);
      cmd->state.dirty |= TU_CMD_DIRTY_DESC_SETS_LOAD | TU_CMD_DIRTY_SHADER_CONSTS;
      cs = &state_cs;
   } else {
      assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE);

      sp_bindless_base_reg = REG_A6XX_SP_CS_BINDLESS_BASE(0);
      hlsq_bindless_base_reg = REG_A6XX_HLSQ_CS_BINDLESS_BASE(0);
      hlsq_invalidate_value = A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS(0x1f);

      cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD;
      cs = &cmd->cs;
   }

   tu_cs_emit_pkt4(cs, sp_bindless_base_reg, 10);
   tu_cs_emit_array(cs, (const uint32_t*) addr, 10);
   tu_cs_emit_pkt4(cs, hlsq_bindless_base_reg, 10);
   tu_cs_emit_array(cs, (const uint32_t*) addr, 10);
   tu_cs_emit_regs(cs, A6XX_HLSQ_INVALIDATE_CMD(.dword = hlsq_invalidate_value));

   if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
      assert(cs->cur == cs->end); /* validate draw state size */
      tu_cs_emit_pkt7(&cmd->draw_cs, CP_SET_DRAW_STATE, 3);
      tu_cs_emit_draw_state(&cmd->draw_cs, TU_DRAW_STATE_DESC_SETS, cmd->state.desc_sets);
   }
}
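
/* A sketch of the dynamic-offset patching done above: if a dynamic UBO
 * descriptor in the set has a base address of 0x10000000 and the set is
 * bound with pDynamicOffsets[n] = 256, the copy written into the command
 * buffer's dynamic_descriptors array ends up with base 0x10000100.
 * Dynamic SSBOs are handled the same way, except that the address sits in
 * dwords [4:5] of the IBO descriptor rather than [0:1], which is why the
 * else branch patches dst[4]/dst[5].
 */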
void tu_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer,
                                           uint32_t firstBinding,
                                           uint32_t bindingCount,
                                           const VkBuffer *pBuffers,
                                           const VkDeviceSize *pOffsets,
                                           const VkDeviceSize *pSizes)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *cs = &cmd->draw_cs;

   /* using COND_REG_EXEC for xfb commands matches the blob behavior
    * presumably there isn't any benefit using a draw state when the
    * condition is (SYSMEM | BINNING)
    */
   tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
                          CP_COND_REG_EXEC_0_SYSMEM |
                          CP_COND_REG_EXEC_0_BINNING);

   for (uint32_t i = 0; i < bindingCount; i++) {
      TU_FROM_HANDLE(tu_buffer, buf, pBuffers[i]);
      uint64_t iova = buf->bo->iova + pOffsets[i];
      uint32_t size = buf->bo->size - pOffsets[i];
      uint32_t idx = i + firstBinding;

      if (pSizes && pSizes[i] != VK_WHOLE_SIZE)
         size = pSizes[i];

      /* BUFFER_BASE is 32-byte aligned, add remaining offset to BUFFER_OFFSET */
      uint32_t offset = iova & 0x1f;
      iova &= ~(uint64_t) 0x1f;

      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_BASE(idx), 3);
      tu_cs_emit_qw(cs, iova);
      tu_cs_emit(cs, size + offset);

      cmd->state.streamout_offset[idx] = offset;

      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_WRITE);
   }

   tu_cond_exec_end(cs);
}
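
/* Worked example of the 32-byte alignment split above: if pOffsets[i]
 * leaves the buffer start at iova ...0x1018, the low five bits (0x18) are
 * saved in cmd->state.streamout_offset[idx] (and later programmed as
 * VPC_SO_BUFFER_OFFSET by vkCmdBeginTransformFeedbackEXT), BUFFER_BASE gets
 * the rounded-down ...0x1000, and the size written here is grown by the
 * same 0x18 bytes so the end of the buffer stays where the app put it.
 */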
void
tu_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer,
                                uint32_t firstCounterBuffer,
                                uint32_t counterBufferCount,
                                const VkBuffer *pCounterBuffers,
                                const VkDeviceSize *pCounterBufferOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *cs = &cmd->draw_cs;

   tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
                          CP_COND_REG_EXEC_0_SYSMEM |
                          CP_COND_REG_EXEC_0_BINNING);

   /* TODO: only update offset for active buffers */
   for (uint32_t i = 0; i < IR3_MAX_SO_BUFFERS; i++)
      tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_OFFSET(i, cmd->state.streamout_offset[i]));

   for (uint32_t i = 0; i < counterBufferCount; i++) {
      uint32_t idx = firstCounterBuffer + i;
      uint32_t offset = cmd->state.streamout_offset[idx];

      if (!pCounterBuffers[i])
         continue;

      TU_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]);

      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);

      tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
      tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(idx)) |
                     CP_MEM_TO_REG_0_UNK31 |
                     CP_MEM_TO_REG_0_CNT(1));
      tu_cs_emit_qw(cs, buf->bo->iova + pCounterBufferOffsets[i]);

      tu_cs_emit_pkt7(cs, CP_REG_RMW, 3);
      tu_cs_emit(cs, CP_REG_RMW_0_DST_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(idx)) |
                     CP_REG_RMW_0_SRC1_ADD);
      tu_cs_emit_qw(cs, 0xffffffff);
      tu_cs_emit_qw(cs, offset);
   }

   tu_cond_exec_end(cs);
}
void tu_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer,
                                   uint32_t firstCounterBuffer,
                                   uint32_t counterBufferCount,
                                   const VkBuffer *pCounterBuffers,
                                   const VkDeviceSize *pCounterBufferOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *cs = &cmd->draw_cs;

   tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
                          CP_COND_REG_EXEC_0_SYSMEM |
                          CP_COND_REG_EXEC_0_BINNING);

   /* TODO: only flush buffers that need to be flushed */
   for (uint32_t i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
      /* note: FLUSH_BASE is always the same, so it could go in init_hw()? */
      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_FLUSH_BASE(i), 2);
      tu_cs_emit_qw(cs, global_iova(cmd, flush_base[i]));
      tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i);
   }

   for (uint32_t i = 0; i < counterBufferCount; i++) {
      uint32_t idx = firstCounterBuffer + i;
      uint32_t offset = cmd->state.streamout_offset[idx];

      if (!pCounterBuffers[i])
         continue;

      TU_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]);

      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_WRITE);

      /* VPC_SO_FLUSH_BASE has dwords counter, but counter should be in bytes */
      tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
      tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
                     CP_MEM_TO_REG_0_SHIFT_BY_2 |
                     CP_MEM_TO_REG_0_UNK31 |
                     CP_MEM_TO_REG_0_CNT(1));
      tu_cs_emit_qw(cs, global_iova(cmd, flush_base[idx]));

      tu_cs_emit_pkt7(cs, CP_REG_RMW, 3);
      tu_cs_emit(cs, CP_REG_RMW_0_DST_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
                     CP_REG_RMW_0_SRC1_ADD);
      tu_cs_emit_qw(cs, 0xffffffff);
      tu_cs_emit_qw(cs, -offset);

      tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
      tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
                     CP_REG_TO_MEM_0_CNT(1));
      tu_cs_emit_qw(cs, buf->bo->iova + pCounterBufferOffsets[i]);
   }

   tu_cond_exec_end(cs);

   cmd->state.xfb_used = true;
}
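
/* Rough numeric sketch of the counter write-back above: if the flush event
 * stored a dword count of 0x40 to flush_base[idx] and the buffer was bound
 * with a 32-byte misalignment of offset = 0x18, CP_MEM_TO_REG with
 * SHIFT_BY_2 loads 0x40 << 2 = 0x100 bytes into the scratch register,
 * CP_REG_RMW adds -offset to drop the 0x18 bytes that belong to the binding
 * alignment rather than to the stream (leaving 0xe8), and CP_REG_TO_MEM
 * stores that byte count to the application's counter buffer.
 */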
void
tu_CmdPushConstants(VkCommandBuffer commandBuffer,
                    VkPipelineLayout layout,
                    VkShaderStageFlags stageFlags,
                    uint32_t offset,
                    uint32_t size,
                    const void *pValues)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   memcpy((void*) cmd->push_constants + offset, pValues, size);
   cmd->state.dirty |= TU_CMD_DIRTY_SHADER_CONSTS;
}
/* Flush everything which has been made available but we haven't actually
 * flushed yet.
 */
static void
tu_flush_all_pending(struct tu_cache_state *cache)
{
   cache->flush_bits |= cache->pending_flush_bits & TU_CMD_FLAG_ALL_FLUSH;
   cache->pending_flush_bits &= ~TU_CMD_FLAG_ALL_FLUSH;
}
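
/* In other words: writes that earlier barriers only recorded as pending
 * ("available") are promoted to flush_bits here, so the next
 * tu_emit_cache_flush*() call actually emits them.  For example, a CCU
 * color flush that a render pass left pending becomes a real flush event
 * when tu_EndCommandBuffer() below calls this before its final flush.
 */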
VkResult
tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   /* We currently flush CCU at the end of the command buffer, like
    * what the blob does. There's implicit synchronization around every
    * vkQueueSubmit, but the kernel only flushes the UCHE, and we don't
    * know yet if this command buffer will be the last in the submit so we
    * have to defensively flush everything else.
    *
    * TODO: We could definitely do better than this, since these flushes
    * aren't required by Vulkan, but we'd need kernel support to do that.
    * Ideally, we'd like the kernel to flush everything afterwards, so that we
    * wouldn't have to do any flushes here, and when submitting multiple
    * command buffers there wouldn't be any unnecessary flushes in between.
    */
   if (cmd_buffer->state.pass) {
      tu_flush_all_pending(&cmd_buffer->state.renderpass_cache);
      tu_emit_cache_flush_renderpass(cmd_buffer, &cmd_buffer->draw_cs);
   } else {
      tu_flush_all_pending(&cmd_buffer->state.cache);
      cmd_buffer->state.cache.flush_bits |=
         TU_CMD_FLAG_CCU_FLUSH_COLOR |
         TU_CMD_FLAG_CCU_FLUSH_DEPTH;
      tu_emit_cache_flush(cmd_buffer, &cmd_buffer->cs);
   }

   tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->device->global_bo,
                  MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);

   for (uint32_t i = 0; i < cmd_buffer->draw_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   for (uint32_t i = 0; i < cmd_buffer->draw_epilogue_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_epilogue_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   for (uint32_t i = 0; i < cmd_buffer->sub_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->sub_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   tu_cs_end(&cmd_buffer->cs);
   tu_cs_end(&cmd_buffer->draw_cs);
   tu_cs_end(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;

   return cmd_buffer->record_result;
}
static struct tu_cs
tu_cmd_dynamic_state(struct tu_cmd_buffer *cmd, uint32_t id, uint32_t size)
{
   struct tu_cs cs;

   assert(id < ARRAY_SIZE(cmd->state.dynamic_state));
   cmd->state.dynamic_state[id] = tu_cs_draw_state(&cmd->sub_cs, &cs, size);

   tu_cs_emit_pkt7(&cmd->draw_cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit_draw_state(&cmd->draw_cs, TU_DRAW_STATE_DYNAMIC + id, cmd->state.dynamic_state[id]);

   return cs;
}
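
/* tu_CmdBindPipeline() and the vkCmdSet* entry points below all use this
 * helper the same way: grab a fixed-size draw state group and write the
 * registers for that piece of dynamic state into it, e.g.
 *
 *    struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_LINE_WIDTH, 2);
 *    tu_cs_emit_regs(&cs, A6XX_GRAS_SU_CNTL(.dword = ...));
 *
 * The CP_SET_DRAW_STATE group is registered here, so callers only have to
 * fill in the payload.
 */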
void
tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
                   VkPipelineBindPoint pipelineBindPoint,
                   VkPipeline _pipeline)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);

   for (uint32_t i = 0; i < pipeline->cs.bo_count; i++) {
      tu_bo_list_add(&cmd->bo_list, pipeline->cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) {
      cmd->state.compute_pipeline = pipeline;
      tu_cs_emit_state_ib(&cmd->cs, pipeline->program.state);
      return;
   }

   assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS);

   cmd->state.pipeline = pipeline;
   cmd->state.dirty |= TU_CMD_DIRTY_DESC_SETS_LOAD | TU_CMD_DIRTY_SHADER_CONSTS;

   struct tu_cs *cs = &cmd->draw_cs;
   uint32_t mask = ~pipeline->dynamic_state_mask & BITFIELD_MASK(TU_DYNAMIC_STATE_COUNT);

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (7 + util_bitcount(mask)));
   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state);
   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state);
   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI, pipeline->vi.state);
   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state);
   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_RAST, pipeline->rast_state);
   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS, pipeline->ds_state);
   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_BLEND, pipeline->blend_state);
   for_each_bit(i, mask)
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DYNAMIC + i, pipeline->dynamic_state[i]);

   /* If the new pipeline requires more VBs than we had previously set up, we
    * need to re-emit them in SDS. If it requires the same set or fewer, we
    * can just re-use the old SDS.
    */
   if (pipeline->vi.bindings_used & ~cmd->vertex_bindings_set)
      cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;

   /* dynamic linewidth state depends on the pipeline's gras_su_cntl,
    * so the dynamic state IB must be updated when the pipeline changes
    */
   if (pipeline->dynamic_state_mask & BIT(VK_DYNAMIC_STATE_LINE_WIDTH)) {
      struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_LINE_WIDTH, 2);

      cmd->state.dynamic_gras_su_cntl &= A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
      cmd->state.dynamic_gras_su_cntl |= pipeline->gras_su_cntl;

      tu_cs_emit_regs(&cs, A6XX_GRAS_SU_CNTL(.dword = cmd->state.dynamic_gras_su_cntl));
   }
}
void
tu_CmdSetViewport(VkCommandBuffer commandBuffer,
                  uint32_t firstViewport,
                  uint32_t viewportCount,
                  const VkViewport *pViewports)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_VIEWPORT, 18);

   assert(firstViewport == 0 && viewportCount == 1);

   tu6_emit_viewport(&cs, pViewports);
}

void
tu_CmdSetScissor(VkCommandBuffer commandBuffer,
                 uint32_t firstScissor,
                 uint32_t scissorCount,
                 const VkRect2D *pScissors)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_SCISSOR, 3);

   assert(firstScissor == 0 && scissorCount == 1);

   tu6_emit_scissor(&cs, pScissors);
}

void
tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_LINE_WIDTH, 2);

   cmd->state.dynamic_gras_su_cntl &= ~A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
   cmd->state.dynamic_gras_su_cntl |= A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(lineWidth / 2.0f);

   tu_cs_emit_regs(&cs, A6XX_GRAS_SU_CNTL(.dword = cmd->state.dynamic_gras_su_cntl));
}

void
tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
                   float depthBiasConstantFactor,
                   float depthBiasClamp,
                   float depthBiasSlopeFactor)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_DEPTH_BIAS, 4);

   tu6_emit_depth_bias(&cs, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
}

void
tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
                        const float blendConstants[4])
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_BLEND_CONSTANTS, 5);

   tu_cs_emit_pkt4(&cs, REG_A6XX_RB_BLEND_RED_F32, 4);
   tu_cs_emit_array(&cs, (const uint32_t *) blendConstants, 4);
}

void
tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
                     float minDepthBounds,
                     float maxDepthBounds)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_DEPTH_BOUNDS, 3);

   tu_cs_emit_regs(&cs,
                   A6XX_RB_Z_BOUNDS_MIN(minDepthBounds),
                   A6XX_RB_Z_BOUNDS_MAX(maxDepthBounds));
}

static void
update_stencil_mask(uint32_t *value, VkStencilFaceFlags face, uint32_t mask)
{
   if (face & VK_STENCIL_FACE_FRONT_BIT)
      *value = (*value & 0xff00) | (mask & 0xff);
   if (face & VK_STENCIL_FACE_BACK_BIT)
      *value = (*value & 0xff) | (mask & 0xff) << 8;
}
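
/* Example: starting from *value = 0x0000, setting the front mask to 0x3c
 * yields 0x003c and then setting the back mask to 0xf0 yields 0xf03c --
 * the low byte always holds the front-facing value and the high byte the
 * back-facing value, which is the packed .dword layout handed to the
 * RB_STENCILMASK/RB_STENCILWRMASK/RB_STENCILREF registers below.
 */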
void
tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
                            VkStencilFaceFlags faceMask,
                            uint32_t compareMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK, 2);

   update_stencil_mask(&cmd->state.dynamic_stencil_mask, faceMask, compareMask);

   tu_cs_emit_regs(&cs, A6XX_RB_STENCILMASK(.dword = cmd->state.dynamic_stencil_mask));
}

void
tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t writeMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, 2);

   update_stencil_mask(&cmd->state.dynamic_stencil_wrmask, faceMask, writeMask);

   tu_cs_emit_regs(&cs, A6XX_RB_STENCILWRMASK(.dword = cmd->state.dynamic_stencil_wrmask));
}

void
tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t reference)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_REFERENCE, 2);

   update_stencil_mask(&cmd->state.dynamic_stencil_ref, faceMask, reference);

   tu_cs_emit_regs(&cs, A6XX_RB_STENCILREF(.dword = cmd->state.dynamic_stencil_ref));
}

void
tu_CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
                            const VkSampleLocationsInfoEXT* pSampleLocationsInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, TU_DYNAMIC_STATE_SAMPLE_LOCATIONS, 9);

   assert(pSampleLocationsInfo);

   tu6_emit_sample_locations(&cs, pSampleLocationsInfo);
}
static void
tu_flush_for_access(struct tu_cache_state *cache,
                    enum tu_cmd_access_mask src_mask,
                    enum tu_cmd_access_mask dst_mask)
{
   enum tu_cmd_flush_bits flush_bits = 0;

   if (src_mask & TU_ACCESS_HOST_WRITE) {
      /* Host writes are always visible to CP, so only invalidate GPU caches */
      cache->pending_flush_bits |= TU_CMD_FLAG_GPU_INVALIDATE;
   }

   if (src_mask & TU_ACCESS_SYSMEM_WRITE) {
      /* Invalidate CP and 2D engine (make it do WFI + WFM if necessary) as
       * well.
       */
      cache->pending_flush_bits |= TU_CMD_FLAG_ALL_INVALIDATE;
   }

   if (src_mask & TU_ACCESS_CP_WRITE) {
      /* Flush the CP write queue. However a WFI shouldn't be necessary as
       * WAIT_MEM_WRITES should cover it.
       */
      cache->pending_flush_bits |=
         TU_CMD_FLAG_WAIT_MEM_WRITES |
         TU_CMD_FLAG_GPU_INVALIDATE |
         TU_CMD_FLAG_WAIT_FOR_ME;
   }

#define SRC_FLUSH(domain, flush, invalidate) \
   if (src_mask & TU_ACCESS_##domain##_WRITE) { \
      cache->pending_flush_bits |= TU_CMD_FLAG_##flush | \
         (TU_CMD_FLAG_ALL_INVALIDATE & ~TU_CMD_FLAG_##invalidate); \
   }

   SRC_FLUSH(UCHE, CACHE_FLUSH, CACHE_INVALIDATE)
   SRC_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
   SRC_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)

#undef SRC_FLUSH

#define SRC_INCOHERENT_FLUSH(domain, flush, invalidate) \
   if (src_mask & TU_ACCESS_##domain##_INCOHERENT_WRITE) { \
      flush_bits |= TU_CMD_FLAG_##flush; \
      cache->pending_flush_bits |= \
         (TU_CMD_FLAG_ALL_INVALIDATE & ~TU_CMD_FLAG_##invalidate); \
   }

   SRC_INCOHERENT_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
   SRC_INCOHERENT_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)

#undef SRC_INCOHERENT_FLUSH

   /* Treat host & sysmem write accesses the same, since the kernel implicitly
    * drains the queue before signalling completion to the host.
    */
   if (dst_mask & (TU_ACCESS_SYSMEM_READ | TU_ACCESS_SYSMEM_WRITE |
                   TU_ACCESS_HOST_READ | TU_ACCESS_HOST_WRITE)) {
      flush_bits |= cache->pending_flush_bits & TU_CMD_FLAG_ALL_FLUSH;
   }

#define DST_FLUSH(domain, flush, invalidate) \
   if (dst_mask & (TU_ACCESS_##domain##_READ | \
                   TU_ACCESS_##domain##_WRITE)) { \
      flush_bits |= cache->pending_flush_bits & \
         (TU_CMD_FLAG_##invalidate | \
          (TU_CMD_FLAG_ALL_FLUSH & ~TU_CMD_FLAG_##flush)); \
   }

   DST_FLUSH(UCHE, CACHE_FLUSH, CACHE_INVALIDATE)
   DST_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
   DST_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)

#undef DST_FLUSH

#define DST_INCOHERENT_FLUSH(domain, flush, invalidate) \
   if (dst_mask & (TU_ACCESS_##domain##_INCOHERENT_READ | \
                   TU_ACCESS_##domain##_INCOHERENT_WRITE)) { \
      flush_bits |= TU_CMD_FLAG_##invalidate | \
         (cache->pending_flush_bits & \
          (TU_CMD_FLAG_ALL_FLUSH & ~TU_CMD_FLAG_##flush)); \
   }

   DST_INCOHERENT_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
   DST_INCOHERENT_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)

#undef DST_INCOHERENT_FLUSH

   if (dst_mask & TU_ACCESS_WFI_READ) {
      flush_bits |= cache->pending_flush_bits &
         (TU_CMD_FLAG_ALL_FLUSH | TU_CMD_FLAG_WAIT_FOR_IDLE);
   }

   if (dst_mask & TU_ACCESS_WFM_READ) {
      flush_bits |= cache->pending_flush_bits &
         (TU_CMD_FLAG_ALL_FLUSH | TU_CMD_FLAG_WAIT_FOR_ME);
   }

   cache->flush_bits |= flush_bits;
   cache->pending_flush_bits &= ~flush_bits;
}
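
/* Rough example of the tracking above: a color attachment write maps to
 * TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE as a src access, so
 * SRC_INCOHERENT_FLUSH turns it into an immediate CCU color flush plus
 * pending invalidates for the other domains.  If a later barrier lists a
 * shader/texture read (a UCHE read) as its dst access, DST_FLUSH pulls the
 * pending cache invalidate into flush_bits so the sampled read sees the
 * flushed data, while unrelated pending bits stay deferred.
 */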
static enum tu_cmd_access_mask
vk2tu_access(VkAccessFlags flags, bool gmem)
{
   enum tu_cmd_access_mask mask = 0;

   /* If the GPU writes a buffer that is then read by an indirect draw
    * command, we theoretically need to emit a WFI to wait for any cache
    * flushes, and then a WAIT_FOR_ME to wait on the CP for the WFI to
    * complete. Waiting for the WFI to complete is performed as part of the
    * draw by the firmware, so we just need to execute the WFI.
    *
    * Transform feedback counters are read via CP_MEM_TO_REG, which implicitly
    * does CP_WAIT_FOR_ME, but we still need a WFI if the GPU writes it.
    *
    * Currently we read the draw predicate using CP_MEM_TO_MEM, which
    * also implicitly does CP_WAIT_FOR_ME. However CP_DRAW_PRED_SET does *not*
    * implicitly do CP_WAIT_FOR_ME, it seems to only wait for counters to
    * complete since it's written for DX11 where you can only predicate on the
    * result of a query object. So if we implement 64-bit comparisons in the
    * future, or if CP_DRAW_PRED_SET grows the capability to do 32-bit
    * comparisons, then this will have to be dealt with.
    */
   if (flags &
       (VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
        VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT |
        VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT |
        VK_ACCESS_MEMORY_READ_BIT)) {
      mask |= TU_ACCESS_WFI_READ;
   }

   if (flags &
       (VK_ACCESS_INDIRECT_COMMAND_READ_BIT | /* Read performed by CP */
        VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT | /* Read performed by CP */
        VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT | /* Read performed by CP */
        VK_ACCESS_MEMORY_READ_BIT)) {
      mask |= TU_ACCESS_SYSMEM_READ;
   }

   if (flags &
       (VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT |
        VK_ACCESS_MEMORY_WRITE_BIT)) {
      mask |= TU_ACCESS_CP_WRITE;
   }

   if (flags &
       (VK_ACCESS_HOST_READ_BIT |
        VK_ACCESS_MEMORY_WRITE_BIT)) {
      mask |= TU_ACCESS_HOST_READ;
   }

   if (flags &
       (VK_ACCESS_HOST_WRITE_BIT |
        VK_ACCESS_MEMORY_WRITE_BIT)) {
      mask |= TU_ACCESS_HOST_WRITE;
   }

   if (flags &
       (VK_ACCESS_INDEX_READ_BIT | /* Read performed by PC, I think */
        VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | /* Read performed by VFD */
        VK_ACCESS_UNIFORM_READ_BIT | /* Read performed by SP */
        /* TODO: Is there a no-cache bit for textures so that we can ignore
         * these?
         */
        VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | /* Read performed by TP */
        VK_ACCESS_SHADER_READ_BIT | /* Read performed by SP/TP */
        VK_ACCESS_MEMORY_READ_BIT)) {
      mask |= TU_ACCESS_UCHE_READ;
   }

   if (flags &
       (VK_ACCESS_SHADER_WRITE_BIT | /* Write performed by SP */
        VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT | /* Write performed by VPC */
        VK_ACCESS_MEMORY_WRITE_BIT)) {
      mask |= TU_ACCESS_UCHE_WRITE;
   }

   /* When using GMEM, the CCU is always flushed automatically to GMEM, and
    * then GMEM is flushed to sysmem. Furthermore, we already had to flush any
    * previous writes in sysmem mode when transitioning to GMEM. Therefore we
    * can ignore CCU and pretend that color attachments and transfers use
    * sysmem directly.
    */

   if (flags &
       (VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
        VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
        VK_ACCESS_MEMORY_READ_BIT)) {
      if (gmem)
         mask |= TU_ACCESS_SYSMEM_READ;
      else
         mask |= TU_ACCESS_CCU_COLOR_INCOHERENT_READ;
   }

   if (flags &
       (VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
        VK_ACCESS_MEMORY_READ_BIT)) {
      if (gmem)
         mask |= TU_ACCESS_SYSMEM_READ;
      else
         mask |= TU_ACCESS_CCU_DEPTH_INCOHERENT_READ;
   }

   if (flags &
       (VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
        VK_ACCESS_MEMORY_WRITE_BIT)) {
      if (gmem)
         mask |= TU_ACCESS_SYSMEM_WRITE;
      else
         mask |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
   }

   if (flags &
       (VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
        VK_ACCESS_MEMORY_WRITE_BIT)) {
      if (gmem)
         mask |= TU_ACCESS_SYSMEM_WRITE;
      else
         mask |= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE;
   }

   /* When the dst access is a transfer read/write, it seems we sometimes need
    * to insert a WFI after any flushes, to guarantee that the flushes finish
    * before the 2D engine starts. However the opposite (i.e. a WFI after
    * CP_BLIT and before any subsequent flush) does not seem to be needed, and
    * the blob doesn't emit such a WFI.
    */
   if (flags &
       (VK_ACCESS_TRANSFER_WRITE_BIT |
        VK_ACCESS_MEMORY_WRITE_BIT)) {
      if (gmem)
         mask |= TU_ACCESS_SYSMEM_WRITE;
      else
         mask |= TU_ACCESS_CCU_COLOR_WRITE;

      mask |= TU_ACCESS_WFI_READ;
   }

   if (flags &
       (VK_ACCESS_TRANSFER_READ_BIT | /* Access performed by TP */
        VK_ACCESS_MEMORY_READ_BIT)) {
      mask |= TU_ACCESS_UCHE_READ | TU_ACCESS_WFI_READ;
   }

   return mask;
}
void
tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCmdBuffers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   VkResult result;

   assert(commandBufferCount > 0);

   /* Emit any pending flushes. */
   if (cmd->state.pass) {
      tu_flush_all_pending(&cmd->state.renderpass_cache);
      tu_emit_cache_flush_renderpass(cmd, &cmd->draw_cs);
   } else {
      tu_flush_all_pending(&cmd->state.cache);
      tu_emit_cache_flush(cmd, &cmd->cs);
   }

   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);

      result = tu_bo_list_merge(&cmd->bo_list, &secondary->bo_list);
      if (result != VK_SUCCESS) {
         cmd->record_result = result;
         break;
      }

      if (secondary->usage_flags &
          VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
         assert(tu_cs_is_empty(&secondary->cs));

         result = tu_cs_add_entries(&cmd->draw_cs, &secondary->draw_cs);
         if (result != VK_SUCCESS) {
            cmd->record_result = result;
            break;
         }

         result = tu_cs_add_entries(&cmd->draw_epilogue_cs,
                                    &secondary->draw_epilogue_cs);
         if (result != VK_SUCCESS) {
            cmd->record_result = result;
            break;
         }

         if (secondary->state.has_tess)
            cmd->state.has_tess = true;
         if (secondary->state.has_subpass_predication)
            cmd->state.has_subpass_predication = true;
      } else {
         assert(tu_cs_is_empty(&secondary->draw_cs));
         assert(tu_cs_is_empty(&secondary->draw_epilogue_cs));

         for (uint32_t j = 0; j < secondary->cs.bo_count; j++) {
            tu_bo_list_add(&cmd->bo_list, secondary->cs.bos[j],
                           MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
         }

         tu_cs_add_entries(&cmd->cs, &secondary->cs);
      }

      cmd->state.index_size = secondary->state.index_size; /* for restart index update */
   }
   cmd->state.dirty = ~0u; /* TODO: set dirty only what needs to be */

   /* After executing secondary command buffers, there may have been arbitrary
    * flushes executed, so when we encounter a pipeline barrier with a
    * srcMask, we have to assume that we need to invalidate. Therefore we need
    * to re-initialize the cache with all pending invalidate bits set.
    */
   if (cmd->state.pass) {
      tu_cache_init(&cmd->state.renderpass_cache);
   } else {
      tu_cache_init(&cmd->state.cache);
   }
}
VkResult
tu_CreateCommandPool(VkDevice _device,
                     const VkCommandPoolCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkCommandPool *pCmdPool)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_cmd_pool *pool;

   pool = vk_object_alloc(&device->vk, pAllocator, sizeof(*pool),
                          VK_OBJECT_TYPE_COMMAND_POOL);
   if (pool == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->vk.alloc;

   list_inithead(&pool->cmd_buffers);
   list_inithead(&pool->free_cmd_buffers);

   pool->queue_family_index = pCreateInfo->queueFamilyIndex;

   *pCmdPool = tu_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void
tu_DestroyCommandPool(VkDevice _device,
                      VkCommandPool commandPool,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   vk_object_free(&device->vk, pAllocator, pool);
}

VkResult
tu_ResetCommandPool(VkDevice device,
                    VkCommandPool commandPool,
                    VkCommandPoolResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
   VkResult result;

   list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
                       pool_link)
   {
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}

void
tu_TrimCommandPool(VkDevice device,
                   VkCommandPool commandPool,
                   VkCommandPoolTrimFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }
}
static void
tu_subpass_barrier(struct tu_cmd_buffer *cmd_buffer,
                   const struct tu_subpass_barrier *barrier,
                   bool external)
{
   /* Note: we don't know until the end of the subpass whether we'll use
    * sysmem, so assume sysmem here to be safe.
    */
   struct tu_cache_state *cache =
      external ? &cmd_buffer->state.cache : &cmd_buffer->state.renderpass_cache;
   enum tu_cmd_access_mask src_flags =
      vk2tu_access(barrier->src_access_mask, false);
   enum tu_cmd_access_mask dst_flags =
      vk2tu_access(barrier->dst_access_mask, false);

   if (barrier->incoherent_ccu_color)
      src_flags |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
   if (barrier->incoherent_ccu_depth)
      src_flags |= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE;

   tu_flush_for_access(cache, src_flags, dst_flags);
}
void
tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                      const VkRenderPassBeginInfo *pRenderPassBegin,
                      VkSubpassContents contents)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
   TU_FROM_HANDLE(tu_framebuffer, fb, pRenderPassBegin->framebuffer);

   cmd->state.pass = pass;
   cmd->state.subpass = pass->subpasses;
   cmd->state.framebuffer = fb;
   cmd->state.render_area = pRenderPassBegin->renderArea;

   tu_cmd_prepare_tile_store_ib(cmd);

   /* Note: because this is external, any flushes will happen before draw_cs
    * gets called. However deferred flushes could have to happen later as part
    * of the subpass.
    */
   tu_subpass_barrier(cmd, &pass->subpasses[0].start_barrier, true);
   cmd->state.renderpass_cache.pending_flush_bits =
      cmd->state.cache.pending_flush_bits;
   cmd->state.renderpass_cache.flush_bits = 0;

   tu_emit_renderpass_begin(cmd, pRenderPassBegin);

   tu6_emit_zs(cmd, cmd->state.subpass, &cmd->draw_cs);
   tu6_emit_mrt(cmd, cmd->state.subpass, &cmd->draw_cs);
   tu6_emit_msaa(&cmd->draw_cs, cmd->state.subpass->samples);
   tu6_emit_render_cntl(cmd, cmd->state.subpass, &cmd->draw_cs, false);

   tu_set_input_attachments(cmd, cmd->state.subpass);

   for (uint32_t i = 0; i < fb->attachment_count; ++i) {
      const struct tu_image_view *iview = fb->attachments[i].attachment;
      tu_bo_list_add(&cmd->bo_list, iview->image->bo,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
   }

   cmd->state.dirty |= TU_CMD_DIRTY_DRAW_STATE;
}

void
tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
                       const VkRenderPassBeginInfo *pRenderPassBeginInfo,
                       const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
{
   tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
                         pSubpassBeginInfo->contents);
}
void
tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   const struct tu_render_pass *pass = cmd->state.pass;
   struct tu_cs *cs = &cmd->draw_cs;

   const struct tu_subpass *subpass = cmd->state.subpass++;

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);

   if (subpass->resolve_attachments) {
      tu6_emit_blit_scissor(cmd, cs, true);

      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         tu_store_gmem_attachment(cmd, cs, a,
                                  subpass->color_attachments[i].attachment);

         if (pass->attachments[a].gmem_offset < 0)
            continue;

         /* TODO:
          * check if the resolved attachment is needed by later subpasses,
          * if it is, should be doing a GMEM->GMEM resolve instead of GMEM->MEM->GMEM..
          */
         tu_finishme("missing GMEM->GMEM resolve path\n");
         tu_load_gmem_attachment(cmd, cs, a, true);
      }
   }

   tu_cond_exec_end(cs);

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);

   tu6_emit_sysmem_resolves(cmd, cs, subpass);

   tu_cond_exec_end(cs);

   /* Handle dependencies for the next subpass */
   tu_subpass_barrier(cmd, &cmd->state.subpass->start_barrier, false);

   /* emit mrt/zs/msaa/ubwc state for the subpass that is starting */
   tu6_emit_zs(cmd, cmd->state.subpass, cs);
   tu6_emit_mrt(cmd, cmd->state.subpass, cs);
   tu6_emit_msaa(cs, cmd->state.subpass->samples);
   tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, false);

   tu_set_input_attachments(cmd, cmd->state.subpass);
}

void
tu_CmdNextSubpass2(VkCommandBuffer commandBuffer,
                   const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
                   const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}
static void
tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
                     struct tu_descriptor_state *descriptors_state,
                     gl_shader_stage type,
                     uint32_t *push_constants)
{
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   const struct ir3_ubo_analysis_state *state = &link->const_state.ubo_state;

   if (link->push_consts.count > 0) {
      unsigned num_units = link->push_consts.count;
      unsigned offset = link->push_consts.lo;
      tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_units * 4);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                 CP_LOAD_STATE6_0_NUM_UNIT(num_units));
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, 0);
      for (unsigned i = 0; i < num_units * 4; i++)
         tu_cs_emit(cs, push_constants[i + offset * 4]);
   }

   for (uint32_t i = 0; i < state->num_enabled; i++) {
      uint32_t size = state->range[i].end - state->range[i].start;
      uint32_t offset = state->range[i].start;

      /* and even if the start of the const buffer is before
       * first_immediate, the end may not be:
       */
      size = MIN2(size, (16 * link->constlen) - state->range[i].offset);

      if (size == 0)
         continue;

      /* things should be aligned to vec4: */
      debug_assert((state->range[i].offset % 16) == 0);
      debug_assert((size % 16) == 0);
      debug_assert((offset % 16) == 0);

      /* Dig out the descriptor from the descriptor state and read the VA from
       * it.
       */
      assert(state->range[i].ubo.bindless);
      uint32_t *base = state->range[i].ubo.bindless_base == MAX_SETS ?
         descriptors_state->dynamic_descriptors :
         descriptors_state->sets[state->range[i].ubo.bindless_base]->mapped_ptr;
      unsigned block = state->range[i].ubo.block;
      uint32_t *desc = base + block * A6XX_TEX_CONST_DWORDS;
      uint64_t va = desc[0] | ((uint64_t)(desc[1] & A6XX_UBO_1_BASE_HI__MASK) << 32);

      tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                 CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
      tu_cs_emit_qw(cs, va + offset);
   }
}
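
/* Note on units: CP_LOAD_STATE6 addresses constants in vec4 units, which is
 * why DST_OFF and NUM_UNIT above divide byte offsets/sizes by 16.  For
 * example, a UBO range starting at byte 64 and covering 128 bytes loads into
 * constant vec4s c4..c11 (DST_OFF = 4, NUM_UNIT = 8).
 */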
static struct tu_draw_state
tu6_emit_consts(struct tu_cmd_buffer *cmd,
                const struct tu_pipeline *pipeline,
                struct tu_descriptor_state *descriptors_state,
                gl_shader_stage type)
{
   struct tu_cs cs;
   tu_cs_begin_sub_stream(&cmd->sub_cs, 512, &cs); /* TODO: maximum size? */

   tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);

   return tu_cs_end_draw_state(&cmd->sub_cs, &cs);
}

static struct tu_draw_state
tu6_emit_vertex_buffers(struct tu_cmd_buffer *cmd,
                        const struct tu_pipeline *pipeline)
{
   struct tu_cs cs;
   tu_cs_begin_sub_stream(&cmd->sub_cs, 4 * MAX_VBS, &cs);

   for_each_bit(binding, pipeline->vi.bindings_used) {
      const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
      const VkDeviceSize offset = buf->bo_offset +
         cmd->state.vb.offsets[binding];

      tu_cs_emit_regs(&cs,
                      A6XX_VFD_FETCH_BASE(binding, .bo = buf->bo, .bo_offset = offset),
                      A6XX_VFD_FETCH_SIZE(binding, buf->size - offset));
   }

   cmd->vertex_bindings_set = pipeline->vi.bindings_used;

   return tu_cs_end_draw_state(&cmd->sub_cs, &cs);
}
static uint64_t
get_tess_param_bo_size(const struct tu_pipeline *pipeline,
                       uint32_t draw_count)
{
   /* TODO: For indirect draws, we can't compute the BO size ahead of time.
    * Still not sure what to do here, so just allocate a reasonably large
    * BO and hope for the best for now. */

   /* the tess param BO is pipeline->tess.param_stride bytes per patch,
    * which includes both the per-vertex outputs and per-patch outputs
    * build_primitive_map in ir3 calculates this stride
    */
   uint32_t verts_per_patch = pipeline->ia.primtype - DI_PT_PATCHES0;
   uint32_t num_patches = draw_count / verts_per_patch;
   return num_patches * pipeline->tess.param_stride;
}

static uint64_t
get_tess_factor_bo_size(const struct tu_pipeline *pipeline,
                        uint32_t draw_count)
{
   /* TODO: For indirect draws, we can't compute the BO size ahead of time.
    * Still not sure what to do here, so just allocate a reasonably large
    * BO and hope for the best for now. */

   /* Each distinct patch gets its own tess factor output. */
   uint32_t verts_per_patch = pipeline->ia.primtype - DI_PT_PATCHES0;
   uint32_t num_patches = draw_count / verts_per_patch;
   uint32_t factor_stride;
   switch (pipeline->tess.patch_type) {
   case IR3_TESS_ISOLINES:
      factor_stride = 12;
      break;
   case IR3_TESS_TRIANGLES:
      factor_stride = 20;
      break;
   case IR3_TESS_QUADS:
      factor_stride = 28;
      break;
   default:
      unreachable("bad tessmode");
   }
   return factor_stride * num_patches;
}
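
/* Sizing example: a direct draw of 256 vertices with triangle patches
 * (verts_per_patch = 3) gives num_patches = 85, so the factor BO needs
 * 85 * factor_stride bytes and the param BO 85 * tess.param_stride bytes.
 * Both are carved out of one scratch BO in tu6_emit_tess_consts() below,
 * factors first and params immediately after.
 */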
static VkResult
tu6_emit_tess_consts(struct tu_cmd_buffer *cmd,
                     uint32_t draw_count,
                     const struct tu_pipeline *pipeline,
                     struct tu_draw_state *state,
                     uint64_t *factor_iova)
{
   struct tu_cs cs;
   VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 16, &cs);
   if (result != VK_SUCCESS)
      return result;

   uint64_t tess_factor_size = get_tess_factor_bo_size(pipeline, draw_count);
   uint64_t tess_param_size = get_tess_param_bo_size(pipeline, draw_count);
   uint64_t tess_bo_size = tess_factor_size + tess_param_size;
   if (tess_bo_size > 0) {
      struct tu_bo *tess_bo;
      result = tu_get_scratch_bo(cmd->device, tess_bo_size, &tess_bo);
      if (result != VK_SUCCESS)
         return result;

      tu_bo_list_add(&cmd->bo_list, tess_bo,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
      uint64_t tess_factor_iova = tess_bo->iova;
      uint64_t tess_param_iova = tess_factor_iova + tess_factor_size;

      tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
      tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(pipeline->tess.hs_bo_regid) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_HS_SHADER) |
                 CP_LOAD_STATE6_0_NUM_UNIT(1));
      tu_cs_emit(&cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
      tu_cs_emit(&cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
      tu_cs_emit_qw(&cs, tess_param_iova);
      tu_cs_emit_qw(&cs, tess_factor_iova);

      tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
      tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(pipeline->tess.ds_bo_regid) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_DS_SHADER) |
                 CP_LOAD_STATE6_0_NUM_UNIT(1));
      tu_cs_emit(&cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
      tu_cs_emit(&cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
      tu_cs_emit_qw(&cs, tess_param_iova);
      tu_cs_emit_qw(&cs, tess_factor_iova);

      *factor_iova = tess_factor_iova;
   }
   *state = tu_cs_end_draw_state(&cmd->sub_cs, &cs);
   return VK_SUCCESS;
}
static VkResult
tu6_draw_common(struct tu_cmd_buffer *cmd,
                struct tu_cs *cs,
                bool indexed,
                /* note: draw_count is 0 for indirect */
                uint32_t draw_count)
{
   const struct tu_pipeline *pipeline = cmd->state.pipeline;
   VkResult result;

   struct tu_descriptor_state *descriptors_state =
      &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];

   tu_emit_cache_flush_renderpass(cmd, cs);

   tu_cs_emit_regs(cs, A6XX_PC_PRIMITIVE_CNTL_0(
         .primitive_restart =
               pipeline->ia.primitive_restart && indexed,
         .tess_upper_left_domain_origin =
               pipeline->tess.upper_left_domain_origin));

   if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
      cmd->state.shader_const[MESA_SHADER_VERTEX] =
         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX);
      cmd->state.shader_const[MESA_SHADER_TESS_CTRL] =
         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_TESS_CTRL);
      cmd->state.shader_const[MESA_SHADER_TESS_EVAL] =
         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_TESS_EVAL);
      cmd->state.shader_const[MESA_SHADER_GEOMETRY] =
         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_GEOMETRY);
      cmd->state.shader_const[MESA_SHADER_FRAGMENT] =
         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT);
   }

   if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS)
      cmd->state.vertex_buffers = tu6_emit_vertex_buffers(cmd, pipeline);

   bool has_tess =
         pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
   struct tu_draw_state tess_consts = {};
   if (has_tess) {
      uint64_t tess_factor_iova = 0;

      cmd->state.has_tess = true;
      result = tu6_emit_tess_consts(cmd, draw_count, pipeline, &tess_consts, &tess_factor_iova);
      if (result != VK_SUCCESS)
         return result;

      /* this sequence matches what the blob does before every tess draw
       * PC_TESSFACTOR_ADDR_LO is a non-context register and needs a wfi
       * before writing to it
       */
      tu_cs_emit_wfi(cs);
      tu_cs_emit_pkt4(cs, REG_A6XX_PC_TESSFACTOR_ADDR_LO, 2);
      tu_cs_emit_qw(cs, tess_factor_iova);

      tu_cs_emit_pkt7(cs, CP_SET_SUBDRAW_SIZE, 1);
      tu_cs_emit(cs, draw_count);
   }

   /* for the first draw in a renderpass, re-emit all the draw states
    *
    * and if a draw-state disabling path (CmdClearAttachments 3D fallback) was
    * used, then draw states must be re-emitted. note however this only happens
    * in the sysmem path, so it can be skipped for the gmem path (TODO)
    *
    * the two input attachment states are excluded because secondary command
    * buffer doesn't have a state ib to restore it, and not re-emitting them
    * is OK since CmdClearAttachments won't disable/overwrite them
    */
   if (cmd->state.dirty & TU_CMD_DIRTY_DRAW_STATE) {
      tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (TU_DRAW_STATE_COUNT - 2));

      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state);
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state);
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_TESS, tess_consts);
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI, pipeline->vi.state);
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state);
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_RAST, pipeline->rast_state);
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS, pipeline->ds_state);
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_BLEND, pipeline->blend_state);
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const[MESA_SHADER_VERTEX]);
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_HS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_CTRL]);
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_EVAL]);
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const[MESA_SHADER_GEOMETRY]);
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const[MESA_SHADER_FRAGMENT]);
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DESC_SETS, cmd->state.desc_sets);
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DESC_SETS_LOAD, pipeline->load_state);
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers);
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_PARAMS, cmd->state.vs_params);

      for (uint32_t i = 0; i < ARRAY_SIZE(cmd->state.dynamic_state); i++) {
         tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DYNAMIC + i,
                               ((pipeline->dynamic_state_mask & BIT(i)) ?
                                cmd->state.dynamic_state[i] :
                                pipeline->dynamic_state[i]));
      }
   } else {
      /* emit draw states that were just updated
       * note we eventually don't want to have to emit anything here
       */
      uint32_t draw_state_count =
         has_tess +
         ((cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) ? 5 : 0) +
         ((cmd->state.dirty & TU_CMD_DIRTY_DESC_SETS_LOAD) ? 1 : 0) +
         ((cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) ? 1 : 0) +
         1; /* vs_params */

      tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_count);

      /* We may need to re-emit tess consts if the current draw call is
       * sufficiently larger than the last draw call. */
      if (has_tess)
         tu_cs_emit_draw_state(cs, TU_DRAW_STATE_TESS, tess_consts);
      if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
         tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const[MESA_SHADER_VERTEX]);
         tu_cs_emit_draw_state(cs, TU_DRAW_STATE_HS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_CTRL]);
         tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_EVAL]);
         tu_cs_emit_draw_state(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const[MESA_SHADER_GEOMETRY]);
         tu_cs_emit_draw_state(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const[MESA_SHADER_FRAGMENT]);
      }
      if (cmd->state.dirty & TU_CMD_DIRTY_DESC_SETS_LOAD)
         tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DESC_SETS_LOAD, pipeline->load_state);
      if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS)
         tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers);
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_PARAMS, cmd->state.vs_params);
   }

   tu_cs_sanity_check(cs);

   /* There are too many graphics dirty bits to list here, so just list the
    * bits to preserve instead. The only things not emitted here are
    * compute-related state.
    */
   cmd->state.dirty &= TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD;
   return VK_SUCCESS;
}
static uint32_t
tu_draw_initiator(struct tu_cmd_buffer *cmd, enum pc_di_src_sel src_sel)
{
   const struct tu_pipeline *pipeline = cmd->state.pipeline;
   uint32_t initiator =
      CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(pipeline->ia.primtype) |
      CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(src_sel) |
      CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(cmd->state.index_size) |
      CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY);

   if (pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT)
      initiator |= CP_DRAW_INDX_OFFSET_0_GS_ENABLE;

   switch (pipeline->tess.patch_type) {
   case IR3_TESS_TRIANGLES:
      initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_TRIANGLES) |
                   CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
      break;
   case IR3_TESS_ISOLINES:
      initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_ISOLINES) |
                   CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
      break;
   case IR3_TESS_NONE:
      initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_QUADS);
      break;
   case IR3_TESS_QUADS:
      initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_QUADS) |
                   CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
      break;
   }
   return initiator;
}

static uint32_t
vs_params_offset(struct tu_cmd_buffer *cmd)
{
   const struct tu_program_descriptor_linkage *link =
      &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
   const struct ir3_const_state *const_state = &link->const_state;

   if (const_state->offsets.driver_param >= link->constlen)
      return 0;

   /* this layout is required by CP_DRAW_INDIRECT_MULTI */
   STATIC_ASSERT(IR3_DP_DRAWID == 0);
   STATIC_ASSERT(IR3_DP_VTXID_BASE == 1);
   STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);

   /* 0 means disabled for CP_DRAW_INDIRECT_MULTI */
   assert(const_state->offsets.driver_param != 0);

   return const_state->offsets.driver_param;
}
static struct tu_draw_state
tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
                   uint32_t vertex_offset,
                   uint32_t first_instance)
{
   uint32_t offset = vs_params_offset(cmd);

   struct tu_cs cs;
   VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 3 + (offset ? 8 : 0), &cs);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return (struct tu_draw_state) {};
   }

   /* TODO: don't make a new draw state when it doesn't change */

   tu_cs_emit_regs(&cs,
                   A6XX_VFD_INDEX_OFFSET(vertex_offset),
                   A6XX_VFD_INSTANCE_START_OFFSET(first_instance));

   if (offset) {
      tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
      tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
                 CP_LOAD_STATE6_0_NUM_UNIT(1));
      tu_cs_emit(&cs, 0);
      tu_cs_emit(&cs, 0);

      tu_cs_emit(&cs, 0); /* draw id */
      tu_cs_emit(&cs, vertex_offset);
      tu_cs_emit(&cs, first_instance);
      tu_cs_emit(&cs, 0);
   }

   struct tu_cs_entry entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
   return (struct tu_draw_state) {entry.bo->iova + entry.offset, entry.size / 4};
}

void
tu_CmdDraw(VkCommandBuffer commandBuffer,
           uint32_t vertexCount,
           uint32_t instanceCount,
           uint32_t firstVertex,
           uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *cs = &cmd->draw_cs;

   cmd->state.vs_params = tu6_emit_vs_params(cmd, firstVertex, firstInstance);

   tu6_draw_common(cmd, cs, false, vertexCount);

   tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
   tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_INDEX));
   tu_cs_emit(cs, instanceCount);
   tu_cs_emit(cs, vertexCount);
}
void
tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
                  uint32_t indexCount,
                  uint32_t instanceCount,
                  uint32_t firstIndex,
                  int32_t vertexOffset,
                  uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *cs = &cmd->draw_cs;

   cmd->state.vs_params = tu6_emit_vs_params(cmd, vertexOffset, firstInstance);

   tu6_draw_common(cmd, cs, true, indexCount);

   tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 7);
   tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_DMA));
   tu_cs_emit(cs, instanceCount);
   tu_cs_emit(cs, indexCount);
   tu_cs_emit(cs, firstIndex);
   tu_cs_emit_qw(cs, cmd->state.index_va);
   tu_cs_emit(cs, cmd->state.max_index_count);
}

/* Various firmware bugs/inconsistencies mean that some indirect draw opcodes
 * do not wait for WFI's to complete before executing. Add a WAIT_FOR_ME if
 * pending for these opcodes. This may result in a few extra WAIT_FOR_ME's
 * with these opcodes, but the alternative would add unnecessary WAIT_FOR_ME's
 * before draw opcodes that don't need it.
 */
static void
draw_wfm(struct tu_cmd_buffer *cmd)
{
   cmd->state.renderpass_cache.flush_bits |=
      cmd->state.renderpass_cache.pending_flush_bits & TU_CMD_FLAG_WAIT_FOR_ME;
   cmd->state.renderpass_cache.pending_flush_bits &= ~TU_CMD_FLAG_WAIT_FOR_ME;
}
void
tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
                   VkBuffer _buffer,
                   VkDeviceSize offset,
                   uint32_t drawCount,
                   uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buf, _buffer);
   struct tu_cs *cs = &cmd->draw_cs;

   cmd->state.vs_params = (struct tu_draw_state) {};

   /* The latest known a630_sqe.fw fails to wait for WFI before reading the
    * indirect buffer when using CP_DRAW_INDIRECT_MULTI, so we have to fall
    * back to CP_WAIT_FOR_ME except for a650 which has a fixed firmware.
    *
    * TODO: There may be newer a630_sqe.fw released in the future which fixes
    * this, if so we should detect it and avoid this workaround.
    */
   if (cmd->device->physical_device->gpu_id != 650)
      draw_wfm(cmd);

   tu6_draw_common(cmd, cs, false, 0);

   tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 6);
   tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_INDEX));
   tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_NORMAL) |
                  A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
   tu_cs_emit(cs, drawCount);
   tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
   tu_cs_emit(cs, stride);

   tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
}

void
tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
                          VkBuffer _buffer,
                          VkDeviceSize offset,
                          uint32_t drawCount,
                          uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buf, _buffer);
   struct tu_cs *cs = &cmd->draw_cs;

   cmd->state.vs_params = (struct tu_draw_state) {};

   if (cmd->device->physical_device->gpu_id != 650)
      draw_wfm(cmd);

   tu6_draw_common(cmd, cs, true, 0);

   tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 9);
   tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_DMA));
   tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_INDEXED) |
                  A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
   tu_cs_emit(cs, drawCount);
   tu_cs_emit_qw(cs, cmd->state.index_va);
   tu_cs_emit(cs, cmd->state.max_index_count);
   tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
   tu_cs_emit(cs, stride);

   tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
}
void
tu_CmdDrawIndirectCount(VkCommandBuffer commandBuffer,
                        VkBuffer _buffer,
                        VkDeviceSize offset,
                        VkBuffer countBuffer,
                        VkDeviceSize countBufferOffset,
                        uint32_t drawCount,
                        uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buf, _buffer);
   TU_FROM_HANDLE(tu_buffer, count_buf, countBuffer);
   struct tu_cs *cs = &cmd->draw_cs;

   cmd->state.vs_params = (struct tu_draw_state) {};

   /* It turns out that the firmware we have for a650 only partially fixed the
    * problem with CP_DRAW_INDIRECT_MULTI not waiting for WFI's to complete
    * before reading indirect parameters. It waits for WFI's before reading
    * the draw parameters, but after reading the indirect count :(.
    */
   draw_wfm(cmd);

   tu6_draw_common(cmd, cs, false, 0);

   tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 8);
   tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_INDEX));
   tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_INDIRECT_COUNT) |
                  A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
   tu_cs_emit(cs, drawCount);
   tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
   tu_cs_emit_qw(cs, count_buf->bo->iova + count_buf->bo_offset + countBufferOffset);
   tu_cs_emit(cs, stride);

   tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   tu_bo_list_add(&cmd->bo_list, count_buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
}

void
tu_CmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer,
                               VkBuffer _buffer,
                               VkDeviceSize offset,
                               VkBuffer countBuffer,
                               VkDeviceSize countBufferOffset,
                               uint32_t drawCount,
                               uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buf, _buffer);
   TU_FROM_HANDLE(tu_buffer, count_buf, countBuffer);
   struct tu_cs *cs = &cmd->draw_cs;

   cmd->state.vs_params = (struct tu_draw_state) {};

   draw_wfm(cmd);

   tu6_draw_common(cmd, cs, true, 0);

   tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 11);
   tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_DMA));
   tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_INDIRECT_COUNT_INDEXED) |
                  A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
   tu_cs_emit(cs, drawCount);
   tu_cs_emit_qw(cs, cmd->state.index_va);
   tu_cs_emit(cs, cmd->state.max_index_count);
   tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
   tu_cs_emit_qw(cs, count_buf->bo->iova + count_buf->bo_offset + countBufferOffset);
   tu_cs_emit(cs, stride);

   tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   tu_bo_list_add(&cmd->bo_list, count_buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
}
3485 void tu_CmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer
,
3486 uint32_t instanceCount
,
3487 uint32_t firstInstance
,
3488 VkBuffer _counterBuffer
,
3489 VkDeviceSize counterBufferOffset
,
3490 uint32_t counterOffset
,
3491 uint32_t vertexStride
)
3493 TU_FROM_HANDLE(tu_cmd_buffer
, cmd
, commandBuffer
);
3494 TU_FROM_HANDLE(tu_buffer
, buf
, _counterBuffer
);
3495 struct tu_cs
*cs
= &cmd
->draw_cs
;
3497 /* All known firmware versions do not wait for WFI's with CP_DRAW_AUTO.
3498 * Plus, for the common case where the counter buffer is written by
3499 * vkCmdEndTransformFeedback, we need to wait for the CP_WAIT_MEM_WRITES to
3500 * complete which means we need a WAIT_FOR_ME anyway.
3504 cmd
->state
.vs_params
= tu6_emit_vs_params(cmd
, 0, firstInstance
);
3506 tu6_draw_common(cmd
, cs
, false, 0);
3508 tu_cs_emit_pkt7(cs
, CP_DRAW_AUTO
, 6);
3509 tu_cs_emit(cs
, tu_draw_initiator(cmd
, DI_SRC_SEL_AUTO_XFB
));
3510 tu_cs_emit(cs
, instanceCount
);
3511 tu_cs_emit_qw(cs
, buf
->bo
->iova
+ buf
->bo_offset
+ counterBufferOffset
);
3512 tu_cs_emit(cs
, counterOffset
);
3513 tu_cs_emit(cs
, vertexStride
);
3515 tu_bo_list_add(&cmd
->bo_list
, buf
->bo
, MSM_SUBMIT_BO_READ
);
struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];

   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};

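/* Upload the ir3 "driver params" for a compute dispatch (workgroup counts and
 * local size) as constants, at the const offset the compiler reserved in
 * const_state->offsets.driver_param. If the shader doesn't actually use any
 * driver params (constlen <= offset), nothing is emitted.
 */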
static void
tu_emit_compute_driver_params(struct tu_cs *cs, struct tu_pipeline *pipeline,
                              const struct tu_dispatch_info *info)
{
   gl_shader_stage type = MESA_SHADER_COMPUTE;
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   const struct ir3_const_state *const_state = &link->const_state;
   uint32_t offset = const_state->offsets.driver_param;

   if (link->constlen <= offset)
      return;

   if (!info->indirect) {
      uint32_t driver_params[IR3_DP_CS_COUNT] = {
         [IR3_DP_NUM_WORK_GROUPS_X] = info->blocks[0],
         [IR3_DP_NUM_WORK_GROUPS_Y] = info->blocks[1],
         [IR3_DP_NUM_WORK_GROUPS_Z] = info->blocks[2],
         [IR3_DP_LOCAL_GROUP_SIZE_X] = pipeline->compute.local_size[0],
         [IR3_DP_LOCAL_GROUP_SIZE_Y] = pipeline->compute.local_size[1],
         [IR3_DP_LOCAL_GROUP_SIZE_Z] = pipeline->compute.local_size[2],
      };

      uint32_t num_consts = MIN2(const_state->num_driver_params,
                                 (link->constlen - offset) * 4);
      /* push constants */
      tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_consts);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                 CP_LOAD_STATE6_0_NUM_UNIT(num_consts / 4));
      /* EXT_SRC_ADDR is unused with a direct (inline) source */
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, 0);

      for (uint32_t i = 0; i < num_consts; i++)
         tu_cs_emit(cs, driver_params[i]);
   } else {
      tu_finishme("Indirect driver params");
   }
}

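/* Record a compute dispatch: flush/invalidate caches as needed, emit the
 * compute constants, descriptor and driver-param state, program the
 * HLSQ_CS_NDRANGE registers (global size = local size * workgroup count),
 * and finally launch with CP_EXEC_CS, or CP_EXEC_CS_INDIRECT when the
 * workgroup counts come from a buffer.
 */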
static void
tu_dispatch(struct tu_cmd_buffer *cmd,
            const struct tu_dispatch_info *info)
{
   struct tu_cs *cs = &cmd->cs;
   struct tu_pipeline *pipeline = cmd->state.compute_pipeline;
   struct tu_descriptor_state *descriptors_state =
      &cmd->descriptors[VK_PIPELINE_BIND_POINT_COMPUTE];

   /* TODO: We could probably flush less if we add a compute_flush_bits
    * bitfield.
    */
   tu_emit_cache_flush(cmd, cs);

   /* note: no reason to have this in a separate IB */
   tu_cs_emit_state_ib(cs,
         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE));

   tu_emit_compute_driver_params(cs, pipeline, info);

   if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD)
      tu_cs_emit_state_ib(cs, pipeline->load_state);

   cmd->state.dirty &= ~TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD;

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));

   const uint32_t *local_size = pipeline->compute.local_size;
   const uint32_t *num_groups = info->blocks;
   tu_cs_emit_regs(cs,
                   A6XX_HLSQ_CS_NDRANGE_0(.kerneldim = 3,
                                          .localsizex = local_size[0] - 1,
                                          .localsizey = local_size[1] - 1,
                                          .localsizez = local_size[2] - 1),
                   A6XX_HLSQ_CS_NDRANGE_1(.globalsize_x = local_size[0] * num_groups[0]),
                   A6XX_HLSQ_CS_NDRANGE_2(.globaloff_x = 0),
                   A6XX_HLSQ_CS_NDRANGE_3(.globalsize_y = local_size[1] * num_groups[1]),
                   A6XX_HLSQ_CS_NDRANGE_4(.globaloff_y = 0),
                   A6XX_HLSQ_CS_NDRANGE_5(.globalsize_z = local_size[2] * num_groups[2]),
                   A6XX_HLSQ_CS_NDRANGE_6(.globaloff_z = 0));

   tu_cs_emit_regs(cs,
                   A6XX_HLSQ_CS_KERNEL_GROUP_X(1),
                   A6XX_HLSQ_CS_KERNEL_GROUP_Y(1),
                   A6XX_HLSQ_CS_KERNEL_GROUP_Z(1));

   if (info->indirect) {
      uint64_t iova = tu_buffer_iova(info->indirect) + info->indirect_offset;

      tu_bo_list_add(&cmd->bo_list, info->indirect->bo,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);

      tu_cs_emit_pkt7(cs, CP_EXEC_CS_INDIRECT, 4);
      tu_cs_emit(cs, 0x00000000);
      tu_cs_emit_qw(cs, iova);
      tu_cs_emit(cs,
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(local_size[0] - 1) |
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(local_size[1] - 1) |
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(local_size[2] - 1));
   } else {
      tu_cs_emit_pkt7(cs, CP_EXEC_CS, 4);
      tu_cs_emit(cs, 0x00000000);
      tu_cs_emit(cs, CP_EXEC_CS_1_NGROUPS_X(info->blocks[0]));
      tu_cs_emit(cs, CP_EXEC_CS_2_NGROUPS_Y(info->blocks[1]));
      tu_cs_emit(cs, CP_EXEC_CS_3_NGROUPS_Z(info->blocks[2]));
   }
}

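/* vkCmdDispatchBase: the base workgroup offsets only end up in info.offsets;
 * the plain vkCmdDispatch below is just the (0, 0, 0) special case.
 */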
void
tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
                   uint32_t base_x,
                   uint32_t base_y,
                   uint32_t base_z,
                   uint32_t x,
                   uint32_t y,
                   uint32_t z)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_dispatch_info info = {};

   info.blocks[0] = x;
   info.blocks[1] = y;
   info.blocks[2] = z;

   info.offsets[0] = base_x;
   info.offsets[1] = base_y;
   info.offsets[2] = base_z;
   tu_dispatch(cmd_buffer, &info);
}

void
tu_CmdDispatch(VkCommandBuffer commandBuffer,
               uint32_t x,
               uint32_t y,
               uint32_t z)
{
   tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
}

void
tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
                       VkBuffer _buffer,
                       VkDeviceSize offset)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_dispatch_info info = {};

   info.indirect = buffer;
   info.indirect_offset = offset;

   tu_dispatch(cmd_buffer, &info);
}

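/* Ending the render pass is where the recorded draw_cs/draw_epilogue_cs are
 * actually executed: either once in sysmem mode or replayed per tile in GMEM
 * mode. Afterwards the draw streams are recycled for the next render pass and
 * any flushes the render pass left pending are handed back to the main
 * command-stream cache state.
 */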
void
tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   tu_cs_end(&cmd_buffer->draw_cs);
   tu_cs_end(&cmd_buffer->draw_epilogue_cs);

   if (use_sysmem_rendering(cmd_buffer))
      tu_cmd_render_sysmem(cmd_buffer);
   else
      tu_cmd_render_tiles(cmd_buffer);

   /* discard draw_cs and draw_epilogue_cs entries now that the tiles are
      rendered */
   tu_cs_discard_entries(&cmd_buffer->draw_cs);
   tu_cs_begin(&cmd_buffer->draw_cs);
   tu_cs_discard_entries(&cmd_buffer->draw_epilogue_cs);
   tu_cs_begin(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->state.cache.pending_flush_bits |=
      cmd_buffer->state.renderpass_cache.pending_flush_bits;
   tu_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier, true);

   cmd_buffer->state.pass = NULL;
   cmd_buffer->state.subpass = NULL;
   cmd_buffer->state.framebuffer = NULL;
   cmd_buffer->state.has_tess = false;
   cmd_buffer->state.has_subpass_predication = false;
}

void
tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
                     const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdEndRenderPass(commandBuffer);
}

struct tu_barrier_info
{
   uint32_t eventCount;
   const VkEvent *pEvents;
   VkPipelineStageFlags srcStageMask;
};

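/* Shared implementation of vkCmdPipelineBarrier and vkCmdWaitEvents: all
 * source/destination access masks are ORed together, translated into turnip
 * cache-flush bits with vk2tu_access(), and recorded against the relevant
 * cache state (renderpass_cache inside a render pass). For WaitEvents, a
 * CP_WAIT_REG_MEM poll is also emitted for each event BO so the CP stalls
 * until the event value has been written.
 */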
static void
tu_barrier(struct tu_cmd_buffer *cmd,
           uint32_t memoryBarrierCount,
           const VkMemoryBarrier *pMemoryBarriers,
           uint32_t bufferMemoryBarrierCount,
           const VkBufferMemoryBarrier *pBufferMemoryBarriers,
           uint32_t imageMemoryBarrierCount,
           const VkImageMemoryBarrier *pImageMemoryBarriers,
           const struct tu_barrier_info *info)
{
   struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;
   VkAccessFlags srcAccessMask = 0;
   VkAccessFlags dstAccessMask = 0;

   for (uint32_t i = 0; i < memoryBarrierCount; i++) {
      srcAccessMask |= pMemoryBarriers[i].srcAccessMask;
      dstAccessMask |= pMemoryBarriers[i].dstAccessMask;
   }

   for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
      srcAccessMask |= pBufferMemoryBarriers[i].srcAccessMask;
      dstAccessMask |= pBufferMemoryBarriers[i].dstAccessMask;
   }

   enum tu_cmd_access_mask src_flags = 0;
   enum tu_cmd_access_mask dst_flags = 0;

   for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
      TU_FROM_HANDLE(tu_image, image, pImageMemoryBarriers[i].image);
      VkImageLayout old_layout = pImageMemoryBarriers[i].oldLayout;
      /* For non-linear images, PREINITIALIZED is the same as UNDEFINED */
      if (old_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
          (image->tiling != VK_IMAGE_TILING_LINEAR &&
           old_layout == VK_IMAGE_LAYOUT_PREINITIALIZED)) {
         /* The underlying memory for this image may have been used earlier
          * within the same queue submission for a different image, which
          * means that there may be old, stale cache entries which are in the
          * "wrong" location, which could cause problems later after writing
          * to the image. We don't want these entries being flushed later and
          * overwriting the actual image, so we need to flush the CCU.
          */
         src_flags |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
      }
      srcAccessMask |= pImageMemoryBarriers[i].srcAccessMask;
      dstAccessMask |= pImageMemoryBarriers[i].dstAccessMask;
   }

   /* Inside a renderpass, we don't know yet whether we'll be using sysmem
    * so we have to use the sysmem flushes.
    */
   bool gmem = cmd->state.ccu_state == TU_CMD_CCU_GMEM &&
      !cmd->state.pass;
   src_flags |= vk2tu_access(srcAccessMask, gmem);
   dst_flags |= vk2tu_access(dstAccessMask, gmem);

   struct tu_cache_state *cache =
      cmd->state.pass ? &cmd->state.renderpass_cache : &cmd->state.cache;
   tu_flush_for_access(cache, src_flags, dst_flags);

   for (uint32_t i = 0; i < info->eventCount; i++) {
      TU_FROM_HANDLE(tu_event, event, info->pEvents[i]);

      tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_READ);

      tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
      tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                     CP_WAIT_REG_MEM_0_POLL_MEMORY);
      tu_cs_emit_qw(cs, event->bo.iova); /* POLL_ADDR_LO/HI */
      tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(1));
      tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0u));
      tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(20));
   }
}

void
tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
                      VkPipelineStageFlags srcStageMask,
                      VkPipelineStageFlags dstStageMask,
                      VkDependencyFlags dependencyFlags,
                      uint32_t memoryBarrierCount,
                      const VkMemoryBarrier *pMemoryBarriers,
                      uint32_t bufferMemoryBarrierCount,
                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                      uint32_t imageMemoryBarrierCount,
                      const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = 0;
   info.pEvents = NULL;
   info.srcStageMask = srcStageMask;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}

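/* Write 'value' into the event BO for vkCmdSetEvent/vkCmdResetEvent. When
 * only top-of-pipe-style stages need to be waited for, a plain CP_MEM_WRITE
 * is enough; otherwise an RB_DONE_TS timestamp event is used so the write
 * only lands once preceding work has finished.
 */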
static void
write_event(struct tu_cmd_buffer *cmd, struct tu_event *event,
            VkPipelineStageFlags stageMask, unsigned value)
{
   struct tu_cs *cs = &cmd->cs;

   /* vkCmdSetEvent/vkCmdResetEvent cannot be called inside a render pass */
   assert(!cmd->state.pass);

   tu_emit_cache_flush(cmd, cs);

   tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_WRITE);

   /* Flags that only require a top-of-pipe event. DrawIndirect parameters are
    * read by the CP, so the draw indirect stage counts as top-of-pipe too.
    */
   VkPipelineStageFlags top_of_pipe_flags =
      VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
      VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;

   if (!(stageMask & ~top_of_pipe_flags)) {
      tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
      tu_cs_emit_qw(cs, event->bo.iova); /* ADDR_LO/HI */
      tu_cs_emit(cs, value);
   } else {
      /* Use a RB_DONE_TS event to wait for everything to complete. */
      tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 4);
      tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(RB_DONE_TS));
      tu_cs_emit_qw(cs, event->bo.iova);
      tu_cs_emit(cs, value);
   }
}

void
tu_CmdSetEvent(VkCommandBuffer commandBuffer,
               VkEvent _event,
               VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd, event, stageMask, 1);
}

void
tu_CmdResetEvent(VkCommandBuffer commandBuffer,
                 VkEvent _event,
                 VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd, event, stageMask, 0);
}

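/* vkCmdWaitEvents is implemented as a barrier that additionally waits on the
 * given events; the actual CP_WAIT_REG_MEM polling happens in tu_barrier().
 */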
void
tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
                 uint32_t eventCount,
                 const VkEvent *pEvents,
                 VkPipelineStageFlags srcStageMask,
                 VkPipelineStageFlags dstStageMask,
                 uint32_t memoryBarrierCount,
                 const VkMemoryBarrier *pMemoryBarriers,
                 uint32_t bufferMemoryBarrierCount,
                 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                 uint32_t imageMemoryBarrierCount,
                 const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = eventCount;
   info.pEvents = pEvents;
   info.srcStageMask = 0;

   tu_barrier(cmd, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}

void
tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
{
}

void
tu_CmdBeginConditionalRenderingEXT(VkCommandBuffer commandBuffer,
                                   const VkConditionalRenderingBeginInfoEXT *pConditionalRenderingBegin)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   cmd->state.predication_active = true;
   if (cmd->state.pass)
      cmd->state.has_subpass_predication = true;

   struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;

   tu_cs_emit_pkt7(cs, CP_DRAW_PRED_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 1);

   /* Wait for any writes to the predicate to land */
   if (cmd->state.pass)
      tu_emit_cache_flush_renderpass(cmd, cs);
   else
      tu_emit_cache_flush(cmd, cs);

   TU_FROM_HANDLE(tu_buffer, buf, pConditionalRenderingBegin->buffer);
   uint64_t iova = tu_buffer_iova(buf) + pConditionalRenderingBegin->offset;

   /* qcom doesn't support 32-bit reference values, only 64-bit, but Vulkan
    * mandates 32-bit comparisons. Our workaround is to copy the reference
    * value to the low 32-bits of a location where the high 32 bits are known
    * to be 0 and then compare that.
    */
   tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 5);
   tu_cs_emit(cs, 0);
   tu_cs_emit_qw(cs, global_iova(cmd, predicate));
   tu_cs_emit_qw(cs, iova);

   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
   tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

   bool inv = pConditionalRenderingBegin->flags & VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT;
   tu_cs_emit_pkt7(cs, CP_DRAW_PRED_SET, 3);
   tu_cs_emit(cs, CP_DRAW_PRED_SET_0_SRC(PRED_SRC_MEM) |
                  CP_DRAW_PRED_SET_0_TEST(inv ? EQ_0_PASS : NE_0_PASS));
   tu_cs_emit_qw(cs, global_iova(cmd, predicate));

   tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
}

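/* Turn the global draw predicate back off; draws recorded after this point
 * are unconditional again.
 */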
void
tu_CmdEndConditionalRenderingEXT(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   cmd->state.predication_active = false;

   struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;

   tu_cs_emit_pkt7(cs, CP_DRAW_PRED_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0);
}