/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "tu_private.h"

#include "registers/adreno_pm4.xml.h"
#include "registers/adreno_common.xml.h"

#include "vk_format.h"

#define OVERFLOW_FLAG_REG REG_A6XX_CP_SCRATCH_REG(0)
void
tu_bo_list_init(struct tu_bo_list *list)
{
   list->count = list->capacity = 0;
   list->bo_infos = NULL;
}
void
tu_bo_list_destroy(struct tu_bo_list *list)
{
   free(list->bo_infos);
}
void
tu_bo_list_reset(struct tu_bo_list *list)
{
   list->count = 0;
}
/**
 * \a flags consists of MSM_SUBMIT_BO_FLAGS.
 */
static uint32_t
tu_bo_list_add_info(struct tu_bo_list *list,
                    const struct drm_msm_gem_submit_bo *bo_info)
{
   assert(bo_info->handle != 0);

   for (uint32_t i = 0; i < list->count; ++i) {
      if (list->bo_infos[i].handle == bo_info->handle) {
         assert(list->bo_infos[i].presumed == bo_info->presumed);
         list->bo_infos[i].flags |= bo_info->flags;
         return i;
      }
   }

   /* grow list->bo_infos if needed */
   if (list->count == list->capacity) {
      uint32_t new_capacity = MAX2(2 * list->count, 16);
      struct drm_msm_gem_submit_bo *new_bo_infos = realloc(
         list->bo_infos, new_capacity * sizeof(struct drm_msm_gem_submit_bo));
      if (!new_bo_infos)
         return TU_BO_LIST_FAILED;
      list->bo_infos = new_bo_infos;
      list->capacity = new_capacity;
   }

   list->bo_infos[list->count] = *bo_info;
   return list->count++;
}
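/* Usage sketch (hypothetical handle value, for illustration only): adding a
 * BO that is already in the list ORs the submit flags into the existing
 * entry instead of appending a duplicate:
 *
 *    struct drm_msm_gem_submit_bo info = {
 *       .handle = 42, .flags = MSM_SUBMIT_BO_READ,
 *    };
 *    uint32_t idx = tu_bo_list_add_info(&list, &info); // new slot, count == 1
 *    info.flags = MSM_SUBMIT_BO_WRITE;
 *    tu_bo_list_add_info(&list, &info);                // same slot, flags now R|W
 */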
uint32_t
tu_bo_list_add(struct tu_bo_list *list,
               const struct tu_bo *bo,
               uint32_t flags)
{
   return tu_bo_list_add_info(list, &(struct drm_msm_gem_submit_bo) {
      .flags = flags,
      .handle = bo->gem_handle,
      .presumed = bo->iova,
   });
}
VkResult
tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other)
{
   for (uint32_t i = 0; i < other->count; i++) {
      if (tu_bo_list_add_info(list, other->bo_infos + i) == TU_BO_LIST_FAILED)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   return VK_SUCCESS;
}
static void
tu_tiling_config_update_tile_layout(struct tu_tiling_config *tiling,
                                    const struct tu_device *dev,
                                    const struct tu_render_pass *pass)
{
   const uint32_t tile_align_w = pass->tile_align_w;
   const uint32_t max_tile_width = 1024;

   /* note: don't offset the tiling config by render_area.offset,
    * because binning pass can't deal with it
    * this means we might end up with more tiles than necessary,
    * but load/store/etc are still scissored to the render_area
    */
   tiling->tile0.offset = (VkOffset2D) {};

   const uint32_t ra_width =
      tiling->render_area.extent.width +
      (tiling->render_area.offset.x - tiling->tile0.offset.x);
   const uint32_t ra_height =
      tiling->render_area.extent.height +
      (tiling->render_area.offset.y - tiling->tile0.offset.y);

   /* start from 1 tile */
   tiling->tile_count = (VkExtent2D) {
      .width = 1,
      .height = 1,
   };
   tiling->tile0.extent = (VkExtent2D) {
      .width = util_align_npot(ra_width, tile_align_w),
      .height = align(ra_height, TILE_ALIGN_H),
   };

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN)) {
      /* start with 2x2 tiles */
      tiling->tile_count.width = 2;
      tiling->tile_count.height = 2;
      tiling->tile0.extent.width = util_align_npot(DIV_ROUND_UP(ra_width, 2), tile_align_w);
      tiling->tile0.extent.height = align(DIV_ROUND_UP(ra_height, 2), TILE_ALIGN_H);
   }

   /* do not exceed max tile width */
   while (tiling->tile0.extent.width > max_tile_width) {
      tiling->tile_count.width++;
      tiling->tile0.extent.width =
         util_align_npot(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
   }

   /* will force to sysmem, don't bother trying to have a valid tile config
    * TODO: just skip all GMEM stuff when sysmem is forced?
    */
   if (!pass->gmem_pixels)
      return;

   /* do not exceed gmem size */
   while (tiling->tile0.extent.width * tiling->tile0.extent.height > pass->gmem_pixels) {
      if (tiling->tile0.extent.width > MAX2(tile_align_w, tiling->tile0.extent.height)) {
         tiling->tile_count.width++;
         tiling->tile0.extent.width =
            util_align_npot(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
      } else {
         /* if this assert fails then layout is impossible.. */
         assert(tiling->tile0.extent.height > TILE_ALIGN_H);
         tiling->tile_count.height++;
         tiling->tile0.extent.height =
            align(DIV_ROUND_UP(ra_height, tiling->tile_count.height), TILE_ALIGN_H);
      }
   }
}
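/* Worked example of the layout above (illustrative numbers, not real device
 * limits): a 1920x1080 render area with tile_align_w = 32, TILE_ALIGN_H = 16
 * and pass->gmem_pixels = 512 * 1024:
 *
 *   - start:           tile_count = 1x1, tile0 = 1920x1088
 *   - max-width loop:  1920 > 1024, so tile_count.width = 2, tile0.width = 960
 *   - gmem loop:       960 * 1088 > 524288, and 960 is not > MAX2(32, 1088),
 *                      so the height is split: tile_count.height = 2,
 *                      tile0.height = align(540, 16) = 544
 *   - result:          2x2 tiles of 960x544 (960 * 544 = 522240 <= 524288)
 */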
static void
tu_tiling_config_update_pipe_layout(struct tu_tiling_config *tiling,
                                    const struct tu_device *dev)
{
   const uint32_t max_pipe_count = 32; /* A6xx */

   /* start from 1 tile per pipe */
   tiling->pipe0 = (VkExtent2D) {
      .width = 1,
      .height = 1,
   };
   tiling->pipe_count = tiling->tile_count;

   while (tiling->pipe_count.width * tiling->pipe_count.height > max_pipe_count) {
      if (tiling->pipe0.width < tiling->pipe0.height) {
         tiling->pipe0.width += 1;
         tiling->pipe_count.width =
            DIV_ROUND_UP(tiling->tile_count.width, tiling->pipe0.width);
      } else {
         tiling->pipe0.height += 1;
         tiling->pipe_count.height =
            DIV_ROUND_UP(tiling->tile_count.height, tiling->pipe0.height);
      }
   }
}
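/* Worked example (illustrative): with tile_count = 10x8 (80 tiles, above the
 * 32-pipe limit), the loop above grows pipe0 until the pipes fit:
 *
 *   - pipe0 = 1x1 -> pipe_count = 10x8 = 80 (> 32), width == height: grow height
 *   - pipe0 = 1x2 -> pipe_count = 10x4 = 40 (> 32), width < height: grow width
 *   - pipe0 = 2x2 -> pipe_count =  5x4 = 20 (<= 32), done
 */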
static void
tu_tiling_config_update_pipes(struct tu_tiling_config *tiling,
                              const struct tu_device *dev)
{
   const uint32_t max_pipe_count = 32; /* A6xx */
   const uint32_t used_pipe_count =
      tiling->pipe_count.width * tiling->pipe_count.height;
   const VkExtent2D last_pipe = {
      .width = (tiling->tile_count.width - 1) % tiling->pipe0.width + 1,
      .height = (tiling->tile_count.height - 1) % tiling->pipe0.height + 1,
   };

   assert(used_pipe_count <= max_pipe_count);
   assert(max_pipe_count <= ARRAY_SIZE(tiling->pipe_config));

   for (uint32_t y = 0; y < tiling->pipe_count.height; y++) {
      for (uint32_t x = 0; x < tiling->pipe_count.width; x++) {
         const uint32_t pipe_x = tiling->pipe0.width * x;
         const uint32_t pipe_y = tiling->pipe0.height * y;
         const uint32_t pipe_w = (x == tiling->pipe_count.width - 1)
                                    ? last_pipe.width
                                    : tiling->pipe0.width;
         const uint32_t pipe_h = (y == tiling->pipe_count.height - 1)
                                    ? last_pipe.height
                                    : tiling->pipe0.height;
         const uint32_t n = tiling->pipe_count.width * y + x;

         tiling->pipe_config[n] = A6XX_VSC_PIPE_CONFIG_REG_X(pipe_x) |
                                  A6XX_VSC_PIPE_CONFIG_REG_Y(pipe_y) |
                                  A6XX_VSC_PIPE_CONFIG_REG_W(pipe_w) |
                                  A6XX_VSC_PIPE_CONFIG_REG_H(pipe_h);
         tiling->pipe_sizes[n] = CP_SET_BIN_DATA5_0_VSC_SIZE(pipe_w * pipe_h);
      }
   }

   memset(tiling->pipe_config + used_pipe_count, 0,
          sizeof(uint32_t) * (max_pipe_count - used_pipe_count));
}
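/* The "(n - 1) % m + 1" idiom above computes the size of the last (possibly
 * partial) pipe: e.g. tile_count.width = 10 with pipe0.width = 2 gives
 * (10 - 1) % 2 + 1 = 2 (last pipe is full), while tile_count.width = 9 gives
 * (9 - 1) % 2 + 1 = 1 (last pipe holds a single column of tiles).
 */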
static void
tu_tiling_config_get_tile(const struct tu_tiling_config *tiling,
                          const struct tu_device *dev,
                          uint32_t tx,
                          uint32_t ty,
                          struct tu_tile *tile)
{
   /* find the pipe and the slot for tile (tx, ty) */
   const uint32_t px = tx / tiling->pipe0.width;
   const uint32_t py = ty / tiling->pipe0.height;
   const uint32_t sx = tx - tiling->pipe0.width * px;
   const uint32_t sy = ty - tiling->pipe0.height * py;
   /* last pipe has different width */
   const uint32_t pipe_width =
      MIN2(tiling->pipe0.width,
           tiling->tile_count.width - px * tiling->pipe0.width);

   assert(tx < tiling->tile_count.width && ty < tiling->tile_count.height);
   assert(px < tiling->pipe_count.width && py < tiling->pipe_count.height);
   assert(sx < tiling->pipe0.width && sy < tiling->pipe0.height);

   /* convert to 1D indices */
   tile->pipe = tiling->pipe_count.width * py + px;
   tile->slot = pipe_width * sy + sx;

   /* get the blit area for the tile */
   tile->begin = (VkOffset2D) {
      .x = tiling->tile0.offset.x + tiling->tile0.extent.width * tx,
      .y = tiling->tile0.offset.y + tiling->tile0.extent.height * ty,
   };
   tile->end.x =
      (tx == tiling->tile_count.width - 1)
         ? tiling->render_area.offset.x + tiling->render_area.extent.width
         : tile->begin.x + tiling->tile0.extent.width;
   tile->end.y =
      (ty == tiling->tile_count.height - 1)
         ? tiling->render_area.offset.y + tiling->render_area.extent.height
         : tile->begin.y + tiling->tile0.extent.height;
}
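/* Example (illustrative): with tile_count = 10x8, pipe0 = 2x2 and
 * pipe_count = 5x4, the tile at (tx, ty) = (5, 3) lands in pipe
 * (px, py) = (2, 1) at slot (sx, sy) = (1, 1), i.e.
 * tile->pipe = 5 * 1 + 2 = 7 and, since that pipe is full-width,
 * tile->slot = 2 * 1 + 1 = 3.
 */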
void
tu6_emit_event_write(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     enum vgt_event_type event)
{
   bool need_seqno = false;
   switch (event) {
   case PC_CCU_FLUSH_DEPTH_TS:
   case PC_CCU_FLUSH_COLOR_TS:
   case PC_CCU_RESOLVE_TS:
      need_seqno = true;
      break;
   default:
      break;
   }

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
   tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
   if (need_seqno) {
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
      tu_cs_emit(cs, 0);
   }
}
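/* A timestamp event carries a 4-dword payload (event id, 64-bit scratch
 * address, seqno value), while a plain event carries only the event id,
 * hence the "need_seqno ? 4 : 1" packet size above.
 */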
static void
tu6_emit_flushes(struct tu_cmd_buffer *cmd_buffer,
                 struct tu_cs *cs,
                 enum tu_cmd_flush_bits flushes)
{
   /* Experiments show that invalidating CCU while it still has data in it
    * doesn't work, so make sure to always flush before invalidating in case
    * any data remains that hasn't yet been made available through a barrier.
    * However it does seem to work for UCHE.
    */
   if (flushes & (TU_CMD_FLAG_CCU_FLUSH_COLOR |
                  TU_CMD_FLAG_CCU_INVALIDATE_COLOR))
      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_FLUSH_COLOR_TS);
   if (flushes & (TU_CMD_FLAG_CCU_FLUSH_DEPTH |
                  TU_CMD_FLAG_CCU_INVALIDATE_DEPTH))
      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_FLUSH_DEPTH_TS);
   if (flushes & TU_CMD_FLAG_CCU_INVALIDATE_COLOR)
      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_INVALIDATE_COLOR);
   if (flushes & TU_CMD_FLAG_CCU_INVALIDATE_DEPTH)
      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_INVALIDATE_DEPTH);
   if (flushes & TU_CMD_FLAG_CACHE_FLUSH)
      tu6_emit_event_write(cmd_buffer, cs, CACHE_FLUSH_TS);
   if (flushes & TU_CMD_FLAG_CACHE_INVALIDATE)
      tu6_emit_event_write(cmd_buffer, cs, CACHE_INVALIDATE);
   if (flushes & TU_CMD_FLAG_WFI)
      tu_cs_emit_wfi(cs);
}
342 /* "Normal" cache flushes, that don't require any special handling */
345 tu_emit_cache_flush(struct tu_cmd_buffer
*cmd_buffer
,
348 tu6_emit_flushes(cmd_buffer
, cs
, cmd_buffer
->state
.cache
.flush_bits
);
349 cmd_buffer
->state
.cache
.flush_bits
= 0;
/* Renderpass cache flushes */

void
tu_emit_cache_flush_renderpass(struct tu_cmd_buffer *cmd_buffer,
                               struct tu_cs *cs)
{
   tu6_emit_flushes(cmd_buffer, cs, cmd_buffer->state.renderpass_cache.flush_bits);
   cmd_buffer->state.renderpass_cache.flush_bits = 0;
}
/* Cache flushes for things that use the color/depth read/write path (i.e.
 * blits and draws). This deals with changing CCU state as well as the usual
 * cache flushing.
 */

void
tu_emit_cache_flush_ccu(struct tu_cmd_buffer *cmd_buffer,
                        struct tu_cs *cs,
                        enum tu_cmd_ccu_state ccu_state)
{
   enum tu_cmd_flush_bits flushes = cmd_buffer->state.cache.flush_bits;

   assert(ccu_state != TU_CMD_CCU_UNKNOWN);

   /* Changing CCU state must involve invalidating the CCU. In sysmem mode,
    * the CCU may also contain data that we haven't flushed out yet, so we
    * also need to flush. Also, in order to program RB_CCU_CNTL, we need to
    * emit a WFI as it isn't pipelined.
    */
   if (ccu_state != cmd_buffer->state.ccu_state) {
      if (cmd_buffer->state.ccu_state != TU_CMD_CCU_GMEM) {
         flushes |=
            TU_CMD_FLAG_CCU_FLUSH_COLOR |
            TU_CMD_FLAG_CCU_FLUSH_DEPTH;
         cmd_buffer->state.cache.pending_flush_bits &= ~(
            TU_CMD_FLAG_CCU_FLUSH_COLOR |
            TU_CMD_FLAG_CCU_FLUSH_DEPTH);
      }
      flushes |=
         TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
         TU_CMD_FLAG_CCU_INVALIDATE_DEPTH |
         TU_CMD_FLAG_WFI;
      cmd_buffer->state.cache.pending_flush_bits &= ~(
         TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
         TU_CMD_FLAG_CCU_INVALIDATE_DEPTH);
   }

   tu6_emit_flushes(cmd_buffer, cs, flushes);
   cmd_buffer->state.cache.flush_bits = 0;

   if (ccu_state != cmd_buffer->state.ccu_state) {
      struct tu_physical_device *phys_dev = cmd_buffer->device->physical_device;
      tu_cs_emit_regs(cs,
                      A6XX_RB_CCU_CNTL(.offset =
                                          ccu_state == TU_CMD_CCU_GMEM ?
                                          phys_dev->ccu_offset_gmem :
                                          phys_dev->ccu_offset_bypass,
                                       .gmem = ccu_state == TU_CMD_CCU_GMEM));
      cmd_buffer->state.ccu_state = ccu_state;
   }
}
static void
tu6_emit_zs(struct tu_cmd_buffer *cmd,
            const struct tu_subpass *subpass,
            struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   const uint32_t a = subpass->depth_stencil_attachment.attachment;
   if (a == VK_ATTACHMENT_UNUSED) {
      tu_cs_emit_regs(cs,
                      A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE),
                      A6XX_RB_DEPTH_BUFFER_PITCH(0),
                      A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(0),
                      A6XX_RB_DEPTH_BUFFER_BASE(0),
                      A6XX_RB_DEPTH_BUFFER_BASE_GMEM(0));

      tu_cs_emit_regs(cs,
                      A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE));

      tu_cs_emit_regs(cs,
                      A6XX_GRAS_LRZ_BUFFER_BASE(0),
                      A6XX_GRAS_LRZ_BUFFER_PITCH(0),
                      A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));

      tu_cs_emit_regs(cs, A6XX_RB_STENCIL_INFO(0));

      return;
   }

   const struct tu_image_view *iview = fb->attachments[a].attachment;
   const struct tu_render_pass_attachment *attachment =
      &cmd->state.pass->attachments[a];
   enum a6xx_depth_format fmt = tu6_pipe2depth(attachment->format);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
   tu_cs_emit(cs, A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt).value);
   tu_cs_image_ref(cs, iview, 0);
   tu_cs_emit(cs, attachment->gmem_offset);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = fmt));

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
   tu_cs_image_flag_ref(cs, iview, 0);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_BUFFER_BASE(0),
                   A6XX_GRAS_LRZ_BUFFER_PITCH(0),
                   A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));

   if (attachment->format == VK_FORMAT_S8_UINT) {
      tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCIL_INFO, 6);
      tu_cs_emit(cs, A6XX_RB_STENCIL_INFO(.separate_stencil = true).value);
      tu_cs_image_ref(cs, iview, 0);
      tu_cs_emit(cs, attachment->gmem_offset);
   } else {
      tu_cs_emit_regs(cs,
                      A6XX_RB_STENCIL_INFO(0));
   }
}
static void
tu6_emit_mrt(struct tu_cmd_buffer *cmd,
             const struct tu_subpass *subpass,
             struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   for (uint32_t i = 0; i < subpass->color_count; ++i) {
      uint32_t a = subpass->color_attachments[i].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      const struct tu_image_view *iview = fb->attachments[a].attachment;

      tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_BUF_INFO(i), 6);
      tu_cs_emit(cs, iview->RB_MRT_BUF_INFO);
      tu_cs_image_ref(cs, iview, 0);
      tu_cs_emit(cs, cmd->state.pass->attachments[a].gmem_offset);

      tu_cs_emit_regs(cs,
                      A6XX_SP_FS_MRT_REG(i, .dword = iview->SP_FS_MRT_REG));

      tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_LO(i), 3);
      tu_cs_image_flag_ref(cs, iview, 0);
   }

   tu_cs_emit_regs(cs,
                   A6XX_RB_SRGB_CNTL(.dword = subpass->srgb_cntl));
   tu_cs_emit_regs(cs,
                   A6XX_SP_SRGB_CNTL(.dword = subpass->srgb_cntl));

   tu_cs_emit_regs(cs, A6XX_GRAS_MAX_LAYER_INDEX(fb->layers - 1));
}
static void
tu6_emit_msaa(struct tu_cs *cs, VkSampleCountFlagBits vk_samples)
{
   const enum a3xx_msaa_samples samples = tu_msaa_samples(vk_samples);
   bool msaa_disable = samples == MSAA_ONE;

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_RAS_MSAA_CNTL(samples),
                   A6XX_SP_TP_DEST_MSAA_CNTL(.samples = samples,
                                             .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_RAS_MSAA_CNTL(samples),
                   A6XX_GRAS_DEST_MSAA_CNTL(.samples = samples,
                                            .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_RB_RAS_MSAA_CNTL(samples),
                   A6XX_RB_DEST_MSAA_CNTL(.samples = samples,
                                          .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_RB_MSAA_CNTL(samples));
}
static void
tu6_emit_bin_size(struct tu_cs *cs,
                  uint32_t bin_w, uint32_t bin_h, uint32_t flags)
{
   tu_cs_emit_regs(cs,
                   A6XX_GRAS_BIN_CONTROL(.binw = bin_w,
                                         .binh = bin_h,
                                         .dword = flags));

   tu_cs_emit_regs(cs,
                   A6XX_RB_BIN_CONTROL(.binw = bin_w,
                                       .binh = bin_h,
                                       .dword = flags));

   /* no flag for RB_BIN_CONTROL2... */
   tu_cs_emit_regs(cs,
                   A6XX_RB_BIN_CONTROL2(.binw = bin_w,
                                        .binh = bin_h));
}
static void
tu6_emit_render_cntl(struct tu_cmd_buffer *cmd,
                     const struct tu_subpass *subpass,
                     struct tu_cs *cs,
                     bool binning)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   uint32_t cntl = 0;
   cntl |= A6XX_RB_RENDER_CNTL_UNK4;
   if (binning) {
      cntl |= A6XX_RB_RENDER_CNTL_BINNING;
   } else {
      uint32_t mrts_ubwc_enable = 0;
      for (uint32_t i = 0; i < subpass->color_count; ++i) {
         uint32_t a = subpass->color_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         const struct tu_image_view *iview = fb->attachments[a].attachment;
         if (iview->ubwc_enabled)
            mrts_ubwc_enable |= 1 << i;
      }

      cntl |= A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable);

      const uint32_t a = subpass->depth_stencil_attachment.attachment;
      if (a != VK_ATTACHMENT_UNUSED) {
         const struct tu_image_view *iview = fb->attachments[a].attachment;
         if (iview->ubwc_enabled)
            cntl |= A6XX_RB_RENDER_CNTL_FLAG_DEPTH;
      }

      /* In the !binning case, we need to set RB_RENDER_CNTL in the draw_cs
       * in order to set it correctly for the different subpasses. However,
       * that means the packets we're emitting also happen during binning. So
       * we need to guard the write on !BINNING at CP execution time.
       */
      tu_cs_reserve(cs, 3 + 4);
      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
      tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
                     CP_COND_REG_EXEC_0_GMEM | CP_COND_REG_EXEC_0_SYSMEM);
      tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(4));
   }

   tu_cs_emit_pkt7(cs, CP_REG_WRITE, 3);
   tu_cs_emit(cs, CP_REG_WRITE_0_TRACKER(TRACK_RENDER_CNTL));
   tu_cs_emit(cs, REG_A6XX_RB_RENDER_CNTL);
   tu_cs_emit(cs, cntl);
}
static void
tu6_emit_blit_scissor(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool align)
{
   const VkRect2D *render_area = &cmd->state.tiling_config.render_area;
   uint32_t x1 = render_area->offset.x;
   uint32_t y1 = render_area->offset.y;
   uint32_t x2 = x1 + render_area->extent.width - 1;
   uint32_t y2 = y1 + render_area->extent.height - 1;

   if (align) {
      x1 = x1 & ~(GMEM_ALIGN_W - 1);
      y1 = y1 & ~(GMEM_ALIGN_H - 1);
      x2 = ALIGN_POT(x2 + 1, GMEM_ALIGN_W) - 1;
      y2 = ALIGN_POT(y2 + 1, GMEM_ALIGN_H) - 1;
   }

   tu_cs_emit_regs(cs,
                   A6XX_RB_BLIT_SCISSOR_TL(.x = x1, .y = y1),
                   A6XX_RB_BLIT_SCISSOR_BR(.x = x2, .y = y2));
}
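/* Alignment example (assuming GMEM_ALIGN_W = 16 and GMEM_ALIGN_H = 4 purely
 * for illustration): a render area at offset (5, 7) with extent 100x50 gives
 * an unaligned scissor of (5, 7)..(104, 56); with align == true it widens to
 * (0, 4)..(111, 59), so gmem blits always cover whole aligned blocks.
 */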
void
tu6_emit_window_scissor(struct tu_cs *cs,
                        uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2)
{
   tu_cs_emit_regs(cs,
                   A6XX_GRAS_SC_WINDOW_SCISSOR_TL(.x = x1, .y = y1),
                   A6XX_GRAS_SC_WINDOW_SCISSOR_BR(.x = x2, .y = y2));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_RESOLVE_CNTL_1(.x = x1, .y = y1),
                   A6XX_GRAS_RESOLVE_CNTL_2(.x = x2, .y = y2));
}
void
tu6_emit_window_offset(struct tu_cs *cs, uint32_t x1, uint32_t y1)
{
   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET2(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_SP_WINDOW_OFFSET(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_WINDOW_OFFSET(.x = x1, .y = y1));
}
void
tu_cs_emit_draw_state(struct tu_cs *cs, uint32_t id, struct tu_draw_state state)
{
   uint32_t enable_mask;
   switch (id) {
   case TU_DRAW_STATE_PROGRAM:
   case TU_DRAW_STATE_VI:
   case TU_DRAW_STATE_FS_CONST:
   /* The blob seems to not enable this (DESC_SETS_LOAD) for binning, even
    * when resources would actually be used in the binning shader.
    * Presumably the overhead of prefetching the resources isn't
    * worth it.
    */
   case TU_DRAW_STATE_DESC_SETS_LOAD:
      enable_mask = CP_SET_DRAW_STATE__0_GMEM |
                    CP_SET_DRAW_STATE__0_SYSMEM;
      break;
   case TU_DRAW_STATE_PROGRAM_BINNING:
   case TU_DRAW_STATE_VI_BINNING:
      enable_mask = CP_SET_DRAW_STATE__0_BINNING;
      break;
   case TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM:
      enable_mask = CP_SET_DRAW_STATE__0_GMEM;
      break;
   case TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM:
      enable_mask = CP_SET_DRAW_STATE__0_SYSMEM;
      break;
   default:
      enable_mask = CP_SET_DRAW_STATE__0_GMEM |
                    CP_SET_DRAW_STATE__0_SYSMEM |
                    CP_SET_DRAW_STATE__0_BINNING;
      break;
   }

   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(state.size) |
                  enable_mask |
                  CP_SET_DRAW_STATE__0_GROUP_ID(id) |
                  COND(!state.size, CP_SET_DRAW_STATE__0_DISABLE));
   tu_cs_emit_qw(cs, state.iova);
}
/* note: get rid of this eventually */
void
tu_cs_emit_sds_ib(struct tu_cs *cs, uint32_t id, struct tu_cs_entry entry)
{
   tu_cs_emit_draw_state(cs, id, (struct tu_draw_state) {
      .iova = entry.size ? entry.bo->iova + entry.offset : 0,
      .size = entry.size / 4,
   });
}
static bool
use_hw_binning(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_NOBIN))
      return false;

   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN))
      return true;

   return (tiling->tile_count.width * tiling->tile_count.height) > 2;
}
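/* In other words: unless a debug flag forces the decision, HW binning is
 * only used when the render area spans at least 3 tiles; for 1x1 or 2x1/1x2
 * layouts every tile must be visited anyway, so a binning pass would be
 * pure overhead.
 */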
static bool
use_sysmem_rendering(struct tu_cmd_buffer *cmd)
{
   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_SYSMEM))
      return true;

   /* can't fit attachments into gmem */
   if (!cmd->state.pass->gmem_pixels)
      return true;

   if (cmd->state.framebuffer->layers > 1)
      return true;

   return cmd->state.tiling_config.force_sysmem;
}
static void
tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     const struct tu_tile *tile)
{
   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_YIELD));

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));

   const uint32_t x1 = tile->begin.x;
   const uint32_t y1 = tile->begin.y;
   const uint32_t x2 = tile->end.x - 1;
   const uint32_t y2 = tile->end.y - 1;
   tu6_emit_window_scissor(cs, x1, y1, x2, y2);
   tu6_emit_window_offset(cs, x1, y1);

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_OVERRIDE(.so_disable = false));

   if (use_hw_binning(cmd)) {
      tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
      tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
                     A6XX_CP_REG_TEST_0_BIT(0) |
                     A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

      tu_cs_reserve(cs, 3 + 11);
      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
      tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
      tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(11));

      /* if (no overflow) */ {
         tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5, 7);
         tu_cs_emit(cs, cmd->state.tiling_config.pipe_sizes[tile->pipe] |
                        CP_SET_BIN_DATA5_0_VSC_N(tile->slot));
         tu_cs_emit_qw(cs, cmd->vsc_draw_strm.iova + tile->pipe * cmd->vsc_draw_strm_pitch);
         tu_cs_emit_qw(cs, cmd->vsc_draw_strm.iova + (tile->pipe * 4) + (32 * cmd->vsc_draw_strm_pitch));
         tu_cs_emit_qw(cs, cmd->vsc_prim_strm.iova + (tile->pipe * cmd->vsc_prim_strm_pitch));

         tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
         tu_cs_emit(cs, 0x0);

         /* use a NOP packet to skip over the 'else' side: */
         tu_cs_emit_pkt7(cs, CP_NOP, 2);
         /* else */ {
            tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
            tu_cs_emit(cs, 0x1);
         }
      }

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);
   } else {
      tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
      tu_cs_emit(cs, 0x1);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);
   }
}
static void
tu6_emit_sysmem_resolve(struct tu_cmd_buffer *cmd,
                        struct tu_cs *cs,
                        uint32_t a,
                        uint32_t gmem_a)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   struct tu_image_view *dst = fb->attachments[a].attachment;
   struct tu_image_view *src = fb->attachments[gmem_a].attachment;

   tu_resolve_sysmem(cmd, cs, src, dst, fb->layers, &cmd->state.tiling_config.render_area);
}
static void
tu6_emit_sysmem_resolves(struct tu_cmd_buffer *cmd,
                         struct tu_cs *cs,
                         const struct tu_subpass *subpass)
{
   if (subpass->resolve_attachments) {
      /* From the documentation for vkCmdNextSubpass, section 7.4 "Render Pass
       * Commands":
       *
       *    End-of-subpass multisample resolves are treated as color
       *    attachment writes for the purposes of synchronization. That is,
       *    they are considered to execute in the
       *    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT pipeline stage and
       *    their writes are synchronized with
       *    VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT. Synchronization between
       *    rendering within a subpass and any resolve operations at the end
       *    of the subpass occurs automatically, without need for explicit
       *    dependencies or pipeline barriers. However, if the resolve
       *    attachment is also used in a different subpass, an explicit
       *    dependency is needed.
       *
       * We use the CP_BLIT path for sysmem resolves, which is really a
       * transfer command, so we have to manually flush similar to the gmem
       * resolve case. However, a flush afterwards isn't needed because of the
       * last sentence and the fact that we're in sysmem mode.
       */
      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);

      /* Wait for the flushes to land before using the 2D engine */
      tu_cs_emit_wfi(cs);

      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         tu6_emit_sysmem_resolve(cmd, cs, a,
                                 subpass->color_attachments[i].attachment);
      }
   }
}
static void
tu6_emit_tile_store(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_render_pass *pass = cmd->state.pass;
   const struct tu_subpass *subpass = &pass->subpasses[pass->subpass_count - 1];

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE));

   tu6_emit_blit_scissor(cmd, cs, true);

   for (uint32_t a = 0; a < pass->attachment_count; ++a) {
      if (pass->attachments[a].gmem_offset >= 0)
         tu_store_gmem_attachment(cmd, cs, a, a);
   }

   if (subpass->resolve_attachments) {
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a != VK_ATTACHMENT_UNUSED)
            tu_store_gmem_attachment(cmd, cs, a,
                                     subpass->color_attachments[i].attachment);
      }
   }
}
static void
tu6_emit_restart_index(struct tu_cs *cs, uint32_t restart_index)
{
   tu_cs_emit_regs(cs,
                   A6XX_PC_RESTART_INDEX(restart_index));
}
static void
tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_physical_device *phys_dev = cmd->device->physical_device;

   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);

   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 0xfffff);

   tu_cs_emit_regs(cs,
                   A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_bypass));
   cmd->state.ccu_state = TU_CMD_CCU_SYSMEM;
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE0F, 0x3f);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B605, 0x44);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B600, 0x100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE00, 0x80);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE01, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9600, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8600, 0x880);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE04, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE03, 0x00000410);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_IBO_COUNT, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B182, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BB11, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A982, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A9A8, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AB00, 0x5);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_GS_SIV_CNTL, 0x0000ffff);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x1f);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_SRGB_CNTL, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8110, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL0, 0x401);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL1, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8818, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8819, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881A, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881C, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881D, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881E, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_88F0, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9101, 0xffff00);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9107, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9236,
                        A6XX_VPC_UNKNOWN_9236_POINT_COORD_INVERT(0));
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_SO_OVERRIDE,
                        A6XX_VPC_SO_OVERRIDE_SO_DISABLE);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9801, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9980, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9990, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9B07, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A81B, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B183, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8099, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_809B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A0, 2);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80AF, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9211, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9602, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9981, 0x3);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9E72, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9108, 0x3);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B309, 0x000000a2);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8878, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8879, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_MODE_CNTL, 0x00000000);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A008, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x0000001f);

   /* we don't use this yet.. probably best to disable.. */
   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   /* Set not to use streamout by default. */
   tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 4);
   tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
   tu_cs_emit(cs, 0);
   tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
   tu_cs_emit(cs, 0);

   tu_cs_emit_regs(cs,
                   A6XX_SP_HS_CTRL_REG0(0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_GS_CTRL_REG0(0));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_CNTL(0));

   tu_cs_emit_regs(cs,
                   A6XX_RB_LRZ_CNTL(0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(.bo = &cmd->device->border_color));
   tu_cs_emit_regs(cs,
                   A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(.bo = &cmd->device->border_color));

   tu_cs_sanity_check(cs);
}
static void
update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tu_cs_emit_regs(cs,
                   A6XX_VSC_BIN_SIZE(.width = tiling->tile0.extent.width,
                                     .height = tiling->tile0.extent.height),
                   A6XX_VSC_DRAW_STRM_SIZE_ADDRESS(.bo = &cmd->vsc_draw_strm,
                                                   .bo_offset = 32 * cmd->vsc_draw_strm_pitch));

   tu_cs_emit_regs(cs,
                   A6XX_VSC_BIN_COUNT(.nx = tiling->tile_count.width,
                                      .ny = tiling->tile_count.height));

   tu_cs_emit_pkt4(cs, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
   for (unsigned i = 0; i < 32; i++)
      tu_cs_emit(cs, tiling->pipe_config[i]);

   tu_cs_emit_regs(cs,
                   A6XX_VSC_PRIM_STRM_ADDRESS(.bo = &cmd->vsc_prim_strm),
                   A6XX_VSC_PRIM_STRM_PITCH(cmd->vsc_prim_strm_pitch),
                   A6XX_VSC_PRIM_STRM_ARRAY_PITCH(cmd->vsc_prim_strm.size));

   tu_cs_emit_regs(cs,
                   A6XX_VSC_DRAW_STRM_ADDRESS(.bo = &cmd->vsc_draw_strm),
                   A6XX_VSC_DRAW_STRM_PITCH(cmd->vsc_draw_strm_pitch),
                   A6XX_VSC_DRAW_STRM_ARRAY_PITCH(cmd->vsc_draw_strm.size));
}
static void
emit_vsc_overflow_test(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
   const uint32_t used_pipe_count =
      tiling->pipe_count.width * tiling->pipe_count.height;

   /* Clear vsc_scratch: */
   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
   tu_cs_emit(cs, 0x0);

   /* Check for overflow, write vsc_scratch if detected: */
   for (int i = 0; i < used_pipe_count; i++) {
      tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
      tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
                     CP_COND_WRITE5_0_WRITE_MEMORY);
      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_DRAW_STRM_SIZE_REG(i)));
      tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_draw_strm_pitch));
      tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(1 + cmd->vsc_draw_strm_pitch));

      tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
      tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
                     CP_COND_WRITE5_0_WRITE_MEMORY);
      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_PRIM_STRM_SIZE_REG(i)));
      tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_prim_strm_pitch));
      tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(3 + cmd->vsc_prim_strm_pitch));
   }

   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);

   tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

   tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
   tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(OVERFLOW_FLAG_REG) |
                  CP_MEM_TO_REG_0_CNT(1 - 1));
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));

   /*
    * This is a bit awkward, we really want a way to invert the
    * CP_REG_TEST/CP_COND_REG_EXEC logic, so that we can conditionally
    * execute cmds to use hwbinning when a bit is *not* set. This
    * dance is to invert OVERFLOW_FLAG_REG
    *
    * A CP_NOP packet is used to skip executing the 'else' clause
    * if (b0 set)..
    */

   /* b0 will be set if VSC_DRAW_STRM or VSC_PRIM_STRM overflow: */
   tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
   tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
                  A6XX_CP_REG_TEST_0_BIT(0) |
                  A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

   tu_cs_reserve(cs, 3 + 7);
   tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
   tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
   tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(7));

   /* if (b0 set) */ {
      /*
       * On overflow, mirror the value to control->vsc_overflow
       * which CPU is checking to detect overflow (see
       * check_vsc_overflow())
       */
      tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
      tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(OVERFLOW_FLAG_REG) |
                     CP_REG_TO_MEM_0_CNT(0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_overflow));

      tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_pkt7(cs, CP_NOP, 2); /* skip 'else' when 'if' is taken */
   } /* else */ {
      tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
      tu_cs_emit(cs, 0x1);
   }
}
static void
tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_physical_device *phys_dev = cmd->device->physical_device;
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   uint32_t x1 = tiling->tile0.offset.x;
   uint32_t y1 = tiling->tile0.offset.y;
   uint32_t x2 = tiling->render_area.offset.x + tiling->render_area.extent.width - 1;
   uint32_t y2 = tiling->render_area.offset.y + tiling->render_area.extent.height - 1;

   tu6_emit_window_scissor(cs, x1, y1, x2, y2);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_wfi(cs);

   tu_cs_emit_regs(cs,
                   A6XX_VFD_MODE_CNTL(.binning_pass = true));

   update_vsc_pipe(cmd, cs);

   tu_cs_emit_regs(cs,
                   A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));

   tu_cs_emit_regs(cs,
                   A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
   tu_cs_emit(cs, UNK_2C);

   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET(.x = 0, .y = 0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_WINDOW_OFFSET(.x = 0, .y = 0));

   /* emit IB to binning drawcmds: */
   tu_cs_emit_call(cs, &cmd->draw_cs);

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
   tu_cs_emit(cs, UNK_2D);

   /* This flush is probably required because the VSC, which produces the
    * visibility stream, is a client of UCHE, whereas the CP needs to read the
    * visibility stream (without caching) to do draw skipping. The
    * WFI+WAIT_FOR_ME combination guarantees that the binning commands
    * submitted are finished before reading the VSC regs (in
    * emit_vsc_overflow_test) or the VSC_DATA buffer directly (implicitly as
    * part of draws).
    */
   tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS);

   tu_cs_emit_wfi(cs);

   tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

   emit_vsc_overflow_test(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x0);
}
static void
tu_emit_input_attachments(struct tu_cmd_buffer *cmd,
                          const struct tu_subpass *subpass,
                          struct tu_cs_entry *ib,
                          bool gmem)
{
   /* note: we can probably emit input attachments just once for the whole
    * renderpass, this would avoid emitting both sysmem/gmem versions
    *
    * emit two texture descriptors for each input, as a workaround for
    * d24s8, which can be sampled as both float (depth) and integer (stencil)
    * tu_shader lowers uint input attachment loads to use the 2nd descriptor
    *
    * TODO: a smarter workaround
    */

   if (!subpass->input_count)
      return;

   struct ts_cs_memory texture;
   VkResult result = tu_cs_alloc(&cmd->sub_cs, subpass->input_count * 2,
                                 A6XX_TEX_CONST_DWORDS, &texture);
   assert(result == VK_SUCCESS);

   for (unsigned i = 0; i < subpass->input_count * 2; i++) {
      uint32_t a = subpass->input_attachments[i / 2].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      struct tu_image_view *iview =
         cmd->state.framebuffer->attachments[a].attachment;
      const struct tu_render_pass_attachment *att =
         &cmd->state.pass->attachments[a];
      uint32_t *dst = &texture.map[A6XX_TEX_CONST_DWORDS * i];

      memcpy(dst, iview->descriptor, A6XX_TEX_CONST_DWORDS * 4);

      if (i % 2 == 1 && att->format == VK_FORMAT_D24_UNORM_S8_UINT) {
         /* note this works because spec says fb and input attachments
          * must use identity swizzle
          */
         dst[0] &= ~(A6XX_TEX_CONST_0_FMT__MASK |
                     A6XX_TEX_CONST_0_SWIZ_X__MASK | A6XX_TEX_CONST_0_SWIZ_Y__MASK |
                     A6XX_TEX_CONST_0_SWIZ_Z__MASK | A6XX_TEX_CONST_0_SWIZ_W__MASK);
         dst[0] |= A6XX_TEX_CONST_0_FMT(FMT6_S8Z24_UINT) |
                   A6XX_TEX_CONST_0_SWIZ_X(A6XX_TEX_Y) |
                   A6XX_TEX_CONST_0_SWIZ_Y(A6XX_TEX_ZERO) |
                   A6XX_TEX_CONST_0_SWIZ_Z(A6XX_TEX_ZERO) |
                   A6XX_TEX_CONST_0_SWIZ_W(A6XX_TEX_ONE);
      }

      if (!gmem)
         continue;

      /* patched for gmem */
      dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
      dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
      dst[2] =
         A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
         A6XX_TEX_CONST_2_PITCH(cmd->state.tiling_config.tile0.extent.width * att->cpp);
      dst[3] = 0;
      dst[4] = cmd->device->physical_device->gmem_base + att->gmem_offset;
      dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
      for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
         dst[i] = 0;
   }

   struct tu_cs cs;
   tu_cs_begin_sub_stream(&cmd->sub_cs, 9, &cs);

   tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_FRAG, 3);
   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
                   CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                   CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                   CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_TEX) |
                   CP_LOAD_STATE6_0_NUM_UNIT(subpass->input_count * 2));
   tu_cs_emit_qw(&cs, texture.iova);

   tu_cs_emit_pkt4(&cs, REG_A6XX_SP_FS_TEX_CONST_LO, 2);
   tu_cs_emit_qw(&cs, texture.iova);

   tu_cs_emit_regs(&cs, A6XX_SP_FS_TEX_COUNT(subpass->input_count * 2));

   *ib = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
}
void
tu_set_input_attachments(struct tu_cmd_buffer *cmd, const struct tu_subpass *subpass)
{
   struct tu_cs *cs = &cmd->draw_cs;

   tu_emit_input_attachments(cmd, subpass, &cmd->state.ia_gmem_ib, true);
   tu_emit_input_attachments(cmd, subpass, &cmd->state.ia_sysmem_ib, false);

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 6);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM, cmd->state.ia_gmem_ib);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM, cmd->state.ia_sysmem_ib);
}
static void
tu_emit_renderpass_begin(struct tu_cmd_buffer *cmd,
                         const VkRenderPassBeginInfo *info)
{
   struct tu_cs *cs = &cmd->draw_cs;

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);

   tu6_emit_blit_scissor(cmd, cs, true);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu_load_gmem_attachment(cmd, cs, i, false);

   tu6_emit_blit_scissor(cmd, cs, false);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu_clear_gmem_attachment(cmd, cs, i, info);

   tu_cond_exec_end(cs);

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu_clear_sysmem_attachment(cmd, cs, i, info);

   tu_cond_exec_end(cs);
}
static void
tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
                        const struct VkRect2D *renderArea)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   assert(fb->width > 0 && fb->height > 0);
   tu6_emit_window_scissor(cs, 0, 0, fb->width - 1, fb->height - 1);
   tu6_emit_window_offset(cs, 0, 0);

   tu6_emit_bin_size(cs, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */

   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_SYSMEM);

   /* enable stream-out, with sysmem there is only one pass: */
   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_OVERRIDE(.so_disable = false));

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_sanity_check(cs);
}
static void
tu6_sysmem_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   /* Do any resolves of the last subpass. These are handled in the
    * tile_store_ib in the gmem path.
    */
   tu6_emit_sysmem_resolves(cmd, cs, cmd->state.subpass);

   tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);

   tu_cs_sanity_check(cs);
}
static void
tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_physical_device *phys_dev = cmd->device->physical_device;

   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_GMEM);

   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
   if (use_hw_binning(cmd)) {
      /* enable stream-out during binning pass: */
      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable = false));

      tu6_emit_bin_size(cs,
                        tiling->tile0.extent.width,
                        tiling->tile0.extent.height,
                        A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);

      tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, true);

      tu6_emit_binning_pass(cmd, cs);

      /* and disable stream-out for draw pass: */
      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable = true));

      tu6_emit_bin_size(cs,
                        tiling->tile0.extent.width,
                        tiling->tile0.extent.height,
                        A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);

      tu_cs_emit_regs(cs,
                      A6XX_VFD_MODE_CNTL(0));

      tu_cs_emit_regs(cs, A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));

      tu_cs_emit_regs(cs, A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));

      tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
      tu_cs_emit(cs, 0x1);
   } else {
      /* no binning pass, so enable stream-out for draw pass: */
      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable = false));

      tu6_emit_bin_size(cs,
                        tiling->tile0.extent.width,
                        tiling->tile0.extent.height,
                        0x6000000);
   }

   tu_cs_sanity_check(cs);
}
static void
tu6_render_tile(struct tu_cmd_buffer *cmd,
                struct tu_cs *cs,
                const struct tu_tile *tile)
{
   tu6_emit_tile_select(cmd, cs, tile);

   tu_cs_emit_call(cs, &cmd->draw_cs);

   if (use_hw_binning(cmd)) {
      tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
      tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
                     A6XX_CP_REG_TEST_0_BIT(0) |
                     A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

      tu_cs_reserve(cs, 3 + 2);
      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
      tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
      tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(2));

      /* if (no overflow) */ {
         tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
         tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
      }
   }

   tu_cs_emit_ib(cs, &cmd->state.tile_store_ib);

   tu_cs_sanity_check(cs);
}
static void
tu6_tile_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_CNTL(0));

   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);

   tu6_emit_event_write(cmd, cs, PC_CCU_RESOLVE_TS);

   tu_cs_sanity_check(cs);
}
static void
tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tu6_tile_render_begin(cmd, &cmd->cs);

   for (uint32_t y = 0; y < tiling->tile_count.height; y++) {
      for (uint32_t x = 0; x < tiling->tile_count.width; x++) {
         struct tu_tile tile;
         tu_tiling_config_get_tile(tiling, cmd->device, x, y, &tile);
         tu6_render_tile(cmd, &cmd->cs, &tile);
      }
   }

   tu6_tile_render_end(cmd, &cmd->cs);
}
static void
tu_cmd_render_sysmem(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tu6_sysmem_render_begin(cmd, &cmd->cs, &tiling->render_area);

   tu_cs_emit_call(&cmd->cs, &cmd->draw_cs);

   tu6_sysmem_render_end(cmd, &cmd->cs);
}
static void
tu_cmd_prepare_tile_store_ib(struct tu_cmd_buffer *cmd)
{
   const uint32_t tile_store_space = 11 + (35 * 2) * cmd->state.pass->attachment_count;
   struct tu_cs sub_cs;

   VkResult result =
      tu_cs_begin_sub_stream(&cmd->sub_cs, tile_store_space, &sub_cs);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   /* emit to tile-store sub_cs */
   tu6_emit_tile_store(cmd, &sub_cs);

   cmd->state.tile_store_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
}
static void
tu_cmd_update_tiling_config(struct tu_cmd_buffer *cmd,
                            const VkRect2D *render_area)
{
   const struct tu_device *dev = cmd->device;
   struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tiling->render_area = *render_area;
   tiling->force_sysmem = false;

   tu_tiling_config_update_tile_layout(tiling, dev, cmd->state.pass);
   tu_tiling_config_update_pipe_layout(tiling, dev);
   tu_tiling_config_update_pipes(tiling, dev);
}
static VkResult
tu_create_cmd_buffer(struct tu_device *device,
                     struct tu_cmd_pool *pool,
                     VkCommandBufferLevel level,
                     VkCommandBuffer *pCommandBuffer)
{
   struct tu_cmd_buffer *cmd_buffer;
   cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
      cmd_buffer->queue_family_index = pool->queue_family_index;
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
      cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
   }

   tu_bo_list_init(&cmd_buffer->bo_list);
   tu_cs_init(&cmd_buffer->cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->draw_cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->draw_epilogue_cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->sub_cs, device, TU_CS_MODE_SUB_STREAM, 2048);

   *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);

   list_inithead(&cmd_buffer->upload.list);

   VkResult result = tu_bo_init_new(device, &cmd_buffer->scratch_bo, 0x1000);
   if (result != VK_SUCCESS)
      goto fail_scratch_bo;

   /* TODO: resize on overflow */
   cmd_buffer->vsc_draw_strm_pitch = device->vsc_draw_strm_pitch;
   cmd_buffer->vsc_prim_strm_pitch = device->vsc_prim_strm_pitch;
   cmd_buffer->vsc_draw_strm = device->vsc_draw_strm;
   cmd_buffer->vsc_prim_strm = device->vsc_prim_strm;

   return VK_SUCCESS;

fail_scratch_bo:
   list_del(&cmd_buffer->pool_link);
   return result;
}
static void
tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
{
   tu_bo_finish(cmd_buffer->device, &cmd_buffer->scratch_bo);

   list_del(&cmd_buffer->pool_link);

   tu_cs_finish(&cmd_buffer->cs);
   tu_cs_finish(&cmd_buffer->draw_cs);
   tu_cs_finish(&cmd_buffer->draw_epilogue_cs);
   tu_cs_finish(&cmd_buffer->sub_cs);

   tu_bo_list_destroy(&cmd_buffer->bo_list);
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
static VkResult
tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
{
   cmd_buffer->record_result = VK_SUCCESS;

   tu_bo_list_reset(&cmd_buffer->bo_list);
   tu_cs_reset(&cmd_buffer->cs);
   tu_cs_reset(&cmd_buffer->draw_cs);
   tu_cs_reset(&cmd_buffer->draw_epilogue_cs);
   tu_cs_reset(&cmd_buffer->sub_cs);

   for (unsigned i = 0; i < MAX_BIND_POINTS; i++)
      memset(&cmd_buffer->descriptors[i].sets, 0, sizeof(cmd_buffer->descriptors[i].sets));

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;

   return cmd_buffer->record_result;
}
VkResult
tu_AllocateCommandBuffers(VkDevice _device,
                          const VkCommandBufferAllocateInfo *pAllocateInfo,
                          VkCommandBuffer *pCommandBuffers)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {

      if (!list_is_empty(&pool->free_cmd_buffers)) {
         struct tu_cmd_buffer *cmd_buffer = list_first_entry(
            &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);

         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

         result = tu_reset_cmd_buffer(cmd_buffer);
         cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
         cmd_buffer->level = pAllocateInfo->level;

         pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
      } else {
         result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                       &pCommandBuffers[i]);
      }
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
                            pCommandBuffers);

      /* From the Vulkan 1.0.66 spec:
       *
       * "vkAllocateCommandBuffers can be used to create multiple
       *  command buffers. If the creation of any of those command
       *  buffers fails, the implementation must destroy all
       *  successfully created command buffer objects from this
       *  command, set all entries of the pCommandBuffers array to
       *  NULL and return the error."
       */
      memset(pCommandBuffers, 0,
             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
   }

   return result;
}
void
tu_FreeCommandBuffers(VkDevice device,
                      VkCommandPool commandPool,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (cmd_buffer) {
         if (cmd_buffer->pool) {
            list_del(&cmd_buffer->pool_link);
            list_addtail(&cmd_buffer->pool_link,
                         &cmd_buffer->pool->free_cmd_buffers);
         } else {
            tu_cmd_buffer_destroy(cmd_buffer);
         }
      }
   }
}
VkResult
tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
                      VkCommandBufferResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   return tu_reset_cmd_buffer(cmd_buffer);
}
/* Initialize the cache, assuming all necessary flushes have happened but *not*
 * invalidations.
 */
static void
tu_cache_init(struct tu_cache_state *cache)
{
   cache->flush_bits = 0;
   cache->pending_flush_bits = TU_CMD_FLAG_ALL_INVALIDATE;
}
VkResult
tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
                      const VkCommandBufferBeginInfo *pBeginInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   VkResult result = VK_SUCCESS;

   if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
      /* If the command buffer has already been reset with
       * vkResetCommandBuffer, no need to do it again.
       */
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
   tu_cache_init(&cmd_buffer->state.cache);
   tu_cache_init(&cmd_buffer->state.renderpass_cache);
   cmd_buffer->usage_flags = pBeginInfo->flags;

   tu_cs_begin(&cmd_buffer->cs);
   tu_cs_begin(&cmd_buffer->draw_cs);
   tu_cs_begin(&cmd_buffer->draw_epilogue_cs);

   /* setup initial configuration into command buffer */
   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      switch (cmd_buffer->queue_family_index) {
      case TU_QUEUE_GENERAL:
         tu6_init_hw(cmd_buffer, &cmd_buffer->cs);
         break;
      default:
         break;
      }
   } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
      if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
         assert(pBeginInfo->pInheritanceInfo);
         cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
         cmd_buffer->state.subpass =
            &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
      }

      /* When executing in the middle of another command buffer, the CCU
       * state is unknown.
       */
      cmd_buffer->state.ccu_state = TU_CMD_CCU_UNKNOWN;
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;

   return VK_SUCCESS;
}
/* Sets vertex buffers to HW binding points.  We emit VBs in SDS (so that bin
 * rendering can skip over unused state), so we need to collect all the
 * bindings together into a single state emit at draw time.
 */
void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                        uint32_t firstBinding,
                        uint32_t bindingCount,
                        const VkBuffer *pBuffers,
                        const VkDeviceSize *pOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   assert(firstBinding + bindingCount <= MAX_VBS);

   for (uint32_t i = 0; i < bindingCount; i++) {
      struct tu_buffer *buf = tu_buffer_from_handle(pBuffers[i]);

      cmd->state.vb.buffers[firstBinding + i] = buf;
      cmd->state.vb.offsets[firstBinding + i] = pOffsets[i];

      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
   }

   cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
}
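
/* Illustrative application-side usage (a sketch; the buffer handles are
 * hypothetical): the driver only records the bindings here and emits the
 * actual VFD_FETCH state in tu6_emit_vertex_buffers() at draw time:
 *
 *    VkBuffer bufs[2] = { pos_buf, color_buf };
 *    VkDeviceSize offs[2] = { 0, 256 };
 *    vkCmdBindVertexBuffers(cmd_buf, 0, 2, bufs, offs);
 */
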
void
tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
                      VkBuffer buffer,
                      VkDeviceSize offset,
                      VkIndexType indexType)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buf, buffer);

   /* initialize/update the restart index */
   if (!cmd->state.index_buffer || cmd->state.index_type != indexType) {
      struct tu_cs *draw_cs = &cmd->draw_cs;

      tu6_emit_restart_index(
         draw_cs, indexType == VK_INDEX_TYPE_UINT32 ? 0xffffffff : 0xffff);

      tu_cs_sanity_check(draw_cs);
   }

   /* track the BO */
   if (cmd->state.index_buffer != buf)
      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);

   cmd->state.index_buffer = buf;
   cmd->state.index_offset = offset;
   cmd->state.index_type = indexType;
}
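
/* Example of the restart-index rule above (sketch): a 16-bit index buffer
 * uses 0xffff as the primitive restart value and a 32-bit one uses
 * 0xffffffff, so rebinding the same buffer with a different VkIndexType
 * re-emits the restart index:
 *
 *    vkCmdBindIndexBuffer(cmd_buf, ibo, 0, VK_INDEX_TYPE_UINT16); // 0xffff
 *    vkCmdBindIndexBuffer(cmd_buf, ibo, 0, VK_INDEX_TYPE_UINT32); // 0xffffffff
 */
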
void
tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                         VkPipelineBindPoint pipelineBindPoint,
                         VkPipelineLayout _layout,
                         uint32_t firstSet,
                         uint32_t descriptorSetCount,
                         const VkDescriptorSet *pDescriptorSets,
                         uint32_t dynamicOffsetCount,
                         const uint32_t *pDynamicOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
   unsigned dyn_idx = 0;

   struct tu_descriptor_state *descriptors_state =
      tu_get_descriptors_state(cmd, pipelineBindPoint);

   for (unsigned i = 0; i < descriptorSetCount; ++i) {
      unsigned idx = i + firstSet;
      TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);

      descriptors_state->sets[idx] = set;

      for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
         /* update the contents of the dynamic descriptor set */
         unsigned src_idx = j;
         unsigned dst_idx = j + layout->set[idx].dynamic_offset_start;
         assert(dyn_idx < dynamicOffsetCount);

         uint32_t *dst =
            &descriptors_state->dynamic_descriptors[dst_idx * A6XX_TEX_CONST_DWORDS];
         uint32_t *src =
            &set->dynamic_descriptors[src_idx * A6XX_TEX_CONST_DWORDS];
         uint32_t offset = pDynamicOffsets[dyn_idx];

         /* Patch the storage/uniform descriptors right away. */
         if (layout->set[idx].layout->dynamic_ubo & (1 << j)) {
            /* Note: we can assume here that the addition won't roll over and
             * change the SIZE field.
             */
            uint64_t va = src[0] | ((uint64_t)src[1] << 32);
            va += offset;
            dst[0] = va;
            dst[1] = va >> 32;
         } else {
            memcpy(dst, src, A6XX_TEX_CONST_DWORDS * 4);
            /* Note: A6XX_IBO_5_DEPTH is always 0 */
            uint64_t va = dst[4] | ((uint64_t)dst[5] << 32);
            va += offset;
            dst[4] = va;
            dst[5] = va >> 32;
         }
      }

      for (unsigned j = 0; j < set->layout->buffer_count; ++j) {
         if (set->buffers[j]) {
            tu_bo_list_add(&cmd->bo_list, set->buffers[j],
                           MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
         }
      }

      if (set->size > 0) {
         tu_bo_list_add(&cmd->bo_list, &set->pool->bo,
                        MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
      }
   }
   assert(dyn_idx == dynamicOffsetCount);

   uint32_t sp_bindless_base_reg, hlsq_bindless_base_reg, hlsq_update_value;
   uint64_t addr[MAX_SETS + 1] = {};
   struct tu_cs cs;

   for (uint32_t i = 0; i < MAX_SETS; i++) {
      struct tu_descriptor_set *set = descriptors_state->sets[i];
      if (set)
         addr[i] = set->va | 3;
   }

   if (layout->dynamic_offset_count) {
      /* allocate and fill out dynamic descriptor set */
      struct ts_cs_memory dynamic_desc_set;
      VkResult result = tu_cs_alloc(&cmd->sub_cs, layout->dynamic_offset_count,
                                    A6XX_TEX_CONST_DWORDS, &dynamic_desc_set);
      assert(result == VK_SUCCESS);

      memcpy(dynamic_desc_set.map, descriptors_state->dynamic_descriptors,
             layout->dynamic_offset_count * A6XX_TEX_CONST_DWORDS * 4);
      addr[MAX_SETS] = dynamic_desc_set.iova | 3;
   }

   if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
      sp_bindless_base_reg = REG_A6XX_SP_BINDLESS_BASE(0);
      hlsq_bindless_base_reg = REG_A6XX_HLSQ_BINDLESS_BASE(0);
      hlsq_update_value = 0x7c000;

      cmd->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS | TU_CMD_DIRTY_SHADER_CONSTS;
   } else {
      assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE);

      sp_bindless_base_reg = REG_A6XX_SP_CS_BINDLESS_BASE(0);
      hlsq_bindless_base_reg = REG_A6XX_HLSQ_CS_BINDLESS_BASE(0);
      hlsq_update_value = 0x3e00;

      cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS;
   }

   tu_cs_begin_sub_stream(&cmd->sub_cs, 24, &cs);

   tu_cs_emit_pkt4(&cs, sp_bindless_base_reg, 10);
   tu_cs_emit_array(&cs, (const uint32_t*) addr, 10);
   tu_cs_emit_pkt4(&cs, hlsq_bindless_base_reg, 10);
   tu_cs_emit_array(&cs, (const uint32_t*) addr, 10);
   tu_cs_emit_regs(&cs, A6XX_HLSQ_UPDATE_CNTL(.dword = hlsq_update_value));

   struct tu_cs_entry ib = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
   if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
      tu_cs_emit_pkt7(&cmd->draw_cs, CP_SET_DRAW_STATE, 3);
      tu_cs_emit_sds_ib(&cmd->draw_cs, TU_DRAW_STATE_DESC_SETS, ib);
      cmd->state.desc_sets_ib = ib;
   } else {
      /* note: for compute we could emit directly, instead of a CP_INDIRECT;
       * however, the blob uses draw states for compute
       */
      tu_cs_emit_ib(&cmd->cs, &ib);
   }
}
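
/* Worked example of the dynamic-offset patching above (a sketch): for a
 * dynamic UBO whose descriptor holds a 64-bit base address split across
 * two dwords, a dynamic offset of 0x100 is folded directly into the
 * copied descriptor:
 *
 *    uint64_t va = src[0] | ((uint64_t) src[1] << 32); // e.g. 0x10000000
 *    va += 0x100;                                      // pDynamicOffsets[n]
 *    dst[0] = va;                                      // low dword
 *    dst[1] = va >> 32;                                // high dword
 */
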
void tu_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer,
                                           uint32_t firstBinding,
                                           uint32_t bindingCount,
                                           const VkBuffer *pBuffers,
                                           const VkDeviceSize *pOffsets,
                                           const VkDeviceSize *pSizes)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   assert(firstBinding + bindingCount <= IR3_MAX_SO_BUFFERS);

   for (uint32_t i = 0; i < bindingCount; i++) {
      uint32_t idx = firstBinding + i;
      TU_FROM_HANDLE(tu_buffer, buf, pBuffers[i]);

      if (pOffsets[i] != 0)
         cmd->state.streamout_reset |= 1 << idx;

      cmd->state.streamout_buf.buffers[idx] = buf;
      cmd->state.streamout_buf.offsets[idx] = pOffsets[i];
      cmd->state.streamout_buf.sizes[idx] = pSizes[i];

      cmd->state.streamout_enabled |= 1 << idx;
   }

   cmd->state.dirty |= TU_CMD_DIRTY_STREAMOUT_BUFFERS;
}

void tu_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer,
                                     uint32_t firstCounterBuffer,
                                     uint32_t counterBufferCount,
                                     const VkBuffer *pCounterBuffers,
                                     const VkDeviceSize *pCounterBufferOffsets)
{
   assert(firstCounterBuffer + counterBufferCount <= IR3_MAX_SO_BUFFERS);
   /* TODO do something with counter buffer? */
}

void tu_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer,
                                   uint32_t firstCounterBuffer,
                                   uint32_t counterBufferCount,
                                   const VkBuffer *pCounterBuffers,
                                   const VkDeviceSize *pCounterBufferOffsets)
{
   assert(firstCounterBuffer + counterBufferCount <= IR3_MAX_SO_BUFFERS);
   /* TODO do something with counter buffer? */

   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   cmd->state.streamout_enabled = 0;
}

void
tu_CmdPushConstants(VkCommandBuffer commandBuffer,
                    VkPipelineLayout layout,
                    VkShaderStageFlags stageFlags,
                    uint32_t offset,
                    uint32_t size,
                    const void *pValues)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   memcpy((void*) cmd->push_constants + offset, pValues, size);
   cmd->state.dirty |= TU_CMD_DIRTY_SHADER_CONSTS;
}
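
/* Illustrative application-side usage (a sketch): push constants are simply
 * memcpy'd into cmd->push_constants here and uploaded as user consts at
 * draw time by tu6_emit_user_consts():
 *
 *    float color[4] = { 1.0f, 0.0f, 0.0f, 1.0f };
 *    vkCmdPushConstants(cmd_buf, layout, VK_SHADER_STAGE_FRAGMENT_BIT,
 *                       0, sizeof(color), color);
 */
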
/* Flush everything which has been made available but we haven't actually
 * flushed yet.
 */
static void
tu_flush_all_pending(struct tu_cache_state *cache)
{
   cache->flush_bits |= cache->pending_flush_bits & TU_CMD_FLAG_ALL_FLUSH;
   cache->pending_flush_bits &= ~TU_CMD_FLAG_ALL_FLUSH;
}

VkResult
tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   /* We currently flush CCU at the end of the command buffer, like
    * what the blob does. There's implicit synchronization around every
    * vkQueueSubmit, but the kernel only flushes the UCHE, and we don't
    * know yet if this command buffer will be the last in the submit so we
    * have to defensively flush everything else.
    *
    * TODO: We could definitely do better than this, since these flushes
    * aren't required by Vulkan, but we'd need kernel support to do that.
    * Ideally, we'd like the kernel to flush everything afterwards, so that we
    * wouldn't have to do any flushes here, and when submitting multiple
    * command buffers there wouldn't be any unnecessary flushes in between.
    */
   if (cmd_buffer->state.pass) {
      tu_flush_all_pending(&cmd_buffer->state.renderpass_cache);
      tu_emit_cache_flush_renderpass(cmd_buffer, &cmd_buffer->draw_cs);
   } else {
      tu_flush_all_pending(&cmd_buffer->state.cache);
      cmd_buffer->state.cache.flush_bits |=
         TU_CMD_FLAG_CCU_FLUSH_COLOR |
         TU_CMD_FLAG_CCU_FLUSH_DEPTH;
      tu_emit_cache_flush(cmd_buffer, &cmd_buffer->cs);
   }

   tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
                  MSM_SUBMIT_BO_WRITE);

   if (cmd_buffer->use_vsc_data) {
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_draw_strm,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_prim_strm,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
   }

   tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->device->border_color,
                  MSM_SUBMIT_BO_READ);

   for (uint32_t i = 0; i < cmd_buffer->draw_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   for (uint32_t i = 0; i < cmd_buffer->draw_epilogue_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_epilogue_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   for (uint32_t i = 0; i < cmd_buffer->sub_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->sub_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   tu_cs_end(&cmd_buffer->cs);
   tu_cs_end(&cmd_buffer->draw_cs);
   tu_cs_end(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;

   return cmd_buffer->record_result;
}

static struct tu_cs
tu_cmd_dynamic_state(struct tu_cmd_buffer *cmd, uint32_t id, uint32_t size)
{
   struct ts_cs_memory memory;
   struct tu_cs cs;

   /* TODO: share this logic with tu_pipeline_static_state */
   tu_cs_alloc(&cmd->sub_cs, size, 1, &memory);
   tu_cs_init_external(&cs, memory.map, memory.map + size);
   tu_cs_begin(&cs);
   tu_cs_reserve_space(&cs, size);

   assert(id < ARRAY_SIZE(cmd->state.dynamic_state));
   cmd->state.dynamic_state[id].iova = memory.iova;
   cmd->state.dynamic_state[id].size = size;

   tu_cs_emit_pkt7(&cmd->draw_cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit_draw_state(&cmd->draw_cs, TU_DRAW_STATE_DYNAMIC + id,
                         cmd->state.dynamic_state[id]);

   return cs;
}
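
/* Illustrative note (a sketch): each dynamic state gets a stable draw-state
 * slot, so re-recording e.g. the viewport only rewrites that one group:
 *
 *    CP_SET_DRAW_STATE:
 *       group id  = TU_DRAW_STATE_DYNAMIC + VK_DYNAMIC_STATE_VIEWPORT
 *       size/iova = cmd->state.dynamic_state[id]
 *
 * which is why the vkCmdSet* entry points below can each update their own
 * state independently of the pipeline's static groups.
 */
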
void
tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
                   VkPipelineBindPoint pipelineBindPoint,
                   VkPipeline _pipeline)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);

   tu_bo_list_add(&cmd->bo_list, &pipeline->program.binary_bo,
                  MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   for (uint32_t i = 0; i < pipeline->cs.bo_count; i++) {
      tu_bo_list_add(&cmd->bo_list, pipeline->cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) {
      cmd->state.compute_pipeline = pipeline;
      cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_PIPELINE;
      return;
   }

   assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS);

   cmd->state.pipeline = pipeline;
   cmd->state.dirty |= TU_CMD_DIRTY_SHADER_CONSTS;

   struct tu_cs *cs = &cmd->draw_cs;
   uint32_t mask = ~pipeline->dynamic_state_mask & BITFIELD_MASK(TU_DYNAMIC_STATE_COUNT);

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (7 + util_bitcount(mask)));
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state_ib);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state_ib);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI, pipeline->vi.state_ib);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state_ib);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_RAST, pipeline->rast.state_ib);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS, pipeline->ds.state_ib);
   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_BLEND, pipeline->blend.state_ib);

   for_each_bit(i, mask)
      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DYNAMIC + i, pipeline->dynamic_state[i]);

   /* If the new pipeline requires more VBs than we had previously set up, we
    * need to re-emit them in SDS. If it requires the same set or fewer, we
    * can just re-use the old SDS.
    */
   if (pipeline->vi.bindings_used & ~cmd->vertex_bindings_set)
      cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;

   /* If the pipeline needs a dynamic descriptor, re-emit descriptor sets */
   if (pipeline->layout->dynamic_offset_count)
      cmd->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS;

   /* The dynamic linewidth state depends on the pipeline state's gras_su_cntl,
    * so the dynamic state IB must be updated when the pipeline changes.
    */
   if (pipeline->dynamic_state_mask & BIT(VK_DYNAMIC_STATE_LINE_WIDTH)) {
      struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_LINE_WIDTH, 2);

      cmd->state.dynamic_gras_su_cntl &= A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
      cmd->state.dynamic_gras_su_cntl |= pipeline->gras_su_cntl;

      tu_cs_emit_regs(&cs, A6XX_GRAS_SU_CNTL(.dword = cmd->state.dynamic_gras_su_cntl));
   }
}

void
tu_CmdSetViewport(VkCommandBuffer commandBuffer,
                  uint32_t firstViewport,
                  uint32_t viewportCount,
                  const VkViewport *pViewports)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_VIEWPORT, 18);

   assert(firstViewport == 0 && viewportCount == 1);

   tu6_emit_viewport(&cs, pViewports);
}

void
tu_CmdSetScissor(VkCommandBuffer commandBuffer,
                 uint32_t firstScissor,
                 uint32_t scissorCount,
                 const VkRect2D *pScissors)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_SCISSOR, 3);

   assert(firstScissor == 0 && scissorCount == 1);

   tu6_emit_scissor(&cs, pScissors);
}

void
tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_LINE_WIDTH, 2);

   cmd->state.dynamic_gras_su_cntl &= ~A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
   cmd->state.dynamic_gras_su_cntl |= A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(lineWidth / 2.0f);

   tu_cs_emit_regs(&cs, A6XX_GRAS_SU_CNTL(.dword = cmd->state.dynamic_gras_su_cntl));
}

void
tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
                   float depthBiasConstantFactor,
                   float depthBiasClamp,
                   float depthBiasSlopeFactor)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_DEPTH_BIAS, 4);

   tu6_emit_depth_bias(&cs, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
}

void
tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
                        const float blendConstants[4])
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_BLEND_CONSTANTS, 5);

   tu_cs_emit_pkt4(&cs, REG_A6XX_RB_BLEND_RED_F32, 4);
   tu_cs_emit_array(&cs, (const uint32_t *) blendConstants, 4);
}

void
tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
                     float minDepthBounds,
                     float maxDepthBounds)
{
}

static void
update_stencil_mask(uint32_t *value, VkStencilFaceFlags face, uint32_t mask)
{
   if (face & VK_STENCIL_FACE_FRONT_BIT)
      *value |= A6XX_RB_STENCILMASK_MASK(mask);
   if (face & VK_STENCIL_FACE_BACK_BIT)
      *value |= A6XX_RB_STENCILMASK_BFMASK(mask);
}
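
/* Worked example (a sketch): with faceMask = VK_STENCIL_FACE_FRONT_AND_BACK
 * and mask = 0xf0, both field macros are OR'd into the packed register
 * value, front in the MASK field and back in the BFMASK (back-face) field:
 *
 *    uint32_t value = 0;
 *    update_stencil_mask(&value, VK_STENCIL_FACE_FRONT_AND_BACK, 0xf0);
 *    // value == A6XX_RB_STENCILMASK_MASK(0xf0) |
 *    //          A6XX_RB_STENCILMASK_BFMASK(0xf0)
 */
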
void
tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
                            VkStencilFaceFlags faceMask,
                            uint32_t compareMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK, 2);

   update_stencil_mask(&cmd->state.dynamic_stencil_mask, faceMask, compareMask);

   tu_cs_emit_regs(&cs, A6XX_RB_STENCILMASK(.dword = cmd->state.dynamic_stencil_mask));
}

void
tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t writeMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, 2);

   update_stencil_mask(&cmd->state.dynamic_stencil_wrmask, faceMask, writeMask);

   tu_cs_emit_regs(&cs, A6XX_RB_STENCILWRMASK(.dword = cmd->state.dynamic_stencil_wrmask));
}

void
tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t reference)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_REFERENCE, 2);

   update_stencil_mask(&cmd->state.dynamic_stencil_ref, faceMask, reference);

   tu_cs_emit_regs(&cs, A6XX_RB_STENCILREF(.dword = cmd->state.dynamic_stencil_ref));
}

void
tu_CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
                            const VkSampleLocationsInfoEXT* pSampleLocationsInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs cs = tu_cmd_dynamic_state(cmd, TU_DYNAMIC_STATE_SAMPLE_LOCATIONS, 9);

   assert(pSampleLocationsInfo);

   tu6_emit_sample_locations(&cs, pSampleLocationsInfo);
}

static void
tu_flush_for_access(struct tu_cache_state *cache,
                    enum tu_cmd_access_mask src_mask,
                    enum tu_cmd_access_mask dst_mask)
{
   enum tu_cmd_flush_bits flush_bits = 0;

   if (src_mask & TU_ACCESS_SYSMEM_WRITE) {
      cache->pending_flush_bits |= TU_CMD_FLAG_ALL_INVALIDATE;
   }

#define SRC_FLUSH(domain, flush, invalidate) \
   if (src_mask & TU_ACCESS_##domain##_WRITE) { \
      cache->pending_flush_bits |= TU_CMD_FLAG_##flush | \
         (TU_CMD_FLAG_ALL_INVALIDATE & ~TU_CMD_FLAG_##invalidate); \
   }

   SRC_FLUSH(UCHE, CACHE_FLUSH, CACHE_INVALIDATE)
   SRC_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
   SRC_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)

#undef SRC_FLUSH

#define SRC_INCOHERENT_FLUSH(domain, flush, invalidate) \
   if (src_mask & TU_ACCESS_##domain##_INCOHERENT_WRITE) { \
      flush_bits |= TU_CMD_FLAG_##flush; \
      cache->pending_flush_bits |= \
         (TU_CMD_FLAG_ALL_INVALIDATE & ~TU_CMD_FLAG_##invalidate); \
   }

   SRC_INCOHERENT_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
   SRC_INCOHERENT_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)

#undef SRC_INCOHERENT_FLUSH

   if (dst_mask & (TU_ACCESS_SYSMEM_READ | TU_ACCESS_SYSMEM_WRITE)) {
      flush_bits |= cache->pending_flush_bits & TU_CMD_FLAG_ALL_FLUSH;
   }

#define DST_FLUSH(domain, flush, invalidate) \
   if (dst_mask & (TU_ACCESS_##domain##_READ | \
                   TU_ACCESS_##domain##_WRITE)) { \
      flush_bits |= cache->pending_flush_bits & \
         (TU_CMD_FLAG_##invalidate | \
          (TU_CMD_FLAG_ALL_FLUSH & ~TU_CMD_FLAG_##flush)); \
   }

   DST_FLUSH(UCHE, CACHE_FLUSH, CACHE_INVALIDATE)
   DST_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
   DST_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)

#undef DST_FLUSH

#define DST_INCOHERENT_FLUSH(domain, flush, invalidate) \
   if (dst_mask & (TU_ACCESS_##domain##_READ | \
                   TU_ACCESS_##domain##_WRITE)) { \
      flush_bits |= TU_CMD_FLAG_##invalidate | \
         (cache->pending_flush_bits & \
          (TU_CMD_FLAG_ALL_FLUSH & ~TU_CMD_FLAG_##flush)); \
   }

   DST_INCOHERENT_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
   DST_INCOHERENT_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)

#undef DST_INCOHERENT_FLUSH

   if (dst_mask & TU_ACCESS_WFI_READ) {
      flush_bits |= TU_CMD_FLAG_WFI;
   }

   cache->flush_bits |= flush_bits;
   cache->pending_flush_bits &= ~flush_bits;
}
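
/* Worked example (a sketch, tracing the macros above): a barrier from a
 * shader (UCHE) write to a color-attachment (CCU) read. The src side
 * leaves CACHE_FLUSH pending plus all invalidates except CACHE_INVALIDATE;
 * the dst side then pulls the relevant bits out of the pending set:
 *
 *    tu_flush_for_access(&cache,
 *                        TU_ACCESS_UCHE_WRITE,
 *                        TU_ACCESS_CCU_COLOR_READ);
 *    // cache.flush_bits now holds roughly
 *    //    TU_CMD_FLAG_CACHE_FLUSH | TU_CMD_FLAG_CCU_INVALIDATE_COLOR
 */
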
static enum tu_cmd_access_mask
vk2tu_access(VkAccessFlags flags, bool gmem)
{
   enum tu_cmd_access_mask mask = 0;

   /* If the GPU writes a buffer that is then read by an indirect draw
    * command, we theoretically need a WFI + WAIT_FOR_ME combination to
    * wait for the writes to complete. The WAIT_FOR_ME is performed as part
    * of the draw by the firmware, so we just need to execute a WFI.
    */
   if (flags &
       (VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
        VK_ACCESS_MEMORY_READ_BIT)) {
      mask |= TU_ACCESS_WFI_READ;
   }

   if (flags &
       (VK_ACCESS_INDIRECT_COMMAND_READ_BIT | /* Read performed by CP */
        VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT | /* Read performed by CP, I think */
        VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT | /* Read performed by CP */
        VK_ACCESS_HOST_READ_BIT | /* sysmem by definition */
        VK_ACCESS_MEMORY_READ_BIT)) {
      mask |= TU_ACCESS_SYSMEM_READ;
   }

   if (flags &
       (VK_ACCESS_HOST_WRITE_BIT |
        VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT | /* Write performed by CP, I think */
        VK_ACCESS_MEMORY_WRITE_BIT)) {
      mask |= TU_ACCESS_SYSMEM_WRITE;
   }

   if (flags &
       (VK_ACCESS_INDEX_READ_BIT | /* Read performed by PC, I think */
        VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | /* Read performed by VFD */
        VK_ACCESS_UNIFORM_READ_BIT | /* Read performed by SP */
        /* TODO: Is there a no-cache bit for textures so that we can ignore
         * these?
         */
        VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | /* Read performed by TP */
        VK_ACCESS_SHADER_READ_BIT | /* Read performed by SP/TP */
        VK_ACCESS_MEMORY_READ_BIT)) {
      mask |= TU_ACCESS_UCHE_READ;
   }

   if (flags &
       (VK_ACCESS_SHADER_WRITE_BIT | /* Write performed by SP */
        VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT | /* Write performed by VPC */
        VK_ACCESS_MEMORY_WRITE_BIT)) {
      mask |= TU_ACCESS_UCHE_WRITE;
   }

   /* When using GMEM, the CCU is always flushed automatically to GMEM, and
    * then GMEM is flushed to sysmem. Furthermore, we already had to flush any
    * previous writes in sysmem mode when transitioning to GMEM. Therefore we
    * can ignore CCU and pretend that color attachments and transfers use
    * sysmem directly.
    */
   if (flags &
       (VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
        VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
        VK_ACCESS_MEMORY_READ_BIT)) {
      if (gmem)
         mask |= TU_ACCESS_SYSMEM_READ;
      else
         mask |= TU_ACCESS_CCU_COLOR_INCOHERENT_READ;
   }

   if (flags &
       (VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
        VK_ACCESS_MEMORY_READ_BIT)) {
      if (gmem)
         mask |= TU_ACCESS_SYSMEM_READ;
      else
         mask |= TU_ACCESS_CCU_DEPTH_INCOHERENT_READ;
   }

   if (flags &
       (VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
        VK_ACCESS_MEMORY_WRITE_BIT)) {
      if (gmem) {
         mask |= TU_ACCESS_SYSMEM_WRITE;
      } else {
         mask |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
      }
   }

   if (flags &
       (VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
        VK_ACCESS_MEMORY_WRITE_BIT)) {
      if (gmem) {
         mask |= TU_ACCESS_SYSMEM_WRITE;
      } else {
         mask |= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE;
      }
   }

   /* When the dst access is a transfer read/write, it seems we sometimes need
    * to insert a WFI after any flushes, to guarantee that the flushes finish
    * before the 2D engine starts. However the opposite (i.e. a WFI after
    * CP_BLIT and before any subsequent flush) does not seem to be needed, and
    * the blob doesn't emit such a WFI.
    */
   if (flags &
       (VK_ACCESS_TRANSFER_WRITE_BIT |
        VK_ACCESS_MEMORY_WRITE_BIT)) {
      if (gmem) {
         mask |= TU_ACCESS_SYSMEM_WRITE;
      } else {
         mask |= TU_ACCESS_CCU_COLOR_WRITE;
      }
      mask |= TU_ACCESS_WFI_READ;
   }

   if (flags &
       (VK_ACCESS_TRANSFER_READ_BIT | /* Access performed by TP */
        VK_ACCESS_MEMORY_READ_BIT)) {
      mask |= TU_ACCESS_UCHE_READ | TU_ACCESS_WFI_READ;
   }

   return mask;
}
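
/* Example mapping (a sketch): a typical compute-to-transfer barrier with
 * srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT and
 * dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT, outside a render pass:
 *
 *    vk2tu_access(VK_ACCESS_SHADER_WRITE_BIT, false)
 *       == TU_ACCESS_UCHE_WRITE
 *    vk2tu_access(VK_ACCESS_TRANSFER_READ_BIT, false)
 *       == TU_ACCESS_UCHE_READ | TU_ACCESS_WFI_READ
 */
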
void
tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCmdBuffers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   VkResult result;

   assert(commandBufferCount > 0);

   /* Emit any pending flushes. */
   if (cmd->state.pass) {
      tu_flush_all_pending(&cmd->state.renderpass_cache);
      tu_emit_cache_flush_renderpass(cmd, &cmd->draw_cs);
   } else {
      tu_flush_all_pending(&cmd->state.cache);
      tu_emit_cache_flush(cmd, &cmd->cs);
   }

   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);

      result = tu_bo_list_merge(&cmd->bo_list, &secondary->bo_list);
      if (result != VK_SUCCESS) {
         cmd->record_result = result;
         break;
      }

      if (secondary->usage_flags &
          VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
         assert(tu_cs_is_empty(&secondary->cs));

         result = tu_cs_add_entries(&cmd->draw_cs, &secondary->draw_cs);
         if (result != VK_SUCCESS) {
            cmd->record_result = result;
            break;
         }

         result = tu_cs_add_entries(&cmd->draw_epilogue_cs,
                                    &secondary->draw_epilogue_cs);
         if (result != VK_SUCCESS) {
            cmd->record_result = result;
            break;
         }
      } else {
         assert(tu_cs_is_empty(&secondary->draw_cs));
         assert(tu_cs_is_empty(&secondary->draw_epilogue_cs));

         for (uint32_t j = 0; j < secondary->cs.bo_count; j++) {
            tu_bo_list_add(&cmd->bo_list, secondary->cs.bos[j],
                           MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
         }

         tu_cs_add_entries(&cmd->cs, &secondary->cs);
      }
   }
   cmd->state.dirty = ~0u; /* TODO: set dirty only what needs to be */

   /* After executing secondary command buffers, there may have been arbitrary
    * flushes executed, so when we encounter a pipeline barrier with a
    * srcMask, we have to assume that we need to invalidate. Therefore we need
    * to re-initialize the cache with all pending invalidate bits set.
    */
   if (cmd->state.pass) {
      tu_cache_init(&cmd->state.renderpass_cache);
   } else {
      tu_cache_init(&cmd->state.cache);
   }
}

VkResult
tu_CreateCommandPool(VkDevice _device,
                     const VkCommandPoolCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkCommandPool *pCmdPool)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_cmd_pool *pool;

   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);
   list_inithead(&pool->free_cmd_buffers);

   pool->queue_family_index = pCreateInfo->queueFamilyIndex;

   *pCmdPool = tu_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void
tu_DestroyCommandPool(VkDevice _device,
                      VkCommandPool commandPool,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   vk_free2(&device->alloc, pAllocator, pool);
}

VkResult
tu_ResetCommandPool(VkDevice device,
                    VkCommandPool commandPool,
                    VkCommandPoolResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
   VkResult result;

   list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
                       pool_link)
   {
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}

void
tu_TrimCommandPool(VkDevice device,
                   VkCommandPool commandPool,
                   VkCommandPoolTrimFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }
}

static void
tu_subpass_barrier(struct tu_cmd_buffer *cmd_buffer,
                   const struct tu_subpass_barrier *barrier,
                   bool external)
{
   /* Note: we don't know until the end of the subpass whether we'll use
    * sysmem, so assume sysmem here to be safe.
    */
   struct tu_cache_state *cache =
      external ? &cmd_buffer->state.cache : &cmd_buffer->state.renderpass_cache;
   enum tu_cmd_access_mask src_flags =
      vk2tu_access(barrier->src_access_mask, false);
   enum tu_cmd_access_mask dst_flags =
      vk2tu_access(barrier->dst_access_mask, false);

   if (barrier->incoherent_ccu_color)
      src_flags |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
   if (barrier->incoherent_ccu_depth)
      src_flags |= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE;

   tu_flush_for_access(cache, src_flags, dst_flags);
}

void
tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                      const VkRenderPassBeginInfo *pRenderPassBegin,
                      VkSubpassContents contents)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
   TU_FROM_HANDLE(tu_framebuffer, fb, pRenderPassBegin->framebuffer);

   cmd->state.pass = pass;
   cmd->state.subpass = pass->subpasses;
   cmd->state.framebuffer = fb;

   tu_cmd_update_tiling_config(cmd, &pRenderPassBegin->renderArea);
   tu_cmd_prepare_tile_store_ib(cmd);

   /* Note: because this is external, any flushes will happen before draw_cs
    * gets called. However deferred flushes could have to happen later as part
    * of the render pass.
    */
   tu_subpass_barrier(cmd, &pass->subpasses[0].start_barrier, true);
   cmd->state.renderpass_cache.pending_flush_bits =
      cmd->state.cache.pending_flush_bits;
   cmd->state.renderpass_cache.flush_bits = 0;

   tu_emit_renderpass_begin(cmd, pRenderPassBegin);

   tu6_emit_zs(cmd, cmd->state.subpass, &cmd->draw_cs);
   tu6_emit_mrt(cmd, cmd->state.subpass, &cmd->draw_cs);
   tu6_emit_msaa(&cmd->draw_cs, cmd->state.subpass->samples);
   tu6_emit_render_cntl(cmd, cmd->state.subpass, &cmd->draw_cs, false);

   tu_set_input_attachments(cmd, cmd->state.subpass);

   /* note: use_hw_binning only checks tiling config */
   if (use_hw_binning(cmd))
      cmd->use_vsc_data = true;

   for (uint32_t i = 0; i < fb->attachment_count; ++i) {
      const struct tu_image_view *iview = fb->attachments[i].attachment;
      tu_bo_list_add(&cmd->bo_list, iview->image->bo,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
   }

   cmd->state.dirty |= TU_CMD_DIRTY_DRAW_STATE;
}

void
tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
                       const VkRenderPassBeginInfo *pRenderPassBeginInfo,
                       const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
{
   tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
                         pSubpassBeginInfo->contents);
}

void
tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   const struct tu_render_pass *pass = cmd->state.pass;
   struct tu_cs *cs = &cmd->draw_cs;

   const struct tu_subpass *subpass = cmd->state.subpass++;

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);

   if (subpass->resolve_attachments) {
      tu6_emit_blit_scissor(cmd, cs, true);

      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         tu_store_gmem_attachment(cmd, cs, a,
                                  subpass->color_attachments[i].attachment);

         if (pass->attachments[a].gmem_offset < 0)
            continue;

         /* TODO:
          * check if the resolved attachment is needed by later subpasses;
          * if it is, we should do a GMEM->GMEM resolve instead of
          * GMEM->MEM->GMEM.
          */
         tu_finishme("missing GMEM->GMEM resolve path\n");
         tu_load_gmem_attachment(cmd, cs, a, true);
      }
   }

   tu_cond_exec_end(cs);

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);

   tu6_emit_sysmem_resolves(cmd, cs, subpass);

   tu_cond_exec_end(cs);

   /* Handle dependencies for the next subpass */
   tu_subpass_barrier(cmd, &cmd->state.subpass->start_barrier, false);

   /* emit mrt/zs/msaa/ubwc state for the subpass that is starting */
   tu6_emit_zs(cmd, cmd->state.subpass, cs);
   tu6_emit_mrt(cmd, cmd->state.subpass, cs);
   tu6_emit_msaa(cs, cmd->state.subpass->samples);
   tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, false);

   tu_set_input_attachments(cmd, cmd->state.subpass);
}

void
tu_CmdNextSubpass2(VkCommandBuffer commandBuffer,
                   const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
                   const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}

struct tu_draw_info
{
   /**
    * Number of vertices.
    */
   uint32_t count;

   /**
    * Index of the first vertex.
    */
   int32_t vertex_offset;

   /**
    * First instance id.
    */
   uint32_t first_instance;

   /**
    * Number of instances.
    */
   uint32_t instance_count;

   /**
    * First index (indexed draws only).
    */
   uint32_t first_index;

   /**
    * Whether it's an indexed draw.
    */
   bool indexed;

   /**
    * Indirect draw parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
   uint32_t stride;

   /**
    * Draw count parameters resource.
    */
   struct tu_buffer *count_buffer;
   uint64_t count_buffer_offset;

   /**
    * Stream output parameters resource.
    */
   struct tu_buffer *streamout_buffer;
   uint64_t streamout_buffer_offset;
};

static void
tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
                     struct tu_descriptor_state *descriptors_state,
                     gl_shader_stage type,
                     uint32_t *push_constants)
{
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   const struct ir3_ubo_analysis_state *state = &link->const_state.ubo_state;

   if (link->push_consts.count > 0) {
      unsigned num_units = link->push_consts.count;
      unsigned offset = link->push_consts.lo;
      tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_units * 4);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                 CP_LOAD_STATE6_0_NUM_UNIT(num_units));
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, 0);
      for (unsigned i = 0; i < num_units * 4; i++)
         tu_cs_emit(cs, push_constants[i + offset * 4]);
   }

   for (uint32_t i = 0; i < state->num_enabled; i++) {
      uint32_t size = state->range[i].end - state->range[i].start;
      uint32_t offset = state->range[i].start;

      /* and even if the start of the const buffer is before
       * first_immediate, the end may not be:
       */
      size = MIN2(size, (16 * link->constlen) - state->range[i].offset);

      if (size == 0)
         continue;

      /* things should be aligned to vec4: */
      debug_assert((state->range[i].offset % 16) == 0);
      debug_assert((size % 16) == 0);
      debug_assert((offset % 16) == 0);

      /* Dig out the descriptor from the descriptor state and read the VA from
       * it.
       */
      assert(state->range[i].bindless);
      uint32_t *base = state->range[i].bindless_base == MAX_SETS ?
         descriptors_state->dynamic_descriptors :
         descriptors_state->sets[state->range[i].bindless_base]->mapped_ptr;
      unsigned block = state->range[i].block;
      uint32_t *desc = base + block * A6XX_TEX_CONST_DWORDS;
      uint64_t va = desc[0] | ((uint64_t)(desc[1] & A6XX_UBO_1_BASE_HI__MASK) << 32);

      tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                 CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
      tu_cs_emit_qw(cs, va + offset);
   }
}

static struct tu_cs_entry
tu6_emit_consts(struct tu_cmd_buffer *cmd,
                const struct tu_pipeline *pipeline,
                struct tu_descriptor_state *descriptors_state,
                gl_shader_stage type)
{
   struct tu_cs cs;
   tu_cs_begin_sub_stream(&cmd->sub_cs, 512, &cs); /* TODO: maximum size? */

   tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);

   return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
}

static VkResult
tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
                   const struct tu_draw_info *draw,
                   struct tu_cs_entry *entry)
{
   /* TODO: fill out more than just base instance */
   const struct tu_program_descriptor_linkage *link =
      &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
   const struct ir3_const_state *const_state = &link->const_state;
   struct tu_cs cs;

   if (const_state->offsets.driver_param >= link->constlen) {
      *entry = (struct tu_cs_entry) {};
      return VK_SUCCESS;
   }

   VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 8, &cs);
   if (result != VK_SUCCESS)
      return result;

   tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(const_state->offsets.driver_param) |
         CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
         CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
         CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
         CP_LOAD_STATE6_0_NUM_UNIT(1));
   tu_cs_emit(&cs, 0);
   tu_cs_emit(&cs, 0);

   STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);

   tu_cs_emit(&cs, 0);
   tu_cs_emit(&cs, 0);
   tu_cs_emit(&cs, draw->first_instance);
   tu_cs_emit(&cs, 0);

   *entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
   return VK_SUCCESS;
}

static struct tu_cs_entry
tu6_emit_vertex_buffers(struct tu_cmd_buffer *cmd,
                        const struct tu_pipeline *pipeline)
{
   struct tu_cs cs;
   tu_cs_begin_sub_stream(&cmd->sub_cs, 4 * MAX_VBS, &cs);

   for_each_bit(binding, pipeline->vi.bindings_used) {
      const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
      const VkDeviceSize offset = buf->bo_offset +
         cmd->state.vb.offsets[binding];

      tu_cs_emit_regs(&cs,
                      A6XX_VFD_FETCH_BASE(binding, .bo = buf->bo, .bo_offset = offset),
                      A6XX_VFD_FETCH_SIZE(binding, buf->size - offset));
   }

   cmd->vertex_bindings_set = pipeline->vi.bindings_used;

   return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
}

static void
tu6_emit_streamout(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_streamout_state *tf = &cmd->state.pipeline->streamout;

   for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
      struct tu_buffer *buf = cmd->state.streamout_buf.buffers[i];
      uint32_t offset;
      if (!buf)
         continue;

      offset = cmd->state.streamout_buf.offsets[i];

      tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_BASE(i, .bo = buf->bo,
                                                     .bo_offset = buf->bo_offset));
      tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_SIZE(i, buf->size));

      if (cmd->state.streamout_reset & (1 << i)) {
         tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_OFFSET(i, offset));
         cmd->state.streamout_reset &= ~(1 << i);
      } else {
         tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
         tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(i)) |
                        CP_MEM_TO_REG_0_SHIFT_BY_2 | CP_MEM_TO_REG_0_UNK31 |
                        CP_MEM_TO_REG_0_CNT(0));
         tu_cs_emit_qw(cs, cmd->scratch_bo.iova +
                           ctrl_offset(flush_base[i].offset));
      }

      tu_cs_emit_regs(cs, A6XX_VPC_SO_FLUSH_BASE(i, .bo = &cmd->scratch_bo,
                                                    .bo_offset =
                                                       ctrl_offset(flush_base[i])));
   }

   if (cmd->state.streamout_enabled) {
      tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 12 + (2 * tf->prog_count));
      tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
      tu_cs_emit(cs, tf->vpc_so_buf_cntl);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(0));
      tu_cs_emit(cs, tf->ncomp[0]);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(1));
      tu_cs_emit(cs, tf->ncomp[1]);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(2));
      tu_cs_emit(cs, tf->ncomp[2]);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(3));
      tu_cs_emit(cs, tf->ncomp[3]);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
      tu_cs_emit(cs, A6XX_VPC_SO_CNTL_ENABLE);
      for (unsigned i = 0; i < tf->prog_count; i++) {
         tu_cs_emit(cs, REG_A6XX_VPC_SO_PROG);
         tu_cs_emit(cs, tf->prog[i]);
      }
   } else {
      tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 4);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
      tu_cs_emit(cs, 0);
   }
}
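
/* Note on the append path above (an illustrative sketch): when a buffer is
 * bound without being reset, the current write position is reloaded from
 * the per-buffer flush base in the scratch BO instead of being programmed
 * directly, roughly:
 *
 *    VPC_SO_BUFFER_OFFSET(i) <- mem[scratch_bo + ctrl_offset(flush_base[i])]
 *
 * which is what the CP_MEM_TO_REG (with SHIFT_BY_2) implements, so
 * streamout resumes where the previous pass left off.
 */
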
static VkResult
tu6_bind_draw_states(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     const struct tu_draw_info *draw)
{
   const struct tu_pipeline *pipeline = cmd->state.pipeline;
   VkResult result;

   struct tu_descriptor_state *descriptors_state =
      &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];

   tu_cs_emit_regs(cs,
                   A6XX_PC_PRIMITIVE_CNTL_0(.primitive_restart =
                                            pipeline->ia.primitive_restart && draw->indexed));

   if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
      cmd->state.shader_const_ib[MESA_SHADER_VERTEX] =
         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX);
      cmd->state.shader_const_ib[MESA_SHADER_GEOMETRY] =
         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_GEOMETRY);
      cmd->state.shader_const_ib[MESA_SHADER_FRAGMENT] =
         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT);
   }

   if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS)
      tu6_emit_streamout(cmd, cs);

   if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
      /* We need to reload the descriptors every time the descriptor sets
       * change. However, the commands we send only depend on the pipeline
       * because the whole point is to cache descriptors which are used by the
       * pipeline. There's a problem here, in that the firmware has an
       * "optimization" which skips executing groups that are set to the same
       * value as the last draw. This means that if the descriptor sets change
       * but not the pipeline, we'd try to re-execute the same buffer which
       * the firmware would ignore and we wouldn't pre-load the new
       * descriptors. The blob seems to re-emit the LOAD_STATE group whenever
       * the descriptor sets change, which we emulate here by copying the
       * pre-prepared buffer.
       */
      const struct tu_cs_entry *load_entry = &pipeline->load_state.state_ib;
      if (load_entry->size > 0) {
         struct tu_cs load_cs;
         result = tu_cs_begin_sub_stream(&cmd->sub_cs, load_entry->size, &load_cs);
         if (result != VK_SUCCESS)
            return result;
         tu_cs_emit_array(&load_cs,
                          (uint32_t *)((char *)load_entry->bo->map + load_entry->offset),
                          load_entry->size / 4);
         cmd->state.desc_sets_load_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &load_cs);
      } else {
         cmd->state.desc_sets_load_ib.size = 0;
      }
   }

   if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS)
      cmd->state.vertex_buffers_ib = tu6_emit_vertex_buffers(cmd, pipeline);

   struct tu_cs_entry vs_params;
   result = tu6_emit_vs_params(cmd, draw, &vs_params);
   if (result != VK_SUCCESS)
      return result;

   /* for the first draw in a renderpass, re-emit all the draw states
    *
    * and if a draw-state disabling path (CmdClearAttachments 3D fallback) was
    * used, then draw states must be re-emitted. note however this only happens
    * in the sysmem path, so this can be skipped for the gmem path (TODO)
    *
    * the two input attachment states are excluded because secondary command
    * buffer doesn't have a state ib to restore it, and not re-emitting them
    * is OK since CmdClearAttachments won't disable/overwrite them
    */
   if (cmd->state.dirty & TU_CMD_DIRTY_DRAW_STATE) {
      tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (TU_DRAW_STATE_COUNT - 2));

      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state_ib);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state_ib);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI, pipeline->vi.state_ib);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state_ib);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_RAST, pipeline->rast.state_ib);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS, pipeline->ds.state_ib);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_BLEND, pipeline->blend.state_ib);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const_ib[MESA_SHADER_VERTEX]);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const_ib[MESA_SHADER_GEOMETRY]);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const_ib[MESA_SHADER_FRAGMENT]);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DESC_SETS, cmd->state.desc_sets_ib);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DESC_SETS_LOAD, cmd->state.desc_sets_load_ib);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers_ib);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VS_PARAMS, vs_params);

      for (uint32_t i = 0; i < ARRAY_SIZE(cmd->state.dynamic_state); i++) {
         tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DYNAMIC + i,
                               ((pipeline->dynamic_state_mask & BIT(i)) ?
                                cmd->state.dynamic_state[i] :
                                pipeline->dynamic_state[i]));
      }
   } else {
      /* emit draw states that were just updated
       * note we eventually don't want to have to emit anything here
       */
      uint32_t draw_state_count =
         ((cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) ? 3 : 0) +
         ((cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) ? 1 : 0) +
         ((cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) ? 1 : 0) +
         1; /* vs_params */

      tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_count);

      if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
         tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const_ib[MESA_SHADER_VERTEX]);
         tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const_ib[MESA_SHADER_GEOMETRY]);
         tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const_ib[MESA_SHADER_FRAGMENT]);
      }
      if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS)
         tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DESC_SETS_LOAD, cmd->state.desc_sets_load_ib);
      if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS)
         tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers_ib);
      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VS_PARAMS, vs_params);
   }

   tu_cs_sanity_check(cs);

   /* track BOs */
   if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS) {
      for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
         const struct tu_buffer *buf = cmd->state.streamout_buf.buffers[i];
         if (buf)
            tu_bo_list_add(&cmd->bo_list, buf->bo,
                           MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
      }
   }

   /* There are too many graphics dirty bits to list here, so just list the
    * bits to preserve instead. The only things not emitted here are
    * compute-related state.
    */
   cmd->state.dirty &= (TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS | TU_CMD_DIRTY_COMPUTE_PIPELINE);

   return VK_SUCCESS;
}

static void
tu6_emit_draw_indirect(struct tu_cmd_buffer *cmd,
                       struct tu_cs *cs,
                       const struct tu_draw_info *draw)
{
   const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;
   bool has_gs = cmd->state.pipeline->active_stages &
                 VK_SHADER_STAGE_GEOMETRY_BIT;

   tu_cs_emit_regs(cs,
                   A6XX_VFD_INDEX_OFFSET(draw->vertex_offset),
                   A6XX_VFD_INSTANCE_START_OFFSET(draw->first_instance));

   if (draw->indexed) {
      const enum a4xx_index_size index_size =
         tu6_index_size(cmd->state.index_type);
      const uint32_t index_bytes =
         (cmd->state.index_type == VK_INDEX_TYPE_UINT32) ? 4 : 2;
      const struct tu_buffer *index_buf = cmd->state.index_buffer;
      unsigned max_indicies =
         (index_buf->size - cmd->state.index_offset) / index_bytes;

      const uint32_t cp_draw_indx =
         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
         CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
         COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;

      tu_cs_emit_pkt7(cs, CP_DRAW_INDX_INDIRECT, 6);
      tu_cs_emit(cs, cp_draw_indx);
      tu_cs_emit_qw(cs, index_buf->bo->iova + cmd->state.index_offset);
      tu_cs_emit(cs, A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(max_indicies));
      tu_cs_emit_qw(cs, draw->indirect->bo->iova + draw->indirect_offset);
   } else {
      const uint32_t cp_draw_indx =
         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
         COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;

      tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT, 3);
      tu_cs_emit(cs, cp_draw_indx);
      tu_cs_emit_qw(cs, draw->indirect->bo->iova + draw->indirect_offset);
   }

   tu_bo_list_add(&cmd->bo_list, draw->indirect->bo, MSM_SUBMIT_BO_READ);
}

static void
tu6_emit_draw_direct(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     const struct tu_draw_info *draw)
{
   const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;
   bool has_gs = cmd->state.pipeline->active_stages &
                 VK_SHADER_STAGE_GEOMETRY_BIT;

   tu_cs_emit_regs(cs,
                   A6XX_VFD_INDEX_OFFSET(draw->vertex_offset),
                   A6XX_VFD_INSTANCE_START_OFFSET(draw->first_instance));

   /* TODO hw binning */
   if (draw->indexed) {
      const enum a4xx_index_size index_size =
         tu6_index_size(cmd->state.index_type);
      const uint32_t index_bytes =
         (cmd->state.index_type == VK_INDEX_TYPE_UINT32) ? 4 : 2;
      const struct tu_buffer *buf = cmd->state.index_buffer;
      const VkDeviceSize offset = buf->bo_offset + cmd->state.index_offset +
                                  index_bytes * draw->first_index;
      const uint32_t size = index_bytes * draw->count;

      const uint32_t cp_draw_indx =
         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
         CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
         COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;

      tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 7);
      tu_cs_emit(cs, cp_draw_indx);
      tu_cs_emit(cs, draw->instance_count);
      tu_cs_emit(cs, draw->count);
      tu_cs_emit(cs, 0x0); /* XXX */
      tu_cs_emit_qw(cs, buf->bo->iova + offset);
      tu_cs_emit(cs, size);
   } else {
      const uint32_t cp_draw_indx =
         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
         COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;

      tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
      tu_cs_emit(cs, cp_draw_indx);
      tu_cs_emit(cs, draw->instance_count);
      tu_cs_emit(cs, draw->count);
   }
}

static void
tu_draw(struct tu_cmd_buffer *cmd, const struct tu_draw_info *draw)
{
   struct tu_cs *cs = &cmd->draw_cs;
   VkResult result;

   tu_emit_cache_flush_renderpass(cmd, cs);

   result = tu6_bind_draw_states(cmd, cs, draw);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   if (draw->indirect)
      tu6_emit_draw_indirect(cmd, cs, draw);
   else
      tu6_emit_draw_direct(cmd, cs, draw);

   if (cmd->state.streamout_enabled) {
      for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
         if (cmd->state.streamout_enabled & (1 << i))
            tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i);
      }
   }

   tu_cs_sanity_check(cs);
}

void
tu_CmdDraw(VkCommandBuffer commandBuffer,
           uint32_t vertexCount,
           uint32_t instanceCount,
           uint32_t firstVertex,
           uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.count = vertexCount;
   info.instance_count = instanceCount;
   info.first_instance = firstInstance;
   info.vertex_offset = firstVertex;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
                  uint32_t indexCount,
                  uint32_t instanceCount,
                  uint32_t firstIndex,
                  int32_t vertexOffset,
                  uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = indexCount;
   info.instance_count = instanceCount;
   info.first_index = firstIndex;
   info.vertex_offset = vertexOffset;
   info.first_instance = firstInstance;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
                   VkBuffer _buffer,
                   VkDeviceSize offset,
                   uint32_t drawCount,
                   uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
                          VkBuffer _buffer,
                          VkDeviceSize offset,
                          uint32_t drawCount,
                          uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}

void tu_CmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer,
                                    uint32_t instanceCount,
                                    uint32_t firstInstance,
                                    VkBuffer _counterBuffer,
                                    VkDeviceSize counterBufferOffset,
                                    uint32_t counterOffset,
                                    uint32_t vertexStride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _counterBuffer);

   struct tu_draw_info info = {};

   info.instance_count = instanceCount;
   info.first_instance = firstInstance;
   info.streamout_buffer = buffer;
   info.streamout_buffer_offset = counterBufferOffset;
   info.stride = vertexStride;

   tu_draw(cmd_buffer, &info);
}

struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];

   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};

static void
tu_emit_compute_driver_params(struct tu_cs *cs, struct tu_pipeline *pipeline,
                              const struct tu_dispatch_info *info)
{
   gl_shader_stage type = MESA_SHADER_COMPUTE;
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   const struct ir3_const_state *const_state = &link->const_state;
   uint32_t offset = const_state->offsets.driver_param;

   if (link->constlen <= offset)
      return;

   if (!info->indirect) {
      uint32_t driver_params[IR3_DP_CS_COUNT] = {
         [IR3_DP_NUM_WORK_GROUPS_X] = info->blocks[0],
         [IR3_DP_NUM_WORK_GROUPS_Y] = info->blocks[1],
         [IR3_DP_NUM_WORK_GROUPS_Z] = info->blocks[2],
         [IR3_DP_LOCAL_GROUP_SIZE_X] = pipeline->compute.local_size[0],
         [IR3_DP_LOCAL_GROUP_SIZE_Y] = pipeline->compute.local_size[1],
         [IR3_DP_LOCAL_GROUP_SIZE_Z] = pipeline->compute.local_size[2],
      };

      uint32_t num_consts = MIN2(const_state->num_driver_params,
                                 (link->constlen - offset) * 4);
      /* push constants */
      tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_consts);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                 CP_LOAD_STATE6_0_NUM_UNIT(num_consts / 4));
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, 0);
      uint32_t i;
      for (i = 0; i < num_consts; i++)
         tu_cs_emit(cs, driver_params[i]);
   } else {
      tu_finishme("Indirect driver params");
   }
}

static void
tu_dispatch(struct tu_cmd_buffer *cmd,
            const struct tu_dispatch_info *info)
{
   struct tu_cs *cs = &cmd->cs;
   struct tu_pipeline *pipeline = cmd->state.compute_pipeline;
   struct tu_descriptor_state *descriptors_state =
      &cmd->descriptors[VK_PIPELINE_BIND_POINT_COMPUTE];

   /* TODO: We could probably flush less if we add a compute_flush_bits
    * bitfield.
    */
   tu_emit_cache_flush(cmd, cs);

   if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_PIPELINE)
      tu_cs_emit_ib(cs, &pipeline->program.state_ib);

   struct tu_cs_entry ib;

   ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE);
   if (ib.size)
      tu_cs_emit_ib(cs, &ib);

   tu_emit_compute_driver_params(cs, pipeline, info);

   if ((cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS) &&
       pipeline->load_state.state_ib.size > 0) {
      tu_cs_emit_ib(cs, &pipeline->load_state.state_ib);
   }

   cmd->state.dirty &=
      ~(TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS | TU_CMD_DIRTY_COMPUTE_PIPELINE);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));

   const uint32_t *local_size = pipeline->compute.local_size;
   const uint32_t *num_groups = info->blocks;
   tu_cs_emit_regs(cs,
                   A6XX_HLSQ_CS_NDRANGE_0(.kerneldim = 3,
                                          .localsizex = local_size[0] - 1,
                                          .localsizey = local_size[1] - 1,
                                          .localsizez = local_size[2] - 1),
                   A6XX_HLSQ_CS_NDRANGE_1(.globalsize_x = local_size[0] * num_groups[0]),
                   A6XX_HLSQ_CS_NDRANGE_2(.globaloff_x = 0),
                   A6XX_HLSQ_CS_NDRANGE_3(.globalsize_y = local_size[1] * num_groups[1]),
                   A6XX_HLSQ_CS_NDRANGE_4(.globaloff_y = 0),
                   A6XX_HLSQ_CS_NDRANGE_5(.globalsize_z = local_size[2] * num_groups[2]),
                   A6XX_HLSQ_CS_NDRANGE_6(.globaloff_z = 0));

   tu_cs_emit_regs(cs,
                   A6XX_HLSQ_CS_KERNEL_GROUP_X(1),
                   A6XX_HLSQ_CS_KERNEL_GROUP_Y(1),
                   A6XX_HLSQ_CS_KERNEL_GROUP_Z(1));

   if (info->indirect) {
      uint64_t iova = tu_buffer_iova(info->indirect) + info->indirect_offset;

      tu_bo_list_add(&cmd->bo_list, info->indirect->bo,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);

      tu_cs_emit_pkt7(cs, CP_EXEC_CS_INDIRECT, 4);
      tu_cs_emit(cs, 0x00000000);
      tu_cs_emit_qw(cs, iova);
      tu_cs_emit(cs,
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(local_size[0] - 1) |
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(local_size[1] - 1) |
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(local_size[2] - 1));
   } else {
      tu_cs_emit_pkt7(cs, CP_EXEC_CS, 4);
      tu_cs_emit(cs, 0x00000000);
      tu_cs_emit(cs, CP_EXEC_CS_1_NGROUPS_X(info->blocks[0]));
      tu_cs_emit(cs, CP_EXEC_CS_2_NGROUPS_Y(info->blocks[1]));
      tu_cs_emit(cs, CP_EXEC_CS_3_NGROUPS_Z(info->blocks[2]));
   }

   tu_cs_emit_wfi(cs);
}

void
tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
                   uint32_t base_x,
                   uint32_t base_y,
                   uint32_t base_z,
                   uint32_t x,
                   uint32_t y,
                   uint32_t z)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_dispatch_info info = {};

   info.blocks[0] = x;
   info.blocks[1] = y;
   info.blocks[2] = z;

   info.offsets[0] = base_x;
   info.offsets[1] = base_y;
   info.offsets[2] = base_z;
   tu_dispatch(cmd_buffer, &info);
}
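
/* Illustrative usage (application side; assumes a compute pipeline is bound):
 *
 *    vkCmdDispatchBase(cmd, 0, 0, 0, 4, 2, 1);  // same as vkCmdDispatch(cmd, 4, 2, 1)
 *
 * Per the Vulkan spec, a non-zero base requires a pipeline created with
 * VK_PIPELINE_CREATE_DISPATCH_BASE_BIT.
 */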

void
tu_CmdDispatch(VkCommandBuffer commandBuffer,
               uint32_t x,
               uint32_t y,
               uint32_t z)
{
   tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
}

void
tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
                       VkBuffer _buffer,
                       VkDeviceSize offset)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_dispatch_info info = {};

   info.indirect = buffer;
   info.indirect_offset = offset;

   tu_dispatch(cmd_buffer, &info);
}
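
/* The indirect parameters consumed by CP_EXEC_CS_INDIRECT follow the
 * standard Vulkan layout: three tightly packed uint32_t workgroup counts at
 * `offset` in the buffer (VkDispatchIndirectCommand { uint32_t x, y, z; }),
 * read by the CP when the packet executes.
 */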

void
tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   tu_cs_end(&cmd_buffer->draw_cs);
   tu_cs_end(&cmd_buffer->draw_epilogue_cs);

   if (use_sysmem_rendering(cmd_buffer))
      tu_cmd_render_sysmem(cmd_buffer);
   else
      tu_cmd_render_tiles(cmd_buffer);

   /* discard draw_cs and draw_epilogue_cs entries now that the tiles are
    * rendered
    */
   tu_cs_discard_entries(&cmd_buffer->draw_cs);
   tu_cs_begin(&cmd_buffer->draw_cs);
   tu_cs_discard_entries(&cmd_buffer->draw_epilogue_cs);
   tu_cs_begin(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->state.cache.pending_flush_bits |=
      cmd_buffer->state.renderpass_cache.pending_flush_bits;
   tu_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier, true);

   cmd_buffer->state.pass = NULL;
   cmd_buffer->state.subpass = NULL;
   cmd_buffer->state.framebuffer = NULL;
}
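
/* With pass/subpass/framebuffer cleared, a subsequent barrier or dispatch in
 * this command buffer targets cmd->cs again rather than draw_cs (see the cs
 * selection at the top of tu_barrier() below).
 */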

void
tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
                     const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdEndRenderPass(commandBuffer);
}

struct tu_barrier_info
{
   uint32_t eventCount;
   const VkEvent *pEvents;
   VkPipelineStageFlags srcStageMask;
};
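
/* tu_barrier() below backs both vkCmdPipelineBarrier() (eventCount == 0, see
 * tu_CmdPipelineBarrier()) and vkCmdWaitEvents() (eventCount > 0): the cache
 * flush logic is identical, and each awaited event adds one CP_WAIT_REG_MEM
 * poll at the end.
 */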

static void
tu_barrier(struct tu_cmd_buffer *cmd,
           uint32_t memoryBarrierCount,
           const VkMemoryBarrier *pMemoryBarriers,
           uint32_t bufferMemoryBarrierCount,
           const VkBufferMemoryBarrier *pBufferMemoryBarriers,
           uint32_t imageMemoryBarrierCount,
           const VkImageMemoryBarrier *pImageMemoryBarriers,
           const struct tu_barrier_info *info)
{
   struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;
   VkAccessFlags srcAccessMask = 0;
   VkAccessFlags dstAccessMask = 0;

   for (uint32_t i = 0; i < memoryBarrierCount; i++) {
      srcAccessMask |= pMemoryBarriers[i].srcAccessMask;
      dstAccessMask |= pMemoryBarriers[i].dstAccessMask;
   }

   for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
      srcAccessMask |= pBufferMemoryBarriers[i].srcAccessMask;
      dstAccessMask |= pBufferMemoryBarriers[i].dstAccessMask;
   }

   enum tu_cmd_access_mask src_flags = 0;
   enum tu_cmd_access_mask dst_flags = 0;

   for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
      TU_FROM_HANDLE(tu_image, image, pImageMemoryBarriers[i].image);
      VkImageLayout old_layout = pImageMemoryBarriers[i].oldLayout;
      /* For non-linear images, PREINITIALIZED is the same as UNDEFINED */
      if (old_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
          (image->tiling != VK_IMAGE_TILING_LINEAR &&
           old_layout == VK_IMAGE_LAYOUT_PREINITIALIZED)) {
         /* The underlying memory for this image may have been used earlier
          * within the same queue submission for a different image, which
          * means that there may be old, stale cache entries which are in the
          * "wrong" location, which could cause problems later after writing
          * to the image. We don't want these entries being flushed later and
          * overwriting the actual image, so we need to flush the CCU.
          */
         src_flags |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
      }
      srcAccessMask |= pImageMemoryBarriers[i].srcAccessMask;
      dstAccessMask |= pImageMemoryBarriers[i].dstAccessMask;
   }

   /* Inside a renderpass, we don't know yet whether we'll be using sysmem
    * or gmem, so we have to use the sysmem flushes.
    */
   bool gmem = cmd->state.ccu_state == TU_CMD_CCU_GMEM &&
      !cmd->state.pass;
   src_flags |= vk2tu_access(srcAccessMask, gmem);
   dst_flags |= vk2tu_access(dstAccessMask, gmem);

   struct tu_cache_state *cache =
      cmd->state.pass ? &cmd->state.renderpass_cache : &cmd->state.cache;
   tu_flush_for_access(cache, src_flags, dst_flags);

   for (uint32_t i = 0; i < info->eventCount; i++) {
      TU_FROM_HANDLE(tu_event, event, info->pEvents[i]);

      tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_READ);

      tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
      tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                     CP_WAIT_REG_MEM_0_POLL_MEMORY);
      tu_cs_emit_qw(cs, event->bo.iova); /* POLL_ADDR_LO/HI */
      tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(1));
      tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0u));
      tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(20));
   }
}
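
/* Reading of the poll emitted above (semantics inferred from the packet
 * fields): the CP repeatedly loads the dword at event->bo.iova, waiting
 * DELAY_LOOP_CYCLES(20) between reads, until (value & ~0u) == 1, i.e. until
 * write_event() below has stored 1 on behalf of vkCmdSetEvent().
 */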

void
tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
                      VkPipelineStageFlags srcStageMask,
                      VkPipelineStageFlags dstStageMask,
                      VkDependencyFlags dependencyFlags,
                      uint32_t memoryBarrierCount,
                      const VkMemoryBarrier *pMemoryBarriers,
                      uint32_t bufferMemoryBarrierCount,
                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                      uint32_t imageMemoryBarrierCount,
                      const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = 0;
   info.pEvents = NULL;
   info.srcStageMask = srcStageMask;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}

static void
write_event(struct tu_cmd_buffer *cmd, struct tu_event *event,
            VkPipelineStageFlags stageMask, unsigned value)
{
   struct tu_cs *cs = &cmd->cs;

   /* vkCmdSetEvent/vkCmdResetEvent cannot be called inside a render pass */
   assert(!cmd->state.pass);

   tu_emit_cache_flush(cmd, cs);

   tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_WRITE);

   /* Flags that only require a top-of-pipe event. DrawIndirect parameters are
    * read by the CP, so the draw indirect stage counts as top-of-pipe too.
    */
   VkPipelineStageFlags top_of_pipe_flags =
      VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
      VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;

   if (!(stageMask & ~top_of_pipe_flags)) {
      tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
      tu_cs_emit_qw(cs, event->bo.iova); /* ADDR_LO/HI */
      tu_cs_emit(cs, value);
   } else {
      /* Use a RB_DONE_TS event to wait for everything to complete. */
      tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 4);
      tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(RB_DONE_TS));
      tu_cs_emit_qw(cs, event->bo.iova);
      tu_cs_emit(cs, value);
   }
}
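
/* The stored value (1 for set, 0 for reset; see the entry points below) is
 * exactly what the CP_WAIT_REG_MEM poll in tu_barrier() compares against,
 * and presumably also what host-side vkGetEventStatus() reads back from the
 * event BO.
 */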

void
tu_CmdSetEvent(VkCommandBuffer commandBuffer,
               VkEvent _event,
               VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd, event, stageMask, 1);
}

void
tu_CmdResetEvent(VkCommandBuffer commandBuffer,
                 VkEvent _event,
                 VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd, event, stageMask, 0);
}

void
tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
                 uint32_t eventCount,
                 const VkEvent *pEvents,
                 VkPipelineStageFlags srcStageMask,
                 VkPipelineStageFlags dstStageMask,
                 uint32_t memoryBarrierCount,
                 const VkMemoryBarrier *pMemoryBarriers,
                 uint32_t bufferMemoryBarrierCount,
                 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                 uint32_t imageMemoryBarrierCount,
                 const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = eventCount;
   info.pEvents = pEvents;
   info.srcStageMask = 0;

   tu_barrier(cmd, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}

void
tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
{
   /* No-op */
}