/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "tu_private.h"

#include "registers/adreno_pm4.xml.h"
#include "registers/adreno_common.xml.h"

#include "vk_format.h"

#include "tu_cs.h"

#define OVERFLOW_FLAG_REG REG_A6XX_CP_SCRATCH_REG(0)
void
tu_bo_list_init(struct tu_bo_list *list)
{
   list->count = list->capacity = 0;
   list->bo_infos = NULL;
}
void
tu_bo_list_destroy(struct tu_bo_list *list)
{
   free(list->bo_infos);
}
void
tu_bo_list_reset(struct tu_bo_list *list)
{
   list->count = 0;
}
/**
 * \a flags consists of MSM_SUBMIT_BO_FLAGS.
 */
static uint32_t
tu_bo_list_add_info(struct tu_bo_list *list,
                    const struct drm_msm_gem_submit_bo *bo_info)
{
   assert(bo_info->handle != 0);

   /* dedup: if the BO is already in the list, just OR in the new flags */
   for (uint32_t i = 0; i < list->count; ++i) {
      if (list->bo_infos[i].handle == bo_info->handle) {
         assert(list->bo_infos[i].presumed == bo_info->presumed);
         list->bo_infos[i].flags |= bo_info->flags;
         return i;
      }
   }

   /* grow list->bo_infos if needed */
   if (list->count == list->capacity) {
      uint32_t new_capacity = MAX2(2 * list->count, 16);
      struct drm_msm_gem_submit_bo *new_bo_infos = realloc(
         list->bo_infos, new_capacity * sizeof(struct drm_msm_gem_submit_bo));
      if (!new_bo_infos)
         return TU_BO_LIST_FAILED;
      list->bo_infos = new_bo_infos;
      list->capacity = new_capacity;
   }

   list->bo_infos[list->count] = *bo_info;
   return list->count++;
}
uint32_t
tu_bo_list_add(struct tu_bo_list *list,
               const struct tu_bo *bo,
               uint32_t flags)
{
   return tu_bo_list_add_info(list, &(struct drm_msm_gem_submit_bo) {
                                       .flags = flags,
                                       .handle = bo->gem_handle,
                                       .presumed = bo->iova,
                                    });
}
VkResult
tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other)
{
   for (uint32_t i = 0; i < other->count; i++) {
      if (tu_bo_list_add_info(list, other->bo_infos + i) == TU_BO_LIST_FAILED)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   return VK_SUCCESS;
}
static void
tu_tiling_config_update_tile_layout(struct tu_tiling_config *tiling,
                                    const struct tu_device *dev,
                                    uint32_t pixels)
{
   const uint32_t tile_align_w = 64; /* note: 32 when no input attachments */
   const uint32_t tile_align_h = 16;
   const uint32_t max_tile_width = 1024;

   /* note: don't offset the tiling config by render_area.offset,
    * because binning pass can't deal with it
    * this means we might end up with more tiles than necessary,
    * but load/store/etc are still scissored to the render_area
    */
   tiling->tile0.offset = (VkOffset2D) {};

   const uint32_t ra_width =
      tiling->render_area.extent.width +
      (tiling->render_area.offset.x - tiling->tile0.offset.x);
   const uint32_t ra_height =
      tiling->render_area.extent.height +
      (tiling->render_area.offset.y - tiling->tile0.offset.y);

   /* start from 1 tile */
   tiling->tile_count = (VkExtent2D) {
      .width = 1,
      .height = 1,
   };
   tiling->tile0.extent = (VkExtent2D) {
      .width = align(ra_width, tile_align_w),
      .height = align(ra_height, tile_align_h),
   };

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN)) {
      /* start with 2x2 tiles */
      tiling->tile_count.width = 2;
      tiling->tile_count.height = 2;
      tiling->tile0.extent.width = align(DIV_ROUND_UP(ra_width, 2), tile_align_w);
      tiling->tile0.extent.height = align(DIV_ROUND_UP(ra_height, 2), tile_align_h);
   }

   /* do not exceed max tile width */
   while (tiling->tile0.extent.width > max_tile_width) {
      tiling->tile_count.width++;
      tiling->tile0.extent.width =
         align(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
   }

   /* will force to sysmem, don't bother trying to have a valid tile config
    * TODO: just skip all GMEM stuff when sysmem is forced?
    */
   if (!pixels)
      return;

   /* do not exceed gmem size */
   while (tiling->tile0.extent.width * tiling->tile0.extent.height > pixels) {
      if (tiling->tile0.extent.width > MAX2(tile_align_w, tiling->tile0.extent.height)) {
         tiling->tile_count.width++;
         tiling->tile0.extent.width =
            align(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
      } else {
         /* if this assert fails then layout is impossible.. */
         assert(tiling->tile0.extent.height > tile_align_h);
         tiling->tile_count.height++;
         tiling->tile0.extent.height =
            align(DIV_ROUND_UP(ra_height, tiling->tile_count.height), tile_align_h);
      }
   }
}
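
/* Worked example with illustrative numbers (not hard-coded anywhere): for a
 * 1920x1080 render area at offset (0,0) and a GMEM budget of, say, 262144
 * pixels, the initial single tile is align(1920,64) x align(1080,16) =
 * 1920x1088.  1920 exceeds max_tile_width, so the width splits once to
 * 960x1088.  The GMEM loop then splits the larger dimension each round:
 * 960x1088 -> 960x544 -> 640x544 -> 512x544 -> 512x368, ending with 4x3
 * tiles of 512x368 (188416 pixels, under budget).
 */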
static void
tu_tiling_config_update_pipe_layout(struct tu_tiling_config *tiling,
                                    const struct tu_device *dev)
{
   const uint32_t max_pipe_count = 32; /* A6xx */

   /* start from 1 tile per pipe */
   tiling->pipe0 = (VkExtent2D) {
      .width = 1,
      .height = 1,
   };
   tiling->pipe_count = tiling->tile_count;

   while (tiling->pipe_count.width * tiling->pipe_count.height > max_pipe_count) {
      if (tiling->pipe0.width < tiling->pipe0.height) {
         tiling->pipe0.width += 1;
         tiling->pipe_count.width =
            DIV_ROUND_UP(tiling->tile_count.width, tiling->pipe0.width);
      } else {
         tiling->pipe0.height += 1;
         tiling->pipe_count.height =
            DIV_ROUND_UP(tiling->tile_count.height, tiling->pipe0.height);
      }
   }
}
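
/* For example, a tile_count of 10x8 starts at 80 one-tile pipes, over the
 * 32-pipe limit.  The loop grows the pipe height first (pipe0 1x2 ->
 * pipe_count 10x4 = 40, still too many), then the width (pipe0 2x2 ->
 * pipe_count 5x4 = 20), so each VSC pipe ends up covering a 2x2 block of
 * tiles.
 */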
static void
tu_tiling_config_update_pipes(struct tu_tiling_config *tiling,
                              const struct tu_device *dev)
{
   const uint32_t max_pipe_count = 32; /* A6xx */
   const uint32_t used_pipe_count =
      tiling->pipe_count.width * tiling->pipe_count.height;
   const VkExtent2D last_pipe = {
      .width = (tiling->tile_count.width - 1) % tiling->pipe0.width + 1,
      .height = (tiling->tile_count.height - 1) % tiling->pipe0.height + 1,
   };

   assert(used_pipe_count <= max_pipe_count);
   assert(max_pipe_count <= ARRAY_SIZE(tiling->pipe_config));

   for (uint32_t y = 0; y < tiling->pipe_count.height; y++) {
      for (uint32_t x = 0; x < tiling->pipe_count.width; x++) {
         const uint32_t pipe_x = tiling->pipe0.width * x;
         const uint32_t pipe_y = tiling->pipe0.height * y;
         const uint32_t pipe_w = (x == tiling->pipe_count.width - 1)
                                    ? last_pipe.width
                                    : tiling->pipe0.width;
         const uint32_t pipe_h = (y == tiling->pipe_count.height - 1)
                                    ? last_pipe.height
                                    : tiling->pipe0.height;
         const uint32_t n = tiling->pipe_count.width * y + x;

         tiling->pipe_config[n] = A6XX_VSC_PIPE_CONFIG_REG_X(pipe_x) |
                                  A6XX_VSC_PIPE_CONFIG_REG_Y(pipe_y) |
                                  A6XX_VSC_PIPE_CONFIG_REG_W(pipe_w) |
                                  A6XX_VSC_PIPE_CONFIG_REG_H(pipe_h);
         tiling->pipe_sizes[n] = CP_SET_BIN_DATA5_0_VSC_SIZE(pipe_w * pipe_h);
      }
   }

   memset(tiling->pipe_config + used_pipe_count, 0,
          sizeof(uint32_t) * (max_pipe_count - used_pipe_count));
}
static void
tu_tiling_config_get_tile(const struct tu_tiling_config *tiling,
                          const struct tu_device *dev,
                          uint32_t tx,
                          uint32_t ty,
                          struct tu_tile *tile)
{
   /* find the pipe and the slot for tile (tx, ty) */
   const uint32_t px = tx / tiling->pipe0.width;
   const uint32_t py = ty / tiling->pipe0.height;
   const uint32_t sx = tx - tiling->pipe0.width * px;
   const uint32_t sy = ty - tiling->pipe0.height * py;
   /* last pipe has different width */
   const uint32_t pipe_width =
      MIN2(tiling->pipe0.width,
           tiling->tile_count.width - px * tiling->pipe0.width);

   assert(tx < tiling->tile_count.width && ty < tiling->tile_count.height);
   assert(px < tiling->pipe_count.width && py < tiling->pipe_count.height);
   assert(sx < tiling->pipe0.width && sy < tiling->pipe0.height);

   /* convert to 1D indices */
   tile->pipe = tiling->pipe_count.width * py + px;
   tile->slot = pipe_width * sy + sx;

   /* get the blit area for the tile */
   tile->begin = (VkOffset2D) {
      .x = tiling->tile0.offset.x + tiling->tile0.extent.width * tx,
      .y = tiling->tile0.offset.y + tiling->tile0.extent.height * ty,
   };
   tile->end.x =
      (tx == tiling->tile_count.width - 1)
         ? tiling->render_area.offset.x + tiling->render_area.extent.width
         : tile->begin.x + tiling->tile0.extent.width;
   tile->end.y =
      (ty == tiling->tile_count.height - 1)
         ? tiling->render_area.offset.y + tiling->render_area.extent.height
         : tile->begin.y + tiling->tile0.extent.height;
}
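
/* Example mapping: with pipe0 2x2, pipe_count 5x4 and tile_count 10x8, tile
 * (tx=7, ty=5) lands in pipe (px=3, py=2) at slot (sx=1, sy=1); pipe_width
 * is MIN2(2, 10 - 3*2) = 2, so tile->pipe = 5*2 + 3 = 13 and
 * tile->slot = 2*1 + 1 = 3.
 */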
enum a3xx_msaa_samples
tu_msaa_samples(uint32_t samples)
{
   switch (samples) {
   case 1:
      return MSAA_ONE;
   case 2:
      return MSAA_TWO;
   case 4:
      return MSAA_FOUR;
   case 8:
      return MSAA_EIGHT;
   default:
      assert(!"invalid sample count");
      return MSAA_ONE;
   }
}
static enum a4xx_index_size
tu6_index_size(VkIndexType type)
{
   switch (type) {
   case VK_INDEX_TYPE_UINT16:
      return INDEX4_SIZE_16_BIT;
   case VK_INDEX_TYPE_UINT32:
      return INDEX4_SIZE_32_BIT;
   default:
      unreachable("invalid VkIndexType");
      return INDEX4_SIZE_8_BIT;
   }
}
unsigned
tu6_emit_event_write(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     enum vgt_event_type event,
                     bool need_seqno)
{
   unsigned seqno = 0;

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
   tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
   if (need_seqno) {
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
      seqno = ++cmd->scratch_seqno;
      tu_cs_emit(cs, seqno);
   }

   return seqno;
}
static void
tu6_emit_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu6_emit_event_write(cmd, cs, 0x31, false);
}
static void
tu6_emit_lrz_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu6_emit_event_write(cmd, cs, LRZ_FLUSH, false);
}
static void
tu6_emit_wfi(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   if (cmd->wait_for_idle) {
      tu_cs_emit_wfi(cs);
      cmd->wait_for_idle = false;
   }
}
static void
tu6_emit_zs(struct tu_cmd_buffer *cmd,
            const struct tu_subpass *subpass,
            struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   const uint32_t a = subpass->depth_stencil_attachment.attachment;
   if (a == VK_ATTACHMENT_UNUSED) {
      tu_cs_emit_regs(cs,
                      A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE),
                      A6XX_RB_DEPTH_BUFFER_PITCH(0),
                      A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(0),
                      A6XX_RB_DEPTH_BUFFER_BASE(0),
                      A6XX_RB_DEPTH_BUFFER_BASE_GMEM(0));

      tu_cs_emit_regs(cs,
                      A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE));

      tu_cs_emit_regs(cs,
                      A6XX_GRAS_LRZ_BUFFER_BASE(0),
                      A6XX_GRAS_LRZ_BUFFER_PITCH(0),
                      A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));

      tu_cs_emit_regs(cs, A6XX_RB_STENCIL_INFO(0));

      return;
   }

   const struct tu_image_view *iview = fb->attachments[a].attachment;
   enum a6xx_depth_format fmt = tu6_pipe2depth(iview->vk_format);

   tu_cs_emit_regs(cs,
                   A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt),
                   A6XX_RB_DEPTH_BUFFER_PITCH(tu_image_stride(iview->image, iview->base_mip)),
                   A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(iview->image->layout.layer_size),
                   A6XX_RB_DEPTH_BUFFER_BASE(tu_image_view_base_ref(iview)),
                   A6XX_RB_DEPTH_BUFFER_BASE_GMEM(cmd->state.pass->attachments[a].gmem_offset));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = fmt));

   tu_cs_emit_regs(cs,
                   A6XX_RB_DEPTH_FLAG_BUFFER_BASE(tu_image_view_ubwc_base_ref(iview)),
                   A6XX_RB_DEPTH_FLAG_BUFFER_PITCH(tu_image_view_ubwc_pitches(iview)));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_BUFFER_BASE(0),
                   A6XX_GRAS_LRZ_BUFFER_PITCH(0),
                   A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));

   tu_cs_emit_regs(cs,
                   A6XX_RB_STENCIL_INFO(0));
}
static void
tu6_emit_mrt(struct tu_cmd_buffer *cmd,
             const struct tu_subpass *subpass,
             struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   unsigned char mrt_comp[MAX_RTS] = { 0 };
   unsigned srgb_cntl = 0;

   for (uint32_t i = 0; i < subpass->color_count; ++i) {
      uint32_t a = subpass->color_attachments[i].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      const struct tu_image_view *iview = fb->attachments[a].attachment;

      mrt_comp[i] = 0xf;

      if (vk_format_is_srgb(iview->vk_format))
         srgb_cntl |= (1 << i);

      struct tu_native_format format =
         tu6_format_image(iview->image, iview->vk_format, iview->base_mip);

      tu_cs_emit_regs(cs,
                      A6XX_RB_MRT_BUF_INFO(i,
                                           .color_tile_mode = format.tile_mode,
                                           .color_format = format.fmt,
                                           .color_swap = format.swap),
                      A6XX_RB_MRT_PITCH(i, tu_image_stride(iview->image, iview->base_mip)),
                      A6XX_RB_MRT_ARRAY_PITCH(i, iview->image->layout.layer_size),
                      A6XX_RB_MRT_BASE(i, tu_image_view_base_ref(iview)),
                      A6XX_RB_MRT_BASE_GMEM(i, cmd->state.pass->attachments[a].gmem_offset));

      tu_cs_emit_regs(cs,
                      A6XX_SP_FS_MRT_REG(i,
                                         .color_format = format.fmt,
                                         .color_sint = vk_format_is_sint(iview->vk_format),
                                         .color_uint = vk_format_is_uint(iview->vk_format)));

      tu_cs_emit_regs(cs,
                      A6XX_RB_MRT_FLAG_BUFFER_ADDR(i, tu_image_view_ubwc_base_ref(iview)),
                      A6XX_RB_MRT_FLAG_BUFFER_PITCH(i, tu_image_view_ubwc_pitches(iview)));
   }

   tu_cs_emit_regs(cs,
                   A6XX_RB_SRGB_CNTL(.dword = srgb_cntl));

   tu_cs_emit_regs(cs,
                   A6XX_SP_SRGB_CNTL(.dword = srgb_cntl));

   tu_cs_emit_regs(cs,
                   A6XX_RB_RENDER_COMPONENTS(
                      .rt0 = mrt_comp[0],
                      .rt1 = mrt_comp[1],
                      .rt2 = mrt_comp[2],
                      .rt3 = mrt_comp[3],
                      .rt4 = mrt_comp[4],
                      .rt5 = mrt_comp[5],
                      .rt6 = mrt_comp[6],
                      .rt7 = mrt_comp[7]));

   tu_cs_emit_regs(cs,
                   A6XX_SP_FS_RENDER_COMPONENTS(
                      .rt0 = mrt_comp[0],
                      .rt1 = mrt_comp[1],
                      .rt2 = mrt_comp[2],
                      .rt3 = mrt_comp[3],
                      .rt4 = mrt_comp[4],
                      .rt5 = mrt_comp[5],
                      .rt6 = mrt_comp[6],
                      .rt7 = mrt_comp[7]));

   // XXX: We probably can't hardcode LAYER_CNTL_TYPE.
   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LAYER_CNTL(.layered = fb->layers > 1,
                                        .type = LAYER_2D_ARRAY));
}
static void
tu6_emit_msaa(struct tu_cs *cs, VkSampleCountFlagBits vk_samples)
{
   const enum a3xx_msaa_samples samples = tu_msaa_samples(vk_samples);
   bool msaa_disable = samples == MSAA_ONE;

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_RAS_MSAA_CNTL(samples),
                   A6XX_SP_TP_DEST_MSAA_CNTL(.samples = samples,
                                             .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_RAS_MSAA_CNTL(samples),
                   A6XX_GRAS_DEST_MSAA_CNTL(.samples = samples,
                                            .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_RB_RAS_MSAA_CNTL(samples),
                   A6XX_RB_DEST_MSAA_CNTL(.samples = samples,
                                          .msaa_disable = msaa_disable));

   tu_cs_emit_regs(cs,
                   A6XX_RB_MSAA_CNTL(samples));
}
static void
tu6_emit_bin_size(struct tu_cs *cs,
                  uint32_t bin_w, uint32_t bin_h, uint32_t flags)
{
   tu_cs_emit_regs(cs,
                   A6XX_GRAS_BIN_CONTROL(.binw = bin_w,
                                         .binh = bin_h,
                                         .dword = flags));

   tu_cs_emit_regs(cs,
                   A6XX_RB_BIN_CONTROL(.binw = bin_w,
                                       .binh = bin_h,
                                       .dword = flags));

   /* no flag for RB_BIN_CONTROL2... */
   tu_cs_emit_regs(cs,
                   A6XX_RB_BIN_CONTROL2(.binw = bin_w,
                                        .binh = bin_h));
}
static void
tu6_emit_render_cntl(struct tu_cmd_buffer *cmd,
                     const struct tu_subpass *subpass,
                     struct tu_cs *cs,
                     bool binning)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   uint32_t cntl = 0;
   cntl |= A6XX_RB_RENDER_CNTL_UNK4;
   if (binning) {
      cntl |= A6XX_RB_RENDER_CNTL_BINNING;
   } else {
      uint32_t mrts_ubwc_enable = 0;
      for (uint32_t i = 0; i < subpass->color_count; ++i) {
         uint32_t a = subpass->color_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         const struct tu_image_view *iview = fb->attachments[a].attachment;
         if (iview->image->layout.ubwc_layer_size != 0)
            mrts_ubwc_enable |= 1 << i;
      }

      cntl |= A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable);

      const uint32_t a = subpass->depth_stencil_attachment.attachment;
      if (a != VK_ATTACHMENT_UNUSED) {
         const struct tu_image_view *iview = fb->attachments[a].attachment;
         if (iview->image->layout.ubwc_layer_size != 0)
            cntl |= A6XX_RB_RENDER_CNTL_FLAG_DEPTH;
      }

      /* In the !binning case, we need to set RB_RENDER_CNTL in the draw_cs
       * in order to set it correctly for the different subpasses. However,
       * that means the packets we're emitting also happen during binning. So
       * we need to guard the write on !BINNING at CP execution time.
       */
      tu_cs_reserve(cs, 3 + 4);
      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
      tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
                     CP_COND_REG_EXEC_0_GMEM | CP_COND_REG_EXEC_0_SYSMEM);
      tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(4));
   }

   tu_cs_emit_pkt7(cs, CP_REG_WRITE, 3);
   tu_cs_emit(cs, CP_REG_WRITE_0_TRACKER(TRACK_RENDER_CNTL));
   tu_cs_emit(cs, REG_A6XX_RB_RENDER_CNTL);
   tu_cs_emit(cs, cntl);
}
static void
tu6_emit_blit_scissor(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool align)
{
   const VkRect2D *render_area = &cmd->state.tiling_config.render_area;
   uint32_t x1 = render_area->offset.x;
   uint32_t y1 = render_area->offset.y;
   uint32_t x2 = x1 + render_area->extent.width - 1;
   uint32_t y2 = y1 + render_area->extent.height - 1;

   if (align) {
      x1 = x1 & ~(GMEM_ALIGN_W - 1);
      y1 = y1 & ~(GMEM_ALIGN_H - 1);
      x2 = ALIGN_POT(x2 + 1, GMEM_ALIGN_W) - 1;
      y2 = ALIGN_POT(y2 + 1, GMEM_ALIGN_H) - 1;
   }

   tu_cs_emit_regs(cs,
                   A6XX_RB_BLIT_SCISSOR_TL(.x = x1, .y = y1),
                   A6XX_RB_BLIT_SCISSOR_BR(.x = x2, .y = y2));
}
void
tu6_emit_window_scissor(struct tu_cs *cs,
                        uint32_t x1,
                        uint32_t y1,
                        uint32_t x2,
                        uint32_t y2)
{
   tu_cs_emit_regs(cs,
                   A6XX_GRAS_SC_WINDOW_SCISSOR_TL(.x = x1, .y = y1),
                   A6XX_GRAS_SC_WINDOW_SCISSOR_BR(.x = x2, .y = y2));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_RESOLVE_CNTL_1(.x = x1, .y = y1),
                   A6XX_GRAS_RESOLVE_CNTL_2(.x = x2, .y = y2));
}
void
tu6_emit_window_offset(struct tu_cs *cs, uint32_t x1, uint32_t y1)
{
   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET2(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_SP_WINDOW_OFFSET(.x = x1, .y = y1));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_WINDOW_OFFSET(.x = x1, .y = y1));
}
static bool
use_hw_binning(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_NOBIN))
      return false;

   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN))
      return true;

   return (tiling->tile_count.width * tiling->tile_count.height) > 2;
}
static bool
use_sysmem_rendering(struct tu_cmd_buffer *cmd)
{
   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_SYSMEM))
      return true;

   /* can't fit attachments into gmem */
   if (!cmd->state.pass->gmem_pixels)
      return true;

   if (cmd->state.framebuffer->layers > 1)
      return true;

   return cmd->state.tiling_config.force_sysmem;
}
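
/* In short: sysmem (direct) rendering is chosen when TU_DEBUG_SYSMEM forces
 * it, when the attachments cannot fit in GMEM, when the framebuffer is
 * layered, or when force_sysmem was set on the tiling config; otherwise
 * GMEM tiling is used, with HW binning on top whenever there are more than
 * two tiles (subject to the TU_DEBUG_NOBIN/FORCEBIN overrides above).
 */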
static void
tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     const struct tu_tile *tile)
{
   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_YIELD));

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));

   const uint32_t x1 = tile->begin.x;
   const uint32_t y1 = tile->begin.y;
   const uint32_t x2 = tile->end.x - 1;
   const uint32_t y2 = tile->end.y - 1;
   tu6_emit_window_scissor(cs, x1, y1, x2, y2);
   tu6_emit_window_offset(cs, x1, y1);

   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_OVERRIDE(.so_disable = false));

   if (use_hw_binning(cmd)) {
      tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
      tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
                     A6XX_CP_REG_TEST_0_BIT(0) |
                     A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

      tu_cs_reserve(cs, 3 + 11);
      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
      tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
      tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(11));

      /* if (no overflow) */ {
         tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5, 7);
         tu_cs_emit(cs, cmd->state.tiling_config.pipe_sizes[tile->pipe] |
                        CP_SET_BIN_DATA5_0_VSC_N(tile->slot));
         tu_cs_emit_qw(cs, cmd->vsc_data.iova + tile->pipe * cmd->vsc_data_pitch);
         tu_cs_emit_qw(cs, cmd->vsc_data.iova + (tile->pipe * 4) + (32 * cmd->vsc_data_pitch));
         tu_cs_emit_qw(cs, cmd->vsc_data2.iova + (tile->pipe * cmd->vsc_data2_pitch));

         tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
         tu_cs_emit(cs, 0x0);

         /* use a NOP packet to skip over the 'else' side: */
         tu_cs_emit_pkt7(cs, CP_NOP, 2);
      } /* else */ {
         tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
         tu_cs_emit(cs, 0x1);
      }

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_regs(cs,
                      A6XX_RB_UNKNOWN_8804(0));

      tu_cs_emit_regs(cs,
                      A6XX_SP_TP_UNKNOWN_B304(0));

      tu_cs_emit_regs(cs,
                      A6XX_GRAS_UNKNOWN_80A4(0));
   } else {
      tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
      tu_cs_emit(cs, 0x1);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);
   }
}
static void
tu6_emit_sysmem_resolve(struct tu_cmd_buffer *cmd,
                        struct tu_cs *cs,
                        uint32_t a,
                        uint32_t gmem_a)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   struct tu_image_view *dst = fb->attachments[a].attachment;
   struct tu_image_view *src = fb->attachments[gmem_a].attachment;

   tu_resolve_sysmem(cmd, cs, src, dst, fb->layers, &cmd->state.tiling_config.render_area);
}
static void
tu6_emit_tile_store(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_render_pass *pass = cmd->state.pass;
   const struct tu_subpass *subpass = &pass->subpasses[pass->subpass_count - 1];

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE));

   /* blit scissor may have been changed by CmdClearAttachments */
   tu6_emit_blit_scissor(cmd, cs, false);

   for (uint32_t a = 0; a < pass->attachment_count; ++a) {
      if (pass->attachments[a].gmem_offset >= 0)
         tu_store_gmem_attachment(cmd, cs, a, a);
   }

   if (subpass->resolve_attachments) {
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a != VK_ATTACHMENT_UNUSED)
            tu_store_gmem_attachment(cmd, cs, a,
                                     subpass->color_attachments[i].attachment);
      }
   }
}
static void
tu6_emit_restart_index(struct tu_cs *cs, uint32_t restart_index)
{
   tu_cs_emit_regs(cs,
                   A6XX_PC_RESTART_INDEX(restart_index));
}
static void
tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_physical_device *phys_dev = cmd->device->physical_device;

   tu6_emit_cache_flush(cmd, cs);

   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 0xfffff);

   tu_cs_emit_regs(cs,
                   A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_bypass));
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE0F, 0x3f);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B605, 0x44);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B600, 0x100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE00, 0x80);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE01, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9600, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8600, 0x880);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE04, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE03, 0x00000410);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_IBO_COUNT, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B182, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BB11, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A982, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A9A8, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AB00, 0x5);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_GS_SIV_CNTL, 0x0000ffff);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x1f);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_SRGB_CNTL, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8110, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL0, 0x401);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL1, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8818, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8819, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881A, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881C, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881D, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881E, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_88F0, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9101, 0xffff00);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9107, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9236, 1);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_SO_OVERRIDE,
                        A6XX_VPC_SO_OVERRIDE_SO_DISABLE);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9801, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9806, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9980, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9990, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9B07, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A81B, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B183, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8099, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_809B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A0, 2);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80AF, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9211, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9602, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9981, 0x3);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9E72, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9108, 0x3);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B304, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B309, 0x000000a2);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8804, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A4, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A5, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A6, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8805, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8806, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8878, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8879, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_MODE_CNTL, 0x00000000);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A008, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x0000001f);

   /* we don't use this yet.. probably best to disable.. */
   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   /* Set not to use streamout by default, */
   tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 4);
   tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
   tu_cs_emit(cs, 0);
   tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
   tu_cs_emit(cs, 0);

   tu_cs_emit_regs(cs,
                   A6XX_SP_HS_CTRL_REG0(0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_GS_CTRL_REG0(0));

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_CNTL(0));

   tu_cs_emit_regs(cs,
                   A6XX_RB_LRZ_CNTL(0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(.bo = &cmd->device->border_color));
   tu_cs_emit_regs(cs,
                   A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(.bo = &cmd->device->border_color));

   tu_cs_sanity_check(cs);
}
static void
tu6_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   unsigned seqno;

   seqno = tu6_emit_event_write(cmd, cs, RB_DONE_TS, true);

   tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
   tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                  CP_WAIT_REG_MEM_0_POLL_MEMORY);
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
   tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(seqno));
   tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
   tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));

   seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);

   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_GTE, 4);
   tu_cs_emit(cs, CP_WAIT_MEM_GTE_0_RESERVED(0));
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
   tu_cs_emit(cs, CP_WAIT_MEM_GTE_3_REF(seqno));
}
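
/* The waits above act as a CP-side fence: each *_TS event writes an
 * incrementing seqno into scratch_bo, and CP_WAIT_REG_MEM / CP_WAIT_MEM_GTE
 * stall the CP until that seqno appears in memory, so the flush is known to
 * have completed before later packets execute.
 */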
static void
update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tu_cs_emit_regs(cs,
                   A6XX_VSC_BIN_SIZE(.width = tiling->tile0.extent.width,
                                     .height = tiling->tile0.extent.height),
                   A6XX_VSC_SIZE_ADDRESS(.bo = &cmd->vsc_data,
                                         .bo_offset = 32 * cmd->vsc_data_pitch));

   tu_cs_emit_regs(cs,
                   A6XX_VSC_BIN_COUNT(.nx = tiling->tile_count.width,
                                      .ny = tiling->tile_count.height));

   tu_cs_emit_pkt4(cs, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
   for (unsigned i = 0; i < 32; i++)
      tu_cs_emit(cs, tiling->pipe_config[i]);

   tu_cs_emit_regs(cs,
                   A6XX_VSC_PIPE_DATA2_ADDRESS(.bo = &cmd->vsc_data2),
                   A6XX_VSC_PIPE_DATA2_PITCH(cmd->vsc_data2_pitch),
                   A6XX_VSC_PIPE_DATA2_ARRAY_PITCH(cmd->vsc_data2.size));

   tu_cs_emit_regs(cs,
                   A6XX_VSC_PIPE_DATA_ADDRESS(.bo = &cmd->vsc_data),
                   A6XX_VSC_PIPE_DATA_PITCH(cmd->vsc_data_pitch),
                   A6XX_VSC_PIPE_DATA_ARRAY_PITCH(cmd->vsc_data.size));
}
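
/* Buffer layout implied by the programming above: vsc_data holds one
 * visibility stream per pipe at pipe * vsc_data_pitch, with the per-pipe
 * size words starting at offset 32 * vsc_data_pitch (which is where
 * VSC_SIZE_ADDRESS points); vsc_data2 is the second stream with its own
 * pitch.  emit_vsc_overflow_test() below compares the sizes the hardware
 * wrote back against these pitches.
 */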
static void
emit_vsc_overflow_test(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
   const uint32_t used_pipe_count =
      tiling->pipe_count.width * tiling->pipe_count.height;

   /* Clear vsc_scratch: */
   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
   tu_cs_emit(cs, 0x0);

   /* Check for overflow, write vsc_scratch if detected: */
   for (int i = 0; i < used_pipe_count; i++) {
      tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
      tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
                     CP_COND_WRITE5_0_WRITE_MEMORY);
      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE_REG(i)));
      tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data_pitch));
      tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(1 + cmd->vsc_data_pitch));

      tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
      tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
                     CP_COND_WRITE5_0_WRITE_MEMORY);
      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE2_REG(i)));
      tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data2_pitch));
      tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(3 + cmd->vsc_data2_pitch));
   }

   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);

   tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

   tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
   tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(OVERFLOW_FLAG_REG) |
                  CP_MEM_TO_REG_0_CNT(1 - 1));
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));

   /*
    * This is a bit awkward, we really want a way to invert the
    * CP_REG_TEST/CP_COND_REG_EXEC logic, so that we can conditionally
    * execute cmds to use hwbinning when a bit is *not* set.  This
    * dance is to invert OVERFLOW_FLAG_REG
    *
    * A CP_NOP packet is used to skip executing the 'else' clause
    * below.
    */

   /* b0 will be set if VSC_DATA or VSC_DATA2 overflow: */
   tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
   tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
                  A6XX_CP_REG_TEST_0_BIT(0) |
                  A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

   tu_cs_reserve(cs, 3 + 7);
   tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
   tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
   tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(7));

   /* if (b0 set) */ {
      /*
       * On overflow, mirror the value to control->vsc_overflow
       * which CPU is checking to detect overflow (see
       * check_vsc_overflow())
       */
      tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
      tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(OVERFLOW_FLAG_REG) |
                     CP_REG_TO_MEM_0_CNT(0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_overflow));

      tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_pkt7(cs, CP_NOP, 2); /* skip 'else' when 'if' is taken */
   } /* else */ {
      tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
      tu_cs_emit(cs, 0x1);
   }
}
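
/* Net effect of the dance above: OVERFLOW_FLAG_REG ends up 1 when no pipe
 * overflowed and 0 when one did (with the offending size also mirrored to
 * control->vsc_overflow for the CPU).  The CP_REG_TEST(bit 0) +
 * CP_COND_REG_EXEC pairs in tu6_emit_tile_select() and tu6_render_tile()
 * then execute their hw-binning packets only in the no-overflow case.
 */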
static void
tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_physical_device *phys_dev = cmd->device->physical_device;
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   uint32_t x1 = tiling->tile0.offset.x;
   uint32_t y1 = tiling->tile0.offset.y;
   uint32_t x2 = tiling->render_area.offset.x + tiling->render_area.extent.width - 1;
   uint32_t y2 = tiling->render_area.offset.y + tiling->render_area.extent.height - 1;

   tu6_emit_window_scissor(cs, x1, y1, x2, y2);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_wfi(cs);

   tu_cs_emit_regs(cs,
                   A6XX_VFD_MODE_CNTL(.binning_pass = true));

   update_vsc_pipe(cmd, cs);

   tu_cs_emit_regs(cs,
                   A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));

   tu_cs_emit_regs(cs,
                   A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
   tu_cs_emit(cs, UNK_2C);

   tu_cs_emit_regs(cs,
                   A6XX_RB_WINDOW_OFFSET(.x = 0, .y = 0));

   tu_cs_emit_regs(cs,
                   A6XX_SP_TP_WINDOW_OFFSET(.x = 0, .y = 0));

   /* emit IB to binning drawcmds: */
   tu_cs_emit_call(cs, &cmd->draw_cs);

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
   tu_cs_emit(cs, UNK_2D);

   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
   tu6_cache_flush(cmd, cs);

   tu_cs_emit_wfi(cs);

   tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

   emit_vsc_overflow_test(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x0);

   cmd->wait_for_idle = false;
}
static void
tu_emit_load_clear(struct tu_cmd_buffer *cmd,
                   const VkRenderPassBeginInfo *info)
{
   struct tu_cs *cs = &cmd->draw_cs;

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);

   tu6_emit_blit_scissor(cmd, cs, true);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu_load_gmem_attachment(cmd, cs, i);

   tu6_emit_blit_scissor(cmd, cs, false);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu_clear_gmem_attachment(cmd, cs, i, info);

   tu_cond_exec_end(cs);

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu_clear_sysmem_attachment(cmd, cs, i, info);

   tu_cond_exec_end(cs);
}
static void
tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
                        const struct VkRect2D *renderArea)
{
   const struct tu_physical_device *phys_dev = cmd->device->physical_device;
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   assert(fb->width > 0 && fb->height > 0);
   tu6_emit_window_scissor(cs, 0, 0, fb->width - 1, fb->height - 1);
   tu6_emit_window_offset(cs, 0, 0);

   tu6_emit_bin_size(cs, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */

   tu6_emit_lrz_flush(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);
   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);

   tu6_emit_wfi(cmd, cs);
   tu_cs_emit_regs(cs,
                   A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_bypass));

   /* enable stream-out, with sysmem there is only one pass: */
   tu_cs_emit_regs(cs,
                   A6XX_VPC_SO_OVERRIDE(.so_disable = false));

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_sanity_check(cs);
}
static void
tu6_sysmem_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   /* Do any resolves of the last subpass. These are handled in the
    * tile_store_ib in the gmem path.
    */
   const struct tu_subpass *subpass = cmd->state.subpass;
   if (subpass->resolve_attachments) {
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a != VK_ATTACHMENT_UNUSED)
            tu6_emit_sysmem_resolve(cmd, cs, a,
                                    subpass->color_attachments[i].attachment);
      }
   }

   tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu6_emit_lrz_flush(cmd, cs);

   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);

   tu_cs_sanity_check(cs);
}
static void
tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_physical_device *phys_dev = cmd->device->physical_device;

   tu6_emit_lrz_flush(cmd, cs);

   /* lrz clear? */

   tu6_emit_cache_flush(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   /* TODO: flushing with barriers instead of blindly always flushing */
   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);

   tu_cs_emit_wfi(cs);
   tu_cs_emit_regs(cs,
                   A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_gmem, .gmem = 1));

   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
   if (use_hw_binning(cmd)) {
      /* enable stream-out during binning pass: */
      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable = false));

      tu6_emit_bin_size(cs,
                        tiling->tile0.extent.width,
                        tiling->tile0.extent.height,
                        A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);

      tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, true);

      tu6_emit_binning_pass(cmd, cs);

      /* and disable stream-out for draw pass: */
      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable = true));

      tu6_emit_bin_size(cs,
                        tiling->tile0.extent.width,
                        tiling->tile0.extent.height,
                        A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);

      tu_cs_emit_regs(cs,
                      A6XX_VFD_MODE_CNTL(0));

      tu_cs_emit_regs(cs, A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->magic.PC_UNKNOWN_9805));

      tu_cs_emit_regs(cs, A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->magic.SP_UNKNOWN_A0F8));

      tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
      tu_cs_emit(cs, 0x1);
   } else {
      /* no binning pass, so enable stream-out for draw pass: */
      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable = false));

      tu6_emit_bin_size(cs,
                        tiling->tile0.extent.width,
                        tiling->tile0.extent.height,
                        0x6000000);
   }

   tu_cs_sanity_check(cs);
}
static void
tu6_render_tile(struct tu_cmd_buffer *cmd,
                struct tu_cs *cs,
                const struct tu_tile *tile)
{
   tu6_emit_tile_select(cmd, cs, tile);

   tu_cs_emit_call(cs, &cmd->draw_cs);
   cmd->wait_for_idle = true;

   if (use_hw_binning(cmd)) {
      tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
      tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
                     A6XX_CP_REG_TEST_0_BIT(0) |
                     A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

      tu_cs_reserve(cs, 3 + 2);
      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
      tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
      tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(2));

      /* if (no overflow) */ {
         tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
         tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
      }
   }

   tu_cs_emit_ib(cs, &cmd->state.tile_store_ib);

   tu_cs_sanity_check(cs);
}
static void
tu6_tile_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_LRZ_CNTL(0));

   tu6_emit_lrz_flush(cmd, cs);

   tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);

   tu_cs_sanity_check(cs);
}
static void
tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tu6_tile_render_begin(cmd, &cmd->cs);

   for (uint32_t y = 0; y < tiling->tile_count.height; y++) {
      for (uint32_t x = 0; x < tiling->tile_count.width; x++) {
         struct tu_tile tile;
         tu_tiling_config_get_tile(tiling, cmd->device, x, y, &tile);
         tu6_render_tile(cmd, &cmd->cs, &tile);
      }
   }

   tu6_tile_render_end(cmd, &cmd->cs);
}
static void
tu_cmd_render_sysmem(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tu6_sysmem_render_begin(cmd, &cmd->cs, &tiling->render_area);

   tu_cs_emit_call(&cmd->cs, &cmd->draw_cs);
   cmd->wait_for_idle = true;

   tu6_sysmem_render_end(cmd, &cmd->cs);
}
static void
tu_cmd_prepare_tile_store_ib(struct tu_cmd_buffer *cmd)
{
   const uint32_t tile_store_space = 11 + (35 * 2) * cmd->state.pass->attachment_count;
   struct tu_cs sub_cs;

   VkResult result =
      tu_cs_begin_sub_stream(&cmd->sub_cs, tile_store_space, &sub_cs);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   /* emit to tile-store sub_cs */
   tu6_emit_tile_store(cmd, &sub_cs);

   cmd->state.tile_store_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
}
static void
tu_cmd_update_tiling_config(struct tu_cmd_buffer *cmd,
                            const VkRect2D *render_area)
{
   const struct tu_device *dev = cmd->device;
   struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tiling->render_area = *render_area;
   tiling->force_sysmem = false;

   tu_tiling_config_update_tile_layout(tiling, dev, cmd->state.pass->gmem_pixels);
   tu_tiling_config_update_pipe_layout(tiling, dev);
   tu_tiling_config_update_pipes(tiling, dev);
}
const struct tu_dynamic_state default_dynamic_state = {
   .viewport =
     {
       .count = 0,
     },
   .scissor =
     {
       .count = 0,
     },
   .line_width = 1.0f,
   .depth_bias =
     {
       .bias = 0.0f,
       .clamp = 0.0f,
       .slope = 0.0f,
     },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds =
     {
       .min = 0.0f,
       .max = 1.0f,
     },
   .stencil_compare_mask =
     {
       .front = ~0u,
       .back = ~0u,
     },
   .stencil_write_mask =
     {
       .front = ~0u,
       .back = ~0u,
     },
   .stencil_reference =
     {
       .front = 0u,
       .back = 0u,
     },
};
static void UNUSED /* FINISHME */
tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer,
                      const struct tu_dynamic_state *src)
{
   struct tu_dynamic_state *dest = &cmd_buffer->state.dynamic;
   uint32_t copy_mask = src->mask;
   uint32_t dest_mask = 0;

   tu_use_args(cmd_buffer); /* FINISHME */

   /* Make sure to copy the number of viewports/scissors because they can
    * only be specified at pipeline creation time.
    */
   dest->viewport.count = src->viewport.count;
   dest->scissor.count = src->scissor.count;
   dest->discard_rectangle.count = src->discard_rectangle.count;

   if (copy_mask & TU_DYNAMIC_VIEWPORT) {
      if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
                 src->viewport.count * sizeof(VkViewport))) {
         typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                      src->viewport.count);
         dest_mask |= TU_DYNAMIC_VIEWPORT;
      }
   }

   if (copy_mask & TU_DYNAMIC_SCISSOR) {
      if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
                 src->scissor.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                      src->scissor.count);
         dest_mask |= TU_DYNAMIC_SCISSOR;
      }
   }

   if (copy_mask & TU_DYNAMIC_LINE_WIDTH) {
      if (dest->line_width != src->line_width) {
         dest->line_width = src->line_width;
         dest_mask |= TU_DYNAMIC_LINE_WIDTH;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) {
      if (memcmp(&dest->depth_bias, &src->depth_bias,
                 sizeof(src->depth_bias))) {
         dest->depth_bias = src->depth_bias;
         dest_mask |= TU_DYNAMIC_DEPTH_BIAS;
      }
   }

   if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) {
      if (memcmp(&dest->blend_constants, &src->blend_constants,
                 sizeof(src->blend_constants))) {
         typed_memcpy(dest->blend_constants, src->blend_constants, 4);
         dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) {
      if (memcmp(&dest->depth_bounds, &src->depth_bounds,
                 sizeof(src->depth_bounds))) {
         dest->depth_bounds = src->depth_bounds;
         dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) {
      if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
                 sizeof(src->stencil_compare_mask))) {
         dest->stencil_compare_mask = src->stencil_compare_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) {
      if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
                 sizeof(src->stencil_write_mask))) {
         dest->stencil_write_mask = src->stencil_write_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) {
      if (memcmp(&dest->stencil_reference, &src->stencil_reference,
                 sizeof(src->stencil_reference))) {
         dest->stencil_reference = src->stencil_reference;
         dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE;
      }
   }

   if (copy_mask & TU_DYNAMIC_DISCARD_RECTANGLE) {
      if (memcmp(&dest->discard_rectangle.rectangles,
                 &src->discard_rectangle.rectangles,
                 src->discard_rectangle.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->discard_rectangle.rectangles,
                      src->discard_rectangle.rectangles,
                      src->discard_rectangle.count);
         dest_mask |= TU_DYNAMIC_DISCARD_RECTANGLE;
      }
   }

   cmd_buffer->state.dirty |= dest_mask;
}
static VkResult
tu_create_cmd_buffer(struct tu_device *device,
                     struct tu_cmd_pool *pool,
                     VkCommandBufferLevel level,
                     VkCommandBuffer *pCommandBuffer)
{
   struct tu_cmd_buffer *cmd_buffer;
   cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
      cmd_buffer->queue_family_index = pool->queue_family_index;
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
      cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
   }

   tu_bo_list_init(&cmd_buffer->bo_list);
   tu_cs_init(&cmd_buffer->cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->draw_cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->draw_epilogue_cs, device, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->sub_cs, device, TU_CS_MODE_SUB_STREAM, 2048);

   *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);

   list_inithead(&cmd_buffer->upload.list);

   VkResult result = tu_bo_init_new(device, &cmd_buffer->scratch_bo, 0x1000);
   if (result != VK_SUCCESS)
      goto fail_scratch_bo;

   /* TODO: resize on overflow */
   cmd_buffer->vsc_data_pitch = device->vsc_data_pitch;
   cmd_buffer->vsc_data2_pitch = device->vsc_data2_pitch;
   cmd_buffer->vsc_data = device->vsc_data;
   cmd_buffer->vsc_data2 = device->vsc_data2;

   return VK_SUCCESS;

fail_scratch_bo:
   list_del(&cmd_buffer->pool_link);
   return result;
}
static void
tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
{
   tu_bo_finish(cmd_buffer->device, &cmd_buffer->scratch_bo);

   list_del(&cmd_buffer->pool_link);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
      free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);

   tu_cs_finish(&cmd_buffer->cs);
   tu_cs_finish(&cmd_buffer->draw_cs);
   tu_cs_finish(&cmd_buffer->draw_epilogue_cs);
   tu_cs_finish(&cmd_buffer->sub_cs);

   tu_bo_list_destroy(&cmd_buffer->bo_list);
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
static VkResult
tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
{
   cmd_buffer->wait_for_idle = true;

   cmd_buffer->record_result = VK_SUCCESS;

   tu_bo_list_reset(&cmd_buffer->bo_list);
   tu_cs_reset(&cmd_buffer->cs);
   tu_cs_reset(&cmd_buffer->draw_cs);
   tu_cs_reset(&cmd_buffer->draw_epilogue_cs);
   tu_cs_reset(&cmd_buffer->sub_cs);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
      cmd_buffer->descriptors[i].valid = 0;
      cmd_buffer->descriptors[i].push_dirty = false;
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;

   return cmd_buffer->record_result;
}
VkResult
tu_AllocateCommandBuffers(VkDevice _device,
                          const VkCommandBufferAllocateInfo *pAllocateInfo,
                          VkCommandBuffer *pCommandBuffers)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {

      if (!list_is_empty(&pool->free_cmd_buffers)) {
         struct tu_cmd_buffer *cmd_buffer = list_first_entry(
            &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);

         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

         result = tu_reset_cmd_buffer(cmd_buffer);
         cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
         cmd_buffer->level = pAllocateInfo->level;

         pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
      } else {
         result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                       &pCommandBuffers[i]);
      }
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
                            pCommandBuffers);

      /* From the Vulkan 1.0.66 spec:
       *
       * "vkAllocateCommandBuffers can be used to create multiple
       *  command buffers. If the creation of any of those command
       *  buffers fails, the implementation must destroy all
       *  successfully created command buffer objects from this
       *  command, set all entries of the pCommandBuffers array to
       *  NULL and return the error."
       */
      memset(pCommandBuffers, 0,
             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
   }

   return result;
}
void
tu_FreeCommandBuffers(VkDevice device,
                      VkCommandPool commandPool,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (cmd_buffer) {
         if (cmd_buffer->pool) {
            list_del(&cmd_buffer->pool_link);
            list_addtail(&cmd_buffer->pool_link,
                         &cmd_buffer->pool->free_cmd_buffers);
         } else
            tu_cmd_buffer_destroy(cmd_buffer);
      }
   }
}
VkResult
tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
                      VkCommandBufferResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   return tu_reset_cmd_buffer(cmd_buffer);
}
VkResult
tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
                      const VkCommandBufferBeginInfo *pBeginInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   VkResult result = VK_SUCCESS;

   if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
      /* If the command buffer has already been reset with
       * vkResetCommandBuffer, no need to do it again.
       */
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
   cmd_buffer->usage_flags = pBeginInfo->flags;

   tu_cs_begin(&cmd_buffer->cs);
   tu_cs_begin(&cmd_buffer->draw_cs);
   tu_cs_begin(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->scratch_seqno = 0;

   /* setup initial configuration into command buffer */
   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      switch (cmd_buffer->queue_family_index) {
      case TU_QUEUE_GENERAL:
         tu6_init_hw(cmd_buffer, &cmd_buffer->cs);
         break;
      default:
         break;
      }
   } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
              (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
      assert(pBeginInfo->pInheritanceInfo);
      cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
      cmd_buffer->state.subpass = &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;

   return VK_SUCCESS;
}
void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                        uint32_t firstBinding,
                        uint32_t bindingCount,
                        const VkBuffer *pBuffers,
                        const VkDeviceSize *pOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   assert(firstBinding + bindingCount <= MAX_VBS);

   for (uint32_t i = 0; i < bindingCount; i++) {
      cmd->state.vb.buffers[firstBinding + i] =
         tu_buffer_from_handle(pBuffers[i]);
      cmd->state.vb.offsets[firstBinding + i] = pOffsets[i];
   }

   /* VB states depend on VkPipelineVertexInputStateCreateInfo */
   cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
}
void
tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
                      VkBuffer buffer,
                      VkDeviceSize offset,
                      VkIndexType indexType)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buf, buffer);

   /* initialize/update the restart index */
   if (!cmd->state.index_buffer || cmd->state.index_type != indexType) {
      struct tu_cs *draw_cs = &cmd->draw_cs;

      tu6_emit_restart_index(
         draw_cs, indexType == VK_INDEX_TYPE_UINT32 ? 0xffffffff : 0xffff);

      tu_cs_sanity_check(draw_cs);
   }

   /* track the BO */
   if (cmd->state.index_buffer != buf)
      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);

   cmd->state.index_buffer = buf;
   cmd->state.index_offset = offset;
   cmd->state.index_type = indexType;
}
void
tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                         VkPipelineBindPoint pipelineBindPoint,
                         VkPipelineLayout _layout,
                         uint32_t firstSet,
                         uint32_t descriptorSetCount,
                         const VkDescriptorSet *pDescriptorSets,
                         uint32_t dynamicOffsetCount,
                         const uint32_t *pDynamicOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
   unsigned dyn_idx = 0;

   struct tu_descriptor_state *descriptors_state =
      tu_get_descriptors_state(cmd_buffer, pipelineBindPoint);

   for (unsigned i = 0; i < descriptorSetCount; ++i) {
      unsigned idx = i + firstSet;
      TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);

      descriptors_state->sets[idx] = set;
      descriptors_state->valid |= (1u << idx);

      for (unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
         unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
         assert(dyn_idx < dynamicOffsetCount);

         descriptors_state->dynamic_buffers[idx] =
            set->dynamic_descriptors[j].va + pDynamicOffsets[dyn_idx];
      }
   }

   cmd_buffer->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS;
}
void tu_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer,
                                           uint32_t firstBinding,
                                           uint32_t bindingCount,
                                           const VkBuffer *pBuffers,
                                           const VkDeviceSize *pOffsets,
                                           const VkDeviceSize *pSizes)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   assert(firstBinding + bindingCount <= IR3_MAX_SO_BUFFERS);

   for (uint32_t i = 0; i < bindingCount; i++) {
      uint32_t idx = firstBinding + i;
      TU_FROM_HANDLE(tu_buffer, buf, pBuffers[i]);

      if (pOffsets[i] != 0)
         cmd->state.streamout_reset |= 1 << idx;

      cmd->state.streamout_buf.buffers[idx] = buf;
      cmd->state.streamout_buf.offsets[idx] = pOffsets[i];
      cmd->state.streamout_buf.sizes[idx] = pSizes[i];

      cmd->state.streamout_enabled |= 1 << idx;
   }

   cmd->state.dirty |= TU_CMD_DIRTY_STREAMOUT_BUFFERS;
}
void tu_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer,
                                     uint32_t firstCounterBuffer,
                                     uint32_t counterBufferCount,
                                     const VkBuffer *pCounterBuffers,
                                     const VkDeviceSize *pCounterBufferOffsets)
{
   assert(firstCounterBuffer + counterBufferCount <= IR3_MAX_SO_BUFFERS);
   /* TODO do something with counter buffer? */
}
void tu_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer,
                                   uint32_t firstCounterBuffer,
                                   uint32_t counterBufferCount,
                                   const VkBuffer *pCounterBuffers,
                                   const VkDeviceSize *pCounterBufferOffsets)
{
   assert(firstCounterBuffer + counterBufferCount <= IR3_MAX_SO_BUFFERS);
   /* TODO do something with counter buffer? */

   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   cmd->state.streamout_enabled = 0;
}
void
tu_CmdPushConstants(VkCommandBuffer commandBuffer,
                    VkPipelineLayout layout,
                    VkShaderStageFlags stageFlags,
                    uint32_t offset,
                    uint32_t size,
                    const void *pValues)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   memcpy((void*) cmd->push_constants + offset, pValues, size);
   cmd->state.dirty |= TU_CMD_DIRTY_PUSH_CONSTANTS;
}
VkResult
tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   if (cmd_buffer->scratch_seqno) {
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
                     MSM_SUBMIT_BO_WRITE);
   }

   if (cmd_buffer->use_vsc_data) {
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data2,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
   }

   tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->device->border_color,
                  MSM_SUBMIT_BO_READ);

   for (uint32_t i = 0; i < cmd_buffer->draw_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   for (uint32_t i = 0; i < cmd_buffer->draw_epilogue_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_epilogue_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   for (uint32_t i = 0; i < cmd_buffer->sub_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->sub_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   tu_cs_end(&cmd_buffer->cs);
   tu_cs_end(&cmd_buffer->draw_cs);
   tu_cs_end(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;

   return cmd_buffer->record_result;
}
void
tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
                   VkPipelineBindPoint pipelineBindPoint,
                   VkPipeline _pipeline)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd->state.pipeline = pipeline;
      cmd->state.dirty |= TU_CMD_DIRTY_PIPELINE;
      break;
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd->state.compute_pipeline = pipeline;
      cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_PIPELINE;
      break;
   default:
      unreachable("unrecognized pipeline bind point");
      break;
   }

   tu_bo_list_add(&cmd->bo_list, &pipeline->program.binary_bo,
                  MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   for (uint32_t i = 0; i < pipeline->cs.bo_count; i++) {
      tu_bo_list_add(&cmd->bo_list, pipeline->cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }
}
void
tu_CmdSetViewport(VkCommandBuffer commandBuffer,
                  uint32_t firstViewport,
                  uint32_t viewportCount,
                  const VkViewport *pViewports)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   assert(firstViewport == 0 && viewportCount == 1);
   cmd->state.dynamic.viewport.viewports[0] = pViewports[0];
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_VIEWPORT;
}

void
tu_CmdSetScissor(VkCommandBuffer commandBuffer,
                 uint32_t firstScissor,
                 uint32_t scissorCount,
                 const VkRect2D *pScissors)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   assert(firstScissor == 0 && scissorCount == 1);
   cmd->state.dynamic.scissor.scissors[0] = pScissors[0];
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_SCISSOR;
}

void
tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   cmd->state.dynamic.line_width = lineWidth;

   /* line width depends on VkPipelineRasterizationStateCreateInfo */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}

void
tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
                   float depthBiasConstantFactor,
                   float depthBiasClamp,
                   float depthBiasSlopeFactor)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *draw_cs = &cmd->draw_cs;

   tu6_emit_depth_bias(draw_cs, depthBiasConstantFactor, depthBiasClamp,
                       depthBiasSlopeFactor);

   tu_cs_sanity_check(draw_cs);
}

void
tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
                        const float blendConstants[4])
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *draw_cs = &cmd->draw_cs;

   tu6_emit_blend_constants(draw_cs, blendConstants);

   tu_cs_sanity_check(draw_cs);
}

void
tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
                     float minDepthBounds,
                     float maxDepthBounds)
{
}
void
tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
                            VkStencilFaceFlags faceMask,
                            uint32_t compareMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd->state.dynamic.stencil_compare_mask.front = compareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd->state.dynamic.stencil_compare_mask.back = compareMask;

   /* the front/back compare masks must be updated together */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}

void
tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t writeMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd->state.dynamic.stencil_write_mask.front = writeMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd->state.dynamic.stencil_write_mask.back = writeMask;

   /* the front/back write masks must be updated together */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}

void
tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t reference)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd->state.dynamic.stencil_reference.front = reference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd->state.dynamic.stencil_reference.back = reference;

   /* the front/back references must be updated together */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}
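/* Secondary command buffers recorded with RENDER_PASS_CONTINUE_BIT have
 * their draw_cs/draw_epilogue_cs entries spliced into the primary, while
 * other secondaries are invoked with an IB call from the primary cs.
 */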
void
tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCmdBuffers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   VkResult result;

   assert(commandBufferCount > 0);

   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);

      result = tu_bo_list_merge(&cmd->bo_list, &secondary->bo_list);
      if (result != VK_SUCCESS) {
         cmd->record_result = result;
         break;
      }

      if (secondary->usage_flags &
          VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
         assert(tu_cs_is_empty(&secondary->cs));

         result = tu_cs_add_entries(&cmd->draw_cs, &secondary->draw_cs);
         if (result != VK_SUCCESS) {
            cmd->record_result = result;
            break;
         }

         result = tu_cs_add_entries(&cmd->draw_epilogue_cs,
                                    &secondary->draw_epilogue_cs);
         if (result != VK_SUCCESS) {
            cmd->record_result = result;
            break;
         }
      } else {
         assert(tu_cs_is_empty(&secondary->draw_cs));
         assert(tu_cs_is_empty(&secondary->draw_epilogue_cs));

         for (uint32_t j = 0; j < secondary->cs.bo_count; j++) {
            tu_bo_list_add(&cmd->bo_list, secondary->cs.bos[j],
                           MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
         }

         tu_cs_emit_call(&cmd->cs, &secondary->cs);
      }
   }
   cmd->state.dirty = ~0u; /* TODO: set dirty only what needs to be */
}
VkResult
tu_CreateCommandPool(VkDevice _device,
                     const VkCommandPoolCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkCommandPool *pCmdPool)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_cmd_pool *pool;

   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);
   list_inithead(&pool->free_cmd_buffers);

   pool->queue_family_index = pCreateInfo->queueFamilyIndex;

   *pCmdPool = tu_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}
void
tu_DestroyCommandPool(VkDevice _device,
                      VkCommandPool commandPool,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   vk_free2(&device->alloc, pAllocator, pool);
}
VkResult
tu_ResetCommandPool(VkDevice device,
                    VkCommandPool commandPool,
                    VkCommandPoolResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
   VkResult result;

   list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
                       pool_link)
   {
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}
void
tu_TrimCommandPool(VkDevice device,
                   VkCommandPool commandPool,
                   VkCommandPoolTrimFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }
}
void
tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                      const VkRenderPassBeginInfo *pRenderPassBegin,
                      VkSubpassContents contents)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
   TU_FROM_HANDLE(tu_framebuffer, fb, pRenderPassBegin->framebuffer);

   cmd->state.pass = pass;
   cmd->state.subpass = pass->subpasses;
   cmd->state.framebuffer = fb;

   tu_cmd_update_tiling_config(cmd, &pRenderPassBegin->renderArea);
   tu_cmd_prepare_tile_store_ib(cmd);

   tu_emit_load_clear(cmd, pRenderPassBegin);

   tu6_emit_zs(cmd, cmd->state.subpass, &cmd->draw_cs);
   tu6_emit_mrt(cmd, cmd->state.subpass, &cmd->draw_cs);
   tu6_emit_msaa(&cmd->draw_cs, cmd->state.subpass->samples);
   tu6_emit_render_cntl(cmd, cmd->state.subpass, &cmd->draw_cs, false);

   /* note: use_hw_binning only checks tiling config */
   if (use_hw_binning(cmd))
      cmd->use_vsc_data = true;

   for (uint32_t i = 0; i < fb->attachment_count; ++i) {
      const struct tu_image_view *iview = fb->attachments[i].attachment;
      tu_bo_list_add(&cmd->bo_list, iview->image->bo,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
   }
}
void
tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
                       const VkRenderPassBeginInfo *pRenderPassBeginInfo,
                       const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
{
   tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
                         pSubpassBeginInfo->contents);
}
void
tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   const struct tu_render_pass *pass = cmd->state.pass;
   struct tu_cs *cs = &cmd->draw_cs;

   const struct tu_subpass *subpass = cmd->state.subpass++;

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);

   if (subpass->resolve_attachments) {
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         tu_store_gmem_attachment(cmd, cs, a,
                                  subpass->color_attachments[i].attachment);

         if (pass->attachments[a].gmem_offset < 0)
            continue;

         /* TODO:
          * check if the resolved attachment is needed by later subpasses,
          * if it is, should be doing a GMEM->GMEM resolve instead of GMEM->MEM->GMEM..
          */
         tu_finishme("missing GMEM->GMEM resolve path\n");
         tu_emit_load_gmem_attachment(cmd, cs, a);
      }
   }

   tu_cond_exec_end(cs);

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);

   /* Emit flushes so that input attachments will read the correct value.
    * TODO: use subpass dependencies to flush or not
    */
   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);

   if (subpass->resolve_attachments) {
      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);

      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         tu6_emit_sysmem_resolve(cmd, cs, a,
                                 subpass->color_attachments[i].attachment);
      }

      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
   }

   tu_cond_exec_end(cs);

   /* subpass->input_count > 0 then texture cache invalidate is likely to be needed */
   if (cmd->state.subpass->input_count)
      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);

   /* emit mrt/zs/msaa/ubwc state for the subpass that is starting */
   tu6_emit_zs(cmd, cmd->state.subpass, cs);
   tu6_emit_mrt(cmd, cmd->state.subpass, cs);
   tu6_emit_msaa(cs, cmd->state.subpass->samples);
   tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, false);
}
void
tu_CmdNextSubpass2(VkCommandBuffer commandBuffer,
                   const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
                   const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}
struct tu_draw_info
{
   /**
    * Number of vertices.
    */
   uint32_t count;

   /**
    * Index of the first vertex.
    */
   int32_t vertex_offset;

   /**
    * First instance id.
    */
   uint32_t first_instance;

   /**
    * Number of instances.
    */
   uint32_t instance_count;

   /**
    * First index (indexed draws only).
    */
   uint32_t first_index;

   /**
    * Whether it's an indexed draw.
    */
   bool indexed;

   /**
    * Indirect draw parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
   uint32_t stride;

   /**
    * Draw count parameters resource.
    */
   struct tu_buffer *count_buffer;
   uint64_t count_buffer_offset;

   /**
    * Stream output parameters resource.
    */
   struct tu_buffer *streamout_buffer;
   uint64_t streamout_buffer_offset;
};
#define ENABLE_ALL (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
#define ENABLE_DRAW (CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)

enum tu_draw_state_group_id
{
   TU_DRAW_STATE_PROGRAM,
   TU_DRAW_STATE_PROGRAM_BINNING,
   TU_DRAW_STATE_VI,
   TU_DRAW_STATE_VI_BINNING,
   TU_DRAW_STATE_VP,
   TU_DRAW_STATE_RAST,
   TU_DRAW_STATE_DS,
   TU_DRAW_STATE_BLEND,
   TU_DRAW_STATE_VS_CONST,
   TU_DRAW_STATE_FS_CONST,
   TU_DRAW_STATE_VS_TEX,
   TU_DRAW_STATE_FS_TEX_SYSMEM,
   TU_DRAW_STATE_FS_TEX_GMEM,
   TU_DRAW_STATE_FS_IBO,
   TU_DRAW_STATE_VS_PARAMS,

   TU_DRAW_STATE_COUNT,
};

struct tu_draw_state_group
{
   enum tu_draw_state_group_id id;
   uint32_t enable_mask;
   struct tu_cs_entry ib;
};
static const uint32_t *
sampler_ptr(struct tu_descriptor_state *descriptors_state,
            const struct tu_descriptor_map *map, unsigned i,
            unsigned array_index)
{
   assert(descriptors_state->valid & (1 << map->set[i]));

   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
   assert(map->binding[i] < set->layout->binding_count);

   const struct tu_descriptor_set_binding_layout *layout =
      &set->layout->binding[map->binding[i]];

   if (layout->immutable_samplers_offset) {
      const uint32_t *immutable_samplers =
         tu_immutable_samplers(set->layout, layout);

      return &immutable_samplers[array_index * A6XX_TEX_SAMP_DWORDS];
   }

   switch (layout->type) {
   case VK_DESCRIPTOR_TYPE_SAMPLER:
      return &set->mapped_ptr[layout->offset / 4];
   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      return &set->mapped_ptr[layout->offset / 4 + A6XX_TEX_CONST_DWORDS +
                              array_index * (A6XX_TEX_CONST_DWORDS + A6XX_TEX_SAMP_DWORDS)];
   default:
      unreachable("unimplemented descriptor type");
   }
}
static void
write_tex_const(struct tu_cmd_buffer *cmd,
                uint32_t *dst,
                struct tu_descriptor_state *descriptors_state,
                const struct tu_descriptor_map *map,
                unsigned i, unsigned array_index, bool is_sysmem)
{
   assert(descriptors_state->valid & (1 << map->set[i]));

   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
   assert(map->binding[i] < set->layout->binding_count);

   const struct tu_descriptor_set_binding_layout *layout =
      &set->layout->binding[map->binding[i]];

   switch (layout->type) {
   case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
   case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
   case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
      memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
                                   array_index * A6XX_TEX_CONST_DWORDS],
             A6XX_TEX_CONST_DWORDS * 4);
      break;
   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
                                   array_index *
                                   (A6XX_TEX_CONST_DWORDS +
                                    A6XX_TEX_SAMP_DWORDS)],
             A6XX_TEX_CONST_DWORDS * 4);
      break;
   default:
      unreachable("unimplemented descriptor type");
      break;
   }

   if (layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT && !is_sysmem) {
      const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
      uint32_t a = cmd->state.subpass->input_attachments[map->value[i] +
                                                         array_index].attachment;
      const struct tu_render_pass_attachment *att = &cmd->state.pass->attachments[a];

      assert(att->gmem_offset >= 0);

      dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
      dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
      dst[2] &= ~(A6XX_TEX_CONST_2_TYPE__MASK | A6XX_TEX_CONST_2_PITCH__MASK);
      dst[2] |=
         A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
         A6XX_TEX_CONST_2_PITCH(tiling->tile0.extent.width * att->cpp);
      dst[3] = 0;
      dst[4] = cmd->device->physical_device->gmem_base + att->gmem_offset;
      dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
      for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
         dst[i] = 0;

      if (cmd->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
         tu_finishme("patch input attachment pitch for secondary cmd buffer");
   }
}
static void
write_image_ibo(struct tu_cmd_buffer *cmd,
                uint32_t *dst,
                struct tu_descriptor_state *descriptors_state,
                const struct tu_descriptor_map *map,
                unsigned i, unsigned array_index)
{
   assert(descriptors_state->valid & (1 << map->set[i]));

   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
   assert(map->binding[i] < set->layout->binding_count);

   const struct tu_descriptor_set_binding_layout *layout =
      &set->layout->binding[map->binding[i]];

   assert(layout->type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);

   memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
                                (array_index * 2 + 1) * A6XX_TEX_CONST_DWORDS],
          A6XX_TEX_CONST_DWORDS * 4);
}
static uint64_t
buffer_ptr(struct tu_descriptor_state *descriptors_state,
           const struct tu_descriptor_map *map,
           unsigned i, unsigned array_index)
{
   assert(descriptors_state->valid & (1 << map->set[i]));

   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
   assert(map->binding[i] < set->layout->binding_count);

   const struct tu_descriptor_set_binding_layout *layout =
      &set->layout->binding[map->binding[i]];

   switch (layout->type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return descriptors_state->dynamic_buffers[layout->dynamic_offset_offset +
                                                array_index];
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      return (uint64_t) set->mapped_ptr[layout->offset / 4 + array_index * 2 + 1] << 32 |
             set->mapped_ptr[layout->offset / 4 + array_index * 2];
   default:
      unreachable("unimplemented descriptor type");
   }
}
static inline uint32_t
tu6_stage2opcode(gl_shader_stage type)
{
   switch (type) {
   case MESA_SHADER_VERTEX:
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_TESS_EVAL:
   case MESA_SHADER_GEOMETRY:
      return CP_LOAD_STATE6_GEOM;
   case MESA_SHADER_FRAGMENT:
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_KERNEL:
      return CP_LOAD_STATE6_FRAG;
   default:
      unreachable("bad shader type");
   }
}

static inline enum a6xx_state_block
tu6_stage2shadersb(gl_shader_stage type)
{
   switch (type) {
   case MESA_SHADER_VERTEX:
      return SB6_VS_SHADER;
   case MESA_SHADER_FRAGMENT:
      return SB6_FS_SHADER;
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_KERNEL:
      return SB6_CS_SHADER;
   default:
      unreachable("bad shader type");
   }
}
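/* User constants are uploaded with CP_LOAD_STATE6: push constants are
 * written inline (SS6_DIRECT) while UBO ranges promoted to the constant
 * file are referenced by address (SS6_INDIRECT), in units of vec4s.
 */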
static void
tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
                     struct tu_descriptor_state *descriptors_state,
                     gl_shader_stage type,
                     uint32_t *push_constants)
{
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   const struct ir3_ubo_analysis_state *state = &link->ubo_state;

   if (link->push_consts.count > 0) {
      unsigned num_units = link->push_consts.count;
      unsigned offset = link->push_consts.lo;
      tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_units * 4);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                 CP_LOAD_STATE6_0_NUM_UNIT(num_units));
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, 0);
      for (unsigned i = 0; i < num_units * 4; i++)
         tu_cs_emit(cs, push_constants[i + offset * 4]);
   }

   for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
      if (state->range[i].start < state->range[i].end) {
         uint32_t size = state->range[i].end - state->range[i].start;
         uint32_t offset = state->range[i].start;

         /* and even if the start of the const buffer is before
          * first_immediate, the end may not be:
          */
         size = MIN2(size, (16 * link->constlen) - state->range[i].offset);

         if (size == 0)
            continue;

         /* things should be aligned to vec4: */
         debug_assert((state->range[i].offset % 16) == 0);
         debug_assert((size % 16) == 0);
         debug_assert((offset % 16) == 0);

         /* Look through the UBO map to find our UBO index, and get the VA for
          * it.
          */
         uint64_t va = 0;
         uint32_t ubo_idx = i - 1;
         uint32_t ubo_map_base = 0;
         for (int j = 0; j < link->ubo_map.num; j++) {
            if (ubo_idx >= ubo_map_base &&
                ubo_idx < ubo_map_base + link->ubo_map.array_size[j]) {
               va = buffer_ptr(descriptors_state, &link->ubo_map, j,
                               ubo_idx - ubo_map_base);
               break;
            }
            ubo_map_base += link->ubo_map.array_size[j];
         }
         assert(va);

         tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
         tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
                    CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                    CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                    CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                    CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
         tu_cs_emit_qw(cs, va + offset);
      }
   }
}
static void
tu6_emit_ubos(struct tu_cs *cs, const struct tu_pipeline *pipeline,
              struct tu_descriptor_state *descriptors_state,
              gl_shader_stage type)
{
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];

   uint32_t num = MIN2(link->ubo_map.num_desc, link->const_state.num_ubos);
   uint32_t anum = align(num, 2);

   if (!num)
      return;

   tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (2 * anum));
   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(link->const_state.offsets.ubo) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
              CP_LOAD_STATE6_0_NUM_UNIT(anum/2));
   tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));

   unsigned emitted = 0;
   for (unsigned i = 0; emitted < num && i < link->ubo_map.num; i++) {
      for (unsigned j = 0; emitted < num && j < link->ubo_map.array_size[i]; j++) {
         tu_cs_emit_qw(cs, buffer_ptr(descriptors_state, &link->ubo_map, i, j));
         emitted++;
      }
   }

   for (; emitted < anum; emitted++) {
      tu_cs_emit(cs, 0xffffffff);
      tu_cs_emit(cs, 0xffffffff);
   }
}
static struct tu_cs_entry
tu6_emit_consts(struct tu_cmd_buffer *cmd,
                const struct tu_pipeline *pipeline,
                struct tu_descriptor_state *descriptors_state,
                gl_shader_stage type)
{
   struct tu_cs cs;
   tu_cs_begin_sub_stream(&cmd->sub_cs, 512, &cs); /* TODO: maximum size? */

   tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);
   tu6_emit_ubos(&cs, pipeline, descriptors_state, type);

   return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
}
static VkResult
tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
                   const struct tu_draw_info *draw,
                   struct tu_cs_entry *entry)
{
   /* TODO: fill out more than just base instance */
   const struct tu_program_descriptor_linkage *link =
      &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
   const struct ir3_const_state *const_state = &link->const_state;
   struct tu_cs cs;

   if (const_state->offsets.driver_param >= link->constlen) {
      *entry = (struct tu_cs_entry) {};
      return VK_SUCCESS;
   }

   VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 8, &cs);
   if (result != VK_SUCCESS)
      return result;

   tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(const_state->offsets.driver_param) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
              CP_LOAD_STATE6_0_NUM_UNIT(1));
   tu_cs_emit(&cs, 0);
   tu_cs_emit(&cs, 0);

   STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);

   tu_cs_emit(&cs, 0);
   tu_cs_emit(&cs, 0);
   tu_cs_emit(&cs, draw->first_instance);
   tu_cs_emit(&cs, 0);

   *entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
   return VK_SUCCESS;
}
static VkResult
tu6_emit_textures(struct tu_cmd_buffer *cmd,
                  const struct tu_pipeline *pipeline,
                  struct tu_descriptor_state *descriptors_state,
                  gl_shader_stage type,
                  struct tu_cs_entry *entry,
                  bool is_sysmem)
{
   struct tu_cs *draw_state = &cmd->sub_cs;
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   VkResult result;

   if (link->texture_map.num_desc == 0 && link->sampler_map.num_desc == 0) {
      *entry = (struct tu_cs_entry) {};
      return VK_SUCCESS;
   }

   /* allocate and fill texture state */
   struct ts_cs_memory tex_const;
   result = tu_cs_alloc(draw_state, link->texture_map.num_desc,
                        A6XX_TEX_CONST_DWORDS, &tex_const);
   if (result != VK_SUCCESS)
      return result;

   int tex_index = 0;
   for (unsigned i = 0; i < link->texture_map.num; i++) {
      for (int j = 0; j < link->texture_map.array_size[i]; j++) {
         write_tex_const(cmd,
                         &tex_const.map[A6XX_TEX_CONST_DWORDS * tex_index++],
                         descriptors_state, &link->texture_map, i, j,
                         is_sysmem);
      }
   }

   /* allocate and fill sampler state */
   struct ts_cs_memory tex_samp = { 0 };
   if (link->sampler_map.num_desc) {
      result = tu_cs_alloc(draw_state, link->sampler_map.num_desc,
                           A6XX_TEX_SAMP_DWORDS, &tex_samp);
      if (result != VK_SUCCESS)
         return result;

      int sampler_index = 0;
      for (unsigned i = 0; i < link->sampler_map.num; i++) {
         for (int j = 0; j < link->sampler_map.array_size[i]; j++) {
            const uint32_t *sampler = sampler_ptr(descriptors_state,
                                                  &link->sampler_map,
                                                  i, j);
            memcpy(&tex_samp.map[A6XX_TEX_SAMP_DWORDS * sampler_index++],
                   sampler, A6XX_TEX_SAMP_DWORDS * 4);
         }
      }
   }

   unsigned tex_samp_reg, tex_const_reg, tex_count_reg;
   enum a6xx_state_block sb;

   switch (type) {
   case MESA_SHADER_VERTEX:
      sb = SB6_VS_TEX;
      tex_samp_reg = REG_A6XX_SP_VS_TEX_SAMP_LO;
      tex_const_reg = REG_A6XX_SP_VS_TEX_CONST_LO;
      tex_count_reg = REG_A6XX_SP_VS_TEX_COUNT;
      break;
   case MESA_SHADER_FRAGMENT:
      sb = SB6_FS_TEX;
      tex_samp_reg = REG_A6XX_SP_FS_TEX_SAMP_LO;
      tex_const_reg = REG_A6XX_SP_FS_TEX_CONST_LO;
      tex_count_reg = REG_A6XX_SP_FS_TEX_COUNT;
      break;
   case MESA_SHADER_COMPUTE:
      sb = SB6_CS_TEX;
      tex_samp_reg = REG_A6XX_SP_CS_TEX_SAMP_LO;
      tex_const_reg = REG_A6XX_SP_CS_TEX_CONST_LO;
      tex_count_reg = REG_A6XX_SP_CS_TEX_COUNT;
      break;
   default:
      unreachable("bad state block");
   }

   struct tu_cs cs;
   result = tu_cs_begin_sub_stream(draw_state, 16, &cs);
   if (result != VK_SUCCESS)
      return result;

   if (link->sampler_map.num_desc) {
      /* output sampler state: */
      tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
      tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
                 CP_LOAD_STATE6_0_NUM_UNIT(link->sampler_map.num_desc));
      tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */

      tu_cs_emit_pkt4(&cs, tex_samp_reg, 2);
      tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
   }

   /* emit texture state: */
   tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
              CP_LOAD_STATE6_0_NUM_UNIT(link->texture_map.num_desc));
   tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */

   tu_cs_emit_pkt4(&cs, tex_const_reg, 2);
   tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */

   tu_cs_emit_pkt4(&cs, tex_count_reg, 1);
   tu_cs_emit(&cs, link->texture_map.num_desc);

   *entry = tu_cs_end_sub_stream(draw_state, &cs);
   return VK_SUCCESS;
}
static VkResult
tu6_emit_ibo(struct tu_cmd_buffer *cmd,
             const struct tu_pipeline *pipeline,
             struct tu_descriptor_state *descriptors_state,
             gl_shader_stage type,
             struct tu_cs_entry *entry)
{
   struct tu_cs *draw_state = &cmd->sub_cs;
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   VkResult result;

   unsigned num_desc = link->ssbo_map.num_desc + link->image_map.num_desc;

   if (num_desc == 0) {
      *entry = (struct tu_cs_entry) {};
      return VK_SUCCESS;
   }

   struct ts_cs_memory ibo_const;
   result = tu_cs_alloc(draw_state, num_desc,
                        A6XX_TEX_CONST_DWORDS, &ibo_const);
   if (result != VK_SUCCESS)
      return result;

   int ssbo_index = 0;
   for (unsigned i = 0; i < link->ssbo_map.num; i++) {
      for (int j = 0; j < link->ssbo_map.array_size[i]; j++) {
         uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];

         uint64_t va = buffer_ptr(descriptors_state, &link->ssbo_map, i, j);
         /* We don't expose robustBufferAccess, so leave the size unlimited. */
         uint32_t sz = MAX_STORAGE_BUFFER_RANGE / 4;

         dst[0] = A6XX_IBO_0_FMT(FMT6_32_UINT);
         dst[1] = A6XX_IBO_1_WIDTH(sz & MASK(15)) |
                  A6XX_IBO_1_HEIGHT(sz >> 15);
         dst[2] = A6XX_IBO_2_UNK4 |
                  A6XX_IBO_2_UNK31 |
                  A6XX_IBO_2_TYPE(A6XX_TEX_1D);
         dst[3] = 0;
         dst[4] = va;
         dst[5] = va >> 32;
         for (int i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
            dst[i] = 0;

         ssbo_index++;
      }
   }

   for (unsigned i = 0; i < link->image_map.num; i++) {
      for (int j = 0; j < link->image_map.array_size[i]; j++) {
         uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];

         write_image_ibo(cmd, dst,
                         descriptors_state, &link->image_map, i, j);

         ssbo_index++;
      }
   }

   assert(ssbo_index == num_desc);

   struct tu_cs cs;
   result = tu_cs_begin_sub_stream(draw_state, 7, &cs);
   if (result != VK_SUCCESS)
      return result;

   uint32_t opcode, ibo_addr_reg;
   enum a6xx_state_block sb;
   enum a6xx_state_type st;

   switch (type) {
   case MESA_SHADER_FRAGMENT:
      opcode = CP_LOAD_STATE6;
      st = ST6_SHADER;
      sb = SB6_IBO;
      ibo_addr_reg = REG_A6XX_SP_IBO_LO;
      break;
   case MESA_SHADER_COMPUTE:
      opcode = CP_LOAD_STATE6_FRAG;
      st = ST6_IBO;
      sb = SB6_CS_SHADER;
      ibo_addr_reg = REG_A6XX_SP_CS_IBO_LO;
      break;
   default:
      unreachable("unsupported stage for ibos");
   }

   /* emit texture state: */
   tu_cs_emit_pkt7(&cs, opcode, 3);
   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
              CP_LOAD_STATE6_0_STATE_TYPE(st) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
              CP_LOAD_STATE6_0_NUM_UNIT(num_desc));
   tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */

   tu_cs_emit_pkt4(&cs, ibo_addr_reg, 2);
   tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */

   *entry = tu_cs_end_sub_stream(draw_state, &cs);
   return VK_SUCCESS;
}
static void
tu6_emit_streamout(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   struct tu_streamout_state *tf = &cmd->state.pipeline->streamout;

   for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
      struct tu_buffer *buf = cmd->state.streamout_buf.buffers[i];
      if (!buf)
         continue;

      uint32_t offset;
      offset = cmd->state.streamout_buf.offsets[i];

      tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_BASE(i, .bo = buf->bo,
                                                     .bo_offset = buf->bo_offset));
      tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_SIZE(i, buf->size));

      if (cmd->state.streamout_reset & (1 << i)) {
         offset *= tf->stride[i];

         tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_OFFSET(i, offset));
         cmd->state.streamout_reset &= ~(1 << i);
      } else {
         tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
         tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(i)) |
                        CP_MEM_TO_REG_0_SHIFT_BY_2 | CP_MEM_TO_REG_0_UNK31 |
                        CP_MEM_TO_REG_0_CNT(0));
         tu_cs_emit_qw(cs, cmd->scratch_bo.iova +
                           ctrl_offset(flush_base[i].offset));
      }

      tu_cs_emit_regs(cs, A6XX_VPC_SO_FLUSH_BASE(i, .bo = &cmd->scratch_bo,
                                                    .bo_offset =
                                                       ctrl_offset(flush_base[i])));
   }

   if (cmd->state.streamout_enabled) {
      tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 12 + (2 * tf->prog_count));
      tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
      tu_cs_emit(cs, tf->vpc_so_buf_cntl);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(0));
      tu_cs_emit(cs, tf->ncomp[0]);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(1));
      tu_cs_emit(cs, tf->ncomp[1]);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(2));
      tu_cs_emit(cs, tf->ncomp[2]);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(3));
      tu_cs_emit(cs, tf->ncomp[3]);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
      tu_cs_emit(cs, A6XX_VPC_SO_CNTL_ENABLE);
      for (unsigned i = 0; i < tf->prog_count; i++) {
         tu_cs_emit(cs, REG_A6XX_VPC_SO_PROG);
         tu_cs_emit(cs, tf->prog[i]);
      }
   } else {
      tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 4);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
      tu_cs_emit(cs, 0);
   }
}
static VkResult
tu6_bind_draw_states(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     const struct tu_draw_info *draw)
{
   const struct tu_pipeline *pipeline = cmd->state.pipeline;
   const struct tu_dynamic_state *dynamic = &cmd->state.dynamic;
   struct tu_draw_state_group draw_state_groups[TU_DRAW_STATE_COUNT];
   uint32_t draw_state_group_count = 0;
   VkResult result;

   struct tu_descriptor_state *descriptors_state =
      &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];

   /* TODO lrz */

   tu_cs_emit_regs(cs,
                   A6XX_PC_PRIMITIVE_CNTL_0(.primitive_restart =
                                            pipeline->ia.primitive_restart && draw->indexed));

   if (cmd->state.dirty &
          (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_LINE_WIDTH)) {
      tu6_emit_gras_su_cntl(cs, pipeline->rast.gras_su_cntl,
                            dynamic->line_width);
   }

   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_COMPARE_MASK)) {
      tu6_emit_stencil_compare_mask(cs, dynamic->stencil_compare_mask.front,
                                    dynamic->stencil_compare_mask.back);
   }

   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_WRITE_MASK)) {
      tu6_emit_stencil_write_mask(cs, dynamic->stencil_write_mask.front,
                                  dynamic->stencil_write_mask.back);
   }

   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_REFERENCE)) {
      tu6_emit_stencil_reference(cs, dynamic->stencil_reference.front,
                                 dynamic->stencil_reference.back);
   }

   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_VIEWPORT) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_VIEWPORT)) {
      tu6_emit_viewport(cs, &cmd->state.dynamic.viewport.viewports[0]);
   }

   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_SCISSOR) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_SCISSOR)) {
      tu6_emit_scissor(cs, &cmd->state.dynamic.scissor.scissors[0]);
   }

   if (cmd->state.dirty &
       (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_VERTEX_BUFFERS)) {
      for (uint32_t i = 0; i < pipeline->vi.count; i++) {
         const uint32_t binding = pipeline->vi.bindings[i];
         const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
         const VkDeviceSize offset = buf->bo_offset +
                                     cmd->state.vb.offsets[binding];
         const VkDeviceSize size =
            offset < buf->size ? buf->size - offset : 0;

         tu_cs_emit_regs(cs,
                         A6XX_VFD_FETCH_BASE(i, .bo = buf->bo, .bo_offset = offset),
                         A6XX_VFD_FETCH_SIZE(i, size));
      }
   }

   if (cmd->state.dirty & TU_CMD_DIRTY_PIPELINE) {
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_PROGRAM,
            .enable_mask = ENABLE_DRAW,
            .ib = pipeline->program.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_PROGRAM_BINNING,
            .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
            .ib = pipeline->program.binning_state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VI,
            .enable_mask = ENABLE_DRAW,
            .ib = pipeline->vi.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VI_BINNING,
            .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
            .ib = pipeline->vi.binning_state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VP,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->vp.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_RAST,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->rast.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_DS,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->ds.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_BLEND,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->blend.state_ib,
         };
   }

   if (cmd->state.dirty &
       (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS | TU_CMD_DIRTY_PUSH_CONSTANTS)) {
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VS_CONST,
            .enable_mask = ENABLE_ALL,
            .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX)
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_FS_CONST,
            .enable_mask = ENABLE_DRAW,
            .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT)
         };
   }

   if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS)
      tu6_emit_streamout(cmd, cs);

   if (cmd->state.dirty &
       (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS)) {
      struct tu_cs_entry vs_tex, fs_tex_sysmem, fs_tex_gmem, fs_ibo;

      result = tu6_emit_textures(cmd, pipeline, descriptors_state,
                                 MESA_SHADER_VERTEX, &vs_tex, false);
      if (result != VK_SUCCESS)
         return result;

      /* TODO: we could emit just one texture descriptor draw state when there
       * are no input attachments, which is the most common case. We could
       * also split out the sampler state, which doesn't change even for input
       * attachments.
       */
      result = tu6_emit_textures(cmd, pipeline, descriptors_state,
                                 MESA_SHADER_FRAGMENT, &fs_tex_sysmem, true);
      if (result != VK_SUCCESS)
         return result;

      result = tu6_emit_textures(cmd, pipeline, descriptors_state,
                                 MESA_SHADER_FRAGMENT, &fs_tex_gmem, false);
      if (result != VK_SUCCESS)
         return result;

      result = tu6_emit_ibo(cmd, pipeline, descriptors_state,
                            MESA_SHADER_FRAGMENT, &fs_ibo);
      if (result != VK_SUCCESS)
         return result;

      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VS_TEX,
            .enable_mask = ENABLE_ALL,
            .ib = vs_tex,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_FS_TEX_GMEM,
            .enable_mask = CP_SET_DRAW_STATE__0_GMEM,
            .ib = fs_tex_gmem,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_FS_TEX_SYSMEM,
            .enable_mask = CP_SET_DRAW_STATE__0_SYSMEM,
            .ib = fs_tex_sysmem,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_FS_IBO,
            .enable_mask = ENABLE_DRAW,
            .ib = fs_ibo,
         };
   }

   struct tu_cs_entry vs_params;
   result = tu6_emit_vs_params(cmd, draw, &vs_params);
   if (result != VK_SUCCESS)
      return result;

   draw_state_groups[draw_state_group_count++] =
      (struct tu_draw_state_group) {
         .id = TU_DRAW_STATE_VS_PARAMS,
         .enable_mask = ENABLE_ALL,
         .ib = vs_params,
      };

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_group_count);
   for (uint32_t i = 0; i < draw_state_group_count; i++) {
      const struct tu_draw_state_group *group = &draw_state_groups[i];
      debug_assert((group->enable_mask & ~ENABLE_ALL) == 0);
      uint32_t cp_set_draw_state =
         CP_SET_DRAW_STATE__0_COUNT(group->ib.size / 4) |
         group->enable_mask |
         CP_SET_DRAW_STATE__0_GROUP_ID(group->id);
      uint64_t iova;
      if (group->ib.size) {
         iova = group->ib.bo->iova + group->ib.offset;
      } else {
         cp_set_draw_state |= CP_SET_DRAW_STATE__0_DISABLE;
         iova = 0;
      }

      tu_cs_emit(cs, cp_set_draw_state);
      tu_cs_emit_qw(cs, iova);
   }

   tu_cs_sanity_check(cs);

   /* track BOs */
   if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) {
      for (uint32_t i = 0; i < MAX_VBS; i++) {
         const struct tu_buffer *buf = cmd->state.vb.buffers[i];
         if (buf)
            tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
      }
   }
   if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
      unsigned i;
      for_each_bit(i, descriptors_state->valid) {
         struct tu_descriptor_set *set = descriptors_state->sets[i];
         for (unsigned j = 0; j < set->layout->buffer_count; ++j)
            if (set->descriptors[j]) {
               tu_bo_list_add(&cmd->bo_list, set->descriptors[j],
                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
            }
      }
   }
   if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS) {
      for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
         const struct tu_buffer *buf = cmd->state.streamout_buf.buffers[i];
         if (buf) {
            tu_bo_list_add(&cmd->bo_list, buf->bo,
                           MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
         }
      }
   }

   /* Fragment shader state overwrites compute shader state, so flag the
    * compute pipeline for re-emit.
    */
   cmd->state.dirty = TU_CMD_DIRTY_COMPUTE_PIPELINE;

   return VK_SUCCESS;
}
static void
tu6_emit_draw_indirect(struct tu_cmd_buffer *cmd,
                       struct tu_cs *cs,
                       const struct tu_draw_info *draw)
{
   const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;
   bool has_gs = cmd->state.pipeline->active_stages &
                 VK_SHADER_STAGE_GEOMETRY_BIT;

   tu_cs_emit_regs(cs,
                   A6XX_VFD_INDEX_OFFSET(draw->vertex_offset),
                   A6XX_VFD_INSTANCE_START_OFFSET(draw->first_instance));

   if (draw->indexed) {
      const enum a4xx_index_size index_size =
         tu6_index_size(cmd->state.index_type);
      const uint32_t index_bytes =
         (cmd->state.index_type == VK_INDEX_TYPE_UINT32) ? 4 : 2;
      const struct tu_buffer *index_buf = cmd->state.index_buffer;
      unsigned max_indices =
         (index_buf->size - cmd->state.index_offset) / index_bytes;

      const uint32_t cp_draw_indx =
         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
         CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
         COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;

      tu_cs_emit_pkt7(cs, CP_DRAW_INDX_INDIRECT, 6);
      tu_cs_emit(cs, cp_draw_indx);
      tu_cs_emit_qw(cs, index_buf->bo->iova + cmd->state.index_offset);
      tu_cs_emit(cs, A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(max_indices));
      tu_cs_emit_qw(cs, draw->indirect->bo->iova + draw->indirect_offset);
   } else {
      const uint32_t cp_draw_indx =
         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
         COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;

      tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT, 3);
      tu_cs_emit(cs, cp_draw_indx);
      tu_cs_emit_qw(cs, draw->indirect->bo->iova + draw->indirect_offset);
   }

   tu_bo_list_add(&cmd->bo_list, draw->indirect->bo, MSM_SUBMIT_BO_READ);
}
static void
tu6_emit_draw_direct(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     const struct tu_draw_info *draw)
{
   const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;
   bool has_gs = cmd->state.pipeline->active_stages &
                 VK_SHADER_STAGE_GEOMETRY_BIT;

   tu_cs_emit_regs(cs,
                   A6XX_VFD_INDEX_OFFSET(draw->vertex_offset),
                   A6XX_VFD_INSTANCE_START_OFFSET(draw->first_instance));

   /* TODO hw binning */
   if (draw->indexed) {
      const enum a4xx_index_size index_size =
         tu6_index_size(cmd->state.index_type);
      const uint32_t index_bytes =
         (cmd->state.index_type == VK_INDEX_TYPE_UINT32) ? 4 : 2;
      const struct tu_buffer *buf = cmd->state.index_buffer;
      const VkDeviceSize offset = buf->bo_offset + cmd->state.index_offset +
                                  index_bytes * draw->first_index;
      const uint32_t size = index_bytes * draw->count;

      const uint32_t cp_draw_indx =
         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
         CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
         COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;

      tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 7);
      tu_cs_emit(cs, cp_draw_indx);
      tu_cs_emit(cs, draw->instance_count);
      tu_cs_emit(cs, draw->count);
      tu_cs_emit(cs, 0x0); /* XXX */
      tu_cs_emit_qw(cs, buf->bo->iova + offset);
      tu_cs_emit(cs, size);
   } else {
      const uint32_t cp_draw_indx =
         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
         COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;

      tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
      tu_cs_emit(cs, cp_draw_indx);
      tu_cs_emit(cs, draw->instance_count);
      tu_cs_emit(cs, draw->count);
   }
}
static void
tu_draw(struct tu_cmd_buffer *cmd, const struct tu_draw_info *draw)
{
   struct tu_cs *cs = &cmd->draw_cs;
   VkResult result;

   result = tu6_bind_draw_states(cmd, cs, draw);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   if (draw->indirect)
      tu6_emit_draw_indirect(cmd, cs, draw);
   else
      tu6_emit_draw_direct(cmd, cs, draw);

   if (cmd->state.streamout_enabled) {
      for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
         if (cmd->state.streamout_enabled & (1 << i))
            tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i, false);
      }
   }

   cmd->wait_for_idle = true;

   tu_cs_sanity_check(cs);
}
void
tu_CmdDraw(VkCommandBuffer commandBuffer,
           uint32_t vertexCount,
           uint32_t instanceCount,
           uint32_t firstVertex,
           uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.count = vertexCount;
   info.instance_count = instanceCount;
   info.first_instance = firstInstance;
   info.vertex_offset = firstVertex;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
                  uint32_t indexCount,
                  uint32_t instanceCount,
                  uint32_t firstIndex,
                  int32_t vertexOffset,
                  uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = indexCount;
   info.instance_count = instanceCount;
   info.first_index = firstIndex;
   info.vertex_offset = vertexOffset;
   info.first_instance = firstInstance;

   tu_draw(cmd_buffer, &info);
}
void
tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
                   VkBuffer _buffer,
                   VkDeviceSize offset,
                   uint32_t drawCount,
                   uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}
void
tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
                          VkBuffer _buffer,
                          VkDeviceSize offset,
                          uint32_t drawCount,
                          uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}
void tu_CmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer,
                                    uint32_t instanceCount,
                                    uint32_t firstInstance,
                                    VkBuffer _counterBuffer,
                                    VkDeviceSize counterBufferOffset,
                                    uint32_t counterOffset,
                                    uint32_t vertexStride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _counterBuffer);

   struct tu_draw_info info = {};

   info.instance_count = instanceCount;
   info.first_instance = firstInstance;
   info.streamout_buffer = buffer;
   info.streamout_buffer_offset = counterBufferOffset;
   info.stride = vertexStride;

   tu_draw(cmd_buffer, &info);
}
struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];
   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};
static void
tu_emit_compute_driver_params(struct tu_cs *cs, struct tu_pipeline *pipeline,
                              const struct tu_dispatch_info *info)
{
   gl_shader_stage type = MESA_SHADER_COMPUTE;
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   const struct ir3_const_state *const_state = &link->const_state;
   uint32_t offset = const_state->offsets.driver_param;

   if (link->constlen <= offset)
      return;

   if (!info->indirect) {
      uint32_t driver_params[IR3_DP_CS_COUNT] = {
         [IR3_DP_NUM_WORK_GROUPS_X] = info->blocks[0],
         [IR3_DP_NUM_WORK_GROUPS_Y] = info->blocks[1],
         [IR3_DP_NUM_WORK_GROUPS_Z] = info->blocks[2],
         [IR3_DP_LOCAL_GROUP_SIZE_X] = pipeline->compute.local_size[0],
         [IR3_DP_LOCAL_GROUP_SIZE_Y] = pipeline->compute.local_size[1],
         [IR3_DP_LOCAL_GROUP_SIZE_Z] = pipeline->compute.local_size[2],
      };

      uint32_t num_consts = MIN2(const_state->num_driver_params,
                                 (link->constlen - offset) * 4);
      /* push constants */
      tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_consts);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                 CP_LOAD_STATE6_0_NUM_UNIT(num_consts / 4));
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, 0);
      uint32_t i;
      for (i = 0; i < num_consts; i++)
         tu_cs_emit(cs, driver_params[i]);
   } else {
      tu_finishme("Indirect driver params");
   }
}
static void
tu_dispatch(struct tu_cmd_buffer *cmd,
            const struct tu_dispatch_info *info)
{
   struct tu_cs *cs = &cmd->cs;
   struct tu_pipeline *pipeline = cmd->state.compute_pipeline;
   struct tu_descriptor_state *descriptors_state =
      &cmd->descriptors[VK_PIPELINE_BIND_POINT_COMPUTE];
   VkResult result;

   if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_PIPELINE)
      tu_cs_emit_ib(cs, &pipeline->program.state_ib);

   struct tu_cs_entry ib;

   ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE);
   if (ib.size)
      tu_cs_emit_ib(cs, &ib);

   tu_emit_compute_driver_params(cs, pipeline, info);

   result = tu6_emit_textures(cmd, pipeline, descriptors_state,
                              MESA_SHADER_COMPUTE, &ib, false);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   if (ib.size)
      tu_cs_emit_ib(cs, &ib);

   result = tu6_emit_ibo(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE, &ib);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   if (ib.size)
      tu_cs_emit_ib(cs, &ib);

   /* track BOs */
   if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
      unsigned i;
      for_each_bit(i, descriptors_state->valid) {
         struct tu_descriptor_set *set = descriptors_state->sets[i];
         for (unsigned j = 0; j < set->layout->buffer_count; ++j)
            if (set->descriptors[j]) {
               tu_bo_list_add(&cmd->bo_list, set->descriptors[j],
                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
            }
      }
   }

   /* Compute shader state overwrites fragment shader state, so we flag the
    * graphics pipeline for re-emit.
    */
   cmd->state.dirty = TU_CMD_DIRTY_PIPELINE;

   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));

   const uint32_t *local_size = pipeline->compute.local_size;
   const uint32_t *num_groups = info->blocks;
   tu_cs_emit_regs(cs,
                   A6XX_HLSQ_CS_NDRANGE_0(.kerneldim = 3,
                                          .localsizex = local_size[0] - 1,
                                          .localsizey = local_size[1] - 1,
                                          .localsizez = local_size[2] - 1),
                   A6XX_HLSQ_CS_NDRANGE_1(.globalsize_x = local_size[0] * num_groups[0]),
                   A6XX_HLSQ_CS_NDRANGE_2(.globaloff_x = 0),
                   A6XX_HLSQ_CS_NDRANGE_3(.globalsize_y = local_size[1] * num_groups[1]),
                   A6XX_HLSQ_CS_NDRANGE_4(.globaloff_y = 0),
                   A6XX_HLSQ_CS_NDRANGE_5(.globalsize_z = local_size[2] * num_groups[2]),
                   A6XX_HLSQ_CS_NDRANGE_6(.globaloff_z = 0));

   tu_cs_emit_regs(cs,
                   A6XX_HLSQ_CS_KERNEL_GROUP_X(1),
                   A6XX_HLSQ_CS_KERNEL_GROUP_Y(1),
                   A6XX_HLSQ_CS_KERNEL_GROUP_Z(1));

   if (info->indirect) {
      uint64_t iova = tu_buffer_iova(info->indirect) + info->indirect_offset;

      tu_bo_list_add(&cmd->bo_list, info->indirect->bo,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);

      tu_cs_emit_pkt7(cs, CP_EXEC_CS_INDIRECT, 4);
      tu_cs_emit(cs, 0x00000000);
      tu_cs_emit_qw(cs, iova);
      tu_cs_emit(cs,
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(local_size[0] - 1) |
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(local_size[1] - 1) |
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(local_size[2] - 1));
   } else {
      tu_cs_emit_pkt7(cs, CP_EXEC_CS, 4);
      tu_cs_emit(cs, 0x00000000);
      tu_cs_emit(cs, CP_EXEC_CS_1_NGROUPS_X(info->blocks[0]));
      tu_cs_emit(cs, CP_EXEC_CS_2_NGROUPS_Y(info->blocks[1]));
      tu_cs_emit(cs, CP_EXEC_CS_3_NGROUPS_Z(info->blocks[2]));
   }

   tu_cs_emit_wfi(cs);

   tu6_emit_cache_flush(cmd, cs);
}
void
tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
                   uint32_t base_x,
                   uint32_t base_y,
                   uint32_t base_z,
                   uint32_t x,
                   uint32_t y,
                   uint32_t z)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_dispatch_info info = {};

   info.blocks[0] = x;
   info.blocks[1] = y;
   info.blocks[2] = z;

   info.offsets[0] = base_x;
   info.offsets[1] = base_y;
   info.offsets[2] = base_z;
   tu_dispatch(cmd_buffer, &info);
}
void
tu_CmdDispatch(VkCommandBuffer commandBuffer,
               uint32_t x,
               uint32_t y,
               uint32_t z)
{
   tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
}
void
tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
                       VkBuffer _buffer,
                       VkDeviceSize offset)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_dispatch_info info = {};

   info.indirect = buffer;
   info.indirect_offset = offset;

   tu_dispatch(cmd_buffer, &info);
}
void
tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   tu_cs_end(&cmd_buffer->draw_cs);
   tu_cs_end(&cmd_buffer->draw_epilogue_cs);

   if (use_sysmem_rendering(cmd_buffer))
      tu_cmd_render_sysmem(cmd_buffer);
   else
      tu_cmd_render_tiles(cmd_buffer);

   /* discard draw_cs and draw_epilogue_cs entries now that the tiles are
    * rendered */
   tu_cs_discard_entries(&cmd_buffer->draw_cs);
   tu_cs_begin(&cmd_buffer->draw_cs);
   tu_cs_discard_entries(&cmd_buffer->draw_epilogue_cs);
   tu_cs_begin(&cmd_buffer->draw_epilogue_cs);

   cmd_buffer->state.pass = NULL;
   cmd_buffer->state.subpass = NULL;
   cmd_buffer->state.framebuffer = NULL;
}

void
tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
                     const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdEndRenderPass(commandBuffer);
}
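
/* Extra inputs for tu_barrier(); vkCmdPipelineBarrier passes an empty event
 * list here, and the stage masks are currently unused below.
 */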
struct tu_barrier_info
{
   uint32_t eventCount;
   const VkEvent *pEvents;
   VkPipelineStageFlags srcStageMask;
};

static void
tu_barrier(struct tu_cmd_buffer *cmd,
           uint32_t memoryBarrierCount,
           const VkMemoryBarrier *pMemoryBarriers,
           uint32_t bufferMemoryBarrierCount,
           const VkBufferMemoryBarrier *pBufferMemoryBarriers,
           uint32_t imageMemoryBarrierCount,
           const VkImageMemoryBarrier *pImageMemoryBarriers,
           const struct tu_barrier_info *info)
{
   /* renderpass case is only for subpass self-dependencies,
    * which means syncing the render output with the texture cache.
    * note: only the CACHE_INVALIDATE is needed in GMEM mode,
    * and in sysmem mode we might not need either color/depth flush
    */
   if (cmd->state.pass) {
      tu6_emit_event_write(cmd, &cmd->draw_cs, PC_CCU_FLUSH_COLOR_TS, true);
      tu6_emit_event_write(cmd, &cmd->draw_cs, PC_CCU_FLUSH_DEPTH_TS, true);
      tu6_emit_event_write(cmd, &cmd->draw_cs, CACHE_INVALIDATE, false);
      return;
   }
}

void
tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
                      VkPipelineStageFlags srcStageMask,
                      VkPipelineStageFlags dstStageMask,
                      VkDependencyFlags dependencyFlags,
                      uint32_t memoryBarrierCount,
                      const VkMemoryBarrier *pMemoryBarriers,
                      uint32_t bufferMemoryBarrierCount,
                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                      uint32_t imageMemoryBarrierCount,
                      const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = 0;
   info.pEvents = NULL;
   info.srcStageMask = srcStageMask;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}
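
/* A VkEvent is backed by a single BO holding one 32-bit word; set/reset are
 * implemented as a CP_MEM_WRITE of 1 or 0 to that word.
 */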
static void
write_event(struct tu_cmd_buffer *cmd, struct tu_event *event, unsigned value)
{
   struct tu_cs *cs = &cmd->cs;

   tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_WRITE);

   /* TODO: any flush required before/after? */

   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
   tu_cs_emit_qw(cs, event->bo.iova); /* ADDR_LO/HI */
   tu_cs_emit(cs, value);
}

void
tu_CmdSetEvent(VkCommandBuffer commandBuffer,
               VkEvent _event,
               VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd, event, 1);
}

void
tu_CmdResetEvent(VkCommandBuffer commandBuffer,
                 VkEvent _event,
                 VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd, event, 0);
}
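
/* Waiting on events makes the CP poll each event's BO until the word equals
 * 1 (WRITE_EQ against REF=1 under a full mask), with a short delay between
 * polls.
 */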
void
tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
                 uint32_t eventCount,
                 const VkEvent *pEvents,
                 VkPipelineStageFlags srcStageMask,
                 VkPipelineStageFlags dstStageMask,
                 uint32_t memoryBarrierCount,
                 const VkMemoryBarrier *pMemoryBarriers,
                 uint32_t bufferMemoryBarrierCount,
                 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                 uint32_t imageMemoryBarrierCount,
                 const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *cs = &cmd->cs;

   /* TODO: any flush required before/after? (CP_WAIT_FOR_ME?) */

   for (uint32_t i = 0; i < eventCount; i++) {
      TU_FROM_HANDLE(tu_event, event, pEvents[i]);

      tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_READ);

      tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
      tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                     CP_WAIT_REG_MEM_0_POLL_MEMORY);
      tu_cs_emit_qw(cs, event->bo.iova); /* POLL_ADDR_LO/HI */
      tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(1));
      tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0u));
      tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(20));
   }
}
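
/* Presumably a no-op because only single-device groups are exposed, so the
 * device mask never deselects the one GPU (assumption, not stated in the
 * source).
 */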
void
tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
{
}