/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
28 #include "tu_private.h"
30 #include "registers/adreno_pm4.xml.h"
31 #include "registers/adreno_common.xml.h"
32 #include "registers/a6xx.xml.h"
34 #include "vk_format.h"
39 #define OVERFLOW_FLAG_REG REG_A6XX_CP_SCRATCH_REG(0)
void
tu_bo_list_init(struct tu_bo_list *list)
{
   list->count = list->capacity = 0;
   list->bo_infos = NULL;
}
void
tu_bo_list_destroy(struct tu_bo_list *list)
{
   free(list->bo_infos);
}
void
tu_bo_list_reset(struct tu_bo_list *list)
{
   list->count = 0;
}
/**
 * \a flags consists of MSM_SUBMIT_BO_FLAGS.
 */
static uint32_t
tu_bo_list_add_info(struct tu_bo_list *list,
                    const struct drm_msm_gem_submit_bo *bo_info)
{
   assert(bo_info->handle != 0);

   for (uint32_t i = 0; i < list->count; ++i) {
      if (list->bo_infos[i].handle == bo_info->handle) {
         assert(list->bo_infos[i].presumed == bo_info->presumed);
         list->bo_infos[i].flags |= bo_info->flags;
         return i;
      }
   }

   /* grow list->bo_infos if needed */
   if (list->count == list->capacity) {
      uint32_t new_capacity = MAX2(2 * list->count, 16);
      struct drm_msm_gem_submit_bo *new_bo_infos = realloc(
         list->bo_infos, new_capacity * sizeof(struct drm_msm_gem_submit_bo));
      if (!new_bo_infos)
         return TU_BO_LIST_FAILED;
      list->bo_infos = new_bo_infos;
      list->capacity = new_capacity;
   }

   list->bo_infos[list->count] = *bo_info;
   return list->count++;
}
uint32_t
tu_bo_list_add(struct tu_bo_list *list,
               const struct tu_bo *bo,
               uint32_t flags)
{
   return tu_bo_list_add_info(list, &(struct drm_msm_gem_submit_bo) {
      .flags = flags,
      .handle = bo->gem_handle,
      .presumed = bo->iova,
   });
}
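/* Illustrative usage sketch (hypothetical call site, not code from this
 * file): while recording, each buffer object the GPU will touch gets
 * registered once, e.g.
 *
 *    tu_bo_list_add(&cmd->bo_list, &buf->bo,
 *                   MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
 *
 * Duplicate adds only OR in the new flags (see tu_bo_list_add_info()
 * above), and the resulting drm_msm_gem_submit_bo array is what the
 * kernel submit ioctl eventually consumes.
 */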
VkResult
tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other)
{
   for (uint32_t i = 0; i < other->count; i++) {
      if (tu_bo_list_add_info(list, other->bo_infos + i) == TU_BO_LIST_FAILED)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   return VK_SUCCESS;
}
static void
tu_tiling_config_update_tile_layout(struct tu_tiling_config *tiling,
                                    const struct tu_device *dev,
                                    uint32_t pixels)
{
   const uint32_t tile_align_w = dev->physical_device->tile_align_w;
   const uint32_t tile_align_h = dev->physical_device->tile_align_h;
   const uint32_t max_tile_width = 1024; /* A6xx */

   tiling->tile0.offset = (VkOffset2D) {
      .x = tiling->render_area.offset.x & ~(tile_align_w - 1),
      .y = tiling->render_area.offset.y & ~(tile_align_h - 1),
   };

   const uint32_t ra_width =
      tiling->render_area.extent.width +
      (tiling->render_area.offset.x - tiling->tile0.offset.x);
   const uint32_t ra_height =
      tiling->render_area.extent.height +
      (tiling->render_area.offset.y - tiling->tile0.offset.y);

   /* start from 1 tile */
   tiling->tile_count = (VkExtent2D) {
      .width = 1,
      .height = 1,
   };
   tiling->tile0.extent = (VkExtent2D) {
      .width = align(ra_width, tile_align_w),
      .height = align(ra_height, tile_align_h),
   };

   /* do not exceed max tile width */
   while (tiling->tile0.extent.width > max_tile_width) {
      tiling->tile_count.width++;
      tiling->tile0.extent.width =
         align(ra_width / tiling->tile_count.width, tile_align_w);
   }

   /* do not exceed gmem size */
   while (tiling->tile0.extent.width * tiling->tile0.extent.height > pixels) {
      if (tiling->tile0.extent.width > MAX2(tile_align_w, tiling->tile0.extent.height)) {
         tiling->tile_count.width++;
         tiling->tile0.extent.width =
            align(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
      } else {
         /* if this assert fails then layout is impossible.. */
         assert(tiling->tile0.extent.height > tile_align_h);
         tiling->tile_count.height++;
         tiling->tile0.extent.height =
            align(DIV_ROUND_UP(ra_height, tiling->tile_count.height), tile_align_h);
      }
   }
}
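/* Worked example for the loops above (all numbers assumed for
 * illustration, not taken from a real device table): render area
 * 1920x1080 at offset (0, 0), tile_align_w/h = 32, pixels = 600000.
 *
 *    tile0.extent starts at align(1920, 32) x align(1080, 32) = 1920x1088
 *    1920 > max_tile_width (1024): tile_count.width -> 2,
 *       tile0.extent.width = align(1920 / 2, 32) = 960
 *    960 * 1088 = 1044480 > 600000, and 960 <= MAX2(32, 1088), so split
 *       vertically: tile_count.height -> 2,
 *       tile0.extent.height = align(DIV_ROUND_UP(1080, 2), 32) = 544
 *    960 * 544 = 522240 <= 600000: done, a 2x2 grid of 960x544 tiles.
 */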
static void
tu_tiling_config_update_pipe_layout(struct tu_tiling_config *tiling,
                                    const struct tu_device *dev)
{
   const uint32_t max_pipe_count = 32; /* A6xx */

   /* start from 1 tile per pipe */
   tiling->pipe0 = (VkExtent2D) {
      .width = 1,
      .height = 1,
   };
   tiling->pipe_count = tiling->tile_count;

   /* do not exceed max pipe count vertically */
   while (tiling->pipe_count.height > max_pipe_count) {
      tiling->pipe0.height += 2;
      tiling->pipe_count.height =
         (tiling->tile_count.height + tiling->pipe0.height - 1) /
         tiling->pipe0.height;
   }

   /* do not exceed max pipe count */
   while (tiling->pipe_count.width * tiling->pipe_count.height >
          max_pipe_count) {
      tiling->pipe0.width += 1;
      tiling->pipe_count.width =
         (tiling->tile_count.width + tiling->pipe0.width - 1) /
         tiling->pipe0.width;
   }
}
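/* Continuing the worked example above: a 2x2 tile grid yields
 * pipe_count = tile_count = 2x2 (4 pipes, one tile each), so neither
 * loop runs.  The loops only matter for large grids; e.g. a 40x1 grid
 * exceeds 32 pipes, so pipe0.width grows to 2 and pipe_count.width
 * becomes (40 + 2 - 1) / 2 = 20.
 */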
static void
tu_tiling_config_update_pipes(struct tu_tiling_config *tiling,
                              const struct tu_device *dev)
{
   const uint32_t max_pipe_count = 32; /* A6xx */
   const uint32_t used_pipe_count =
      tiling->pipe_count.width * tiling->pipe_count.height;
   const VkExtent2D last_pipe = {
      .width = (tiling->tile_count.width - 1) % tiling->pipe0.width + 1,
      .height = (tiling->tile_count.height - 1) % tiling->pipe0.height + 1,
   };

   assert(used_pipe_count <= max_pipe_count);
   assert(max_pipe_count <= ARRAY_SIZE(tiling->pipe_config));

   for (uint32_t y = 0; y < tiling->pipe_count.height; y++) {
      for (uint32_t x = 0; x < tiling->pipe_count.width; x++) {
         const uint32_t pipe_x = tiling->pipe0.width * x;
         const uint32_t pipe_y = tiling->pipe0.height * y;
         const uint32_t pipe_w = (x == tiling->pipe_count.width - 1)
                                    ? last_pipe.width
                                    : tiling->pipe0.width;
         const uint32_t pipe_h = (y == tiling->pipe_count.height - 1)
                                    ? last_pipe.height
                                    : tiling->pipe0.height;
         const uint32_t n = tiling->pipe_count.width * y + x;

         tiling->pipe_config[n] = A6XX_VSC_PIPE_CONFIG_REG_X(pipe_x) |
                                  A6XX_VSC_PIPE_CONFIG_REG_Y(pipe_y) |
                                  A6XX_VSC_PIPE_CONFIG_REG_W(pipe_w) |
                                  A6XX_VSC_PIPE_CONFIG_REG_H(pipe_h);
         tiling->pipe_sizes[n] = CP_SET_BIN_DATA5_0_VSC_SIZE(pipe_w * pipe_h);
      }
   }

   memset(tiling->pipe_config + used_pipe_count, 0,
          sizeof(uint32_t) * (max_pipe_count - used_pipe_count));
}
static void
tu_tiling_config_get_tile(const struct tu_tiling_config *tiling,
                          const struct tu_device *dev,
                          uint32_t tx,
                          uint32_t ty,
                          struct tu_tile *tile)
{
   /* find the pipe and the slot for tile (tx, ty) */
   const uint32_t px = tx / tiling->pipe0.width;
   const uint32_t py = ty / tiling->pipe0.height;
   const uint32_t sx = tx - tiling->pipe0.width * px;
   const uint32_t sy = ty - tiling->pipe0.height * py;

   assert(tx < tiling->tile_count.width && ty < tiling->tile_count.height);
   assert(px < tiling->pipe_count.width && py < tiling->pipe_count.height);
   assert(sx < tiling->pipe0.width && sy < tiling->pipe0.height);

   /* convert to 1D indices */
   tile->pipe = tiling->pipe_count.width * py + px;
   tile->slot = tiling->pipe0.width * sy + sx;

   /* get the blit area for the tile */
   tile->begin = (VkOffset2D) {
      .x = tiling->tile0.offset.x + tiling->tile0.extent.width * tx,
      .y = tiling->tile0.offset.y + tiling->tile0.extent.height * ty,
   };
   tile->end.x =
      (tx == tiling->tile_count.width - 1)
         ? tiling->render_area.offset.x + tiling->render_area.extent.width
         : tile->begin.x + tiling->tile0.extent.width;
   tile->end.y =
      (ty == tiling->tile_count.height - 1)
         ? tiling->render_area.offset.y + tiling->render_area.extent.height
         : tile->begin.y + tiling->tile0.extent.height;
}
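/* Example mapping (values assumed for illustration): with pipe0 = 2x2
 * tiles and pipe_count = 2x2, tile (tx, ty) = (3, 1) gives
 * (px, py) = (1, 0) and (sx, sy) = (1, 1), so tile->pipe = 2*0 + 1 = 1
 * and tile->slot = 2*1 + 1 = 3.
 */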
enum a3xx_msaa_samples
tu_msaa_samples(uint32_t samples)
{
   switch (samples) {
   case 1:
      return MSAA_ONE;
   case 2:
      return MSAA_TWO;
   case 4:
      return MSAA_FOUR;
   case 8:
      return MSAA_EIGHT;
   default:
      assert(!"invalid sample count");
      return MSAA_ONE;
   }
}
static enum a4xx_index_size
tu6_index_size(VkIndexType type)
{
   switch (type) {
   case VK_INDEX_TYPE_UINT16:
      return INDEX4_SIZE_16_BIT;
   case VK_INDEX_TYPE_UINT32:
      return INDEX4_SIZE_32_BIT;
   default:
      unreachable("invalid VkIndexType");
      return INDEX4_SIZE_8_BIT;
   }
}
static void
tu6_emit_marker(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu_cs_emit_write_reg(cs, cmd->marker_reg, ++cmd->marker_seqno);
}
unsigned
tu6_emit_event_write(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     enum vgt_event_type event,
                     bool need_seqno)
{
   unsigned seqno = 0;

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
   tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
   if (need_seqno) {
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
      seqno = ++cmd->scratch_seqno;
      tu_cs_emit(cs, seqno);
   }

   return seqno;
}
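/* The returned seqno is what callers fence on: the CP writes it to
 * scratch_bo when the event retires, and a later CP_WAIT_REG_MEM or
 * CP_WAIT_MEM_GTE polls that address (see tu6_cache_flush() below).
 */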
static void
tu6_emit_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu6_emit_event_write(cmd, cs, 0x31, false);
}
static void
tu6_emit_lrz_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu6_emit_event_write(cmd, cs, LRZ_FLUSH, false);
}
static void
tu6_emit_wfi(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   if (cmd->wait_for_idle) {
      tu_cs_emit_wfi(cs);
      cmd->wait_for_idle = false;
   }
}
static void
tu6_emit_flag_buffer(struct tu_cs *cs, const struct tu_image_view *iview)
{
   uint64_t va = tu_image_ubwc_base(iview->image, iview->base_mip, iview->base_layer);
   uint32_t pitch = tu_image_ubwc_pitch(iview->image, iview->base_mip);
   uint32_t size = tu_image_ubwc_size(iview->image, iview->base_mip);
   if (iview->image->layout.ubwc_size) {
      tu_cs_emit_qw(cs, va);
      tu_cs_emit(cs, A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH(pitch) |
                     A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH(size >> 2));
   } else {
      tu_cs_emit_qw(cs, 0);
      tu_cs_emit(cs, 0);
   }
}
static void
tu6_emit_zs(struct tu_cmd_buffer *cmd,
            const struct tu_subpass *subpass,
            struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;

   const uint32_t a = subpass->depth_stencil_attachment.attachment;
   if (a == VK_ATTACHMENT_UNUSED) {
      tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
      tu_cs_emit(cs, A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH6_NONE));
      tu_cs_emit(cs, 0x00000000); /* RB_DEPTH_BUFFER_PITCH */
      tu_cs_emit(cs, 0x00000000); /* RB_DEPTH_BUFFER_ARRAY_PITCH */
      tu_cs_emit(cs, 0x00000000); /* RB_DEPTH_BUFFER_BASE_LO */
      tu_cs_emit(cs, 0x00000000); /* RB_DEPTH_BUFFER_BASE_HI */
      tu_cs_emit(cs, 0x00000000); /* RB_DEPTH_BUFFER_BASE_GMEM */

      tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
      tu_cs_emit(cs,
                 A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH6_NONE));

      tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
      tu_cs_emit(cs, 0x00000000); /* RB_DEPTH_FLAG_BUFFER_BASE_LO */
      tu_cs_emit(cs, 0x00000000); /* RB_DEPTH_FLAG_BUFFER_BASE_HI */
      tu_cs_emit(cs, 0x00000000); /* GRAS_LRZ_BUFFER_PITCH */
      tu_cs_emit(cs, 0x00000000); /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
      tu_cs_emit(cs, 0x00000000); /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI */

      tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCIL_INFO, 1);
      tu_cs_emit(cs, 0x00000000); /* RB_STENCIL_INFO */

      return;
   }

   const struct tu_image_view *iview = fb->attachments[a].attachment;
   enum a6xx_depth_format fmt = tu6_pipe2depth(iview->vk_format);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
   tu_cs_emit(cs, A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));
   tu_cs_emit(cs, A6XX_RB_DEPTH_BUFFER_PITCH(tu_image_stride(iview->image, iview->base_mip)));
   tu_cs_emit(cs, A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(iview->image->layout.layer_size));
   tu_cs_emit_qw(cs, tu_image_base(iview->image, iview->base_mip, iview->base_layer));
   tu_cs_emit(cs, cmd->state.pass->attachments[a].gmem_offset);

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
   tu_cs_emit(cs, A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
   tu6_emit_flag_buffer(cs, iview);

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
   tu_cs_emit(cs, 0x00000000); /* RB_DEPTH_FLAG_BUFFER_BASE_LO */
   tu_cs_emit(cs, 0x00000000); /* RB_DEPTH_FLAG_BUFFER_BASE_HI */
   tu_cs_emit(cs, 0x00000000); /* GRAS_LRZ_BUFFER_PITCH */
   tu_cs_emit(cs, 0x00000000); /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
   tu_cs_emit(cs, 0x00000000); /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI */

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCIL_INFO, 1);
   tu_cs_emit(cs, 0x00000000); /* RB_STENCIL_INFO */
}
static void
tu6_emit_mrt(struct tu_cmd_buffer *cmd,
             const struct tu_subpass *subpass,
             struct tu_cs *cs)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   unsigned char mrt_comp[MAX_RTS] = { 0 };
   unsigned srgb_cntl = 0;

   for (uint32_t i = 0; i < subpass->color_count; ++i) {
      uint32_t a = subpass->color_attachments[i].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      const struct tu_image_view *iview = fb->attachments[a].attachment;
      const enum a6xx_tile_mode tile_mode =
         tu6_get_image_tile_mode(iview->image, iview->base_mip);

      mrt_comp[i] = 0xf;

      if (vk_format_is_srgb(iview->vk_format))
         srgb_cntl |= (1 << i);

      const struct tu_native_format *format =
         tu6_get_native_format(iview->vk_format);
      assert(format && format->rb >= 0);

      tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_BUF_INFO(i), 6);
      tu_cs_emit(cs, A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(format->rb) |
                     A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(tile_mode) |
                     A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(format->swap));
      tu_cs_emit(cs, A6XX_RB_MRT_PITCH(tu_image_stride(iview->image, iview->base_mip)));
      tu_cs_emit(cs, A6XX_RB_MRT_ARRAY_PITCH(iview->image->layout.layer_size));
      tu_cs_emit_qw(cs, tu_image_base(iview->image, iview->base_mip, iview->base_layer));
      tu_cs_emit(cs, cmd->state.pass->attachments[a].gmem_offset);

      tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_MRT_REG(i), 1);
      tu_cs_emit(cs, A6XX_SP_FS_MRT_REG_COLOR_FORMAT(format->rb) |
                     COND(vk_format_is_sint(iview->vk_format), A6XX_SP_FS_MRT_REG_COLOR_SINT) |
                     COND(vk_format_is_uint(iview->vk_format), A6XX_SP_FS_MRT_REG_COLOR_UINT));

      tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_FLAG_BUFFER(i), 3);
      tu6_emit_flag_buffer(cs, iview);
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_SRGB_CNTL, 1);
   tu_cs_emit(cs, srgb_cntl);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_SRGB_CNTL, 1);
   tu_cs_emit(cs, srgb_cntl);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_RENDER_COMPONENTS, 1);
   tu_cs_emit(cs, A6XX_RB_RENDER_COMPONENTS_RT0(mrt_comp[0]) |
                  A6XX_RB_RENDER_COMPONENTS_RT1(mrt_comp[1]) |
                  A6XX_RB_RENDER_COMPONENTS_RT2(mrt_comp[2]) |
                  A6XX_RB_RENDER_COMPONENTS_RT3(mrt_comp[3]) |
                  A6XX_RB_RENDER_COMPONENTS_RT4(mrt_comp[4]) |
                  A6XX_RB_RENDER_COMPONENTS_RT5(mrt_comp[5]) |
                  A6XX_RB_RENDER_COMPONENTS_RT6(mrt_comp[6]) |
                  A6XX_RB_RENDER_COMPONENTS_RT7(mrt_comp[7]));

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_RENDER_COMPONENTS, 1);
   tu_cs_emit(cs, A6XX_SP_FS_RENDER_COMPONENTS_RT0(mrt_comp[0]) |
                  A6XX_SP_FS_RENDER_COMPONENTS_RT1(mrt_comp[1]) |
                  A6XX_SP_FS_RENDER_COMPONENTS_RT2(mrt_comp[2]) |
                  A6XX_SP_FS_RENDER_COMPONENTS_RT3(mrt_comp[3]) |
                  A6XX_SP_FS_RENDER_COMPONENTS_RT4(mrt_comp[4]) |
                  A6XX_SP_FS_RENDER_COMPONENTS_RT5(mrt_comp[5]) |
                  A6XX_SP_FS_RENDER_COMPONENTS_RT6(mrt_comp[6]) |
                  A6XX_SP_FS_RENDER_COMPONENTS_RT7(mrt_comp[7]));
}
static void
tu6_emit_msaa(struct tu_cmd_buffer *cmd,
              const struct tu_subpass *subpass,
              struct tu_cs *cs)
{
   const enum a3xx_msaa_samples samples = tu_msaa_samples(subpass->samples);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_RAS_MSAA_CNTL, 2);
   tu_cs_emit(cs, A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(samples));
   tu_cs_emit(cs, A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(samples) |
                  COND(samples == MSAA_ONE, A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE));

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_RAS_MSAA_CNTL, 2);
   tu_cs_emit(cs, A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(samples));
   tu_cs_emit(cs, A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(samples) |
                  COND(samples == MSAA_ONE, A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE));

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_RAS_MSAA_CNTL, 2);
   tu_cs_emit(cs, A6XX_RB_RAS_MSAA_CNTL_SAMPLES(samples));
   tu_cs_emit(cs, A6XX_RB_DEST_MSAA_CNTL_SAMPLES(samples) |
                  COND(samples == MSAA_ONE, A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE));

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_MSAA_CNTL, 1);
   tu_cs_emit(cs, A6XX_RB_MSAA_CNTL_SAMPLES(samples));
}
static void
tu6_emit_bin_size(struct tu_cmd_buffer *cmd, struct tu_cs *cs, uint32_t flags)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
   const uint32_t bin_w = tiling->tile0.extent.width;
   const uint32_t bin_h = tiling->tile0.extent.height;

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_BIN_CONTROL, 1);
   tu_cs_emit(cs, A6XX_GRAS_BIN_CONTROL_BINW(bin_w) |
                  A6XX_GRAS_BIN_CONTROL_BINH(bin_h) | flags);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_BIN_CONTROL, 1);
   tu_cs_emit(cs, A6XX_RB_BIN_CONTROL_BINW(bin_w) |
                  A6XX_RB_BIN_CONTROL_BINH(bin_h) | flags);

   /* no flag for RB_BIN_CONTROL2... */
   tu_cs_emit_pkt4(cs, REG_A6XX_RB_BIN_CONTROL2, 1);
   tu_cs_emit(cs, A6XX_RB_BIN_CONTROL2_BINW(bin_w) |
                  A6XX_RB_BIN_CONTROL2_BINH(bin_h));
}
static void
tu6_emit_render_cntl(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     bool binning)
{
   uint32_t cntl = 0;
   cntl |= A6XX_RB_RENDER_CNTL_UNK4;
   if (binning)
      cntl |= A6XX_RB_RENDER_CNTL_BINNING;

   tu_cs_emit_pkt7(cs, CP_REG_WRITE, 3);
   tu_cs_emit(cs, 0x2);
   tu_cs_emit(cs, REG_A6XX_RB_RENDER_CNTL);
   tu_cs_emit(cs, cntl);
}
static void
tu6_emit_blit_scissor(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool align)
{
   const VkRect2D *render_area = &cmd->state.tiling_config.render_area;
   uint32_t x1 = render_area->offset.x;
   uint32_t y1 = render_area->offset.y;
   uint32_t x2 = x1 + render_area->extent.width - 1;
   uint32_t y2 = y1 + render_area->extent.height - 1;

   /* TODO: alignment requirement seems to be less than tile_align_w/h */
   if (align) {
      x1 = x1 & ~cmd->device->physical_device->tile_align_w;
      y1 = y1 & ~cmd->device->physical_device->tile_align_h;
      x2 = ALIGN_POT(x2 + 1, cmd->device->physical_device->tile_align_w) - 1;
      y2 = ALIGN_POT(y2 + 1, cmd->device->physical_device->tile_align_h) - 1;
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_SCISSOR_TL, 2);
   tu_cs_emit(cs,
              A6XX_RB_BLIT_SCISSOR_TL_X(x1) | A6XX_RB_BLIT_SCISSOR_TL_Y(y1));
   tu_cs_emit(cs,
              A6XX_RB_BLIT_SCISSOR_BR_X(x2) | A6XX_RB_BLIT_SCISSOR_BR_Y(y2));
}
static void
tu6_emit_blit_info(struct tu_cmd_buffer *cmd,
                   struct tu_cs *cs,
                   const struct tu_image_view *iview,
                   uint32_t gmem_offset,
                   bool resolve)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_INFO, 1);
   tu_cs_emit(cs, resolve ? 0 : (A6XX_RB_BLIT_INFO_UNK0 | A6XX_RB_BLIT_INFO_GMEM));

   const struct tu_native_format *format =
      tu6_get_native_format(iview->vk_format);
   assert(format && format->rb >= 0);

   enum a6xx_tile_mode tile_mode =
      tu6_get_image_tile_mode(iview->image, iview->base_mip);
   tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_DST_INFO, 5);
   tu_cs_emit(cs, A6XX_RB_BLIT_DST_INFO_TILE_MODE(tile_mode) |
                  A6XX_RB_BLIT_DST_INFO_SAMPLES(tu_msaa_samples(iview->image->samples)) |
                  A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(format->rb) |
                  A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(format->swap) |
                  COND(iview->image->layout.ubwc_size,
                       A6XX_RB_BLIT_DST_INFO_FLAGS));
   tu_cs_emit_qw(cs, tu_image_base(iview->image, iview->base_mip, iview->base_layer));
   tu_cs_emit(cs, A6XX_RB_BLIT_DST_PITCH(tu_image_stride(iview->image, iview->base_mip)));
   tu_cs_emit(cs, A6XX_RB_BLIT_DST_ARRAY_PITCH(iview->image->layout.layer_size));

   if (iview->image->layout.ubwc_size) {
      tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_FLAG_DST_LO, 3);
      tu6_emit_flag_buffer(cs, iview);
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
   tu_cs_emit(cs, gmem_offset);
}
static void
tu6_emit_blit(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu6_emit_marker(cmd, cs);
   tu6_emit_event_write(cmd, cs, BLIT, false);
   tu6_emit_marker(cmd, cs);
}
static void
tu6_emit_window_scissor(struct tu_cmd_buffer *cmd,
                        struct tu_cs *cs,
                        uint32_t x1,
                        uint32_t y1,
                        uint32_t x2,
                        uint32_t y2)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL, 2);
   tu_cs_emit(cs, A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(x1) |
                  A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(y1));
   tu_cs_emit(cs, A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(x2) |
                  A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(y2));

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_RESOLVE_CNTL_1, 2);
   tu_cs_emit(
      cs, A6XX_GRAS_RESOLVE_CNTL_1_X(x1) | A6XX_GRAS_RESOLVE_CNTL_1_Y(y1));
   tu_cs_emit(
      cs, A6XX_GRAS_RESOLVE_CNTL_2_X(x2) | A6XX_GRAS_RESOLVE_CNTL_2_Y(y2));
}
static void
tu6_emit_window_offset(struct tu_cmd_buffer *cmd,
                       struct tu_cs *cs,
                       uint32_t x1,
                       uint32_t y1)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_RB_WINDOW_OFFSET, 1);
   tu_cs_emit(cs, A6XX_RB_WINDOW_OFFSET_X(x1) | A6XX_RB_WINDOW_OFFSET_Y(y1));

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_WINDOW_OFFSET2, 1);
   tu_cs_emit(cs,
              A6XX_RB_WINDOW_OFFSET2_X(x1) | A6XX_RB_WINDOW_OFFSET2_Y(y1));

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_WINDOW_OFFSET, 1);
   tu_cs_emit(cs, A6XX_SP_WINDOW_OFFSET_X(x1) | A6XX_SP_WINDOW_OFFSET_Y(y1));

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
   tu_cs_emit(
      cs, A6XX_SP_TP_WINDOW_OFFSET_X(x1) | A6XX_SP_TP_WINDOW_OFFSET_Y(y1));
}
static bool
use_hw_binning(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_NOBIN))
      return false;

   return (tiling->tile_count.width * tiling->tile_count.height) > 2;
}
static void
tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     const struct tu_tile *tile)
{
   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(0x7));

   tu6_emit_marker(cmd, cs);
   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM) | 0x10);
   tu6_emit_marker(cmd, cs);

   const uint32_t x1 = tile->begin.x;
   const uint32_t y1 = tile->begin.y;
   const uint32_t x2 = tile->end.x - 1;
   const uint32_t y2 = tile->end.y - 1;
   tu6_emit_window_scissor(cmd, cs, x1, y1, x2, y2);
   tu6_emit_window_offset(cmd, cs, x1, y1);

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_OVERRIDE, 1);
   tu_cs_emit(cs, A6XX_VPC_SO_OVERRIDE_SO_DISABLE);

   if (use_hw_binning(cmd)) {
      tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
      tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
                     A6XX_CP_REG_TEST_0_BIT(0) |
                     A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
      tu_cs_emit(cs, 0x10000000);
      tu_cs_emit(cs, 11); /* conditionally execute next 11 dwords */

      /* if (no overflow) */ {
         tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5, 7);
         tu_cs_emit(cs, cmd->state.tiling_config.pipe_sizes[tile->pipe] |
                        CP_SET_BIN_DATA5_0_VSC_N(tile->slot));
         tu_cs_emit_qw(cs, cmd->vsc_data.iova + tile->pipe * cmd->vsc_data_pitch);
         tu_cs_emit_qw(cs, cmd->vsc_data.iova + (tile->pipe * 4) + (32 * cmd->vsc_data_pitch));
         tu_cs_emit_qw(cs, cmd->vsc_data2.iova + (tile->pipe * cmd->vsc_data2_pitch));

         tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
         tu_cs_emit(cs, 0x0);

         /* use a NOP packet to skip over the 'else' side: */
         tu_cs_emit_pkt7(cs, CP_NOP, 2);
         /* else */ {
            tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
            tu_cs_emit(cs, 0x1);
         }
      }

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_pkt4(cs, REG_A6XX_RB_UNKNOWN_8804, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_UNKNOWN_B304, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_80A4, 1);
      tu_cs_emit(cs, 0x0);
   } else {
      tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
      tu_cs_emit(cs, 0x1);

      tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
      tu_cs_emit(cs, 0x0);
   }
}
static void
tu6_emit_load_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs, uint32_t a)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   const struct tu_image_view *iview = fb->attachments[a].attachment;
   const struct tu_render_pass_attachment *attachment =
      &cmd->state.pass->attachments[a];

   if (attachment->gmem_offset < 0)
      return;

   const uint32_t x1 = tiling->render_area.offset.x;
   const uint32_t y1 = tiling->render_area.offset.y;
   const uint32_t x2 = x1 + tiling->render_area.extent.width;
   const uint32_t y2 = y1 + tiling->render_area.extent.height;
   const uint32_t tile_x2 =
      tiling->tile0.offset.x + tiling->tile0.extent.width * tiling->tile_count.width;
   const uint32_t tile_y2 =
      tiling->tile0.offset.y + tiling->tile0.extent.height * tiling->tile_count.height;
   bool need_load =
      x1 != tiling->tile0.offset.x || x2 != MIN2(fb->width, tile_x2) ||
      y1 != tiling->tile0.offset.y || y2 != MIN2(fb->height, tile_y2);

   if (need_load)
      tu_finishme("improve handling of unaligned render area");

   if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_LOAD)
      need_load = true;

   if (vk_format_has_stencil(iview->vk_format) &&
       attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_LOAD)
      need_load = true;

   if (need_load) {
      tu6_emit_blit_info(cmd, cs, iview, attachment->gmem_offset, false);
      tu6_emit_blit(cmd, cs);
   }
}
static void
tu6_emit_clear_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
                          uint32_t a,
                          const VkRenderPassBeginInfo *info)
{
   const struct tu_framebuffer *fb = cmd->state.framebuffer;
   const struct tu_image_view *iview = fb->attachments[a].attachment;
   const struct tu_render_pass_attachment *attachment =
      &cmd->state.pass->attachments[a];
   unsigned clear_mask = 0;

   /* note: this means it isn't used by any subpass and shouldn't be cleared anyway */
   if (attachment->gmem_offset < 0)
      return;

   if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
      clear_mask = 0xf;

   if (vk_format_has_stencil(iview->vk_format)) {
      clear_mask &= 0x1;
      if (attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
         clear_mask |= 0x2;
   }
   if (!clear_mask)
      return;

   const struct tu_native_format *format =
      tu6_get_native_format(iview->vk_format);
   assert(format && format->rb >= 0);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_DST_INFO, 1);
   tu_cs_emit(cs, A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(format->rb));

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_INFO, 1);
   tu_cs_emit(cs, A6XX_RB_BLIT_INFO_GMEM | A6XX_RB_BLIT_INFO_CLEAR_MASK(clear_mask));

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
   tu_cs_emit(cs, attachment->gmem_offset);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_UNKNOWN_88D0, 1);
   tu_cs_emit(cs, 0);

   uint32_t clear_vals[4] = { 0 };
   tu_pack_clear_value(&info->pClearValues[a], iview->vk_format, clear_vals);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 4);
   tu_cs_emit(cs, clear_vals[0]);
   tu_cs_emit(cs, clear_vals[1]);
   tu_cs_emit(cs, clear_vals[2]);
   tu_cs_emit(cs, clear_vals[3]);

   tu6_emit_blit(cmd, cs);
}
static void
tu6_emit_store_attachment(struct tu_cmd_buffer *cmd,
                          struct tu_cs *cs,
                          uint32_t a,
                          uint32_t gmem_a)
{
   if (cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE)
      return;

   tu6_emit_blit_info(cmd, cs,
                      cmd->state.framebuffer->attachments[a].attachment,
                      cmd->state.pass->attachments[gmem_a].gmem_offset, true);
   tu6_emit_blit(cmd, cs);
}
static void
tu6_emit_tile_store(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_render_pass *pass = cmd->state.pass;
   const struct tu_subpass *subpass = &pass->subpasses[pass->subpass_count - 1];

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   tu6_emit_marker(cmd, cs);
   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE) | 0x10);
   tu6_emit_marker(cmd, cs);

   tu6_emit_blit_scissor(cmd, cs, true);

   for (uint32_t a = 0; a < pass->attachment_count; ++a) {
      if (pass->attachments[a].gmem_offset >= 0)
         tu6_emit_store_attachment(cmd, cs, a, a);
   }

   if (subpass->resolve_attachments) {
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a != VK_ATTACHMENT_UNUSED)
            tu6_emit_store_attachment(cmd, cs, a,
                                      subpass->color_attachments[i].attachment);
      }
   }
}
static void
tu6_emit_restart_index(struct tu_cs *cs, uint32_t restart_index)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_PC_RESTART_INDEX, 1);
   tu_cs_emit(cs, restart_index);
}
static void
tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   VkResult result = tu_cs_reserve_space(cmd->device, cs, 256);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_cache_flush(cmd, cs);

   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 0xfffff);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_CCU_CNTL, 0x7c400004);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE0F, 0x3f);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B605, 0x44);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B600, 0x100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE00, 0x80);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE01, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9600, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8600, 0x880);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE04, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE03, 0x00000410);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_IBO_COUNT, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B182, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BB11, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AB00, 0x5);
   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x1f);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_SRGB_CNTL, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8101, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_SAMPLE_CNTL, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8110, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL0, 0x401);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL1, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_SAMPLE_CNTL, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8818, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8819, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881A, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881C, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881D, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881E, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_88F0, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9101, 0xffff00);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9107, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9236, 1);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_SO_OVERRIDE,
                        A6XX_VPC_SO_OVERRIDE_SO_DISABLE);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9801, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9806, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9980, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9B07, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A81B, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B183, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8099, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_809B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A0, 2);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80AF, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9211, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9602, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9981, 0x3);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9E72, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9108, 0x3);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B304, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B309, 0x000000a2);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8804, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A4, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A5, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A6, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8805, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8806, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8878, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8879, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);

   tu6_emit_marker(cmd, cs);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_MODE_CNTL, 0x00000000);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A008, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x0000001f);

   /* we don't use this yet.. probably best to disable.. */
   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_BASE_LO(0), 3);
   tu_cs_emit(cs, 0x00000000); /* VPC_SO_BUFFER_BASE_LO_0 */
   tu_cs_emit(cs, 0x00000000); /* VPC_SO_BUFFER_BASE_HI_0 */
   tu_cs_emit(cs, 0x00000000); /* VPC_SO_BUFFER_SIZE_0 */

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_FLUSH_BASE_LO(0), 2);
   tu_cs_emit(cs, 0x00000000); /* VPC_SO_FLUSH_BASE_LO_0 */
   tu_cs_emit(cs, 0x00000000); /* VPC_SO_FLUSH_BASE_HI_0 */

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUF_CNTL, 1);
   tu_cs_emit(cs, 0x00000000); /* VPC_SO_BUF_CNTL */

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_OFFSET(0), 1);
   tu_cs_emit(cs, 0x00000000); /* UNKNOWN_E2AB */

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_BASE_LO(1), 3);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_OFFSET(1), 6);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_OFFSET(2), 6);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_OFFSET(3), 3);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_HS_CTRL_REG0, 1);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_CTRL_REG0, 1);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_LRZ_CNTL, 1);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_LRZ_CNTL, 1);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_sanity_check(cs);
}
static void
tu6_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   unsigned seqno;

   seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_AND_INV_EVENT, true);

   tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
   tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                  CP_WAIT_REG_MEM_0_POLL_MEMORY);
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
   tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(seqno));
   tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
   tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));

   seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);

   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_GTE, 4);
   tu_cs_emit(cs, CP_WAIT_MEM_GTE_0_RESERVED(0));
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
   tu_cs_emit(cs, CP_WAIT_MEM_GTE_3_REF(seqno));
}
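/* Rough pseudocode for the sequence above (a sketch of the net effect,
 * not driver API):
 *
 *    fence = ++scratch_seqno; CACHE_FLUSH_AND_INV_EVENT writes fence
 *    wait until *scratch_bo == fence   (CP_WAIT_REG_MEM, WRITE_EQ)
 *    fence = ++scratch_seqno; CACHE_FLUSH_TS writes fence
 *    wait until *scratch_bo >= fence   (CP_WAIT_MEM_GTE)
 */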
static void
update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tu_cs_emit_pkt4(cs, REG_A6XX_VSC_BIN_SIZE, 3);
   tu_cs_emit(cs, A6XX_VSC_BIN_SIZE_WIDTH(tiling->tile0.extent.width) |
                  A6XX_VSC_BIN_SIZE_HEIGHT(tiling->tile0.extent.height));
   tu_cs_emit_qw(cs, cmd->vsc_data.iova + 32 * cmd->vsc_data_pitch); /* VSC_SIZE_ADDRESS_LO/HI */

   tu_cs_emit_pkt4(cs, REG_A6XX_VSC_BIN_COUNT, 1);
   tu_cs_emit(cs, A6XX_VSC_BIN_COUNT_NX(tiling->tile_count.width) |
                  A6XX_VSC_BIN_COUNT_NY(tiling->tile_count.height));

   tu_cs_emit_pkt4(cs, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
   for (unsigned i = 0; i < 32; i++)
      tu_cs_emit(cs, tiling->pipe_config[i]);

   tu_cs_emit_pkt4(cs, REG_A6XX_VSC_PIPE_DATA2_ADDRESS_LO, 4);
   tu_cs_emit_qw(cs, cmd->vsc_data2.iova);
   tu_cs_emit(cs, cmd->vsc_data2_pitch);
   tu_cs_emit(cs, cmd->vsc_data2.size);

   tu_cs_emit_pkt4(cs, REG_A6XX_VSC_PIPE_DATA_ADDRESS_LO, 4);
   tu_cs_emit_qw(cs, cmd->vsc_data.iova);
   tu_cs_emit(cs, cmd->vsc_data_pitch);
   tu_cs_emit(cs, cmd->vsc_data.size);
}
static void
emit_vsc_overflow_test(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
   const uint32_t used_pipe_count =
      tiling->pipe_count.width * tiling->pipe_count.height;

   /* Clear vsc_scratch: */
   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
   tu_cs_emit(cs, 0x0);

   /* Check for overflow, write vsc_scratch if detected: */
   for (int i = 0; i < used_pipe_count; i++) {
      tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
      tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
                     CP_COND_WRITE5_0_WRITE_MEMORY);
      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE_REG(i)));
      tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data_pitch));
      tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(1 + cmd->vsc_data_pitch));

      tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
      tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
                     CP_COND_WRITE5_0_WRITE_MEMORY);
      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE2_REG(i)));
      tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data2_pitch));
      tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(3 + cmd->vsc_data2_pitch));
   }

   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);

   tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

   tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
   tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(OVERFLOW_FLAG_REG) |
                  CP_MEM_TO_REG_0_CNT(1 - 1));
   tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);

   /*
    * This is a bit awkward, we really want a way to invert the
    * CP_REG_TEST/CP_COND_REG_EXEC logic, so that we can conditionally
    * execute cmds to use hwbinning when a bit is *not* set.  This
    * dance is to invert OVERFLOW_FLAG_REG
    *
    * A CP_NOP packet is used to skip executing the 'else' clause
    * if (b0 set)..
    */

   /* b0 will be set if VSC_DATA or VSC_DATA2 overflow: */
   tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
   tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
                  A6XX_CP_REG_TEST_0_BIT(0) |
                  A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

   tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
   tu_cs_emit(cs, 0x10000000);
   tu_cs_emit(cs, 7); /* conditionally execute next 7 dwords */

   /* if (b0 set) */ {
      /*
       * On overflow, mirror the value to control->vsc_overflow
       * which CPU is checking to detect overflow (see
       * check_vsc_overflow())
       */
      tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
      tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(OVERFLOW_FLAG_REG) |
                     CP_REG_TO_MEM_0_CNT(0));
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_OVERFLOW);

      tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_pkt7(cs, CP_NOP, 2); /* skip 'else' when 'if' is taken */
      /* else */ {
         tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
         tu_cs_emit(cs, 0x1);
      }
   }
}
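/* Net effect of the packets above, in rough pseudocode (sketch only):
 *
 *    vsc_scratch = 0;
 *    for (i in used pipes) {
 *       if (VSC_SIZE_REG(i)  >= vsc_data_pitch)  vsc_scratch = 1 + vsc_data_pitch;
 *       if (VSC_SIZE2_REG(i) >= vsc_data2_pitch) vsc_scratch = 3 + vsc_data2_pitch;
 *    }
 *    OVERFLOW_FLAG_REG = vsc_scratch;
 *    if (OVERFLOW_FLAG_REG & 1) {   // overflow: report to CPU, clear b0
 *       *vsc_overflow = OVERFLOW_FLAG_REG;
 *       OVERFLOW_FLAG_REG = 0;
 *    } else {                       // no overflow: set b0
 *       OVERFLOW_FLAG_REG = 1;
 *    }
 *
 * leaving b0 meaning "no overflow", which is what the
 * CP_REG_TEST/CP_COND_REG_EXEC pairs in tu6_emit_tile_select() and
 * tu6_render_tile() check before consuming the binning data.
 */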
static void
tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   uint32_t x1 = tiling->tile0.offset.x;
   uint32_t y1 = tiling->tile0.offset.y;
   uint32_t x2 = tiling->render_area.offset.x + tiling->render_area.extent.width - 1;
   uint32_t y2 = tiling->render_area.offset.y + tiling->render_area.extent.height - 1;

   tu6_emit_window_scissor(cmd, cs, x1, y1, x2, y2);

   tu6_emit_marker(cmd, cs);
   tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
   tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
   tu6_emit_marker(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_wfi(cs);

   tu_cs_emit_pkt4(cs, REG_A6XX_VFD_MODE_CNTL, 1);
   tu_cs_emit(cs, A6XX_VFD_MODE_CNTL_BINNING_PASS);

   update_vsc_pipe(cmd, cs);

   tu_cs_emit_pkt4(cs, REG_A6XX_PC_UNKNOWN_9805, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_UNKNOWN_A0F8, 1);
   tu_cs_emit(cs, 0x1);

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
   tu_cs_emit(cs, UNK_2C);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_WINDOW_OFFSET, 1);
   tu_cs_emit(cs, A6XX_RB_WINDOW_OFFSET_X(0) |
                  A6XX_RB_WINDOW_OFFSET_Y(0));

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
   tu_cs_emit(cs, A6XX_SP_TP_WINDOW_OFFSET_X(0) |
                  A6XX_SP_TP_WINDOW_OFFSET_Y(0));

   /* emit IB to binning drawcmds: */
   tu_cs_emit_call(cs, &cmd->draw_cs);

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
   tu_cs_emit(cs, UNK_2D);

   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
   tu6_cache_flush(cmd, cs);

   tu_cs_emit_wfi(cs);

   tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);

   emit_vsc_overflow_test(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
   tu_cs_emit(cs, 0x0);

   tu_cs_emit_wfi(cs);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_CCU_CNTL, 1);
   tu_cs_emit(cs, 0x7c400004);

   cmd->wait_for_idle = false;
}
static void
tu6_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   VkResult result = tu_cs_reserve_space(cmd->device, cs, 1024);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_lrz_flush(cmd, cs);

   tu6_emit_cache_flush(cmd, cs);

   tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   tu_cs_emit(cs, 0x0);

   /* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
   tu6_emit_wfi(cmd, cs);
   tu_cs_emit_pkt4(cs, REG_A6XX_RB_CCU_CNTL, 1);
   tu_cs_emit(cs, 0x7c400004); /* RB_CCU_CNTL */

   if (use_hw_binning(cmd)) {
      tu6_emit_bin_size(cmd, cs, A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);

      tu6_emit_render_cntl(cmd, cs, true);

      tu6_emit_binning_pass(cmd, cs);

      tu6_emit_bin_size(cmd, cs, A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);

      tu_cs_emit_pkt4(cs, REG_A6XX_VFD_MODE_CNTL, 1);
      tu_cs_emit(cs, 0x0);

      tu_cs_emit_pkt4(cs, REG_A6XX_PC_UNKNOWN_9805, 1);
      tu_cs_emit(cs, 0x1);

      tu_cs_emit_pkt4(cs, REG_A6XX_SP_UNKNOWN_A0F8, 1);
      tu_cs_emit(cs, 0x1);

      tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
      tu_cs_emit(cs, 0x1);
   } else {
      tu6_emit_bin_size(cmd, cs, 0x6000000);
   }

   tu6_emit_render_cntl(cmd, cs, false);

   tu_cs_sanity_check(cs);
}
static void
tu6_render_tile(struct tu_cmd_buffer *cmd,
                struct tu_cs *cs,
                const struct tu_tile *tile)
{
   const uint32_t render_tile_space = 256 + tu_cs_get_call_size(&cmd->draw_cs);
   VkResult result = tu_cs_reserve_space(cmd->device, cs, render_tile_space);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_tile_select(cmd, cs, tile);
   tu_cs_emit_ib(cs, &cmd->state.tile_load_ib);

   tu_cs_emit_call(cs, &cmd->draw_cs);
   cmd->wait_for_idle = true;

   if (use_hw_binning(cmd)) {
      tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
      tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
                     A6XX_CP_REG_TEST_0_BIT(0) |
                     A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
      tu_cs_emit(cs, 0x10000000);
      tu_cs_emit(cs, 2); /* conditionally execute next 2 dwords */

      /* if (no overflow) */ {
         tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
         tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(0x5) | 0x10);
      }
   }

   tu_cs_emit_ib(cs, &cmd->state.tile_store_ib);

   tu_cs_sanity_check(cs);
}
static void
tu6_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   VkResult result = tu_cs_reserve_space(cmd->device, cs, 16);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_LRZ_CNTL, 1);
   tu_cs_emit(cs, 0);

   tu6_emit_lrz_flush(cmd, cs);

   tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);

   tu_cs_sanity_check(cs);
}
static void
tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tu6_render_begin(cmd, &cmd->cs);

   for (uint32_t y = 0; y < tiling->tile_count.height; y++) {
      for (uint32_t x = 0; x < tiling->tile_count.width; x++) {
         struct tu_tile tile;
         tu_tiling_config_get_tile(tiling, cmd->device, x, y, &tile);
         tu6_render_tile(cmd, &cmd->cs, &tile);
      }
   }

   tu6_render_end(cmd, &cmd->cs);
}
static void
tu_cmd_prepare_tile_load_ib(struct tu_cmd_buffer *cmd,
                            const VkRenderPassBeginInfo *info)
{
   const uint32_t tile_load_space =
      8 + (23+19) * cmd->state.pass->attachment_count +
      21 + (13 * cmd->state.subpass->color_count + 8) + 11;

   struct tu_cs sub_cs;

   VkResult result = tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs,
                                            tile_load_space, &sub_cs);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_blit_scissor(cmd, &sub_cs, true);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu6_emit_load_attachment(cmd, &sub_cs, i);

   tu6_emit_blit_scissor(cmd, &sub_cs, false);

   for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
      tu6_emit_clear_attachment(cmd, &sub_cs, i, info);

   /* invalidate because reading input attachments will cache GMEM and
    * the cache isn't updated when GMEM is written
    * TODO: is there a no-cache bit for textures?
    */
   if (cmd->state.subpass->input_count)
      tu6_emit_event_write(cmd, &sub_cs, CACHE_INVALIDATE, false);

   tu6_emit_zs(cmd, cmd->state.subpass, &sub_cs);
   tu6_emit_mrt(cmd, cmd->state.subpass, &sub_cs);
   tu6_emit_msaa(cmd, cmd->state.subpass, &sub_cs);

   cmd->state.tile_load_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
}
static void
tu_cmd_prepare_tile_store_ib(struct tu_cmd_buffer *cmd)
{
   const uint32_t tile_store_space = 32 + 23 * cmd->state.pass->attachment_count;
   struct tu_cs sub_cs;

   VkResult result = tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs,
                                            tile_store_space, &sub_cs);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   /* emit to tile-store sub_cs */
   tu6_emit_tile_store(cmd, &sub_cs);

   cmd->state.tile_store_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
}
static void
tu_cmd_update_tiling_config(struct tu_cmd_buffer *cmd,
                            const VkRect2D *render_area)
{
   const struct tu_device *dev = cmd->device;
   struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   tiling->render_area = *render_area;

   tu_tiling_config_update_tile_layout(tiling, dev, cmd->state.pass->gmem_pixels);
   tu_tiling_config_update_pipe_layout(tiling, dev);
   tu_tiling_config_update_pipes(tiling, dev);
}
const struct tu_dynamic_state default_dynamic_state = {
   .viewport =
      {
         .count = 0,
      },
   .scissor =
      {
         .count = 0,
      },
   .line_width = 1.0f,
   .depth_bias =
      {
         .bias = 0.0f,
         .clamp = 0.0f,
         .slope = 0.0f,
      },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds =
      {
         .min = 0.0f,
         .max = 1.0f,
      },
   .stencil_compare_mask =
      {
         .front = ~0u,
         .back = ~0u,
      },
   .stencil_write_mask =
      {
         .front = ~0u,
         .back = ~0u,
      },
   .stencil_reference =
      {
         .front = 0u,
         .back = 0u,
      },
};
static void UNUSED /* FINISHME */
tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer,
                      const struct tu_dynamic_state *src)
{
   struct tu_dynamic_state *dest = &cmd_buffer->state.dynamic;
   uint32_t copy_mask = src->mask;
   uint32_t dest_mask = 0;

   tu_use_args(cmd_buffer); /* FINISHME */

   /* Make sure to copy the number of viewports/scissors because they can
    * only be specified at pipeline creation time.
    */
   dest->viewport.count = src->viewport.count;
   dest->scissor.count = src->scissor.count;
   dest->discard_rectangle.count = src->discard_rectangle.count;

   if (copy_mask & TU_DYNAMIC_VIEWPORT) {
      if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
                 src->viewport.count * sizeof(VkViewport))) {
         typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                      src->viewport.count);
         dest_mask |= TU_DYNAMIC_VIEWPORT;
      }
   }

   if (copy_mask & TU_DYNAMIC_SCISSOR) {
      if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
                 src->scissor.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                      src->scissor.count);
         dest_mask |= TU_DYNAMIC_SCISSOR;
      }
   }

   if (copy_mask & TU_DYNAMIC_LINE_WIDTH) {
      if (dest->line_width != src->line_width) {
         dest->line_width = src->line_width;
         dest_mask |= TU_DYNAMIC_LINE_WIDTH;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) {
      if (memcmp(&dest->depth_bias, &src->depth_bias,
                 sizeof(src->depth_bias))) {
         dest->depth_bias = src->depth_bias;
         dest_mask |= TU_DYNAMIC_DEPTH_BIAS;
      }
   }

   if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) {
      if (memcmp(&dest->blend_constants, &src->blend_constants,
                 sizeof(src->blend_constants))) {
         typed_memcpy(dest->blend_constants, src->blend_constants, 4);
         dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) {
      if (memcmp(&dest->depth_bounds, &src->depth_bounds,
                 sizeof(src->depth_bounds))) {
         dest->depth_bounds = src->depth_bounds;
         dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) {
      if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
                 sizeof(src->stencil_compare_mask))) {
         dest->stencil_compare_mask = src->stencil_compare_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) {
      if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
                 sizeof(src->stencil_write_mask))) {
         dest->stencil_write_mask = src->stencil_write_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) {
      if (memcmp(&dest->stencil_reference, &src->stencil_reference,
                 sizeof(src->stencil_reference))) {
         dest->stencil_reference = src->stencil_reference;
         dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE;
      }
   }

   if (copy_mask & TU_DYNAMIC_DISCARD_RECTANGLE) {
      if (memcmp(&dest->discard_rectangle.rectangles,
                 &src->discard_rectangle.rectangles,
                 src->discard_rectangle.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->discard_rectangle.rectangles,
                      src->discard_rectangle.rectangles,
                      src->discard_rectangle.count);
         dest_mask |= TU_DYNAMIC_DISCARD_RECTANGLE;
      }
   }
}
static VkResult
tu_create_cmd_buffer(struct tu_device *device,
                     struct tu_cmd_pool *pool,
                     VkCommandBufferLevel level,
                     VkCommandBuffer *pCommandBuffer)
{
   struct tu_cmd_buffer *cmd_buffer;
   cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
      cmd_buffer->queue_family_index = pool->queue_family_index;
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
      cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
   }

   tu_bo_list_init(&cmd_buffer->bo_list);
   tu_cs_init(&cmd_buffer->cs, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->draw_cs, TU_CS_MODE_GROW, 4096);
   tu_cs_init(&cmd_buffer->sub_cs, TU_CS_MODE_SUB_STREAM, 2048);

   *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);

   list_inithead(&cmd_buffer->upload.list);

   cmd_buffer->marker_reg = REG_A6XX_CP_SCRATCH_REG(
      cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY ? 7 : 6);

   VkResult result = tu_bo_init_new(device, &cmd_buffer->scratch_bo, 0x1000);
   if (result != VK_SUCCESS)
      goto fail_scratch_bo;

#define VSC_DATA_SIZE(pitch)  ((pitch) * 32 + 0x100)  /* extra size to store VSC_SIZE */
#define VSC_DATA2_SIZE(pitch) ((pitch) * 32)

   /* TODO: resize on overflow or compute a max size from # of vertices in renderpass?? */
   cmd_buffer->vsc_data_pitch = 0x440 * 4;
   cmd_buffer->vsc_data2_pitch = 0x1040 * 4;

   result = tu_bo_init_new(device, &cmd_buffer->vsc_data, VSC_DATA_SIZE(cmd_buffer->vsc_data_pitch));
   if (result != VK_SUCCESS)
      goto fail_vsc_data;

   result = tu_bo_init_new(device, &cmd_buffer->vsc_data2, VSC_DATA2_SIZE(cmd_buffer->vsc_data2_pitch));
   if (result != VK_SUCCESS)
      goto fail_vsc_data2;

   return VK_SUCCESS;

fail_vsc_data2:
   tu_bo_finish(cmd_buffer->device, &cmd_buffer->vsc_data);
fail_vsc_data:
   tu_bo_finish(cmd_buffer->device, &cmd_buffer->scratch_bo);
fail_scratch_bo:
   list_del(&cmd_buffer->pool_link);
   return result;
}
static void
tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
{
   tu_bo_finish(cmd_buffer->device, &cmd_buffer->scratch_bo);
   tu_bo_finish(cmd_buffer->device, &cmd_buffer->vsc_data);
   tu_bo_finish(cmd_buffer->device, &cmd_buffer->vsc_data2);

   list_del(&cmd_buffer->pool_link);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
      free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);

   tu_cs_finish(cmd_buffer->device, &cmd_buffer->cs);
   tu_cs_finish(cmd_buffer->device, &cmd_buffer->draw_cs);
   tu_cs_finish(cmd_buffer->device, &cmd_buffer->sub_cs);

   tu_bo_list_destroy(&cmd_buffer->bo_list);
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
static VkResult
tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
{
   cmd_buffer->wait_for_idle = true;

   cmd_buffer->record_result = VK_SUCCESS;

   tu_bo_list_reset(&cmd_buffer->bo_list);
   tu_cs_reset(cmd_buffer->device, &cmd_buffer->cs);
   tu_cs_reset(cmd_buffer->device, &cmd_buffer->draw_cs);
   tu_cs_reset(cmd_buffer->device, &cmd_buffer->sub_cs);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
      cmd_buffer->descriptors[i].dirty = 0;
      cmd_buffer->descriptors[i].valid = 0;
      cmd_buffer->descriptors[i].push_dirty = false;
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;

   return cmd_buffer->record_result;
}
VkResult
tu_AllocateCommandBuffers(VkDevice _device,
                          const VkCommandBufferAllocateInfo *pAllocateInfo,
                          VkCommandBuffer *pCommandBuffers)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {

      if (!list_is_empty(&pool->free_cmd_buffers)) {
         struct tu_cmd_buffer *cmd_buffer = list_first_entry(
            &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);

         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

         result = tu_reset_cmd_buffer(cmd_buffer);
         cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
         cmd_buffer->level = pAllocateInfo->level;

         pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
      } else {
         result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                       &pCommandBuffers[i]);
      }
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
                            pCommandBuffers);

      /* From the Vulkan 1.0.66 spec:
       *
       * "vkAllocateCommandBuffers can be used to create multiple
       *  command buffers. If the creation of any of those command
       *  buffers fails, the implementation must destroy all
       *  successfully created command buffer objects from this
       *  command, set all entries of the pCommandBuffers array to
       *  NULL and return the error."
       */
      memset(pCommandBuffers, 0,
             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
   }

   return result;
}

void
tu_FreeCommandBuffers(VkDevice device,
                      VkCommandPool commandPool,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (cmd_buffer) {
         if (cmd_buffer->pool) {
            list_del(&cmd_buffer->pool_link);
            list_addtail(&cmd_buffer->pool_link,
                         &cmd_buffer->pool->free_cmd_buffers);
         } else {
            tu_cmd_buffer_destroy(cmd_buffer);
         }
      }
   }
}

VkResult
tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
                      VkCommandBufferResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   return tu_reset_cmd_buffer(cmd_buffer);
}

VkResult
tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
                      const VkCommandBufferBeginInfo *pBeginInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   VkResult result = VK_SUCCESS;

   if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
      /* If the command buffer has already been reset with
       * vkResetCommandBuffer, there is no need to do it again.
       */
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
   cmd_buffer->usage_flags = pBeginInfo->flags;

   tu_cs_begin(&cmd_buffer->cs);
   tu_cs_begin(&cmd_buffer->draw_cs);

   cmd_buffer->marker_seqno = 0;
   cmd_buffer->scratch_seqno = 0;

   /* setup initial configuration into command buffer */
   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      switch (cmd_buffer->queue_family_index) {
      case TU_QUEUE_GENERAL:
         tu6_init_hw(cmd_buffer, &cmd_buffer->cs);
         break;
      default:
         break;
      }
   } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
              (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
      assert(pBeginInfo->pInheritanceInfo);
      cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
      cmd_buffer->state.subpass = &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;

   return VK_SUCCESS;
}

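/* Note on the lifecycle implemented above: a command buffer moves
 * INITIAL -> RECORDING in tu_BeginCommandBuffer and RECORDING ->
 * EXECUTABLE in tu_EndCommandBuffer, with tu_reset_cmd_buffer() returning
 * it to INITIAL. Errors hit mid-recording are only stashed in
 * record_result and surface from vkEndCommandBuffer.
 */
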
void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                        uint32_t firstBinding,
                        uint32_t bindingCount,
                        const VkBuffer *pBuffers,
                        const VkDeviceSize *pOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   assert(firstBinding + bindingCount <= MAX_VBS);

   for (uint32_t i = 0; i < bindingCount; i++) {
      cmd->state.vb.buffers[firstBinding + i] =
         tu_buffer_from_handle(pBuffers[i]);
      cmd->state.vb.offsets[firstBinding + i] = pOffsets[i];
   }

   /* VB states depend on VkPipelineVertexInputStateCreateInfo */
   cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
}

void
tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
                      VkBuffer buffer,
                      VkDeviceSize offset,
                      VkIndexType indexType)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buf, buffer);

   /* initialize/update the restart index */
   if (!cmd->state.index_buffer || cmd->state.index_type != indexType) {
      struct tu_cs *draw_cs = &cmd->draw_cs;
      VkResult result = tu_cs_reserve_space(cmd->device, draw_cs, 2);
      if (result != VK_SUCCESS) {
         cmd->record_result = result;
         return;
      }

      tu6_emit_restart_index(
         draw_cs, indexType == VK_INDEX_TYPE_UINT32 ? 0xffffffff : 0xffff);

      tu_cs_sanity_check(draw_cs);
   }

   /* track the BO */
   if (cmd->state.index_buffer != buf)
      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);

   cmd->state.index_buffer = buf;
   cmd->state.index_offset = offset;
   cmd->state.index_type = indexType;
}

void
tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                         VkPipelineBindPoint pipelineBindPoint,
                         VkPipelineLayout _layout,
                         uint32_t firstSet,
                         uint32_t descriptorSetCount,
                         const VkDescriptorSet *pDescriptorSets,
                         uint32_t dynamicOffsetCount,
                         const uint32_t *pDynamicOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
   unsigned dyn_idx = 0;

   struct tu_descriptor_state *descriptors_state =
      tu_get_descriptors_state(cmd_buffer, pipelineBindPoint);

   for (unsigned i = 0; i < descriptorSetCount; ++i) {
      unsigned idx = i + firstSet;
      TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);

      descriptors_state->sets[idx] = set;
      descriptors_state->valid |= (1u << idx);

      for (unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
         unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
         assert(dyn_idx < dynamicOffsetCount);

         descriptors_state->dynamic_buffers[idx] =
            set->dynamic_descriptors[j].va + pDynamicOffsets[dyn_idx];
      }
   }

   cmd_buffer->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS;
}

void
tu_CmdPushConstants(VkCommandBuffer commandBuffer,
                    VkPipelineLayout layout,
                    VkShaderStageFlags stageFlags,
                    uint32_t offset,
                    uint32_t size,
                    const void *pValues)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   memcpy((void *) cmd->push_constants + offset, pValues, size);
   cmd->state.dirty |= TU_CMD_DIRTY_PUSH_CONSTANTS;
}

VkResult
tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   if (cmd_buffer->scratch_seqno) {
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
                     MSM_SUBMIT_BO_WRITE);
   }

   if (cmd_buffer->use_vsc_data) {
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data2,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
   }

   for (uint32_t i = 0; i < cmd_buffer->draw_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   for (uint32_t i = 0; i < cmd_buffer->sub_cs.bo_count; i++) {
      tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->sub_cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }

   tu_cs_end(&cmd_buffer->cs);
   tu_cs_end(&cmd_buffer->draw_cs);

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;

   return cmd_buffer->record_result;
}

void
tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
                   VkPipelineBindPoint pipelineBindPoint,
                   VkPipeline _pipeline)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd->state.pipeline = pipeline;
      cmd->state.dirty |= TU_CMD_DIRTY_PIPELINE;
      break;
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd->state.compute_pipeline = pipeline;
      cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_PIPELINE;
      break;
   default:
      unreachable("unrecognized pipeline bind point");
      break;
   }

   tu_bo_list_add(&cmd->bo_list, &pipeline->program.binary_bo,
                  MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   for (uint32_t i = 0; i < pipeline->cs.bo_count; i++) {
      tu_bo_list_add(&cmd->bo_list, pipeline->cs.bos[i],
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
   }
}

void
tu_CmdSetViewport(VkCommandBuffer commandBuffer,
                  uint32_t firstViewport,
                  uint32_t viewportCount,
                  const VkViewport *pViewports)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *draw_cs = &cmd->draw_cs;

   VkResult result = tu_cs_reserve_space(cmd->device, draw_cs, 12);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   assert(firstViewport == 0 && viewportCount == 1);
   tu6_emit_viewport(draw_cs, pViewports);

   tu_cs_sanity_check(draw_cs);
}

void
tu_CmdSetScissor(VkCommandBuffer commandBuffer,
                 uint32_t firstScissor,
                 uint32_t scissorCount,
                 const VkRect2D *pScissors)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *draw_cs = &cmd->draw_cs;

   VkResult result = tu_cs_reserve_space(cmd->device, draw_cs, 3);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   assert(firstScissor == 0 && scissorCount == 1);
   tu6_emit_scissor(draw_cs, pScissors);

   tu_cs_sanity_check(draw_cs);
}

void
tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   cmd->state.dynamic.line_width = lineWidth;

   /* line width depends on VkPipelineRasterizationStateCreateInfo */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}

void
tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
                   float depthBiasConstantFactor,
                   float depthBiasClamp,
                   float depthBiasSlopeFactor)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *draw_cs = &cmd->draw_cs;

   VkResult result = tu_cs_reserve_space(cmd->device, draw_cs, 4);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_depth_bias(draw_cs, depthBiasConstantFactor, depthBiasClamp,
                       depthBiasSlopeFactor);

   tu_cs_sanity_check(draw_cs);
}

void
tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
                        const float blendConstants[4])
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   struct tu_cs *draw_cs = &cmd->draw_cs;

   VkResult result = tu_cs_reserve_space(cmd->device, draw_cs, 5);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_blend_constants(draw_cs, blendConstants);

   tu_cs_sanity_check(draw_cs);
}

void
tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
                     float minDepthBounds,
                     float maxDepthBounds)
{
}

void
tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
                            VkStencilFaceFlags faceMask,
                            uint32_t compareMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd->state.dynamic.stencil_compare_mask.front = compareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd->state.dynamic.stencil_compare_mask.back = compareMask;

   /* the front/back compare masks must be updated together */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}

void
tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t writeMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd->state.dynamic.stencil_write_mask.front = writeMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd->state.dynamic.stencil_write_mask.back = writeMask;

   /* the front/back write masks must be updated together */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}

void
tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t reference)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd->state.dynamic.stencil_reference.front = reference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd->state.dynamic.stencil_reference.back = reference;

   /* the front/back references must be updated together */
   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}

void
tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCmdBuffers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   VkResult result;

   assert(commandBufferCount > 0);

   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);

      result = tu_bo_list_merge(&cmd->bo_list, &secondary->bo_list);
      if (result != VK_SUCCESS) {
         cmd->record_result = result;
         break;
      }

      result = tu_cs_add_entries(&cmd->draw_cs, &secondary->draw_cs);
      if (result != VK_SUCCESS) {
         cmd->record_result = result;
         break;
      }
   }
   cmd->state.dirty = ~0u; /* TODO: set dirty only what needs to be */
}

VkResult
tu_CreateCommandPool(VkDevice _device,
                     const VkCommandPoolCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkCommandPool *pCmdPool)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_cmd_pool *pool;

   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);
   list_inithead(&pool->free_cmd_buffers);

   pool->queue_family_index = pCreateInfo->queueFamilyIndex;

   *pCmdPool = tu_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void
tu_DestroyCommandPool(VkDevice _device,
                      VkCommandPool commandPool,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   vk_free2(&device->alloc, pAllocator, pool);
}

VkResult
tu_ResetCommandPool(VkDevice device,
                    VkCommandPool commandPool,
                    VkCommandPoolResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
   VkResult result;

   list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
                       pool_link)
   {
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}

void
tu_TrimCommandPool(VkDevice device,
                   VkCommandPool commandPool,
                   VkCommandPoolTrimFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }
}

void
tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                      const VkRenderPassBeginInfo *pRenderPassBegin,
                      VkSubpassContents contents)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
   TU_FROM_HANDLE(tu_framebuffer, fb, pRenderPassBegin->framebuffer);

   cmd->state.pass = pass;
   cmd->state.subpass = pass->subpasses;
   cmd->state.framebuffer = fb;

   tu_cmd_update_tiling_config(cmd, &pRenderPassBegin->renderArea);
   tu_cmd_prepare_tile_load_ib(cmd, pRenderPassBegin);
   tu_cmd_prepare_tile_store_ib(cmd);

   /* note: use_hw_binning only checks tiling config */
   if (use_hw_binning(cmd))
      cmd->use_vsc_data = true;

   for (uint32_t i = 0; i < fb->attachment_count; ++i) {
      const struct tu_image_view *iview = fb->attachments[i].attachment;
      tu_bo_list_add(&cmd->bo_list, iview->image->bo,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
   }
}

void
tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
                       const VkRenderPassBeginInfo *pRenderPassBeginInfo,
                       const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
{
   tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
                         pSubpassBeginInfo->contents);
}

void
tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   const struct tu_render_pass *pass = cmd->state.pass;
   struct tu_cs *cs = &cmd->draw_cs;

   VkResult result = tu_cs_reserve_space(cmd->device, cs, 1024);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   const struct tu_subpass *subpass = cmd->state.subpass++;
   /* TODO:
    * if msaa samples change between subpasses,
    * attachment store is broken for some attachments
    */
   if (subpass->resolve_attachments) {
      tu6_emit_blit_scissor(cmd, cs, true);
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         if (a != VK_ATTACHMENT_UNUSED) {
            tu6_emit_store_attachment(cmd, cs, a,
                                      subpass->color_attachments[i].attachment);
         }
      }
   }

   /* invalidate because reading input attachments will cache GMEM and
    * the cache isn't updated when GMEM is written
    * TODO: is there a no-cache bit for textures?
    */
   if (cmd->state.subpass->input_count)
      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);

   /* emit mrt/zs/msaa state for the subpass that is starting */
   tu6_emit_zs(cmd, cmd->state.subpass, cs);
   tu6_emit_mrt(cmd, cmd->state.subpass, cs);
   tu6_emit_msaa(cmd, cmd->state.subpass, cs);

   /* TODO:
    * since we don't know how to do GMEM->GMEM resolve,
    * resolve attachments are resolved to memory then loaded to GMEM again if needed
    */
   if (subpass->resolve_attachments) {
      for (unsigned i = 0; i < subpass->color_count; i++) {
         uint32_t a = subpass->resolve_attachments[i].attachment;
         const struct tu_image_view *iview =
            cmd->state.framebuffer->attachments[a].attachment;
         if (a != VK_ATTACHMENT_UNUSED && pass->attachments[a].gmem_offset >= 0) {
            tu_finishme("missing GMEM->GMEM resolve, performance will suffer\n");
            tu6_emit_blit_info(cmd, cs, iview, pass->attachments[a].gmem_offset, false);
            tu6_emit_blit(cmd, cs);
         }
      }
   }
}

void
tu_CmdNextSubpass2(VkCommandBuffer commandBuffer,
                   const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
                   const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}

struct tu_draw_info
{
   /**
    * Number of vertices.
    */
   uint32_t count;

   /**
    * Index of the first vertex.
    */
   int32_t vertex_offset;

   /**
    * First instance id.
    */
   uint32_t first_instance;

   /**
    * Number of instances.
    */
   uint32_t instance_count;

   /**
    * First index (indexed draws only).
    */
   uint32_t first_index;

   /**
    * Whether it's an indexed draw.
    */
   bool indexed;

   /**
    * Indirect draw parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
   uint32_t stride;

   /**
    * Draw count parameters resource.
    */
   struct tu_buffer *count_buffer;
   uint64_t count_buffer_offset;
};

#define ENABLE_ALL (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
#define ENABLE_DRAW (CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)

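/* The enable masks above select which passes replay a draw-state group:
 * ENABLE_ALL includes the binning pass, while ENABLE_DRAW restricts a group
 * to the GMEM and sysmem rendering passes (fragment-only state is
 * irrelevant while binning).
 */
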
enum tu_draw_state_group_id
{
   TU_DRAW_STATE_PROGRAM,
   TU_DRAW_STATE_PROGRAM_BINNING,
   TU_DRAW_STATE_VI,
   TU_DRAW_STATE_VI_BINNING,
   TU_DRAW_STATE_VP,
   TU_DRAW_STATE_RAST,
   TU_DRAW_STATE_DS,
   TU_DRAW_STATE_BLEND,
   TU_DRAW_STATE_VS_CONST,
   TU_DRAW_STATE_FS_CONST,
   TU_DRAW_STATE_VS_TEX,
   TU_DRAW_STATE_FS_TEX,
   TU_DRAW_STATE_FS_IBO,
   TU_DRAW_STATE_VS_PARAMS,

   TU_DRAW_STATE_COUNT,
};

struct tu_draw_state_group
{
   enum tu_draw_state_group_id id;
   uint32_t enable_mask;
   struct tu_cs_entry ib;
};

static const struct tu_sampler *
sampler_ptr(struct tu_descriptor_state *descriptors_state,
            const struct tu_descriptor_map *map, unsigned i,
            unsigned array_index)
{
   assert(descriptors_state->valid & (1 << map->set[i]));

   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
   assert(map->binding[i] < set->layout->binding_count);

   const struct tu_descriptor_set_binding_layout *layout =
      &set->layout->binding[map->binding[i]];

   if (layout->immutable_samplers_offset) {
      const struct tu_sampler *immutable_samplers =
         tu_immutable_samplers(set->layout, layout);

      return &immutable_samplers[array_index];
   }

   switch (layout->type) {
   case VK_DESCRIPTOR_TYPE_SAMPLER:
      return (struct tu_sampler *) &set->mapped_ptr[layout->offset / 4];
   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      return (struct tu_sampler *) &set->mapped_ptr[layout->offset / 4 +
                                                    A6XX_TEX_CONST_DWORDS +
                                                    array_index *
                                                    (A6XX_TEX_CONST_DWORDS +
                                                     sizeof(struct tu_sampler) / 4)];
   default:
      unreachable("unimplemented descriptor type");
      break;
   }
}

static void
write_tex_const(struct tu_cmd_buffer *cmd,
                uint32_t *dst,
                struct tu_descriptor_state *descriptors_state,
                const struct tu_descriptor_map *map,
                unsigned i, unsigned array_index)
{
   assert(descriptors_state->valid & (1 << map->set[i]));

   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
   assert(map->binding[i] < set->layout->binding_count);

   const struct tu_descriptor_set_binding_layout *layout =
      &set->layout->binding[map->binding[i]];

   switch (layout->type) {
   case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
   case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
   case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
      memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
                                   array_index * A6XX_TEX_CONST_DWORDS],
             A6XX_TEX_CONST_DWORDS * 4);
      break;
   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
                                   array_index *
                                   (A6XX_TEX_CONST_DWORDS +
                                    sizeof(struct tu_sampler) / 4)],
             A6XX_TEX_CONST_DWORDS * 4);
      break;
   default:
      unreachable("unimplemented descriptor type");
      break;
   }

   if (layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
      const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
      uint32_t a = cmd->state.subpass->input_attachments[map->value[i] +
                                                         array_index].attachment;
      const struct tu_render_pass_attachment *att = &cmd->state.pass->attachments[a];

      assert(att->gmem_offset >= 0);

      /* patch the descriptor so the input attachment is read from GMEM */
      dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
      dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
      dst[2] &= ~(A6XX_TEX_CONST_2_TYPE__MASK | A6XX_TEX_CONST_2_PITCH__MASK);
      dst[2] |=
         A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
         A6XX_TEX_CONST_2_PITCH(tiling->tile0.extent.width * att->cpp);
      dst[3] = 0;
      dst[4] = 0x100000 + att->gmem_offset;
      dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
      for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
         dst[i] = 0;

      if (cmd->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
         tu_finishme("patch input attachment pitch for secondary cmd buffer");
   }
}

static void
write_image_ibo(struct tu_cmd_buffer *cmd,
                uint32_t *dst,
                struct tu_descriptor_state *descriptors_state,
                const struct tu_descriptor_map *map,
                unsigned i, unsigned array_index)
{
   assert(descriptors_state->valid & (1 << map->set[i]));

   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
   assert(map->binding[i] < set->layout->binding_count);

   const struct tu_descriptor_set_binding_layout *layout =
      &set->layout->binding[map->binding[i]];

   assert(layout->type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);

   memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
                                (array_index * 2 + 1) * A6XX_TEX_CONST_DWORDS],
          A6XX_TEX_CONST_DWORDS * 4);
}

static uint64_t
buffer_ptr(struct tu_descriptor_state *descriptors_state,
           const struct tu_descriptor_map *map,
           unsigned i, unsigned array_index)
{
   assert(descriptors_state->valid & (1 << map->set[i]));

   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
   assert(map->binding[i] < set->layout->binding_count);

   const struct tu_descriptor_set_binding_layout *layout =
      &set->layout->binding[map->binding[i]];

   switch (layout->type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return descriptors_state->dynamic_buffers[layout->dynamic_offset_offset +
                                                array_index];
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      return (uint64_t) set->mapped_ptr[layout->offset / 4 + array_index * 2 + 1] << 32 |
             set->mapped_ptr[layout->offset / 4 + array_index * 2];
   default:
      unreachable("unimplemented descriptor type");
      break;
   }
}

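/* Note: for plain UBO/SSBO descriptors the set's mapped_ptr stores the
 * buffer's 64-bit iova as two consecutive dwords per array element, which
 * buffer_ptr() reassembles above; dynamic descriptors instead come from
 * dynamic_buffers[], already offset by vkCmdBindDescriptorSets.
 */
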
static inline uint32_t
tu6_stage2opcode(gl_shader_stage type)
{
   switch (type) {
   case MESA_SHADER_VERTEX:
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_TESS_EVAL:
   case MESA_SHADER_GEOMETRY:
      return CP_LOAD_STATE6_GEOM;
   case MESA_SHADER_FRAGMENT:
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_KERNEL:
      return CP_LOAD_STATE6_FRAG;
   default:
      unreachable("bad shader type");
   }
}

static inline enum a6xx_state_block
tu6_stage2shadersb(gl_shader_stage type)
{
   switch (type) {
   case MESA_SHADER_VERTEX:
      return SB6_VS_SHADER;
   case MESA_SHADER_FRAGMENT:
      return SB6_FS_SHADER;
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_KERNEL:
      return SB6_CS_SHADER;
   default:
      unreachable("bad shader type");
   }
}

static void
tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
                     struct tu_descriptor_state *descriptors_state,
                     gl_shader_stage type,
                     uint32_t *push_constants)
{
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   const struct ir3_ubo_analysis_state *state = &link->ubo_state;

   for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
      if (state->range[i].start < state->range[i].end) {
         uint32_t size = state->range[i].end - state->range[i].start;
         uint32_t offset = state->range[i].start;

         /* and even if the start of the const buffer is before
          * first_immediate, the end may not be:
          */
         size = MIN2(size, (16 * link->constlen) - state->range[i].offset);

         if (size == 0)
            continue;

         /* things should be aligned to vec4: */
         debug_assert((state->range[i].offset % 16) == 0);
         debug_assert((size % 16) == 0);
         debug_assert((offset % 16) == 0);

         if (i == 0) {
            /* push constants */
            tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (size / 4));
            tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
                       CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                       CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                       CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                       CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
            tu_cs_emit(cs, 0);
            tu_cs_emit(cs, 0);
            for (unsigned i = 0; i < size / 4; i++)
               tu_cs_emit(cs, push_constants[i + offset / 4]);

            continue;
         }

         /* Look through the UBO map to find our UBO index, and get the VA
          * for that UBO.
          */
         uint64_t va = 0;
         uint32_t ubo_idx = i - 1;
         uint32_t ubo_map_base = 0;
         for (int j = 0; j < link->ubo_map.num; j++) {
            if (ubo_idx >= ubo_map_base &&
                ubo_idx < ubo_map_base + link->ubo_map.array_size[j]) {
               va = buffer_ptr(descriptors_state, &link->ubo_map, j,
                               ubo_idx - ubo_map_base);
               break;
            }
            ubo_map_base += link->ubo_map.array_size[j];
         }
         assert(va);

         tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
         tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
                    CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                    CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                    CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                    CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
         tu_cs_emit_qw(cs, va + offset);
      }
   }
}

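/* Note on the ubo_state ranges handled above: range 0 covers the
 * push-constant block, emitted inline from push_constants, while range i
 * (i > 0) corresponds to UBO index i - 1, hence the ubo_idx = i - 1 lookup
 * through the descriptor map before emitting an indirect load.
 */
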
static void
tu6_emit_ubos(struct tu_cs *cs, const struct tu_pipeline *pipeline,
              struct tu_descriptor_state *descriptors_state,
              gl_shader_stage type)
{
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];

   uint32_t num = MIN2(link->ubo_map.num_desc, link->const_state.num_ubos);
   uint32_t anum = align(num, 2);

   if (!num)
      return;

   tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (2 * anum));
   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(link->const_state.offsets.ubo) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
              CP_LOAD_STATE6_0_NUM_UNIT(anum / 2));
   tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));

   unsigned emitted = 0;
   for (unsigned i = 0; emitted < num && i < link->ubo_map.num; i++) {
      for (unsigned j = 0; emitted < num && j < link->ubo_map.array_size[i]; j++) {
         tu_cs_emit_qw(cs, buffer_ptr(descriptors_state, &link->ubo_map, i, j));
         emitted++;
      }
   }

   for (; emitted < anum; emitted++) {
      tu_cs_emit(cs, 0xffffffff);
      tu_cs_emit(cs, 0xffffffff);
   }
}

static struct tu_cs_entry
tu6_emit_consts(struct tu_cmd_buffer *cmd,
                const struct tu_pipeline *pipeline,
                struct tu_descriptor_state *descriptors_state,
                gl_shader_stage type)
{
   struct tu_cs cs;
   tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs, 512, &cs); /* TODO: maximum size? */

   tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);
   tu6_emit_ubos(&cs, pipeline, descriptors_state, type);

   return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
}

static VkResult
tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
                   const struct tu_draw_info *draw,
                   struct tu_cs_entry *entry)
{
   /* TODO: fill out more than just base instance */
   const struct tu_program_descriptor_linkage *link =
      &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
   const struct ir3_const_state *const_state = &link->const_state;
   struct tu_cs cs;

   if (const_state->offsets.driver_param >= link->constlen) {
      *entry = (struct tu_cs_entry) {};
      return VK_SUCCESS;
   }

   VkResult result = tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs, 8, &cs);
   if (result != VK_SUCCESS)
      return result;

   tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(const_state->offsets.driver_param) |
         CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
         CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
         CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
         CP_LOAD_STATE6_0_NUM_UNIT(1));
   tu_cs_emit(&cs, 0);
   tu_cs_emit(&cs, 0);

   STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);

   tu_cs_emit(&cs, 0);
   tu_cs_emit(&cs, 0);
   tu_cs_emit(&cs, draw->first_instance);
   tu_cs_emit(&cs, 0);

   *entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
   return VK_SUCCESS;
}

static VkResult
tu6_emit_textures(struct tu_cmd_buffer *cmd,
                  const struct tu_pipeline *pipeline,
                  struct tu_descriptor_state *descriptors_state,
                  gl_shader_stage type,
                  struct tu_cs_entry *entry,
                  bool *needs_border)
{
   struct tu_device *device = cmd->device;
   struct tu_cs *draw_state = &cmd->sub_cs;
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   VkResult result;

   if (link->texture_map.num_desc == 0 && link->sampler_map.num_desc == 0) {
      *entry = (struct tu_cs_entry) {};
      return VK_SUCCESS;
   }

   /* allocate and fill texture state */
   struct ts_cs_memory tex_const;
   result = tu_cs_alloc(device, draw_state, link->texture_map.num_desc,
                        A6XX_TEX_CONST_DWORDS, &tex_const);
   if (result != VK_SUCCESS)
      return result;

   int tex_index = 0;
   for (unsigned i = 0; i < link->texture_map.num; i++) {
      for (int j = 0; j < link->texture_map.array_size[i]; j++) {
         write_tex_const(cmd,
                         &tex_const.map[A6XX_TEX_CONST_DWORDS * tex_index++],
                         descriptors_state, &link->texture_map, i, j);
      }
   }

   /* allocate and fill sampler state */
   struct ts_cs_memory tex_samp = { 0 };
   if (link->sampler_map.num_desc) {
      result = tu_cs_alloc(device, draw_state, link->sampler_map.num_desc,
                           A6XX_TEX_SAMP_DWORDS, &tex_samp);
      if (result != VK_SUCCESS)
         return result;

      int sampler_index = 0;
      for (unsigned i = 0; i < link->sampler_map.num; i++) {
         for (int j = 0; j < link->sampler_map.array_size[i]; j++) {
            const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
                                                           &link->sampler_map,
                                                           i, j);
            memcpy(&tex_samp.map[A6XX_TEX_SAMP_DWORDS * sampler_index++],
                   sampler->state, sizeof(sampler->state));
            *needs_border |= sampler->needs_border;
         }
      }
   }

   unsigned tex_samp_reg, tex_const_reg, tex_count_reg;
   enum a6xx_state_block sb;

   switch (type) {
   case MESA_SHADER_VERTEX:
      sb = SB6_VS_TEX;
      tex_samp_reg = REG_A6XX_SP_VS_TEX_SAMP_LO;
      tex_const_reg = REG_A6XX_SP_VS_TEX_CONST_LO;
      tex_count_reg = REG_A6XX_SP_VS_TEX_COUNT;
      break;
   case MESA_SHADER_FRAGMENT:
      sb = SB6_FS_TEX;
      tex_samp_reg = REG_A6XX_SP_FS_TEX_SAMP_LO;
      tex_const_reg = REG_A6XX_SP_FS_TEX_CONST_LO;
      tex_count_reg = REG_A6XX_SP_FS_TEX_COUNT;
      break;
   case MESA_SHADER_COMPUTE:
      sb = SB6_CS_TEX;
      tex_samp_reg = REG_A6XX_SP_CS_TEX_SAMP_LO;
      tex_const_reg = REG_A6XX_SP_CS_TEX_CONST_LO;
      tex_count_reg = REG_A6XX_SP_CS_TEX_COUNT;
      break;
   default:
      unreachable("bad state block");
   }

   struct tu_cs cs;
   result = tu_cs_begin_sub_stream(device, draw_state, 16, &cs);
   if (result != VK_SUCCESS)
      return result;

   if (link->sampler_map.num_desc) {
      /* output sampler state: */
      tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
      tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
                 CP_LOAD_STATE6_0_NUM_UNIT(link->sampler_map.num_desc));
      tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */

      tu_cs_emit_pkt4(&cs, tex_samp_reg, 2);
      tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
   }

   /* emit texture state: */
   tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
              CP_LOAD_STATE6_0_NUM_UNIT(link->texture_map.num_desc));
   tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */

   tu_cs_emit_pkt4(&cs, tex_const_reg, 2);
   tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */

   tu_cs_emit_pkt4(&cs, tex_count_reg, 1);
   tu_cs_emit(&cs, link->texture_map.num_desc);

   *entry = tu_cs_end_sub_stream(draw_state, &cs);
   return VK_SUCCESS;
}

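/* The texture/sampler state built above is referenced twice: once via
 * CP_LOAD_STATE6 so the CP prefetches it into the state cache, and once by
 * writing the same iova into the SP_*_TEX_SAMP/CONST pointer registers.
 */
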
static VkResult
tu6_emit_ibo(struct tu_cmd_buffer *cmd,
             const struct tu_pipeline *pipeline,
             struct tu_descriptor_state *descriptors_state,
             gl_shader_stage type,
             struct tu_cs_entry *entry)
{
   struct tu_device *device = cmd->device;
   struct tu_cs *draw_state = &cmd->sub_cs;
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   VkResult result;

   unsigned num_desc = link->ssbo_map.num_desc + link->image_map.num_desc;

   if (num_desc == 0) {
      *entry = (struct tu_cs_entry) {};
      return VK_SUCCESS;
   }

   struct ts_cs_memory ibo_const;
   result = tu_cs_alloc(device, draw_state, num_desc,
                        A6XX_TEX_CONST_DWORDS, &ibo_const);
   if (result != VK_SUCCESS)
      return result;

   int ssbo_index = 0;
   for (unsigned i = 0; i < link->ssbo_map.num; i++) {
      for (int j = 0; j < link->ssbo_map.array_size[i]; j++) {
         uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];

         uint64_t va = buffer_ptr(descriptors_state, &link->ssbo_map, i, j);
         /* We don't expose robustBufferAccess, so leave the size unlimited. */
         uint32_t sz = MAX_STORAGE_BUFFER_RANGE / 4;

         dst[0] = A6XX_IBO_0_FMT(TFMT6_32_UINT);
         dst[1] = A6XX_IBO_1_WIDTH(sz & MASK(15)) |
                  A6XX_IBO_1_HEIGHT(sz >> 15);
         dst[2] = A6XX_IBO_2_UNK4 |
                  A6XX_IBO_2_UNK31 |
                  A6XX_IBO_2_TYPE(A6XX_TEX_1D);
         dst[3] = 0;
         dst[4] = va;
         dst[5] = va >> 32;
         for (int i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
            dst[i] = 0;

         ssbo_index++;
      }
   }

   for (unsigned i = 0; i < link->image_map.num; i++) {
      for (int j = 0; j < link->image_map.array_size[i]; j++) {
         uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];

         write_image_ibo(cmd, dst,
                         descriptors_state, &link->image_map, i, j);

         ssbo_index++;
      }
   }

   assert(ssbo_index == num_desc);

   struct tu_cs cs;
   result = tu_cs_begin_sub_stream(device, draw_state, 7, &cs);
   if (result != VK_SUCCESS)
      return result;

   uint32_t opcode, ibo_addr_reg;
   enum a6xx_state_block sb;
   enum a6xx_state_type st;

   switch (type) {
   case MESA_SHADER_FRAGMENT:
      opcode = CP_LOAD_STATE6;
      st = ST6_SHADER;
      sb = SB6_IBO;
      ibo_addr_reg = REG_A6XX_SP_IBO_LO;
      break;
   case MESA_SHADER_COMPUTE:
      opcode = CP_LOAD_STATE6_FRAG;
      st = ST6_IBO;
      sb = SB6_CS_SHADER;
      ibo_addr_reg = REG_A6XX_SP_CS_IBO_LO;
      break;
   default:
      unreachable("unsupported stage for ibos");
   }

   /* emit texture state: */
   tu_cs_emit_pkt7(&cs, opcode, 3);
   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
              CP_LOAD_STATE6_0_STATE_TYPE(st) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
              CP_LOAD_STATE6_0_NUM_UNIT(num_desc));
   tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */

   tu_cs_emit_pkt4(&cs, ibo_addr_reg, 2);
   tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */

   *entry = tu_cs_end_sub_stream(draw_state, &cs);
   return VK_SUCCESS;
}

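/* Note: SSBOs are described to the hardware as untyped 32-bit 1D images
 * (the A6XX_IBO_* descriptor built above), while storage images reuse the
 * copy of the descriptor that the descriptor set stores at
 * (array_index * 2 + 1) * A6XX_TEX_CONST_DWORDS, see write_image_ibo().
 */
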
struct PACKED bcolor_entry
{
   uint32_t fp32[4];
   uint16_t ui16[4];
   int16_t si16[4];
   uint16_t fp16[4];
   uint16_t rgb565;
   uint16_t rgb5a1;
   uint16_t rgba4;
   uint8_t __pad0[2];
   uint8_t ui8[4];
   int8_t si8[4];
   uint32_t rgb10a2;
   uint32_t z24; /* also s8? */
   uint16_t srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
   uint8_t __pad1[56];
} border_color[] = {
   [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
   [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
   [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
      .fp32[3] = 0x3f800000,
      .ui16[3] = 0xffff,
      .si16[3] = 0x7fff,
      .fp16[3] = 0x3c00,
      .rgb5a1 = 0x8000,
      .rgba4 = 0xf000,
      .ui8[3] = 0xff,
      .si8[3] = 0x7f,
      .rgb10a2 = 0xc0000000,
      .srgb[3] = 0x3c00,
   },
   [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
      .fp32[3] = 1,
      .fp16[3] = 1,
   },
   [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
      .fp32[0 ... 3] = 0x3f800000,
      .ui16[0 ... 3] = 0xffff,
      .si16[0 ... 3] = 0x7fff,
      .fp16[0 ... 3] = 0x3c00,
      .rgb565 = 0xffff,
      .rgb5a1 = 0xffff,
      .rgba4 = 0xffff,
      .ui8[0 ... 3] = 0xff,
      .si8[0 ... 3] = 0x7f,
      .rgb10a2 = 0xffffffff,
      .z24 = 0xffffff,
      .srgb[0 ... 3] = 0x3c00,
   },
   [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
      .fp32[0 ... 3] = 1,
      .fp16[0 ... 3] = 1,
   },
};

static VkResult
tu6_emit_border_color(struct tu_cmd_buffer *cmd,
                      struct tu_cs *cs)
{
   STATIC_ASSERT(sizeof(struct bcolor_entry) == 128);

   const struct tu_pipeline *pipeline = cmd->state.pipeline;
   struct tu_descriptor_state *descriptors_state =
      &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];
   const struct tu_descriptor_map *vs_sampler =
      &pipeline->program.link[MESA_SHADER_VERTEX].sampler_map;
   const struct tu_descriptor_map *fs_sampler =
      &pipeline->program.link[MESA_SHADER_FRAGMENT].sampler_map;
   struct ts_cs_memory ptr;

   VkResult result = tu_cs_alloc(cmd->device, &cmd->sub_cs,
                                 vs_sampler->num_desc + fs_sampler->num_desc,
                                 128 / 4,
                                 &ptr);
   if (result != VK_SUCCESS)
      return result;

   for (unsigned i = 0; i < vs_sampler->num; i++) {
      for (unsigned j = 0; j < vs_sampler->array_size[i]; j++) {
         const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
                                                        vs_sampler, i, j);
         memcpy(ptr.map, &border_color[sampler->border], 128);
         ptr.map += 128 / 4;
      }
   }

   for (unsigned i = 0; i < fs_sampler->num; i++) {
      for (unsigned j = 0; j < fs_sampler->array_size[i]; j++) {
         const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
                                                        fs_sampler, i, j);
         memcpy(ptr.map, &border_color[sampler->border], 128);
         ptr.map += 128 / 4;
      }
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_LO, 2);
   tu_cs_emit_qw(cs, ptr.iova);
   return VK_SUCCESS;
}

static VkResult
tu6_bind_draw_states(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     const struct tu_draw_info *draw)
{
   const struct tu_pipeline *pipeline = cmd->state.pipeline;
   const struct tu_dynamic_state *dynamic = &cmd->state.dynamic;
   struct tu_draw_state_group draw_state_groups[TU_DRAW_STATE_COUNT];
   uint32_t draw_state_group_count = 0;

   struct tu_descriptor_state *descriptors_state =
      &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];

   VkResult result = tu_cs_reserve_space(cmd->device, cs, 256);
   if (result != VK_SUCCESS)
      return result;

   uint32_t pc_primitive_cntl = 0;
   if (pipeline->ia.primitive_restart && draw->indexed)
      pc_primitive_cntl |= A6XX_PC_PRIMITIVE_CNTL_0_PRIMITIVE_RESTART;

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9806, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9990, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A008, 0);

   tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_0, 1);
   tu_cs_emit(cs, pc_primitive_cntl);

   if (cmd->state.dirty &
          (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_LINE_WIDTH)) {
      tu6_emit_gras_su_cntl(cs, pipeline->rast.gras_su_cntl,
                            dynamic->line_width);
   }

   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_COMPARE_MASK)) {
      tu6_emit_stencil_compare_mask(cs, dynamic->stencil_compare_mask.front,
                                    dynamic->stencil_compare_mask.back);
   }

   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_WRITE_MASK)) {
      tu6_emit_stencil_write_mask(cs, dynamic->stencil_write_mask.front,
                                  dynamic->stencil_write_mask.back);
   }

   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE) &&
       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_REFERENCE)) {
      tu6_emit_stencil_reference(cs, dynamic->stencil_reference.front,
                                 dynamic->stencil_reference.back);
   }

   if (cmd->state.dirty &
       (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_VERTEX_BUFFERS)) {
      for (uint32_t i = 0; i < pipeline->vi.count; i++) {
         const uint32_t binding = pipeline->vi.bindings[i];
         const uint32_t stride = pipeline->vi.strides[i];
         const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
         const VkDeviceSize offset = buf->bo_offset +
                                     cmd->state.vb.offsets[binding] +
                                     pipeline->vi.offsets[i];
         const VkDeviceSize size =
            offset < buf->bo->size ? buf->bo->size - offset : 0;

         tu_cs_emit_pkt4(cs, REG_A6XX_VFD_FETCH(i), 4);
         tu_cs_emit_qw(cs, buf->bo->iova + offset);
         tu_cs_emit(cs, size);
         tu_cs_emit(cs, stride);
      }
   }

   if (cmd->state.dirty & TU_CMD_DIRTY_PIPELINE) {
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_PROGRAM,
            .enable_mask = ENABLE_DRAW,
            .ib = pipeline->program.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_PROGRAM_BINNING,
            .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
            .ib = pipeline->program.binning_state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VI,
            .enable_mask = ENABLE_DRAW,
            .ib = pipeline->vi.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VI_BINNING,
            .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
            .ib = pipeline->vi.binning_state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VP,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->vp.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_RAST,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->rast.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_DS,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->ds.state_ib,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_BLEND,
            .enable_mask = ENABLE_ALL,
            .ib = pipeline->blend.state_ib,
         };
   }

   if (cmd->state.dirty &
       (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS | TU_CMD_DIRTY_PUSH_CONSTANTS)) {
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VS_CONST,
            .enable_mask = ENABLE_ALL,
            .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX)
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_FS_CONST,
            .enable_mask = ENABLE_DRAW,
            .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT)
         };
   }

   if (cmd->state.dirty &
       (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS)) {
      bool needs_border = false;
      struct tu_cs_entry vs_tex, fs_tex, fs_ibo;

      result = tu6_emit_textures(cmd, pipeline, descriptors_state,
                                 MESA_SHADER_VERTEX, &vs_tex, &needs_border);
      if (result != VK_SUCCESS)
         return result;

      result = tu6_emit_textures(cmd, pipeline, descriptors_state,
                                 MESA_SHADER_FRAGMENT, &fs_tex, &needs_border);
      if (result != VK_SUCCESS)
         return result;

      result = tu6_emit_ibo(cmd, pipeline, descriptors_state,
                            MESA_SHADER_FRAGMENT, &fs_ibo);
      if (result != VK_SUCCESS)
         return result;

      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_VS_TEX,
            .enable_mask = ENABLE_ALL,
            .ib = vs_tex,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_FS_TEX,
            .enable_mask = ENABLE_DRAW,
            .ib = fs_tex,
         };
      draw_state_groups[draw_state_group_count++] =
         (struct tu_draw_state_group) {
            .id = TU_DRAW_STATE_FS_IBO,
            .enable_mask = ENABLE_DRAW,
            .ib = fs_ibo,
         };

      if (needs_border) {
         result = tu6_emit_border_color(cmd, cs);
         if (result != VK_SUCCESS)
            return result;
      }
   }

   struct tu_cs_entry vs_params;
   result = tu6_emit_vs_params(cmd, draw, &vs_params);
   if (result != VK_SUCCESS)
      return result;

   draw_state_groups[draw_state_group_count++] =
      (struct tu_draw_state_group) {
         .id = TU_DRAW_STATE_VS_PARAMS,
         .enable_mask = ENABLE_ALL,
         .ib = vs_params,
      };

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_group_count);
   for (uint32_t i = 0; i < draw_state_group_count; i++) {
      const struct tu_draw_state_group *group = &draw_state_groups[i];
      debug_assert((group->enable_mask & ~ENABLE_ALL) == 0);
      uint32_t cp_set_draw_state =
         CP_SET_DRAW_STATE__0_COUNT(group->ib.size / 4) |
         group->enable_mask |
         CP_SET_DRAW_STATE__0_GROUP_ID(group->id);
      uint64_t iova;
      if (group->ib.size) {
         iova = group->ib.bo->iova + group->ib.offset;
      } else {
         cp_set_draw_state |= CP_SET_DRAW_STATE__0_DISABLE;
         iova = 0;
      }

      tu_cs_emit(cs, cp_set_draw_state);
      tu_cs_emit_qw(cs, iova);
   }

   tu_cs_sanity_check(cs);

   if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) {
      for (uint32_t i = 0; i < MAX_VBS; i++) {
         const struct tu_buffer *buf = cmd->state.vb.buffers[i];
         if (buf)
            tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
      }
   }
   if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
      unsigned i;
      for_each_bit(i, descriptors_state->valid) {
         struct tu_descriptor_set *set = descriptors_state->sets[i];
         for (unsigned j = 0; j < set->layout->buffer_count; ++j)
            if (set->descriptors[j]) {
               tu_bo_list_add(&cmd->bo_list, set->descriptors[j],
                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
            }
      }
   }

   /* Fragment shader state overwrites compute shader state, so flag the
    * compute pipeline for re-emit.
    */
   cmd->state.dirty = TU_CMD_DIRTY_COMPUTE_PIPELINE;
   return VK_SUCCESS;
}

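/* All of the state groups collected above are handed over with a single
 * CP_SET_DRAW_STATE packet; the CP then replays each group's IB in the
 * passes selected by its enable mask, so per-draw state does not have to
 * be re-emitted inline for every tile.
 */
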
static void
tu6_emit_draw_direct(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     const struct tu_draw_info *draw)
{

   const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;

   tu_cs_emit_pkt4(cs, REG_A6XX_VFD_INDEX_OFFSET, 2);
   tu_cs_emit(cs, draw->vertex_offset);
   tu_cs_emit(cs, draw->first_instance);

   /* TODO hw binning */
   if (draw->indexed) {
      const enum a4xx_index_size index_size =
         tu6_index_size(cmd->state.index_type);
      const uint32_t index_bytes =
         (cmd->state.index_type == VK_INDEX_TYPE_UINT32) ? 4 : 2;
      const struct tu_buffer *buf = cmd->state.index_buffer;
      const VkDeviceSize offset = buf->bo_offset + cmd->state.index_offset +
                                  index_bytes * draw->first_index;
      const uint32_t size = index_bytes * draw->count;

      const uint32_t cp_draw_indx =
         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
         CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) | 0x2000;

      tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 7);
      tu_cs_emit(cs, cp_draw_indx);
      tu_cs_emit(cs, draw->instance_count);
      tu_cs_emit(cs, draw->count);
      tu_cs_emit(cs, 0x0); /* XXX */
      tu_cs_emit_qw(cs, buf->bo->iova + offset);
      tu_cs_emit(cs, size);
   } else {
      const uint32_t cp_draw_indx =
         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) | 0x2000;

      tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
      tu_cs_emit(cs, cp_draw_indx);
      tu_cs_emit(cs, draw->instance_count);
      tu_cs_emit(cs, draw->count);
   }
}

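/* Both paths above go through CP_DRAW_INDX_OFFSET: indexed draws use
 * DI_SRC_SEL_DMA and point the CP at the index buffer (iova plus size),
 * while non-indexed draws use DI_SRC_SEL_AUTO_INDEX and need no index
 * buffer at all.
 */
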
static void
tu_draw(struct tu_cmd_buffer *cmd, const struct tu_draw_info *draw)
{
   struct tu_cs *cs = &cmd->draw_cs;
   VkResult result;

   result = tu6_bind_draw_states(cmd, cs, draw);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   result = tu_cs_reserve_space(cmd->device, cs, 32);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   if (draw->indirect) {
      tu_finishme("indirect draw");
      return;
   }

   /* TODO tu6_emit_marker should pick different regs depending on cs */

   tu6_emit_marker(cmd, cs);
   tu6_emit_draw_direct(cmd, cs, draw);
   tu6_emit_marker(cmd, cs);

   cmd->wait_for_idle = true;

   tu_cs_sanity_check(cs);
}

void
tu_CmdDraw(VkCommandBuffer commandBuffer,
           uint32_t vertexCount,
           uint32_t instanceCount,
           uint32_t firstVertex,
           uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.count = vertexCount;
   info.instance_count = instanceCount;
   info.first_instance = firstInstance;
   info.vertex_offset = firstVertex;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
                  uint32_t indexCount,
                  uint32_t instanceCount,
                  uint32_t firstIndex,
                  int32_t vertexOffset,
                  uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = indexCount;
   info.instance_count = instanceCount;
   info.first_index = firstIndex;
   info.vertex_offset = vertexOffset;
   info.first_instance = firstInstance;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
                   VkBuffer _buffer,
                   VkDeviceSize offset,
                   uint32_t drawCount,
                   uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
                          VkBuffer _buffer,
                          VkDeviceSize offset,
                          uint32_t drawCount,
                          uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}

struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];

   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};

static void
tu_emit_compute_driver_params(struct tu_cs *cs, struct tu_pipeline *pipeline,
                              const struct tu_dispatch_info *info)
{
   gl_shader_stage type = MESA_SHADER_COMPUTE;
   const struct tu_program_descriptor_linkage *link =
      &pipeline->program.link[type];
   const struct ir3_const_state *const_state = &link->const_state;
   uint32_t offset = const_state->offsets.driver_param;

   if (link->constlen <= offset)
      return;

   if (!info->indirect) {
      uint32_t driver_params[IR3_DP_CS_COUNT] = {
         [IR3_DP_NUM_WORK_GROUPS_X] = info->blocks[0],
         [IR3_DP_NUM_WORK_GROUPS_Y] = info->blocks[1],
         [IR3_DP_NUM_WORK_GROUPS_Z] = info->blocks[2],
         [IR3_DP_LOCAL_GROUP_SIZE_X] = pipeline->compute.local_size[0],
         [IR3_DP_LOCAL_GROUP_SIZE_Y] = pipeline->compute.local_size[1],
         [IR3_DP_LOCAL_GROUP_SIZE_Z] = pipeline->compute.local_size[2],
      };

      uint32_t num_consts = MIN2(const_state->num_driver_params,
                                 (link->constlen - offset) * 4);
      /* push constants */
      tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_consts);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
                 CP_LOAD_STATE6_0_NUM_UNIT(num_consts / 4));
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, 0);
      uint32_t i;
      for (i = 0; i < num_consts; i++)
         tu_cs_emit(cs, driver_params[i]);
   } else {
      tu_finishme("Indirect driver params");
   }
}

3649 tu_dispatch(struct tu_cmd_buffer
*cmd
,
3650 const struct tu_dispatch_info
*info
)
3652 struct tu_cs
*cs
= &cmd
->cs
;
3653 struct tu_pipeline
*pipeline
= cmd
->state
.compute_pipeline
;
3654 struct tu_descriptor_state
*descriptors_state
=
3655 &cmd
->descriptors
[VK_PIPELINE_BIND_POINT_COMPUTE
];
3657 VkResult result
= tu_cs_reserve_space(cmd
->device
, cs
, 256);
3658 if (result
!= VK_SUCCESS
) {
3659 cmd
->record_result
= result
;
3663 if (cmd
->state
.dirty
& TU_CMD_DIRTY_COMPUTE_PIPELINE
)
3664 tu_cs_emit_ib(cs
, &pipeline
->program
.state_ib
);
3666 struct tu_cs_entry ib
;
3668 ib
= tu6_emit_consts(cmd
, pipeline
, descriptors_state
, MESA_SHADER_COMPUTE
);
3670 tu_cs_emit_ib(cs
, &ib
);
3672 tu_emit_compute_driver_params(cs
, pipeline
, info
);
3675 result
= tu6_emit_textures(cmd
, pipeline
, descriptors_state
,
3676 MESA_SHADER_COMPUTE
, &ib
, &needs_border
);
3677 if (result
!= VK_SUCCESS
) {
3678 cmd
->record_result
= result
;
3683 tu_cs_emit_ib(cs
, &ib
);
3686 tu_finishme("compute border color");
3688 result
= tu6_emit_ibo(cmd
, pipeline
, descriptors_state
, MESA_SHADER_COMPUTE
, &ib
);
3689 if (result
!= VK_SUCCESS
) {
3690 cmd
->record_result
= result
;
3695 tu_cs_emit_ib(cs
, &ib
);
3698 if (cmd
->state
.dirty
& TU_CMD_DIRTY_DESCRIPTOR_SETS
) {
3700 for_each_bit(i
, descriptors_state
->valid
) {
3701 struct tu_descriptor_set
*set
= descriptors_state
->sets
[i
];
3702 for (unsigned j
= 0; j
< set
->layout
->buffer_count
; ++j
)
3703 if (set
->descriptors
[j
]) {
3704 tu_bo_list_add(&cmd
->bo_list
, set
->descriptors
[j
],
3705 MSM_SUBMIT_BO_READ
| MSM_SUBMIT_BO_WRITE
);
3710 /* Compute shader state overwrites fragment shader state, so we flag the
3711 * graphics pipeline for re-emit.
3713 cmd
->state
.dirty
= TU_CMD_DIRTY_PIPELINE
;
3715 tu_cs_emit_pkt7(cs
, CP_SET_MARKER
, 1);
3716 tu_cs_emit(cs
, A6XX_CP_SET_MARKER_0_MODE(0x8));
3718 const uint32_t *local_size
= pipeline
->compute
.local_size
;
3719 const uint32_t *num_groups
= info
->blocks
;
3720 tu_cs_emit_pkt4(cs
, REG_A6XX_HLSQ_CS_NDRANGE_0
, 7);
3722 A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM(3) |
3723 A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(local_size
[0] - 1) |
3724 A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(local_size
[1] - 1) |
3725 A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(local_size
[2] - 1));
3726 tu_cs_emit(cs
, A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(local_size
[0] * num_groups
[0]));
3727 tu_cs_emit(cs
, 0); /* HLSQ_CS_NDRANGE_2_GLOBALOFF_X */
3728 tu_cs_emit(cs
, A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(local_size
[1] * num_groups
[1]));
3729 tu_cs_emit(cs
, 0); /* HLSQ_CS_NDRANGE_4_GLOBALOFF_Y */
3730 tu_cs_emit(cs
, A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(local_size
[2] * num_groups
[2]));
3731 tu_cs_emit(cs
, 0); /* HLSQ_CS_NDRANGE_6_GLOBALOFF_Z */

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CS_KERNEL_GROUP_X, 3);
   tu_cs_emit(cs, 1);            /* HLSQ_CS_KERNEL_GROUP_X */
   tu_cs_emit(cs, 1);            /* HLSQ_CS_KERNEL_GROUP_Y */
   tu_cs_emit(cs, 1);            /* HLSQ_CS_KERNEL_GROUP_Z */
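
   /* For an indirect dispatch the CP reads the three workgroup counts from
    * the buffer at iova; the local size still has to be baked into the
    * packet, using the A5XX_* fields that carried over to a6xx.
    */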
   if (info->indirect) {
      uint64_t iova = tu_buffer_iova(info->indirect) + info->indirect_offset;

      tu_bo_list_add(&cmd->bo_list, info->indirect->bo,
                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);

      tu_cs_emit_pkt7(cs, CP_EXEC_CS_INDIRECT, 4);
      tu_cs_emit(cs, 0x00000000);
      tu_cs_emit_qw(cs, iova);
      tu_cs_emit(cs,
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(local_size[0] - 1) |
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(local_size[1] - 1) |
                 A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(local_size[2] - 1));
   } else {
      tu_cs_emit_pkt7(cs, CP_EXEC_CS, 4);
      tu_cs_emit(cs, 0x00000000);
      tu_cs_emit(cs, CP_EXEC_CS_1_NGROUPS_X(info->blocks[0]));
      tu_cs_emit(cs, CP_EXEC_CS_2_NGROUPS_Y(info->blocks[1]));
      tu_cs_emit(cs, CP_EXEC_CS_3_NGROUPS_Z(info->blocks[2]));
   }

   tu6_emit_cache_flush(cmd, cs);
}
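
/* vkCmdDispatchBase from VK_KHR_device_group: base_{x,y,z} are recorded in
 * info.offsets, but note that the NDRANGE global offsets above are still
 * emitted as zero, so non-zero bases are not honored yet.
 */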
void
tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
                   uint32_t base_x,
                   uint32_t base_y,
                   uint32_t base_z,
                   uint32_t x,
                   uint32_t y,
                   uint32_t z)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_dispatch_info info = {};

   info.blocks[0] = x;
   info.blocks[1] = y;
   info.blocks[2] = z;

   info.offsets[0] = base_x;
   info.offsets[1] = base_y;
   info.offsets[2] = base_z;
   tu_dispatch(cmd_buffer, &info);
}

void
tu_CmdDispatch(VkCommandBuffer commandBuffer,
               uint32_t x,
               uint32_t y,
               uint32_t z)
{
   tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
}

void
tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
                       VkBuffer _buffer,
                       VkDeviceSize offset)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_dispatch_info info = {};

   info.indirect = buffer;
   info.indirect_offset = offset;

   tu_dispatch(cmd_buffer, &info);
}

void
tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   tu_cs_end(&cmd_buffer->draw_cs);

   tu_cmd_render_tiles(cmd_buffer);

   /* discard draw_cs entries now that the tiles are rendered */
   tu_cs_discard_entries(&cmd_buffer->draw_cs);
   tu_cs_begin(&cmd_buffer->draw_cs);

   cmd_buffer->state.pass = NULL;
   cmd_buffer->state.subpass = NULL;
   cmd_buffer->state.framebuffer = NULL;
}

void
tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
                     const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdEndRenderPass(commandBuffer);
}
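
/* Barriers and events: vkCmdPipelineBarrier and vkCmdWaitEvents both fold
 * their arguments into a tu_barrier_info and funnel through tu_barrier().
 */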
struct tu_barrier_info
{
   uint32_t eventCount;
   const VkEvent *pEvents;
   VkPipelineStageFlags srcStageMask;
};
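
/* No-op for now: the a6xx cache flush/invalidate handling that barriers
 * require has not been implemented here yet.
 */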
static void
tu_barrier(struct tu_cmd_buffer *cmd_buffer,
           uint32_t memoryBarrierCount,
           const VkMemoryBarrier *pMemoryBarriers,
           uint32_t bufferMemoryBarrierCount,
           const VkBufferMemoryBarrier *pBufferMemoryBarriers,
           uint32_t imageMemoryBarrierCount,
           const VkImageMemoryBarrier *pImageMemoryBarriers,
           const struct tu_barrier_info *info)
{
}

void
tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
                      VkPipelineStageFlags srcStageMask,
                      VkPipelineStageFlags destStageMask,
                      VkDependencyFlags dependencyFlags,
                      uint32_t memoryBarrierCount,
                      const VkMemoryBarrier *pMemoryBarriers,
                      uint32_t bufferMemoryBarrierCount,
                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                      uint32_t imageMemoryBarrierCount,
                      const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = 0;
   info.pEvents = NULL;
   info.srcStageMask = srcStageMask;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}
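
/* write_event is likewise still a stub; Set/ResetEvent funnel through it
 * with value 1 or 0.
 */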
static void
write_event(struct tu_cmd_buffer *cmd_buffer,
            struct tu_event *event,
            VkPipelineStageFlags stageMask,
            unsigned value)
{
}

void
tu_CmdSetEvent(VkCommandBuffer commandBuffer,
               VkEvent _event,
               VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd_buffer, event, stageMask, 1);
}

void
tu_CmdResetEvent(VkCommandBuffer commandBuffer,
                 VkEvent _event,
                 VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd_buffer, event, stageMask, 0);
}

void
tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
                 uint32_t eventCount,
                 const VkEvent *pEvents,
                 VkPipelineStageFlags srcStageMask,
                 VkPipelineStageFlags dstStageMask,
                 uint32_t memoryBarrierCount,
                 const VkMemoryBarrier *pMemoryBarriers,
                 uint32_t bufferMemoryBarrierCount,
                 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                 uint32_t imageMemoryBarrierCount,
                 const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = eventCount;
   info.pEvents = pEvents;
   info.srcStageMask = 0;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}
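
/* Only device groups of size one are supported, so the device mask can be
 * ignored here.
 */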
void
tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
{
}