/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include "main/menums.h"

#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"
#include "util/debug.h"
#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "vk_format.h"

#include "tu_cs.h"

struct tu_pipeline_builder
{
   struct tu_device *device;
   struct tu_pipeline_cache *cache;
   struct tu_pipeline_layout *layout;
   const VkAllocationCallbacks *alloc;
   const VkGraphicsPipelineCreateInfo *create_info;

   struct tu_shader *shaders[MESA_SHADER_STAGES];
   uint32_t shader_offsets[MESA_SHADER_STAGES];
   uint32_t binning_vs_offset;
   uint32_t shader_total_size;

   bool rasterizer_discard;
   /* these states are affected by rasterizer_discard */
   VkSampleCountFlagBits samples;
   bool use_depth_stencil_attachment;
   bool use_color_attachments;
   uint32_t color_attachment_count;
   VkFormat color_attachment_formats[MAX_RTS];
};
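
/* Translate a VkDynamicState into the corresponding TU_DYNAMIC_* bit.  The
 * builder records these bits so that state declared dynamic is left to the
 * command buffer instead of being baked into the pipeline state below.
 */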

static enum tu_dynamic_state_bits
tu_dynamic_state_bit(VkDynamicState state)
{
   switch (state) {
   case VK_DYNAMIC_STATE_VIEWPORT:
      return TU_DYNAMIC_VIEWPORT;
   case VK_DYNAMIC_STATE_SCISSOR:
      return TU_DYNAMIC_SCISSOR;
   case VK_DYNAMIC_STATE_LINE_WIDTH:
      return TU_DYNAMIC_LINE_WIDTH;
   case VK_DYNAMIC_STATE_DEPTH_BIAS:
      return TU_DYNAMIC_DEPTH_BIAS;
   case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
      return TU_DYNAMIC_BLEND_CONSTANTS;
   case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
      return TU_DYNAMIC_DEPTH_BOUNDS;
   case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
      return TU_DYNAMIC_STENCIL_COMPARE_MASK;
   case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
      return TU_DYNAMIC_STENCIL_WRITE_MASK;
   case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
      return TU_DYNAMIC_STENCIL_REFERENCE;
   default:
      unreachable("invalid dynamic state");
      return 0;
   }
}

static gl_shader_stage
tu_shader_stage(VkShaderStageFlagBits stage)
{
   switch (stage) {
   case VK_SHADER_STAGE_VERTEX_BIT:
      return MESA_SHADER_VERTEX;
   case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
      return MESA_SHADER_TESS_CTRL;
   case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
      return MESA_SHADER_TESS_EVAL;
   case VK_SHADER_STAGE_GEOMETRY_BIT:
      return MESA_SHADER_GEOMETRY;
   case VK_SHADER_STAGE_FRAGMENT_BIT:
      return MESA_SHADER_FRAGMENT;
   case VK_SHADER_STAGE_COMPUTE_BIT:
      return MESA_SHADER_COMPUTE;
   default:
      unreachable("invalid VkShaderStageFlagBits");
      return MESA_SHADER_NONE;
   }
}

static const VkVertexInputAttributeDescription *
tu_find_vertex_input_attribute(
   const VkPipelineVertexInputStateCreateInfo *vi_info, uint32_t slot)
{
   assert(slot >= VERT_ATTRIB_GENERIC0);
   slot -= VERT_ATTRIB_GENERIC0;
   for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
      if (vi_info->pVertexAttributeDescriptions[i].location == slot)
         return &vi_info->pVertexAttributeDescriptions[i];
   }
   return NULL;
}

static const VkVertexInputBindingDescription *
tu_find_vertex_input_binding(
   const VkPipelineVertexInputStateCreateInfo *vi_info,
   const VkVertexInputAttributeDescription *vi_attr)
{
   assert(vi_attr);
   for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
      if (vi_info->pVertexBindingDescriptions[i].binding == vi_attr->binding)
         return &vi_info->pVertexBindingDescriptions[i];
   }
   return NULL;
}

static bool
tu_logic_op_reads_dst(VkLogicOp op)
{
   switch (op) {
   case VK_LOGIC_OP_CLEAR:
   case VK_LOGIC_OP_COPY:
   case VK_LOGIC_OP_COPY_INVERTED:
   case VK_LOGIC_OP_SET:
      return false;
   default:
      return true;
   }
}

static VkBlendFactor
tu_blend_factor_no_dst_alpha(VkBlendFactor factor)
{
   /* treat dst alpha as 1.0 and avoid reading it */
   switch (factor) {
   case VK_BLEND_FACTOR_DST_ALPHA:
      return VK_BLEND_FACTOR_ONE;
   case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
      return VK_BLEND_FACTOR_ZERO;
   default:
      return factor;
   }
}
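
/* The tu6_* helpers below translate Vulkan enums into the a6xx hardware
 * encodings consumed by the register-packing macros.
 */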

static enum pc_di_primtype
tu6_primtype(VkPrimitiveTopology topology)
{
   switch (topology) {
   case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
      return DI_PT_POINTLIST;
   case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
      return DI_PT_LINELIST;
   case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
      return DI_PT_LINESTRIP;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
      return DI_PT_TRILIST;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
      return DI_PT_TRISTRIP;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
      return DI_PT_TRIFAN;
   case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
      return DI_PT_LINE_ADJ;
   case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
      return DI_PT_LINESTRIP_ADJ;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
      return DI_PT_TRI_ADJ;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
      return DI_PT_TRISTRIP_ADJ;
   case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
      return DI_PT_PATCHES0;
   default:
      unreachable("invalid primitive topology");
      return DI_PT_NONE;
   }
}

static enum adreno_compare_func
tu6_compare_func(VkCompareOp op)
{
   switch (op) {
   case VK_COMPARE_OP_NEVER:
      return FUNC_NEVER;
   case VK_COMPARE_OP_LESS:
      return FUNC_LESS;
   case VK_COMPARE_OP_EQUAL:
      return FUNC_EQUAL;
   case VK_COMPARE_OP_LESS_OR_EQUAL:
      return FUNC_LEQUAL;
   case VK_COMPARE_OP_GREATER:
      return FUNC_GREATER;
   case VK_COMPARE_OP_NOT_EQUAL:
      return FUNC_NOTEQUAL;
   case VK_COMPARE_OP_GREATER_OR_EQUAL:
      return FUNC_GEQUAL;
   case VK_COMPARE_OP_ALWAYS:
      return FUNC_ALWAYS;
   default:
      unreachable("invalid VkCompareOp");
      return FUNC_NEVER;
   }
}

static enum adreno_stencil_op
tu6_stencil_op(VkStencilOp op)
{
   switch (op) {
   case VK_STENCIL_OP_KEEP:
      return STENCIL_KEEP;
   case VK_STENCIL_OP_ZERO:
      return STENCIL_ZERO;
   case VK_STENCIL_OP_REPLACE:
      return STENCIL_REPLACE;
   case VK_STENCIL_OP_INCREMENT_AND_CLAMP:
      return STENCIL_INCR_CLAMP;
   case VK_STENCIL_OP_DECREMENT_AND_CLAMP:
      return STENCIL_DECR_CLAMP;
   case VK_STENCIL_OP_INVERT:
      return STENCIL_INVERT;
   case VK_STENCIL_OP_INCREMENT_AND_WRAP:
      return STENCIL_INCR_WRAP;
   case VK_STENCIL_OP_DECREMENT_AND_WRAP:
      return STENCIL_DECR_WRAP;
   default:
      unreachable("invalid VkStencilOp");
      return STENCIL_KEEP;
   }
}

static enum a3xx_rop_code
tu6_rop(VkLogicOp op)
{
   switch (op) {
   case VK_LOGIC_OP_CLEAR:
      return ROP_CLEAR;
   case VK_LOGIC_OP_AND:
      return ROP_AND;
   case VK_LOGIC_OP_AND_REVERSE:
      return ROP_AND_REVERSE;
   case VK_LOGIC_OP_COPY:
      return ROP_COPY;
   case VK_LOGIC_OP_AND_INVERTED:
      return ROP_AND_INVERTED;
   case VK_LOGIC_OP_NO_OP:
      return ROP_NOOP;
   case VK_LOGIC_OP_XOR:
      return ROP_XOR;
   case VK_LOGIC_OP_OR:
      return ROP_OR;
   case VK_LOGIC_OP_NOR:
      return ROP_NOR;
   case VK_LOGIC_OP_EQUIVALENT:
      return ROP_XNOR;
   case VK_LOGIC_OP_INVERT:
      return ROP_INVERT;
   case VK_LOGIC_OP_OR_REVERSE:
      return ROP_OR_REVERSE;
   case VK_LOGIC_OP_COPY_INVERTED:
      return ROP_COPY_INVERTED;
   case VK_LOGIC_OP_OR_INVERTED:
      return ROP_OR_INVERTED;
   case VK_LOGIC_OP_NAND:
      return ROP_NAND;
   case VK_LOGIC_OP_SET:
      return ROP_SET;
   default:
      unreachable("invalid VkLogicOp");
      return ROP_NOOP;
   }
}

static enum adreno_rb_blend_factor
tu6_blend_factor(VkBlendFactor factor)
{
   switch (factor) {
   case VK_BLEND_FACTOR_ZERO:
      return FACTOR_ZERO;
   case VK_BLEND_FACTOR_ONE:
      return FACTOR_ONE;
   case VK_BLEND_FACTOR_SRC_COLOR:
      return FACTOR_SRC_COLOR;
   case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
      return FACTOR_ONE_MINUS_SRC_COLOR;
   case VK_BLEND_FACTOR_DST_COLOR:
      return FACTOR_DST_COLOR;
   case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
      return FACTOR_ONE_MINUS_DST_COLOR;
   case VK_BLEND_FACTOR_SRC_ALPHA:
      return FACTOR_SRC_ALPHA;
   case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
      return FACTOR_ONE_MINUS_SRC_ALPHA;
   case VK_BLEND_FACTOR_DST_ALPHA:
      return FACTOR_DST_ALPHA;
   case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
      return FACTOR_ONE_MINUS_DST_ALPHA;
   case VK_BLEND_FACTOR_CONSTANT_COLOR:
      return FACTOR_CONSTANT_COLOR;
   case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
      return FACTOR_ONE_MINUS_CONSTANT_COLOR;
   case VK_BLEND_FACTOR_CONSTANT_ALPHA:
      return FACTOR_CONSTANT_ALPHA;
   case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
      return FACTOR_ONE_MINUS_CONSTANT_ALPHA;
   case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
      return FACTOR_SRC_ALPHA_SATURATE;
   case VK_BLEND_FACTOR_SRC1_COLOR:
      return FACTOR_SRC1_COLOR;
   case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
      return FACTOR_ONE_MINUS_SRC1_COLOR;
   case VK_BLEND_FACTOR_SRC1_ALPHA:
      return FACTOR_SRC1_ALPHA;
   case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
      return FACTOR_ONE_MINUS_SRC1_ALPHA;
   default:
      unreachable("invalid VkBlendFactor");
      return FACTOR_ZERO;
   }
}

static enum a3xx_rb_blend_opcode
tu6_blend_op(VkBlendOp op)
{
   switch (op) {
   case VK_BLEND_OP_ADD:
      return BLEND_DST_PLUS_SRC;
   case VK_BLEND_OP_SUBTRACT:
      return BLEND_SRC_MINUS_DST;
   case VK_BLEND_OP_REVERSE_SUBTRACT:
      return BLEND_DST_MINUS_SRC;
   case VK_BLEND_OP_MIN:
      return BLEND_MIN_DST_SRC;
   case VK_BLEND_OP_MAX:
      return BLEND_MAX_DST_SRC;
   default:
      unreachable("invalid VkBlendOp");
      return BLEND_DST_PLUS_SRC;
   }
}

static uint32_t
tu_shader_nibo(const struct tu_shader *shader)
{
   /* Don't use ir3_shader_nibo(), because that would include declared but
    * unused storage images and SSBOs.
    */
   return shader->ssbo_map.num_desc + shader->image_map.num_desc;
}
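
/* Per-stage config emitters: each one programs the SP_*_CTRL/SP_*_CONFIG
 * and HLSQ_*_CNTL registers for its stage from the compiled ir3 variant.
 */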

static void
tu6_emit_vs_config(struct tu_cs *cs, struct tu_shader *shader,
                   const struct ir3_shader_variant *vs)
{
   uint32_t sp_vs_ctrl =
      A6XX_SP_VS_CTRL_REG0_THREADSIZE(FOUR_QUADS) |
      A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(vs->info.max_reg + 1) |
      A6XX_SP_VS_CTRL_REG0_MERGEDREGS |
      A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(vs->branchstack);
   if (vs->need_pixlod)
      sp_vs_ctrl |= A6XX_SP_VS_CTRL_REG0_PIXLODENABLE;
   if (vs->need_fine_derivatives)
      sp_vs_ctrl |= A6XX_SP_VS_CTRL_REG0_DIFF_FINE;

   uint32_t sp_vs_config = A6XX_SP_VS_CONFIG_NTEX(shader->texture_map.num_desc) |
                           A6XX_SP_VS_CONFIG_NSAMP(shader->sampler_map.num_desc);
   if (vs->instrlen)
      sp_vs_config |= A6XX_SP_VS_CONFIG_ENABLED;

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_CTRL_REG0, 1);
   tu_cs_emit(cs, sp_vs_ctrl);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_CONFIG, 2);
   tu_cs_emit(cs, sp_vs_config);
   tu_cs_emit(cs, vs->instrlen);

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_VS_CNTL, 1);
   tu_cs_emit(cs, A6XX_HLSQ_VS_CNTL_CONSTLEN(align(vs->constlen, 4)) |
                  A6XX_HLSQ_VS_CNTL_ENABLED);
}

static void
tu6_emit_hs_config(struct tu_cs *cs, struct tu_shader *shader,
                   const struct ir3_shader_variant *hs)
{
   uint32_t sp_hs_config = 0;
   if (hs->instrlen)
      sp_hs_config |= A6XX_SP_HS_CONFIG_ENABLED;

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_HS_UNKNOWN_A831, 1);
   tu_cs_emit(cs, 0);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_HS_CONFIG, 2);
   tu_cs_emit(cs, sp_hs_config);
   tu_cs_emit(cs, hs->instrlen);

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_HS_CNTL, 1);
   tu_cs_emit(cs, A6XX_HLSQ_HS_CNTL_CONSTLEN(align(hs->constlen, 4)));
}

static void
tu6_emit_ds_config(struct tu_cs *cs, struct tu_shader *shader,
                   const struct ir3_shader_variant *ds)
{
   uint32_t sp_ds_config = 0;
   if (ds->instrlen)
      sp_ds_config |= A6XX_SP_DS_CONFIG_ENABLED;

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_DS_CONFIG, 2);
   tu_cs_emit(cs, sp_ds_config);
   tu_cs_emit(cs, ds->instrlen);

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_DS_CNTL, 1);
   tu_cs_emit(cs, A6XX_HLSQ_DS_CNTL_CONSTLEN(align(ds->constlen, 4)));
}

static void
tu6_emit_gs_config(struct tu_cs *cs, struct tu_shader *shader,
                   const struct ir3_shader_variant *gs)
{
   uint32_t sp_gs_config = 0;
   if (gs->instrlen)
      sp_gs_config |= A6XX_SP_GS_CONFIG_ENABLED;

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_UNKNOWN_A871, 1);
   tu_cs_emit(cs, 0);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_CONFIG, 2);
   tu_cs_emit(cs, sp_gs_config);
   tu_cs_emit(cs, gs->instrlen);

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_GS_CNTL, 1);
   tu_cs_emit(cs, A6XX_HLSQ_GS_CNTL_CONSTLEN(align(gs->constlen, 4)));
}

static void
tu6_emit_fs_config(struct tu_cs *cs, struct tu_shader *shader,
                   const struct ir3_shader_variant *fs)
{
   uint32_t sp_fs_ctrl =
      A6XX_SP_FS_CTRL_REG0_THREADSIZE(FOUR_QUADS) | 0x1000000 |
      A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(fs->info.max_reg + 1) |
      A6XX_SP_FS_CTRL_REG0_MERGEDREGS |
      A6XX_SP_FS_CTRL_REG0_BRANCHSTACK(fs->branchstack);
   if (fs->total_in > 0)
      sp_fs_ctrl |= A6XX_SP_FS_CTRL_REG0_VARYING;
   if (fs->need_pixlod)
      sp_fs_ctrl |= A6XX_SP_FS_CTRL_REG0_PIXLODENABLE;
   if (fs->need_fine_derivatives)
      sp_fs_ctrl |= A6XX_SP_FS_CTRL_REG0_DIFF_FINE;

   uint32_t sp_fs_config = A6XX_SP_FS_CONFIG_NTEX(shader->texture_map.num_desc) |
                           A6XX_SP_FS_CONFIG_NSAMP(shader->sampler_map.num_desc) |
                           A6XX_SP_FS_CONFIG_NIBO(tu_shader_nibo(shader));
   if (fs->instrlen)
      sp_fs_config |= A6XX_SP_FS_CONFIG_ENABLED;

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_UNKNOWN_A9A8, 1);
   tu_cs_emit(cs, 0);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_UNKNOWN_AB00, 1);
   tu_cs_emit(cs, 0x5);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_CTRL_REG0, 1);
   tu_cs_emit(cs, sp_fs_ctrl);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_CONFIG, 2);
   tu_cs_emit(cs, sp_fs_config);
   tu_cs_emit(cs, fs->instrlen);

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_FS_CNTL, 1);
   tu_cs_emit(cs, A6XX_HLSQ_FS_CNTL_CONSTLEN(align(fs->constlen, 4)) |
                  A6XX_HLSQ_FS_CNTL_ENABLED);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_IBO_COUNT, 1);
   tu_cs_emit(cs, tu_shader_nibo(shader));
}

static void
tu6_emit_cs_config(struct tu_cs *cs, const struct tu_shader *shader,
                   const struct ir3_shader_variant *v)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 1);
   tu_cs_emit(cs, 0xff);

   unsigned constlen = align(v->constlen, 4);
   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CS_CNTL, 1);
   tu_cs_emit(cs, A6XX_HLSQ_CS_CNTL_CONSTLEN(constlen) |
                  A6XX_HLSQ_CS_CNTL_ENABLED);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_CS_CONFIG, 2);
   tu_cs_emit(cs, A6XX_SP_CS_CONFIG_ENABLED |
                  A6XX_SP_CS_CONFIG_NIBO(tu_shader_nibo(shader)) |
                  A6XX_SP_CS_CONFIG_NTEX(shader->texture_map.num_desc) |
                  A6XX_SP_CS_CONFIG_NSAMP(shader->sampler_map.num_desc));
   tu_cs_emit(cs, v->instrlen);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_CS_CTRL_REG0, 1);
   tu_cs_emit(cs, A6XX_SP_CS_CTRL_REG0_THREADSIZE(FOUR_QUADS) |
                  A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(v->info.max_reg + 1) |
                  A6XX_SP_CS_CTRL_REG0_MERGEDREGS |
                  A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(v->branchstack) |
                  COND(v->need_pixlod, A6XX_SP_CS_CTRL_REG0_PIXLODENABLE) |
                  COND(v->need_fine_derivatives, A6XX_SP_CS_CTRL_REG0_DIFF_FINE));

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_CS_UNKNOWN_A9B1, 1);
   tu_cs_emit(cs, 0x41);

   uint32_t local_invocation_id =
      ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
   uint32_t work_group_id =
      ir3_find_sysval_regid(v, SYSTEM_VALUE_WORK_GROUP_ID);

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CS_CNTL_0, 2);
   tu_cs_emit(cs,
              A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(work_group_id) |
              A6XX_HLSQ_CS_CNTL_0_UNK0(regid(63, 0)) |
              A6XX_HLSQ_CS_CNTL_0_UNK1(regid(63, 0)) |
              A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
   tu_cs_emit(cs, 0x2fc); /* HLSQ_CS_UNKNOWN_B998 */

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_CS_IBO_COUNT, 1);
   tu_cs_emit(cs, tu_shader_nibo(shader));
}

static void
tu6_emit_vs_system_values(struct tu_cs *cs,
                          const struct ir3_shader_variant *vs)
{
   const uint32_t vertexid_regid =
      ir3_find_sysval_regid(vs, SYSTEM_VALUE_VERTEX_ID);
   const uint32_t instanceid_regid =
      ir3_find_sysval_regid(vs, SYSTEM_VALUE_INSTANCE_ID);

   tu_cs_emit_pkt4(cs, REG_A6XX_VFD_CONTROL_1, 6);
   tu_cs_emit(cs, A6XX_VFD_CONTROL_1_REGID4VTX(vertexid_regid) |
                  A6XX_VFD_CONTROL_1_REGID4INST(instanceid_regid) |
                  0xfcfc0000);
   tu_cs_emit(cs, 0x0000fcfc); /* VFD_CONTROL_2 */
   tu_cs_emit(cs, 0xfcfcfcfc); /* VFD_CONTROL_3 */
   tu_cs_emit(cs, 0x000000fc); /* VFD_CONTROL_4 */
   tu_cs_emit(cs, 0x0000fcfc); /* VFD_CONTROL_5 */
   tu_cs_emit(cs, 0x00000000); /* VFD_CONTROL_6 */
}
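
/* Program the VPC: link VS outputs to FS inputs, mask off unused varying
 * components, and append position/pointsize, which a6xx expects at the end
 * of the linkage map.
 */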

static void
tu6_emit_vpc(struct tu_cs *cs,
             const struct ir3_shader_variant *vs,
             const struct ir3_shader_variant *fs,
             bool binning_pass)
{
   struct ir3_shader_linkage linkage = { 0 };
   ir3_link_shaders(&linkage, vs, fs);

   if (vs->shader->stream_output.num_outputs && !binning_pass)
      tu_finishme("stream output");

   BITSET_DECLARE(vpc_var_enables, 128) = { 0 };
   for (uint32_t i = 0; i < linkage.cnt; i++) {
      const uint32_t comp_count = util_last_bit(linkage.var[i].compmask);
      for (uint32_t j = 0; j < comp_count; j++)
         BITSET_SET(vpc_var_enables, linkage.var[i].loc + j);
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VAR_DISABLE(0), 4);
   tu_cs_emit(cs, ~vpc_var_enables[0]);
   tu_cs_emit(cs, ~vpc_var_enables[1]);
   tu_cs_emit(cs, ~vpc_var_enables[2]);
   tu_cs_emit(cs, ~vpc_var_enables[3]);

   /* a6xx finds position/pointsize at the end */
   const uint32_t position_regid =
      ir3_find_output_regid(vs, VARYING_SLOT_POS);
   const uint32_t pointsize_regid =
      ir3_find_output_regid(vs, VARYING_SLOT_PSIZ);
   uint32_t pointsize_loc = 0xff, position_loc = 0xff;
   if (position_regid != regid(63, 0)) {
      position_loc = linkage.max_loc;
      ir3_link_add(&linkage, position_regid, 0xf, linkage.max_loc);
   }
   if (pointsize_regid != regid(63, 0)) {
      pointsize_loc = linkage.max_loc;
      ir3_link_add(&linkage, pointsize_regid, 0x1, linkage.max_loc);
   }

   /* map vs outputs to VPC */
   assert(linkage.cnt <= 32);
   const uint32_t sp_vs_out_count = (linkage.cnt + 1) / 2;
   const uint32_t sp_vs_vpc_dst_count = (linkage.cnt + 3) / 4;
   uint32_t sp_vs_out[16];
   uint32_t sp_vs_vpc_dst[8];
   sp_vs_out[sp_vs_out_count - 1] = 0;
   sp_vs_vpc_dst[sp_vs_vpc_dst_count - 1] = 0;
   for (uint32_t i = 0; i < linkage.cnt; i++) {
      ((uint16_t *) sp_vs_out)[i] =
         A6XX_SP_VS_OUT_REG_A_REGID(linkage.var[i].regid) |
         A6XX_SP_VS_OUT_REG_A_COMPMASK(linkage.var[i].compmask);
      ((uint8_t *) sp_vs_vpc_dst)[i] =
         A6XX_SP_VS_VPC_DST_REG_OUTLOC0(linkage.var[i].loc);
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_OUT_REG(0), sp_vs_out_count);
   tu_cs_emit_array(cs, sp_vs_out, sp_vs_out_count);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_VPC_DST_REG(0), sp_vs_vpc_dst_count);
   tu_cs_emit_array(cs, sp_vs_vpc_dst, sp_vs_vpc_dst_count);

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_CNTL_0, 1);
   tu_cs_emit(cs, A6XX_VPC_CNTL_0_NUMNONPOSVAR(fs->total_in) |
                  (fs->total_in > 0 ? A6XX_VPC_CNTL_0_VARYING : 0) |
                  0xff00ff00);

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_PACK, 1);
   tu_cs_emit(cs, A6XX_VPC_PACK_POSITIONLOC(position_loc) |
                  A6XX_VPC_PACK_PSIZELOC(pointsize_loc) |
                  A6XX_VPC_PACK_STRIDE_IN_VPC(linkage.max_loc));

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_GS_SIV_CNTL, 1);
   tu_cs_emit(cs, 0x0000ffff); /* XXX */

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_PRIMITIVE_CNTL, 1);
   tu_cs_emit(cs, A6XX_SP_PRIMITIVE_CNTL_VSOUT(linkage.cnt));

   tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_1, 1);
   tu_cs_emit(cs, A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC(linkage.max_loc) |
                  (vs->writes_psize ? A6XX_PC_PRIMITIVE_CNTL_1_PSIZE : 0));
}
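
/* Compute the per-component interpolation and point-sprite replacement
 * modes (2 bits each) for one FS input, returning how many mode bits were
 * consumed so the caller can pack modes for consecutive varying slots.
 */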

static int
tu6_vpc_varying_mode(const struct ir3_shader_variant *fs,
                     uint32_t index,
                     uint8_t *interp_mode,
                     uint8_t *ps_repl_mode)
{
   enum
   {
      INTERP_SMOOTH = 0,
      INTERP_FLAT = 1,
      INTERP_ZERO = 2,
      INTERP_ONE = 3,
   };
   enum
   {
      PS_REPL_NONE = 0,
      PS_REPL_S = 1,
      PS_REPL_T = 2,
      PS_REPL_ONE_MINUS_T = 3,
   };

   const uint32_t compmask = fs->inputs[index].compmask;

   /* NOTE: varyings are packed, so if compmask is 0xb then first, second, and
    * fourth component occupy three consecutive varying slots
    */
   int shift = 0;
   *interp_mode = 0;
   *ps_repl_mode = 0;
   if (fs->inputs[index].slot == VARYING_SLOT_PNTC) {
      if (compmask & 0x1) {
         *ps_repl_mode |= PS_REPL_S << shift;
         shift += 2;
      }
      if (compmask & 0x2) {
         *ps_repl_mode |= PS_REPL_T << shift;
         shift += 2;
      }
      if (compmask & 0x4) {
         *interp_mode |= INTERP_ZERO << shift;
         shift += 2;
      }
      if (compmask & 0x8) {
         *interp_mode |= INTERP_ONE << 6;
         shift += 2;
      }
   } else if ((fs->inputs[index].interpolate == INTERP_MODE_FLAT) ||
              fs->inputs[index].rasterflat) {
      for (int i = 0; i < 4; i++) {
         if (compmask & (1 << i)) {
            *interp_mode |= INTERP_FLAT << shift;
            shift += 2;
         }
      }
   }

   return shift;
}

static void
tu6_emit_vpc_varying_modes(struct tu_cs *cs,
                           const struct ir3_shader_variant *fs,
                           bool binning_pass)
{
   uint32_t interp_modes[8] = { 0 };
   uint32_t ps_repl_modes[8] = { 0 };

   if (!binning_pass) {
      for (int i = -1;
           (i = ir3_next_varying(fs, i)) < (int) fs->inputs_count;) {
         /* get the mode for input i */
         uint8_t interp_mode;
         uint8_t ps_repl_mode;
         const int bits =
            tu6_vpc_varying_mode(fs, i, &interp_mode, &ps_repl_mode);

         /* OR the mode into the array */
         const uint32_t inloc = fs->inputs[i].inloc * 2;
         uint32_t n = inloc / 32;
         uint32_t shift = inloc % 32;
         interp_modes[n] |= interp_mode << shift;
         ps_repl_modes[n] |= ps_repl_mode << shift;
         if (shift + bits > 32) {
            n++;
            shift = 32 - shift;

            interp_modes[n] |= interp_mode >> shift;
            ps_repl_modes[n] |= ps_repl_mode >> shift;
         }
      }
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VARYING_INTERP_MODE(0), 8);
   tu_cs_emit_array(cs, interp_modes, 8);

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VARYING_PS_REPL_MODE(0), 8);
   tu_cs_emit_array(cs, ps_repl_modes, 8);
}
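
/* Wire up the FS system values (frag coord, face, sample id, barycentrics)
 * and sampler prefetch state across the HLSQ, GRAS and RB blocks.
 */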

static void
tu6_emit_fs_inputs(struct tu_cs *cs, const struct ir3_shader_variant *fs)
{
   uint32_t face_regid, coord_regid, zwcoord_regid, samp_id_regid;
   uint32_t ij_pix_regid, ij_samp_regid, ij_cent_regid, ij_size_regid;
   uint32_t smask_in_regid;

   bool sample_shading = fs->per_samp; /* TODO | key->sample_shading; */
   bool enable_varyings = fs->total_in > 0;

   samp_id_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_SAMPLE_ID);
   smask_in_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_SAMPLE_MASK_IN);
   face_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_FRONT_FACE);
   coord_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_FRAG_COORD);
   zwcoord_regid = VALIDREG(coord_regid) ? coord_regid + 2 : regid(63, 0);
   ij_pix_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_PIXEL);
   ij_samp_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_SAMPLE);
   ij_cent_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_CENTROID);
   ij_size_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_SIZE);

   if (fs->num_sampler_prefetch > 0) {
      assert(VALIDREG(ij_pix_regid));
      /* also, it seems like ij_pix is *required* to be r0.x */
      assert(ij_pix_regid == regid(0, 0));
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_PREFETCH_CNTL, 1 + fs->num_sampler_prefetch);
   tu_cs_emit(cs, A6XX_SP_FS_PREFETCH_CNTL_COUNT(fs->num_sampler_prefetch) |
                  A6XX_SP_FS_PREFETCH_CNTL_UNK4(regid(63, 0)) |
                  0x7000); /* XXX */
   for (int i = 0; i < fs->num_sampler_prefetch; i++) {
      const struct ir3_sampler_prefetch *prefetch = &fs->sampler_prefetch[i];
      tu_cs_emit(cs, A6XX_SP_FS_PREFETCH_CMD_SRC(prefetch->src) |
                     A6XX_SP_FS_PREFETCH_CMD_SAMP_ID(prefetch->samp_id) |
                     A6XX_SP_FS_PREFETCH_CMD_TEX_ID(prefetch->tex_id) |
                     A6XX_SP_FS_PREFETCH_CMD_DST(prefetch->dst) |
                     A6XX_SP_FS_PREFETCH_CMD_WRMASK(prefetch->wrmask) |
                     COND(prefetch->half_precision, A6XX_SP_FS_PREFETCH_CMD_HALF) |
                     A6XX_SP_FS_PREFETCH_CMD_CMD(prefetch->cmd));
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CONTROL_1_REG, 5);
   tu_cs_emit(cs, 0x7);
   tu_cs_emit(cs, A6XX_HLSQ_CONTROL_2_REG_FACEREGID(face_regid) |
                  A6XX_HLSQ_CONTROL_2_REG_SAMPLEID(samp_id_regid) |
                  A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(smask_in_regid) |
                  A6XX_HLSQ_CONTROL_2_REG_SIZE(ij_size_regid));
   tu_cs_emit(cs, A6XX_HLSQ_CONTROL_3_REG_BARY_IJ_PIXEL(ij_pix_regid) |
                  A6XX_HLSQ_CONTROL_3_REG_BARY_IJ_CENTROID(ij_cent_regid) |
                  0xfc00fc00);
   tu_cs_emit(cs, A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(coord_regid) |
                  A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(zwcoord_regid) |
                  A6XX_HLSQ_CONTROL_4_REG_BARY_IJ_PIXEL_PERSAMP(ij_samp_regid) |
                  0x0000fc00);
   tu_cs_emit(cs, 0xfc);

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_UNKNOWN_B980, 1);
   tu_cs_emit(cs, enable_varyings ? 3 : 1);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_UNKNOWN_A982, 1);
   tu_cs_emit(cs, 0); /* XXX */

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 1);
   tu_cs_emit(cs, 0xff); /* XXX */

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CNTL, 1);
   tu_cs_emit(cs,
              CONDREG(ij_pix_regid, A6XX_GRAS_CNTL_VARYING) |
              CONDREG(ij_cent_regid, A6XX_GRAS_CNTL_CENTROID) |
              CONDREG(ij_samp_regid, A6XX_GRAS_CNTL_PERSAMP_VARYING) |
              COND(VALIDREG(ij_size_regid) && !sample_shading, A6XX_GRAS_CNTL_SIZE) |
              COND(VALIDREG(ij_size_regid) && sample_shading, A6XX_GRAS_CNTL_SIZE_PERSAMP) |
              COND(fs->frag_coord,
                   A6XX_GRAS_CNTL_SIZE |
                   A6XX_GRAS_CNTL_XCOORD |
                   A6XX_GRAS_CNTL_YCOORD |
                   A6XX_GRAS_CNTL_ZCOORD |
                   A6XX_GRAS_CNTL_WCOORD) |
              COND(fs->frag_face, A6XX_GRAS_CNTL_SIZE));

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_RENDER_CONTROL0, 2);
   tu_cs_emit(cs,
              CONDREG(ij_pix_regid, A6XX_RB_RENDER_CONTROL0_VARYING) |
              CONDREG(ij_cent_regid, A6XX_RB_RENDER_CONTROL0_CENTROID) |
              CONDREG(ij_samp_regid, A6XX_RB_RENDER_CONTROL0_PERSAMP_VARYING) |
              COND(enable_varyings, A6XX_RB_RENDER_CONTROL0_UNK10) |
              COND(VALIDREG(ij_size_regid) && !sample_shading, A6XX_RB_RENDER_CONTROL0_SIZE) |
              COND(VALIDREG(ij_size_regid) && sample_shading, A6XX_RB_RENDER_CONTROL0_SIZE_PERSAMP) |
              COND(fs->frag_coord,
                   A6XX_RB_RENDER_CONTROL0_SIZE |
                   A6XX_RB_RENDER_CONTROL0_XCOORD |
                   A6XX_RB_RENDER_CONTROL0_YCOORD |
                   A6XX_RB_RENDER_CONTROL0_ZCOORD |
                   A6XX_RB_RENDER_CONTROL0_WCOORD) |
              COND(fs->frag_face, A6XX_RB_RENDER_CONTROL0_SIZE));
   tu_cs_emit(cs,
              CONDREG(smask_in_regid, A6XX_RB_RENDER_CONTROL1_SAMPLEMASK) |
              CONDREG(samp_id_regid, A6XX_RB_RENDER_CONTROL1_SAMPLEID) |
              CONDREG(ij_size_regid, A6XX_RB_RENDER_CONTROL1_SIZE) |
              COND(fs->frag_face, A6XX_RB_RENDER_CONTROL1_FACENESS));
}

static void
tu6_emit_fs_outputs(struct tu_cs *cs,
                    const struct ir3_shader_variant *fs,
                    uint32_t mrt_count)
{
   uint32_t smask_regid, posz_regid;

   posz_regid = ir3_find_output_regid(fs, FRAG_RESULT_DEPTH);
   smask_regid = ir3_find_output_regid(fs, FRAG_RESULT_SAMPLE_MASK);

   uint32_t fragdata_regid[8];
   if (fs->color0_mrt) {
      fragdata_regid[0] = ir3_find_output_regid(fs, FRAG_RESULT_COLOR);
      for (uint32_t i = 1; i < ARRAY_SIZE(fragdata_regid); i++)
         fragdata_regid[i] = fragdata_regid[0];
   } else {
      for (uint32_t i = 0; i < ARRAY_SIZE(fragdata_regid); i++)
         fragdata_regid[i] = ir3_find_output_regid(fs, FRAG_RESULT_DATA0 + i);
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_CNTL0, 2);
   tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(posz_regid) |
                  A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID(smask_regid) |
                  0xfc000000);
   tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL1_MRT(mrt_count));

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_REG(0), 8);
   for (uint32_t i = 0; i < ARRAY_SIZE(fragdata_regid); i++) {
      // TODO we could have a mix of half and full precision outputs,
      // we really need to figure out half-precision from IR3_REG_HALF
      tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_REG_REGID(fragdata_regid[i]) |
                     (false ? A6XX_SP_FS_OUTPUT_REG_HALF_PRECISION : 0));
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 2);
   tu_cs_emit(cs, COND(fs->writes_pos, A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z) |
                  COND(fs->writes_smask, A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_SAMPMASK));
   tu_cs_emit(cs, A6XX_RB_FS_OUTPUT_CNTL1_MRT(mrt_count));

   uint32_t gras_su_depth_plane_cntl = 0;
   uint32_t rb_depth_plane_cntl = 0;
   if (fs->no_earlyz || fs->writes_pos) {
      gras_su_depth_plane_cntl |= A6XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z;
      rb_depth_plane_cntl |= A6XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z;
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_DEPTH_PLANE_CNTL, 1);
   tu_cs_emit(cs, gras_su_depth_plane_cntl);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_PLANE_CNTL, 1);
   tu_cs_emit(cs, rb_depth_plane_cntl);
}
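
/* Point SP_*_OBJ_START at the uploaded binary and emit a CP_LOAD_STATE6 so
 * the CP loads the instructions into the stage's shader state block.
 */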

static void
tu6_emit_shader_object(struct tu_cs *cs,
                       gl_shader_stage stage,
                       const struct ir3_shader_variant *variant,
                       const struct tu_bo *binary_bo,
                       uint32_t binary_offset)
{
   uint16_t reg;
   uint8_t opcode;
   enum a6xx_state_block sb;
   switch (stage) {
   case MESA_SHADER_VERTEX:
      reg = REG_A6XX_SP_VS_OBJ_START_LO;
      opcode = CP_LOAD_STATE6_GEOM;
      sb = SB6_VS_SHADER;
      break;
   case MESA_SHADER_TESS_CTRL:
      reg = REG_A6XX_SP_HS_OBJ_START_LO;
      opcode = CP_LOAD_STATE6_GEOM;
      sb = SB6_HS_SHADER;
      break;
   case MESA_SHADER_TESS_EVAL:
      reg = REG_A6XX_SP_DS_OBJ_START_LO;
      opcode = CP_LOAD_STATE6_GEOM;
      sb = SB6_DS_SHADER;
      break;
   case MESA_SHADER_GEOMETRY:
      reg = REG_A6XX_SP_GS_OBJ_START_LO;
      opcode = CP_LOAD_STATE6_GEOM;
      sb = SB6_GS_SHADER;
      break;
   case MESA_SHADER_FRAGMENT:
      reg = REG_A6XX_SP_FS_OBJ_START_LO;
      opcode = CP_LOAD_STATE6_FRAG;
      sb = SB6_FS_SHADER;
      break;
   case MESA_SHADER_COMPUTE:
      reg = REG_A6XX_SP_CS_OBJ_START_LO;
      opcode = CP_LOAD_STATE6_FRAG;
      sb = SB6_CS_SHADER;
      break;
   default:
      unreachable("invalid gl_shader_stage");
      opcode = CP_LOAD_STATE6_GEOM;
      sb = SB6_VS_SHADER;
      break;
   }

   if (!variant->instrlen) {
      tu_cs_emit_pkt4(cs, reg, 2);
      tu_cs_emit_qw(cs, 0);
      return;
   }

   assert(variant->type == stage);

   const uint64_t binary_iova = binary_bo->iova + binary_offset;
   assert((binary_iova & 0x3) == 0);

   tu_cs_emit_pkt4(cs, reg, 2);
   tu_cs_emit_qw(cs, binary_iova);

   /* always indirect */
   const bool indirect = true;
   if (indirect) {
      tu_cs_emit_pkt7(cs, opcode, 3);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
                     CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
                     CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                     CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
                     CP_LOAD_STATE6_0_NUM_UNIT(variant->instrlen));
      tu_cs_emit_qw(cs, binary_iova);
   } else {
      const void *binary = binary_bo->map + binary_offset;

      tu_cs_emit_pkt7(cs, opcode, 3 + variant->info.sizedwords);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
                     CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
                     CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                     CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
                     CP_LOAD_STATE6_0_NUM_UNIT(variant->instrlen));
      tu_cs_emit_qw(cs, 0);
      tu_cs_emit_array(cs, binary, variant->info.sizedwords);
   }
}
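
/* Upload the variant's immediate constants inline (SS6_DIRECT), four dwords
 * per constant vector, starting at the const-file offset reserved for
 * immediates.
 */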

static void
tu6_emit_immediates(struct tu_cs *cs, const struct ir3_shader_variant *v,
                    uint32_t opcode, enum a6xx_state_block block)
{
   /* dummy variant */
   if (!v->shader)
      return;

   const struct ir3_const_state *const_state = &v->shader->const_state;
   uint32_t base = const_state->offsets.immediate;
   int size = const_state->immediates_count;

   /* truncate size to avoid writing constants that shader
    * does not use:
    */
   size = MIN2(size + base, v->constlen) - base;

   if (size <= 0)
      return;

   tu_cs_emit_pkt7(cs, opcode, 3 + size * 4);
   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(base) |
                  CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                  CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                  CP_LOAD_STATE6_0_STATE_BLOCK(block) |
                  CP_LOAD_STATE6_0_NUM_UNIT(size));
   tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));

   for (unsigned i = 0; i < size; i++) {
      tu_cs_emit(cs, const_state->immediates[i].val[0]);
      tu_cs_emit(cs, const_state->immediates[i].val[1]);
      tu_cs_emit(cs, const_state->immediates[i].val[2]);
      tu_cs_emit(cs, const_state->immediates[i].val[3]);
   }
}
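
/* Emit all shader program state for either the rendering or the binning
 * pass; the binning pass uses the VS binning variant and a dummy FS.
 */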

static void
tu6_emit_program(struct tu_cs *cs,
                 const struct tu_pipeline_builder *builder,
                 const struct tu_bo *binary_bo,
                 bool binning_pass)
{
   static const struct ir3_shader_variant dummy_variant = {
      .type = MESA_SHADER_NONE
   };
   assert(builder->shaders[MESA_SHADER_VERTEX]);
   const struct ir3_shader_variant *vs =
      &builder->shaders[MESA_SHADER_VERTEX]->variants[0];
   const struct ir3_shader_variant *hs =
      builder->shaders[MESA_SHADER_TESS_CTRL]
         ? &builder->shaders[MESA_SHADER_TESS_CTRL]->variants[0]
         : &dummy_variant;
   const struct ir3_shader_variant *ds =
      builder->shaders[MESA_SHADER_TESS_EVAL]
         ? &builder->shaders[MESA_SHADER_TESS_EVAL]->variants[0]
         : &dummy_variant;
   const struct ir3_shader_variant *gs =
      builder->shaders[MESA_SHADER_GEOMETRY]
         ? &builder->shaders[MESA_SHADER_GEOMETRY]->variants[0]
         : &dummy_variant;
   const struct ir3_shader_variant *fs =
      builder->shaders[MESA_SHADER_FRAGMENT]
         ? &builder->shaders[MESA_SHADER_FRAGMENT]->variants[0]
         : &dummy_variant;

   if (binning_pass) {
      vs = &builder->shaders[MESA_SHADER_VERTEX]->variants[1];
      fs = &dummy_variant;
   }

   tu6_emit_vs_config(cs, builder->shaders[MESA_SHADER_VERTEX], vs);
   tu6_emit_hs_config(cs, builder->shaders[MESA_SHADER_TESS_CTRL], hs);
   tu6_emit_ds_config(cs, builder->shaders[MESA_SHADER_TESS_EVAL], ds);
   tu6_emit_gs_config(cs, builder->shaders[MESA_SHADER_GEOMETRY], gs);
   tu6_emit_fs_config(cs, builder->shaders[MESA_SHADER_FRAGMENT], fs);

   tu6_emit_vs_system_values(cs, vs);
   tu6_emit_vpc(cs, vs, fs, binning_pass);
   tu6_emit_vpc_varying_modes(cs, fs, binning_pass);
   tu6_emit_fs_inputs(cs, fs);
   tu6_emit_fs_outputs(cs, fs, builder->color_attachment_count);

   tu6_emit_shader_object(cs, MESA_SHADER_VERTEX, vs, binary_bo,
                          binning_pass
                             ? builder->binning_vs_offset
                             : builder->shader_offsets[MESA_SHADER_VERTEX]);

   tu6_emit_shader_object(cs, MESA_SHADER_FRAGMENT, fs, binary_bo,
                          builder->shader_offsets[MESA_SHADER_FRAGMENT]);

   tu6_emit_immediates(cs, vs, CP_LOAD_STATE6_GEOM, SB6_VS_SHADER);

   tu6_emit_immediates(cs, fs, CP_LOAD_STATE6_FRAG, SB6_FS_SHADER);
}

static void
tu6_emit_vertex_input(struct tu_cs *cs,
                      const struct ir3_shader_variant *vs,
                      const VkPipelineVertexInputStateCreateInfo *vi_info,
                      uint8_t bindings[MAX_VERTEX_ATTRIBS],
                      uint16_t strides[MAX_VERTEX_ATTRIBS],
                      uint16_t offsets[MAX_VERTEX_ATTRIBS],
                      uint32_t *count)
{
   uint32_t vfd_decode_idx = 0;

   for (uint32_t i = 0; i < vs->inputs_count; i++) {
      if (vs->inputs[i].sysval || !vs->inputs[i].compmask)
         continue;

      const VkVertexInputAttributeDescription *vi_attr =
         tu_find_vertex_input_attribute(vi_info, vs->inputs[i].slot);
      const VkVertexInputBindingDescription *vi_binding =
         tu_find_vertex_input_binding(vi_info, vi_attr);
      assert(vi_attr && vi_binding);

      const struct tu_native_format *format =
         tu6_get_native_format(vi_attr->format);
      assert(format && format->vtx >= 0);

      uint32_t vfd_decode = A6XX_VFD_DECODE_INSTR_IDX(vfd_decode_idx) |
                            A6XX_VFD_DECODE_INSTR_FORMAT(format->vtx) |
                            A6XX_VFD_DECODE_INSTR_SWAP(format->swap) |
                            A6XX_VFD_DECODE_INSTR_UNK30;
      if (vi_binding->inputRate == VK_VERTEX_INPUT_RATE_INSTANCE)
         vfd_decode |= A6XX_VFD_DECODE_INSTR_INSTANCED;
      if (!vk_format_is_int(vi_attr->format))
         vfd_decode |= A6XX_VFD_DECODE_INSTR_FLOAT;

      const uint32_t vfd_decode_step_rate = 1;

      const uint32_t vfd_dest_cntl =
         A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK(vs->inputs[i].compmask) |
         A6XX_VFD_DEST_CNTL_INSTR_REGID(vs->inputs[i].regid);

      tu_cs_emit_pkt4(cs, REG_A6XX_VFD_DECODE(vfd_decode_idx), 2);
      tu_cs_emit(cs, vfd_decode);
      tu_cs_emit(cs, vfd_decode_step_rate);

      tu_cs_emit_pkt4(cs, REG_A6XX_VFD_DEST_CNTL(vfd_decode_idx), 1);
      tu_cs_emit(cs, vfd_dest_cntl);

      bindings[vfd_decode_idx] = vi_binding->binding;
      strides[vfd_decode_idx] = vi_binding->stride;
      offsets[vfd_decode_idx] = vi_attr->offset;

      vfd_decode_idx++;
   }

   assert(vfd_decode_idx <= MAX_VERTEX_ATTRIBS);

   tu_cs_emit_pkt4(cs, REG_A6XX_VFD_CONTROL_0, 1);
   tu_cs_emit(
      cs, A6XX_VFD_CONTROL_0_VTXCNT(vfd_decode_idx) | (vfd_decode_idx << 8));

   *count = vfd_decode_idx;
}
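
/* Guardband clip adjustment: 511 for viewports up to 256 pixels, shrinking
 * logarithmically as the viewport grows beyond that.
 */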

static uint32_t
tu6_guardband_adj(uint32_t v)
{
   if (v > 256)
      return (uint32_t)(511.0 - 65.0 * (log2(v) - 8.0));
   else
      return 511;
}

void
tu6_emit_viewport(struct tu_cs *cs, const VkViewport *viewport)
{
   float offsets[3];
   float scales[3];
   scales[0] = viewport->width / 2.0f;
   scales[1] = viewport->height / 2.0f;
   scales[2] = viewport->maxDepth - viewport->minDepth;
   offsets[0] = viewport->x + scales[0];
   offsets[1] = viewport->y + scales[1];
   offsets[2] = viewport->minDepth;

   VkOffset2D min;
   VkOffset2D max;
   min.x = (int32_t) viewport->x;
   max.x = (int32_t) ceilf(viewport->x + viewport->width);
   if (viewport->height >= 0.0f) {
      min.y = (int32_t) viewport->y;
      max.y = (int32_t) ceilf(viewport->y + viewport->height);
   } else {
      min.y = (int32_t)(viewport->y + viewport->height);
      max.y = (int32_t) ceilf(viewport->y);
   }
   /* the spec allows viewport->height to be 0.0f */
   if (min.y == max.y)
      max.y++;
   assert(min.x >= 0 && min.x < max.x);
   assert(min.y >= 0 && min.y < max.y);

   VkExtent2D guardband_adj;
   guardband_adj.width = tu6_guardband_adj(max.x - min.x);
   guardband_adj.height = tu6_guardband_adj(max.y - min.y);

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CL_VPORT_XOFFSET_0, 6);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_XOFFSET_0(offsets[0]).value);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_XSCALE_0(scales[0]).value);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_YOFFSET_0(offsets[1]).value);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_YSCALE_0(scales[1]).value);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_ZOFFSET_0(offsets[2]).value);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_ZSCALE_0(scales[2]).value);

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0, 2);
   tu_cs_emit(cs, A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(min.x) |
                  A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(min.y));
   tu_cs_emit(cs, A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(max.x - 1) |
                  A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(max.y - 1));

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ, 1);
   tu_cs_emit(cs,
              A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(guardband_adj.width) |
              A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(guardband_adj.height));
}

void
tu6_emit_scissor(struct tu_cs *cs, const VkRect2D *scissor)
{
   const VkOffset2D min = scissor->offset;
   const VkOffset2D max = {
      scissor->offset.x + scissor->extent.width,
      scissor->offset.y + scissor->extent.height,
   };

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0, 2);
   tu_cs_emit(cs, A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(min.x) |
                  A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(min.y));
   tu_cs_emit(cs, A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(max.x - 1) |
                  A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(max.y - 1));
}

static void
tu6_emit_gras_unknowns(struct tu_cs *cs)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_8000, 1);
   tu_cs_emit(cs, 0x80);
   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_8001, 1);
   tu_cs_emit(cs, 0x0);
   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_LAYER_CNTL, 1);
   tu_cs_emit(cs, 0x0);
}

static void
tu6_emit_point_size(struct tu_cs *cs)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_POINT_MINMAX, 2);
   tu_cs_emit(cs, A6XX_GRAS_SU_POINT_MINMAX_MIN(1.0f / 16.0f) |
                  A6XX_GRAS_SU_POINT_MINMAX_MAX(4092.0f));
   tu_cs_emit(cs, A6XX_GRAS_SU_POINT_SIZE(1.0f).value);
}

static uint32_t
tu6_gras_su_cntl(const VkPipelineRasterizationStateCreateInfo *rast_info,
                 VkSampleCountFlagBits samples)
{
   uint32_t gras_su_cntl = 0;

   if (rast_info->cullMode & VK_CULL_MODE_FRONT_BIT)
      gras_su_cntl |= A6XX_GRAS_SU_CNTL_CULL_FRONT;
   if (rast_info->cullMode & VK_CULL_MODE_BACK_BIT)
      gras_su_cntl |= A6XX_GRAS_SU_CNTL_CULL_BACK;

   if (rast_info->frontFace == VK_FRONT_FACE_CLOCKWISE)
      gras_su_cntl |= A6XX_GRAS_SU_CNTL_FRONT_CW;

   /* don't set A6XX_GRAS_SU_CNTL_LINEHALFWIDTH */

   if (rast_info->depthBiasEnable)
      gras_su_cntl |= A6XX_GRAS_SU_CNTL_POLY_OFFSET;

   if (samples > VK_SAMPLE_COUNT_1_BIT)
      gras_su_cntl |= A6XX_GRAS_SU_CNTL_MSAA_ENABLE;

   return gras_su_cntl;
}

void
tu6_emit_gras_su_cntl(struct tu_cs *cs,
                      uint32_t gras_su_cntl,
                      float line_width)
{
   assert((gras_su_cntl & A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK) == 0);
   gras_su_cntl |= A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(line_width / 2.0f);

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_CNTL, 1);
   tu_cs_emit(cs, gras_su_cntl);
}

void
tu6_emit_depth_bias(struct tu_cs *cs,
                    float constant_factor,
                    float clamp,
                    float slope_factor)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE, 3);
   tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_SCALE(slope_factor).value);
   tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_OFFSET(constant_factor).value);
   tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(clamp).value);
}

static void
tu6_emit_alpha_control_disable(struct tu_cs *cs)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_RB_ALPHA_CONTROL, 1);
   tu_cs_emit(cs, 0);
}

static void
tu6_emit_depth_control(struct tu_cs *cs,
                       const VkPipelineDepthStencilStateCreateInfo *ds_info)
{
   assert(!ds_info->depthBoundsTestEnable);

   uint32_t rb_depth_cntl = 0;
   if (ds_info->depthTestEnable) {
      rb_depth_cntl |=
         A6XX_RB_DEPTH_CNTL_Z_ENABLE |
         A6XX_RB_DEPTH_CNTL_ZFUNC(tu6_compare_func(ds_info->depthCompareOp)) |
         A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE;

      if (ds_info->depthWriteEnable)
         rb_depth_cntl |= A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE;
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_CNTL, 1);
   tu_cs_emit(cs, rb_depth_cntl);
}

static void
tu6_emit_stencil_control(struct tu_cs *cs,
                         const VkPipelineDepthStencilStateCreateInfo *ds_info)
{
   uint32_t rb_stencil_control = 0;
   if (ds_info->stencilTestEnable) {
      const VkStencilOpState *front = &ds_info->front;
      const VkStencilOpState *back = &ds_info->back;
      rb_stencil_control |=
         A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE |
         A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF |
         A6XX_RB_STENCIL_CONTROL_STENCIL_READ |
         A6XX_RB_STENCIL_CONTROL_FUNC(tu6_compare_func(front->compareOp)) |
         A6XX_RB_STENCIL_CONTROL_FAIL(tu6_stencil_op(front->failOp)) |
         A6XX_RB_STENCIL_CONTROL_ZPASS(tu6_stencil_op(front->passOp)) |
         A6XX_RB_STENCIL_CONTROL_ZFAIL(tu6_stencil_op(front->depthFailOp)) |
         A6XX_RB_STENCIL_CONTROL_FUNC_BF(tu6_compare_func(back->compareOp)) |
         A6XX_RB_STENCIL_CONTROL_FAIL_BF(tu6_stencil_op(back->failOp)) |
         A6XX_RB_STENCIL_CONTROL_ZPASS_BF(tu6_stencil_op(back->passOp)) |
         A6XX_RB_STENCIL_CONTROL_ZFAIL_BF(tu6_stencil_op(back->depthFailOp));
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCIL_CONTROL, 1);
   tu_cs_emit(cs, rb_stencil_control);
}

void
tu6_emit_stencil_compare_mask(struct tu_cs *cs, uint32_t front, uint32_t back)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCILMASK, 1);
   tu_cs_emit(
      cs, A6XX_RB_STENCILMASK_MASK(front) | A6XX_RB_STENCILMASK_BFMASK(back));
}

void
tu6_emit_stencil_write_mask(struct tu_cs *cs, uint32_t front, uint32_t back)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCILWRMASK, 1);
   tu_cs_emit(cs, A6XX_RB_STENCILWRMASK_WRMASK(front) |
                  A6XX_RB_STENCILWRMASK_BFWRMASK(back));
}

void
tu6_emit_stencil_reference(struct tu_cs *cs, uint32_t front, uint32_t back)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCILREF, 1);
   tu_cs_emit(cs,
              A6XX_RB_STENCILREF_REF(front) | A6XX_RB_STENCILREF_BFREF(back));
}
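
/* Pack the per-MRT blend state.  When the attachment format has no alpha
 * channel, dst-alpha blend factors are rewritten via
 * tu_blend_factor_no_dst_alpha so the hardware never reads dst alpha.
 */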

static uint32_t
tu6_rb_mrt_blend_control(const VkPipelineColorBlendAttachmentState *att,
                         bool has_alpha)
{
   const enum a3xx_rb_blend_opcode color_op = tu6_blend_op(att->colorBlendOp);
   const enum adreno_rb_blend_factor src_color_factor = tu6_blend_factor(
      has_alpha ? att->srcColorBlendFactor
                : tu_blend_factor_no_dst_alpha(att->srcColorBlendFactor));
   const enum adreno_rb_blend_factor dst_color_factor = tu6_blend_factor(
      has_alpha ? att->dstColorBlendFactor
                : tu_blend_factor_no_dst_alpha(att->dstColorBlendFactor));
   const enum a3xx_rb_blend_opcode alpha_op = tu6_blend_op(att->alphaBlendOp);
   const enum adreno_rb_blend_factor src_alpha_factor =
      tu6_blend_factor(att->srcAlphaBlendFactor);
   const enum adreno_rb_blend_factor dst_alpha_factor =
      tu6_blend_factor(att->dstAlphaBlendFactor);

   return A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(src_color_factor) |
          A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(color_op) |
          A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(dst_color_factor) |
          A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(src_alpha_factor) |
          A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(alpha_op) |
          A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(dst_alpha_factor);
}

static uint32_t
tu6_rb_mrt_control(const VkPipelineColorBlendAttachmentState *att,
                   uint32_t rb_mrt_control_rop,
                   bool is_int,
                   bool has_alpha)
{
   uint32_t rb_mrt_control =
      A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE(att->colorWriteMask);

   /* ignore blending and logic op for integer attachments */
   if (is_int) {
      rb_mrt_control |= A6XX_RB_MRT_CONTROL_ROP_CODE(ROP_COPY);
      return rb_mrt_control;
   }

   rb_mrt_control |= rb_mrt_control_rop;

   if (att->blendEnable) {
      rb_mrt_control |= A6XX_RB_MRT_CONTROL_BLEND;

      if (!has_alpha)
         rb_mrt_control |= A6XX_RB_MRT_CONTROL_BLEND2;
   }

   return rb_mrt_control;
}

static void
tu6_emit_rb_mrt_controls(struct tu_cs *cs,
                         const VkPipelineColorBlendStateCreateInfo *blend_info,
                         const VkFormat attachment_formats[MAX_RTS],
                         uint32_t *blend_enable_mask)
{
   *blend_enable_mask = 0;

   bool rop_reads_dst = false;
   uint32_t rb_mrt_control_rop = 0;
   if (blend_info->logicOpEnable) {
      rop_reads_dst = tu_logic_op_reads_dst(blend_info->logicOp);
      rb_mrt_control_rop =
         A6XX_RB_MRT_CONTROL_ROP_ENABLE |
         A6XX_RB_MRT_CONTROL_ROP_CODE(tu6_rop(blend_info->logicOp));
   }

   for (uint32_t i = 0; i < blend_info->attachmentCount; i++) {
      const VkPipelineColorBlendAttachmentState *att =
         &blend_info->pAttachments[i];
      const VkFormat format = attachment_formats[i];

      uint32_t rb_mrt_control = 0;
      uint32_t rb_mrt_blend_control = 0;
      if (format != VK_FORMAT_UNDEFINED) {
         const bool is_int = vk_format_is_int(format);
         const bool has_alpha = vk_format_has_alpha(format);

         rb_mrt_control =
            tu6_rb_mrt_control(att, rb_mrt_control_rop, is_int, has_alpha);
         rb_mrt_blend_control = tu6_rb_mrt_blend_control(att, has_alpha);

         if (att->blendEnable || rop_reads_dst)
            *blend_enable_mask |= 1 << i;
      }

      tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_CONTROL(i), 2);
      tu_cs_emit(cs, rb_mrt_control);
      tu_cs_emit(cs, rb_mrt_blend_control);
   }

   for (uint32_t i = blend_info->attachmentCount; i < MAX_RTS; i++) {
      tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_CONTROL(i), 2);
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, 0);
   }
}

static void
tu6_emit_blend_control(struct tu_cs *cs,
                       uint32_t blend_enable_mask,
                       const VkPipelineMultisampleStateCreateInfo *msaa_info)
{
   assert(!msaa_info->sampleShadingEnable);
   assert(!msaa_info->alphaToOneEnable);

   uint32_t sp_blend_cntl = A6XX_SP_BLEND_CNTL_UNK8;
   if (blend_enable_mask)
      sp_blend_cntl |= A6XX_SP_BLEND_CNTL_ENABLED;
   if (msaa_info->alphaToCoverageEnable)
      sp_blend_cntl |= A6XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE;

   const uint32_t sample_mask =
      msaa_info->pSampleMask ? *msaa_info->pSampleMask
                             : ((1 << msaa_info->rasterizationSamples) - 1);

   /* set A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND only when enabled? */
   uint32_t rb_blend_cntl =
      A6XX_RB_BLEND_CNTL_ENABLE_BLEND(blend_enable_mask) |
      A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND |
      A6XX_RB_BLEND_CNTL_SAMPLE_MASK(sample_mask);
   if (msaa_info->alphaToCoverageEnable)
      rb_blend_cntl |= A6XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE;

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_BLEND_CNTL, 1);
   tu_cs_emit(cs, sp_blend_cntl);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLEND_CNTL, 1);
   tu_cs_emit(cs, rb_blend_cntl);
}

void
tu6_emit_blend_constants(struct tu_cs *cs, const float constants[4])
{
   tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLEND_RED_F32, 4);
   tu_cs_emit_array(cs, (const uint32_t *) constants, 4);
}

static VkResult
tu_pipeline_create(struct tu_device *dev,
                   const VkAllocationCallbacks *pAllocator,
                   struct tu_pipeline **out_pipeline)
{
   struct tu_pipeline *pipeline =
      vk_zalloc2(&dev->alloc, pAllocator, sizeof(*pipeline), 8,
                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!pipeline)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   tu_cs_init(&pipeline->cs, TU_CS_MODE_SUB_STREAM, 2048);

   /* reserve the space now such that tu_cs_begin_sub_stream never fails */
   VkResult result = tu_cs_reserve_space(dev, &pipeline->cs, 2048);
   if (result != VK_SUCCESS) {
      vk_free2(&dev->alloc, pAllocator, pipeline);
      return result;
   }

   *out_pipeline = pipeline;

   return VK_SUCCESS;
}

static VkResult
tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder)
{
   const VkPipelineShaderStageCreateInfo *stage_infos[MESA_SHADER_STAGES] = {
      NULL
   };
   for (uint32_t i = 0; i < builder->create_info->stageCount; i++) {
      gl_shader_stage stage =
         tu_shader_stage(builder->create_info->pStages[i].stage);
      stage_infos[stage] = &builder->create_info->pStages[i];
   }

   struct tu_shader_compile_options options;
   tu_shader_compile_options_init(&options, builder->create_info);

   /* compile shaders in reverse order */
   struct tu_shader *next_stage_shader = NULL;
   for (gl_shader_stage stage = MESA_SHADER_STAGES - 1;
        stage > MESA_SHADER_NONE; stage--) {
      const VkPipelineShaderStageCreateInfo *stage_info = stage_infos[stage];
      if (!stage_info)
         continue;

      struct tu_shader *shader =
         tu_shader_create(builder->device, stage, stage_info, builder->layout,
                          builder->alloc);
      if (!shader)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      VkResult result =
         tu_shader_compile(builder->device, shader, next_stage_shader,
                           &options, builder->alloc);
      if (result != VK_SUCCESS)
         return result;

      builder->shaders[stage] = shader;
      builder->shader_offsets[stage] = builder->shader_total_size;
      builder->shader_total_size +=
         sizeof(uint32_t) * shader->variants[0].info.sizedwords;

      next_stage_shader = shader;
   }

   if (builder->shaders[MESA_SHADER_VERTEX]->has_binning_pass) {
      const struct tu_shader *vs = builder->shaders[MESA_SHADER_VERTEX];
      builder->binning_vs_offset = builder->shader_total_size;
      builder->shader_total_size +=
         sizeof(uint32_t) * vs->variants[1].info.sizedwords;
   }

   return VK_SUCCESS;
}

static VkResult
tu_pipeline_builder_upload_shaders(struct tu_pipeline_builder *builder,
                                   struct tu_pipeline *pipeline)
{
   struct tu_bo *bo = &pipeline->program.binary_bo;

   VkResult result =
      tu_bo_init_new(builder->device, bo, builder->shader_total_size);
   if (result != VK_SUCCESS)
      return result;

   result = tu_bo_map(builder->device, bo);
   if (result != VK_SUCCESS)
      return result;

   for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++) {
      const struct tu_shader *shader = builder->shaders[i];
      if (!shader)
         continue;

      memcpy(bo->map + builder->shader_offsets[i], shader->binary,
             sizeof(uint32_t) * shader->variants[0].info.sizedwords);
   }

   if (builder->shaders[MESA_SHADER_VERTEX]->has_binning_pass) {
      const struct tu_shader *vs = builder->shaders[MESA_SHADER_VERTEX];
      memcpy(bo->map + builder->binning_vs_offset, vs->binning_binary,
             sizeof(uint32_t) * vs->variants[1].info.sizedwords);
   }

   return VK_SUCCESS;
}

static void
tu_pipeline_builder_parse_dynamic(struct tu_pipeline_builder *builder,
                                  struct tu_pipeline *pipeline)
{
   const VkPipelineDynamicStateCreateInfo *dynamic_info =
      builder->create_info->pDynamicState;

   if (!dynamic_info)
      return;

   for (uint32_t i = 0; i < dynamic_info->dynamicStateCount; i++) {
      pipeline->dynamic_state.mask |=
         tu_dynamic_state_bit(dynamic_info->pDynamicStates[i]);
   }
}

static void
tu_pipeline_set_linkage(struct tu_program_descriptor_linkage *link,
                        struct tu_shader *shader,
                        struct ir3_shader_variant *v)
{
   link->ubo_state = v->shader->ubo_state;
   link->const_state = v->shader->const_state;
   link->constlen = v->constlen;
   link->texture_map = shader->texture_map;
   link->sampler_map = shader->sampler_map;
   link->ubo_map = shader->ubo_map;
   link->ssbo_map = shader->ssbo_map;
   link->image_map = shader->image_map;
}

static void
tu_pipeline_builder_parse_shader_stages(struct tu_pipeline_builder *builder,
                                        struct tu_pipeline *pipeline)
{
   struct tu_cs prog_cs;
   tu_cs_begin_sub_stream(builder->device, &pipeline->cs, 512, &prog_cs);
   tu6_emit_program(&prog_cs, builder, &pipeline->program.binary_bo, false);
   pipeline->program.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);

   tu_cs_begin_sub_stream(builder->device, &pipeline->cs, 512, &prog_cs);
   tu6_emit_program(&prog_cs, builder, &pipeline->program.binary_bo, true);
   pipeline->program.binning_state_ib =
      tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);

   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      if (!builder->shaders[i])
         continue;

      tu_pipeline_set_linkage(&pipeline->program.link[i],
                              builder->shaders[i],
                              &builder->shaders[i]->variants[0]);
   }
}

static void
tu_pipeline_builder_parse_vertex_input(struct tu_pipeline_builder *builder,
                                       struct tu_pipeline *pipeline)
{
   const VkPipelineVertexInputStateCreateInfo *vi_info =
      builder->create_info->pVertexInputState;
   const struct tu_shader *vs = builder->shaders[MESA_SHADER_VERTEX];

   struct tu_cs vi_cs;
   tu_cs_begin_sub_stream(builder->device, &pipeline->cs,
                          MAX_VERTEX_ATTRIBS * 5 + 2, &vi_cs);
   tu6_emit_vertex_input(&vi_cs, &vs->variants[0], vi_info,
                         pipeline->vi.bindings, pipeline->vi.strides,
                         pipeline->vi.offsets, &pipeline->vi.count);
   pipeline->vi.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &vi_cs);

   if (vs->has_binning_pass) {
      tu_cs_begin_sub_stream(builder->device, &pipeline->cs,
                             MAX_VERTEX_ATTRIBS * 5 + 2, &vi_cs);
      tu6_emit_vertex_input(
         &vi_cs, &vs->variants[1], vi_info, pipeline->vi.binning_bindings,
         pipeline->vi.binning_strides, pipeline->vi.binning_offsets,
         &pipeline->vi.binning_count);
      pipeline->vi.binning_state_ib =
         tu_cs_end_sub_stream(&pipeline->cs, &vi_cs);
   }
}

static void
tu_pipeline_builder_parse_input_assembly(struct tu_pipeline_builder *builder,
                                         struct tu_pipeline *pipeline)
{
   const VkPipelineInputAssemblyStateCreateInfo *ia_info =
      builder->create_info->pInputAssemblyState;

   pipeline->ia.primtype = tu6_primtype(ia_info->topology);
   pipeline->ia.primitive_restart = ia_info->primitiveRestartEnable;
}

static void
tu_pipeline_builder_parse_viewport(struct tu_pipeline_builder *builder,
                                   struct tu_pipeline *pipeline)
{
   /* The spec says:
    *
    *    pViewportState is a pointer to an instance of the
    *    VkPipelineViewportStateCreateInfo structure, and is ignored if the
    *    pipeline has rasterization disabled."
    *
    * We leave the relevant registers stale in that case.
    */
   if (builder->rasterizer_discard)
      return;

   const VkPipelineViewportStateCreateInfo *vp_info =
      builder->create_info->pViewportState;

   struct tu_cs vp_cs;
   tu_cs_begin_sub_stream(builder->device, &pipeline->cs, 15, &vp_cs);

   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_VIEWPORT)) {
      assert(vp_info->viewportCount == 1);
      tu6_emit_viewport(&vp_cs, vp_info->pViewports);
   }

   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_SCISSOR)) {
      assert(vp_info->scissorCount == 1);
      tu6_emit_scissor(&vp_cs, vp_info->pScissors);
   }

   pipeline->vp.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &vp_cs);
}

static void
tu_pipeline_builder_parse_rasterization(struct tu_pipeline_builder *builder,
                                        struct tu_pipeline *pipeline)
{
   const VkPipelineRasterizationStateCreateInfo *rast_info =
      builder->create_info->pRasterizationState;

   assert(!rast_info->depthClampEnable);
   assert(rast_info->polygonMode == VK_POLYGON_MODE_FILL);

   struct tu_cs rast_cs;
   tu_cs_begin_sub_stream(builder->device, &pipeline->cs, 20, &rast_cs);

   /* move to hw ctx init? */
   tu6_emit_gras_unknowns(&rast_cs);
   tu6_emit_point_size(&rast_cs);

   const uint32_t gras_su_cntl =
      tu6_gras_su_cntl(rast_info, builder->samples);

   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_LINE_WIDTH))
      tu6_emit_gras_su_cntl(&rast_cs, gras_su_cntl, rast_info->lineWidth);

   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_DEPTH_BIAS)) {
      tu6_emit_depth_bias(&rast_cs, rast_info->depthBiasConstantFactor,
                          rast_info->depthBiasClamp,
                          rast_info->depthBiasSlopeFactor);
   }

   pipeline->rast.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &rast_cs);

   pipeline->rast.gras_su_cntl = gras_su_cntl;
}
static void
tu_pipeline_builder_parse_depth_stencil(struct tu_pipeline_builder *builder,
                                        struct tu_pipeline *pipeline)
{
   /* The spec says:
    *
    *    "pDepthStencilState is a pointer to an instance of the
    *    VkPipelineDepthStencilStateCreateInfo structure, and is ignored if
    *    the pipeline has rasterization disabled or if the subpass of the
    *    render pass the pipeline is created against does not use a
    *    depth/stencil attachment."
    *
    * We disable both depth and stencil tests in those cases.
    */
   static const VkPipelineDepthStencilStateCreateInfo dummy_ds_info;
   const VkPipelineDepthStencilStateCreateInfo *ds_info =
      builder->use_depth_stencil_attachment
         ? builder->create_info->pDepthStencilState
         : &dummy_ds_info;

   struct tu_cs ds_cs;
   tu_cs_begin_sub_stream(builder->device, &pipeline->cs, 12, &ds_cs);

   /* move to hw ctx init? */
   tu6_emit_alpha_control_disable(&ds_cs);

   tu6_emit_depth_control(&ds_cs, ds_info);
   tu6_emit_stencil_control(&ds_cs, ds_info);

   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_COMPARE_MASK)) {
      tu6_emit_stencil_compare_mask(&ds_cs, ds_info->front.compareMask,
                                    ds_info->back.compareMask);
   }
   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_WRITE_MASK)) {
      tu6_emit_stencil_write_mask(&ds_cs, ds_info->front.writeMask,
                                  ds_info->back.writeMask);
   }
   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_REFERENCE)) {
      tu6_emit_stencil_reference(&ds_cs, ds_info->front.reference,
                                 ds_info->back.reference);
   }

   pipeline->ds.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &ds_cs);
}

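/* The zero-initialized dummy_ds_info above is enough to disable the tests:
 * VK_FALSE is 0, so a zeroed struct already reads as all tests off and the
 * remaining fields are never consulted. Conceptually it is the same as:
 *
 *    const VkPipelineDepthStencilStateCreateInfo all_off = {
 *       .depthTestEnable = VK_FALSE,
 *       .depthBoundsTestEnable = VK_FALSE,
 *       .stencilTestEnable = VK_FALSE,
 *    };
 */
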
static void
tu_pipeline_builder_parse_multisample_and_color_blend(
   struct tu_pipeline_builder *builder, struct tu_pipeline *pipeline)
{
   /* The spec says:
    *
    *    "pMultisampleState is a pointer to an instance of the
    *    VkPipelineMultisampleStateCreateInfo, and is ignored if the pipeline
    *    has rasterization disabled."
    *
    * Also,
    *
    *    "pColorBlendState is a pointer to an instance of the
    *    VkPipelineColorBlendStateCreateInfo structure, and is ignored if the
    *    pipeline has rasterization disabled or if the subpass of the render
    *    pass the pipeline is created against does not use any color
    *    attachments."
    *
    * We leave the relevant registers stale when rasterization is disabled.
    */
   if (builder->rasterizer_discard)
      return;

   static const VkPipelineColorBlendStateCreateInfo dummy_blend_info;
   const VkPipelineMultisampleStateCreateInfo *msaa_info =
      builder->create_info->pMultisampleState;
   const VkPipelineColorBlendStateCreateInfo *blend_info =
      builder->use_color_attachments ? builder->create_info->pColorBlendState
                                     : &dummy_blend_info;

   struct tu_cs blend_cs;
   tu_cs_begin_sub_stream(builder->device, &pipeline->cs, MAX_RTS * 3 + 9,
                          &blend_cs);

   uint32_t blend_enable_mask;
   tu6_emit_rb_mrt_controls(&blend_cs, blend_info,
                            builder->color_attachment_formats,
                            &blend_enable_mask);

   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_BLEND_CONSTANTS))
      tu6_emit_blend_constants(&blend_cs, blend_info->blendConstants);

   tu6_emit_blend_control(&blend_cs, blend_enable_mask, msaa_info);

   pipeline->blend.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &blend_cs);
}

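/* dummy_blend_info relies on the same zero-init trick: its attachmentCount
 * is 0, so presumably no per-RT state is taken from it and blend_enable_mask
 * comes back 0, which is what a subpass without color attachments needs:
 *
 *    const VkPipelineColorBlendStateCreateInfo no_color = {
 *       .attachmentCount = 0,   // nothing for the per-RT controls to read
 *    };
 */
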
static void
tu_pipeline_finish(struct tu_pipeline *pipeline,
                   struct tu_device *dev,
                   const VkAllocationCallbacks *alloc)
{
   tu_cs_finish(dev, &pipeline->cs);

   if (pipeline->program.binary_bo.gem_handle)
      tu_bo_finish(dev, &pipeline->program.binary_bo);
}

static VkResult
tu_pipeline_builder_build(struct tu_pipeline_builder *builder,
                          struct tu_pipeline **pipeline)
{
   VkResult result = tu_pipeline_create(builder->device, builder->alloc,
                                        pipeline);
   if (result != VK_SUCCESS)
      return result;

   /* compile and upload shaders */
   result = tu_pipeline_builder_compile_shaders(builder);
   if (result == VK_SUCCESS)
      result = tu_pipeline_builder_upload_shaders(builder, *pipeline);
   if (result != VK_SUCCESS) {
      tu_pipeline_finish(*pipeline, builder->device, builder->alloc);
      vk_free2(&builder->device->alloc, builder->alloc, *pipeline);
      *pipeline = VK_NULL_HANDLE;

      return result;
   }

   tu_pipeline_builder_parse_dynamic(builder, *pipeline);
   tu_pipeline_builder_parse_shader_stages(builder, *pipeline);
   tu_pipeline_builder_parse_vertex_input(builder, *pipeline);
   tu_pipeline_builder_parse_input_assembly(builder, *pipeline);
   tu_pipeline_builder_parse_viewport(builder, *pipeline);
   tu_pipeline_builder_parse_rasterization(builder, *pipeline);
   tu_pipeline_builder_parse_depth_stencil(builder, *pipeline);
   tu_pipeline_builder_parse_multisample_and_color_blend(builder, *pipeline);

   /* we should have reserved enough space upfront such that the CS never
    * grows
    */
   assert((*pipeline)->cs.bo_count == 1);

   return VK_SUCCESS;
}

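/* All of the parse_* helpers share one sub-stream pattern: carve a
 * fixed-size slice out of pipeline->cs, fill it with state packets, and
 * keep the returned IB entry for the command buffer to call into later.
 * A sketch of the pattern (sizes are upper bounds picked per call site):
 *
 *    struct tu_cs sub;
 *    tu_cs_begin_sub_stream(builder->device, &pipeline->cs, size, &sub);
 *    ...emit state packets into sub...
 *    pipeline->xxx.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &sub);
 *
 * The bo_count == 1 assertion above is the backstop: had any reservation
 * been too small, the CS would have grown into a second BO.
 */
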
static void
tu_pipeline_builder_finish(struct tu_pipeline_builder *builder)
{
   for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++) {
      if (!builder->shaders[i])
         continue;
      tu_shader_destroy(builder->device, builder->shaders[i], builder->alloc);
   }
}

static void
tu_pipeline_builder_init_graphics(
   struct tu_pipeline_builder *builder,
   struct tu_device *dev,
   struct tu_pipeline_cache *cache,
   const VkGraphicsPipelineCreateInfo *create_info,
   const VkAllocationCallbacks *alloc)
{
   TU_FROM_HANDLE(tu_pipeline_layout, layout, create_info->layout);

   *builder = (struct tu_pipeline_builder) {
      .device = dev,
      .cache = cache,
      .layout = layout,
      .create_info = create_info,
      .alloc = alloc,
   };

   builder->rasterizer_discard =
      create_info->pRasterizationState->rasterizerDiscardEnable;

   if (builder->rasterizer_discard) {
      builder->samples = VK_SAMPLE_COUNT_1_BIT;
   } else {
      builder->samples = create_info->pMultisampleState->rasterizationSamples;

      const struct tu_render_pass *pass =
         tu_render_pass_from_handle(create_info->renderPass);
      const struct tu_subpass *subpass =
         &pass->subpasses[create_info->subpass];

      builder->use_depth_stencil_attachment =
         subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED;

      assert(subpass->color_count == 0 ||
             !create_info->pColorBlendState ||
             subpass->color_count ==
                create_info->pColorBlendState->attachmentCount);
      builder->color_attachment_count = subpass->color_count;
      for (uint32_t i = 0; i < subpass->color_count; i++) {
         const uint32_t a = subpass->color_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         builder->color_attachment_formats[i] = pass->attachments[a].format;
         builder->use_color_attachments = true;
      }
   }
}

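/* For context: rasterizer discard short-circuits most of this builder. An
 * app requests it like so (illustrative, app-side code):
 *
 *    const VkPipelineRasterizationStateCreateInfo rs_info = {
 *       .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
 *       .rasterizerDiscardEnable = VK_TRUE,
 *    };
 *
 * in which case pViewportState, pMultisampleState, pDepthStencilState and
 * pColorBlendState may all be ignored, which is why the sample count is
 * forced to 1 and the attachment fields are left at their zero defaults.
 */
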
static VkResult
tu_graphics_pipeline_create(VkDevice device,
                            VkPipelineCache pipelineCache,
                            const VkGraphicsPipelineCreateInfo *pCreateInfo,
                            const VkAllocationCallbacks *pAllocator,
                            VkPipeline *pPipeline)
{
   TU_FROM_HANDLE(tu_device, dev, device);
   TU_FROM_HANDLE(tu_pipeline_cache, cache, pipelineCache);

   struct tu_pipeline_builder builder;
   tu_pipeline_builder_init_graphics(&builder, dev, cache,
                                     pCreateInfo, pAllocator);

   struct tu_pipeline *pipeline = NULL;
   VkResult result = tu_pipeline_builder_build(&builder, &pipeline);
   tu_pipeline_builder_finish(&builder);

   if (result == VK_SUCCESS)
      *pPipeline = tu_pipeline_to_handle(pipeline);
   else
      *pPipeline = VK_NULL_HANDLE;

   return result;
}

VkResult
tu_CreateGraphicsPipelines(VkDevice device,
                           VkPipelineCache pipelineCache,
                           uint32_t count,
                           const VkGraphicsPipelineCreateInfo *pCreateInfos,
                           const VkAllocationCallbacks *pAllocator,
                           VkPipeline *pPipelines)
{
   VkResult final_result = VK_SUCCESS;

   for (uint32_t i = 0; i < count; i++) {
      VkResult result = tu_graphics_pipeline_create(device, pipelineCache,
                                                    &pCreateInfos[i],
                                                    pAllocator, &pPipelines[i]);
      if (result != VK_SUCCESS)
         final_result = result;
   }

   return final_result;
}

static void
tu6_emit_compute_program(struct tu_cs *cs,
                         struct tu_shader *shader,
                         const struct tu_bo *binary_bo)
{
   const struct ir3_shader_variant *v = &shader->variants[0];

   tu6_emit_cs_config(cs, shader, v);

   /* The compute program is the only one in the pipeline, so 0 offset. */
   tu6_emit_shader_object(cs, MESA_SHADER_COMPUTE, v, binary_bo, 0);

   tu6_emit_immediates(cs, v, CP_LOAD_STATE6_FRAG, SB6_CS_SHADER);
}

static VkResult
tu_compute_upload_shader(VkDevice device,
                         struct tu_pipeline *pipeline,
                         struct tu_shader *shader)
{
   TU_FROM_HANDLE(tu_device, dev, device);
   struct tu_bo *bo = &pipeline->program.binary_bo;
   struct ir3_shader_variant *v = &shader->variants[0];

   uint32_t shader_size = sizeof(uint32_t) * v->info.sizedwords;
   VkResult result = tu_bo_init_new(dev, bo, shader_size);
   if (result != VK_SUCCESS)
      return result;

   result = tu_bo_map(dev, bo);
   if (result != VK_SUCCESS)
      return result;

   memcpy(bo->map, shader->binary, shader_size);

   return VK_SUCCESS;
}

static VkResult
tu_compute_pipeline_create(VkDevice device,
                           VkPipelineCache _cache,
                           const VkComputePipelineCreateInfo *pCreateInfo,
                           const VkAllocationCallbacks *pAllocator,
                           VkPipeline *pPipeline)
{
   TU_FROM_HANDLE(tu_device, dev, device);
   TU_FROM_HANDLE(tu_pipeline_layout, layout, pCreateInfo->layout);
   const VkPipelineShaderStageCreateInfo *stage_info = &pCreateInfo->stage;
   VkResult result;

   struct tu_pipeline *pipeline;

   result = tu_pipeline_create(dev, pAllocator, &pipeline);
   if (result != VK_SUCCESS)
      return result;

   pipeline->layout = layout;

   struct tu_shader_compile_options options;
   tu_shader_compile_options_init(&options, NULL);

   struct tu_shader *shader =
      tu_shader_create(dev, MESA_SHADER_COMPUTE, stage_info, layout, pAllocator);
   if (!shader) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   result = tu_shader_compile(dev, shader, NULL, &options, pAllocator);
   if (result != VK_SUCCESS)
      goto fail;

   struct ir3_shader_variant *v = &shader->variants[0];

   tu_pipeline_set_linkage(&pipeline->program.link[MESA_SHADER_COMPUTE],
                           shader, v);

   result = tu_compute_upload_shader(device, pipeline, shader);
   if (result != VK_SUCCESS)
      goto fail;

   for (int i = 0; i < 3; i++)
      pipeline->compute.local_size[i] = v->shader->nir->info.cs.local_size[i];

   struct tu_cs prog_cs;
   tu_cs_begin_sub_stream(dev, &pipeline->cs, 512, &prog_cs);
   tu6_emit_compute_program(&prog_cs, shader, &pipeline->program.binary_bo);
   pipeline->program.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);

   *pPipeline = tu_pipeline_to_handle(pipeline);

fail:
   /* The shader is no longer needed once its binary has been uploaded and
    * the program state emitted, so the success path falls through here and
    * destroys it as well; only on failure is the pipeline itself torn down.
    */
   if (shader)
      tu_shader_destroy(dev, shader, pAllocator);

   if (result != VK_SUCCESS) {
      tu_pipeline_finish(pipeline, dev, pAllocator);
      vk_free2(&dev->alloc, pAllocator, pipeline);
   }

   return result;
}

VkResult
tu_CreateComputePipelines(VkDevice device,
                          VkPipelineCache pipelineCache,
                          uint32_t count,
                          const VkComputePipelineCreateInfo *pCreateInfos,
                          const VkAllocationCallbacks *pAllocator,
                          VkPipeline *pPipelines)
{
   VkResult final_result = VK_SUCCESS;

   for (uint32_t i = 0; i < count; i++) {
      VkResult result = tu_compute_pipeline_create(device, pipelineCache,
                                                   &pCreateInfos[i],
                                                   pAllocator, &pPipelines[i]);
      if (result != VK_SUCCESS)
         final_result = result;
   }

   return final_result;
}

void
tu_DestroyPipeline(VkDevice _device,
                   VkPipeline _pipeline,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, dev, _device);
   TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);

   /* destroying VK_NULL_HANDLE is a valid no-op */
   if (!_pipeline)
      return;

   tu_pipeline_finish(pipeline, dev, pAllocator);
   vk_free2(&dev->alloc, pAllocator, pipeline);
}