/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "tu_private.h"

#include "ir3/ir3_nir.h"
#include "main/menums.h"
#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"
#include "util/debug.h"
#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "vk_format.h"

#include "tu_cs.h"
struct tu_pipeline_builder
{
   struct tu_device *device;
   struct tu_pipeline_cache *cache;
   struct tu_pipeline_layout *layout;
   const VkAllocationCallbacks *alloc;
   const VkGraphicsPipelineCreateInfo *create_info;

   struct tu_shader *shaders[MESA_SHADER_STAGES];
   uint32_t shader_offsets[MESA_SHADER_STAGES];
   uint32_t binning_vs_offset;
   uint32_t shader_total_size;

   bool rasterizer_discard;
   /* these states are affected by rasterizer_discard */
   VkSampleCountFlagBits samples;
   bool use_depth_stencil_attachment;
   bool use_color_attachments;
   uint32_t color_attachment_count;
   VkFormat color_attachment_formats[MAX_RTS];
};
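
/* Map a VkDynamicState to the corresponding TU_DYNAMIC_* bit used to track
 * which pipeline state is dynamic.
 */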
static enum tu_dynamic_state_bits
tu_dynamic_state_bit(VkDynamicState state)
{
   switch (state) {
   case VK_DYNAMIC_STATE_VIEWPORT:
      return TU_DYNAMIC_VIEWPORT;
   case VK_DYNAMIC_STATE_SCISSOR:
      return TU_DYNAMIC_SCISSOR;
   case VK_DYNAMIC_STATE_LINE_WIDTH:
      return TU_DYNAMIC_LINE_WIDTH;
   case VK_DYNAMIC_STATE_DEPTH_BIAS:
      return TU_DYNAMIC_DEPTH_BIAS;
   case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
      return TU_DYNAMIC_BLEND_CONSTANTS;
   case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
      return TU_DYNAMIC_DEPTH_BOUNDS;
   case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
      return TU_DYNAMIC_STENCIL_COMPARE_MASK;
   case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
      return TU_DYNAMIC_STENCIL_WRITE_MASK;
   case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
      return TU_DYNAMIC_STENCIL_REFERENCE;
   default:
      unreachable("invalid dynamic state");
      return 0;
   }
}
static gl_shader_stage
tu_shader_stage(VkShaderStageFlagBits stage)
{
   switch (stage) {
   case VK_SHADER_STAGE_VERTEX_BIT:
      return MESA_SHADER_VERTEX;
   case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
      return MESA_SHADER_TESS_CTRL;
   case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
      return MESA_SHADER_TESS_EVAL;
   case VK_SHADER_STAGE_GEOMETRY_BIT:
      return MESA_SHADER_GEOMETRY;
   case VK_SHADER_STAGE_FRAGMENT_BIT:
      return MESA_SHADER_FRAGMENT;
   case VK_SHADER_STAGE_COMPUTE_BIT:
      return MESA_SHADER_COMPUTE;
   default:
      unreachable("invalid VkShaderStageFlagBits");
      return MESA_SHADER_NONE;
   }
}
static const VkVertexInputAttributeDescription *
tu_find_vertex_input_attribute(
   const VkPipelineVertexInputStateCreateInfo *vi_info, uint32_t slot)
{
   assert(slot >= VERT_ATTRIB_GENERIC0);
   slot -= VERT_ATTRIB_GENERIC0;
   for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
      if (vi_info->pVertexAttributeDescriptions[i].location == slot)
         return &vi_info->pVertexAttributeDescriptions[i];
   }
   return NULL;
}
static const VkVertexInputBindingDescription *
tu_find_vertex_input_binding(
   const VkPipelineVertexInputStateCreateInfo *vi_info,
   const VkVertexInputAttributeDescription *vi_attr)
{
   assert(vi_attr);
   for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
      if (vi_info->pVertexBindingDescriptions[i].binding == vi_attr->binding)
         return &vi_info->pVertexBindingDescriptions[i];
   }
   return NULL;
}
static bool
tu_logic_op_reads_dst(VkLogicOp op)
{
   switch (op) {
   case VK_LOGIC_OP_CLEAR:
   case VK_LOGIC_OP_COPY:
   case VK_LOGIC_OP_COPY_INVERTED:
   case VK_LOGIC_OP_SET:
      return false;
   default:
      return true;
   }
}
static VkBlendFactor
tu_blend_factor_no_dst_alpha(VkBlendFactor factor)
{
   /* treat dst alpha as 1.0 and avoid reading it */
   switch (factor) {
   case VK_BLEND_FACTOR_DST_ALPHA:
      return VK_BLEND_FACTOR_ONE;
   case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
      return VK_BLEND_FACTOR_ZERO;
   default:
      return factor;
   }
}
static enum pc_di_primtype
tu6_primtype(VkPrimitiveTopology topology)
{
   switch (topology) {
   case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
      return DI_PT_POINTLIST;
   case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
      return DI_PT_LINELIST;
   case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
      return DI_PT_LINESTRIP;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
      return DI_PT_TRILIST;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
      return DI_PT_TRISTRIP;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
      return DI_PT_TRIFAN;
   case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
      return DI_PT_LINE_ADJ;
   case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
      return DI_PT_LINESTRIP_ADJ;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
      return DI_PT_TRI_ADJ;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
      return DI_PT_TRISTRIP_ADJ;
   case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
   default:
      unreachable("invalid primitive topology");
      return DI_PT_NONE;
   }
}
static enum adreno_compare_func
tu6_compare_func(VkCompareOp op)
{
   switch (op) {
   case VK_COMPARE_OP_NEVER:
      return FUNC_NEVER;
   case VK_COMPARE_OP_LESS:
      return FUNC_LESS;
   case VK_COMPARE_OP_EQUAL:
      return FUNC_EQUAL;
   case VK_COMPARE_OP_LESS_OR_EQUAL:
      return FUNC_LEQUAL;
   case VK_COMPARE_OP_GREATER:
      return FUNC_GREATER;
   case VK_COMPARE_OP_NOT_EQUAL:
      return FUNC_NOTEQUAL;
   case VK_COMPARE_OP_GREATER_OR_EQUAL:
      return FUNC_GEQUAL;
   case VK_COMPARE_OP_ALWAYS:
      return FUNC_ALWAYS;
   default:
      unreachable("invalid VkCompareOp");
      return FUNC_NEVER;
   }
}
static enum adreno_stencil_op
tu6_stencil_op(VkStencilOp op)
{
   switch (op) {
   case VK_STENCIL_OP_KEEP:
      return STENCIL_KEEP;
   case VK_STENCIL_OP_ZERO:
      return STENCIL_ZERO;
   case VK_STENCIL_OP_REPLACE:
      return STENCIL_REPLACE;
   case VK_STENCIL_OP_INCREMENT_AND_CLAMP:
      return STENCIL_INCR_CLAMP;
   case VK_STENCIL_OP_DECREMENT_AND_CLAMP:
      return STENCIL_DECR_CLAMP;
   case VK_STENCIL_OP_INVERT:
      return STENCIL_INVERT;
   case VK_STENCIL_OP_INCREMENT_AND_WRAP:
      return STENCIL_INCR_WRAP;
   case VK_STENCIL_OP_DECREMENT_AND_WRAP:
      return STENCIL_DECR_WRAP;
   default:
      unreachable("invalid VkStencilOp");
      return STENCIL_KEEP;
   }
}
static enum a3xx_rop_code
tu6_rop(VkLogicOp op)
{
   switch (op) {
   case VK_LOGIC_OP_CLEAR:
      return ROP_CLEAR;
   case VK_LOGIC_OP_AND:
      return ROP_AND;
   case VK_LOGIC_OP_AND_REVERSE:
      return ROP_AND_REVERSE;
   case VK_LOGIC_OP_COPY:
      return ROP_COPY;
   case VK_LOGIC_OP_AND_INVERTED:
      return ROP_AND_INVERTED;
   case VK_LOGIC_OP_NO_OP:
      return ROP_NOOP;
   case VK_LOGIC_OP_XOR:
      return ROP_XOR;
   case VK_LOGIC_OP_OR:
      return ROP_OR;
   case VK_LOGIC_OP_NOR:
      return ROP_NOR;
   case VK_LOGIC_OP_EQUIVALENT:
      return ROP_XNOR;
   case VK_LOGIC_OP_INVERT:
      return ROP_INVERT;
   case VK_LOGIC_OP_OR_REVERSE:
      return ROP_OR_REVERSE;
   case VK_LOGIC_OP_COPY_INVERTED:
      return ROP_COPY_INVERTED;
   case VK_LOGIC_OP_OR_INVERTED:
      return ROP_OR_INVERTED;
   case VK_LOGIC_OP_NAND:
      return ROP_NAND;
   case VK_LOGIC_OP_SET:
      return ROP_SET;
   default:
      unreachable("invalid VkLogicOp");
      return ROP_COPY;
   }
}
static enum adreno_rb_blend_factor
tu6_blend_factor(VkBlendFactor factor)
{
   switch (factor) {
   case VK_BLEND_FACTOR_ZERO:
      return FACTOR_ZERO;
   case VK_BLEND_FACTOR_ONE:
      return FACTOR_ONE;
   case VK_BLEND_FACTOR_SRC_COLOR:
      return FACTOR_SRC_COLOR;
   case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
      return FACTOR_ONE_MINUS_SRC_COLOR;
   case VK_BLEND_FACTOR_DST_COLOR:
      return FACTOR_DST_COLOR;
   case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
      return FACTOR_ONE_MINUS_DST_COLOR;
   case VK_BLEND_FACTOR_SRC_ALPHA:
      return FACTOR_SRC_ALPHA;
   case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
      return FACTOR_ONE_MINUS_SRC_ALPHA;
   case VK_BLEND_FACTOR_DST_ALPHA:
      return FACTOR_DST_ALPHA;
   case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
      return FACTOR_ONE_MINUS_DST_ALPHA;
   case VK_BLEND_FACTOR_CONSTANT_COLOR:
      return FACTOR_CONSTANT_COLOR;
   case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
      return FACTOR_ONE_MINUS_CONSTANT_COLOR;
   case VK_BLEND_FACTOR_CONSTANT_ALPHA:
      return FACTOR_CONSTANT_ALPHA;
   case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
      return FACTOR_ONE_MINUS_CONSTANT_ALPHA;
   case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
      return FACTOR_SRC_ALPHA_SATURATE;
   case VK_BLEND_FACTOR_SRC1_COLOR:
      return FACTOR_SRC1_COLOR;
   case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
      return FACTOR_ONE_MINUS_SRC1_COLOR;
   case VK_BLEND_FACTOR_SRC1_ALPHA:
      return FACTOR_SRC1_ALPHA;
   case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
      return FACTOR_ONE_MINUS_SRC1_ALPHA;
   default:
      unreachable("invalid VkBlendFactor");
      return FACTOR_ZERO;
   }
}
static enum a3xx_rb_blend_opcode
tu6_blend_op(VkBlendOp op)
{
   switch (op) {
   case VK_BLEND_OP_ADD:
      return BLEND_DST_PLUS_SRC;
   case VK_BLEND_OP_SUBTRACT:
      return BLEND_SRC_MINUS_DST;
   case VK_BLEND_OP_REVERSE_SUBTRACT:
      return BLEND_DST_MINUS_SRC;
   case VK_BLEND_OP_MIN:
      return BLEND_MIN_DST_SRC;
   case VK_BLEND_OP_MAX:
      return BLEND_MAX_DST_SRC;
   default:
      unreachable("invalid VkBlendOp");
      return BLEND_DST_PLUS_SRC;
   }
}
static unsigned
tu_shader_nibo(const struct tu_shader *shader)
{
   /* Don't use ir3_shader_nibo(), because that would include declared but
    * unused storage images and SSBOs.
    */
   return shader->ssbo_map.num_desc + shader->image_map.num_desc;
}
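
/* Emit SP_VS_CTRL_REG0, SP_VS_CONFIG and HLSQ_VS_CNTL for the vertex shader
 * variant.
 */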
static void
tu6_emit_vs_config(struct tu_cs *cs, struct tu_shader *shader,
                   const struct ir3_shader_variant *vs)
{
   uint32_t sp_vs_ctrl =
      A6XX_SP_VS_CTRL_REG0_THREADSIZE(FOUR_QUADS) |
      A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(vs->info.max_reg + 1) |
      A6XX_SP_VS_CTRL_REG0_MERGEDREGS |
      A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(vs->branchstack);
   if (vs->need_pixlod)
      sp_vs_ctrl |= A6XX_SP_VS_CTRL_REG0_PIXLODENABLE;
   if (vs->need_fine_derivatives)
      sp_vs_ctrl |= A6XX_SP_VS_CTRL_REG0_DIFF_FINE;

   uint32_t sp_vs_config = A6XX_SP_VS_CONFIG_NTEX(shader->texture_map.num_desc) |
                           A6XX_SP_VS_CONFIG_NSAMP(shader->sampler_map.num_desc);
   if (vs->instrlen)
      sp_vs_config |= A6XX_SP_VS_CONFIG_ENABLED;

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_CTRL_REG0, 1);
   tu_cs_emit(cs, sp_vs_ctrl);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_CONFIG, 2);
   tu_cs_emit(cs, sp_vs_config);
   tu_cs_emit(cs, vs->instrlen);

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_VS_CNTL, 1);
   tu_cs_emit(cs, A6XX_HLSQ_VS_CNTL_CONSTLEN(align(vs->constlen, 4)) |
                  A6XX_HLSQ_VS_CNTL_ENABLED);
}

static void
tu6_emit_hs_config(struct tu_cs *cs, struct tu_shader *shader,
                   const struct ir3_shader_variant *hs)
{
   uint32_t sp_hs_config = 0;
   if (hs->instrlen)
      sp_hs_config |= A6XX_SP_HS_CONFIG_ENABLED;

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_HS_UNKNOWN_A831, 1);
   tu_cs_emit(cs, 0);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_HS_CONFIG, 2);
   tu_cs_emit(cs, sp_hs_config);
   tu_cs_emit(cs, hs->instrlen);

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_HS_CNTL, 1);
   tu_cs_emit(cs, A6XX_HLSQ_HS_CNTL_CONSTLEN(align(hs->constlen, 4)));
}

static void
tu6_emit_ds_config(struct tu_cs *cs, struct tu_shader *shader,
                   const struct ir3_shader_variant *ds)
{
   uint32_t sp_ds_config = 0;
   if (ds->instrlen)
      sp_ds_config |= A6XX_SP_DS_CONFIG_ENABLED;

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_DS_CONFIG, 2);
   tu_cs_emit(cs, sp_ds_config);
   tu_cs_emit(cs, ds->instrlen);

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_DS_CNTL, 1);
   tu_cs_emit(cs, A6XX_HLSQ_DS_CNTL_CONSTLEN(align(ds->constlen, 4)));
}

static void
tu6_emit_gs_config(struct tu_cs *cs, struct tu_shader *shader,
                   const struct ir3_shader_variant *gs)
{
   uint32_t sp_gs_config = 0;
   if (gs->instrlen)
      sp_gs_config |= A6XX_SP_GS_CONFIG_ENABLED;

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_UNKNOWN_A871, 1);
   tu_cs_emit(cs, 0);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_CONFIG, 2);
   tu_cs_emit(cs, sp_gs_config);
   tu_cs_emit(cs, gs->instrlen);

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_GS_CNTL, 1);
   tu_cs_emit(cs, A6XX_HLSQ_GS_CNTL_CONSTLEN(align(gs->constlen, 4)));
}

static void
tu6_emit_fs_config(struct tu_cs *cs, struct tu_shader *shader,
                   const struct ir3_shader_variant *fs)
{
   uint32_t sp_fs_ctrl =
      A6XX_SP_FS_CTRL_REG0_THREADSIZE(FOUR_QUADS) | 0x1000000 |
      A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(fs->info.max_reg + 1) |
      A6XX_SP_FS_CTRL_REG0_MERGEDREGS |
      A6XX_SP_FS_CTRL_REG0_BRANCHSTACK(fs->branchstack);
   if (fs->total_in > 0)
      sp_fs_ctrl |= A6XX_SP_FS_CTRL_REG0_VARYING;
   if (fs->need_pixlod)
      sp_fs_ctrl |= A6XX_SP_FS_CTRL_REG0_PIXLODENABLE;
   if (fs->need_fine_derivatives)
      sp_fs_ctrl |= A6XX_SP_FS_CTRL_REG0_DIFF_FINE;

   uint32_t sp_fs_config = 0;
   unsigned shader_nibo = 0;
   if (shader) {
      shader_nibo = tu_shader_nibo(shader);
      sp_fs_config = A6XX_SP_FS_CONFIG_NTEX(shader->texture_map.num_desc) |
                     A6XX_SP_FS_CONFIG_NSAMP(shader->sampler_map.num_desc) |
                     A6XX_SP_FS_CONFIG_NIBO(shader_nibo);
   }

   if (fs->instrlen)
      sp_fs_config |= A6XX_SP_FS_CONFIG_ENABLED;

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_CTRL_REG0, 1);
   tu_cs_emit(cs, sp_fs_ctrl);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_CONFIG, 2);
   tu_cs_emit(cs, sp_fs_config);
   tu_cs_emit(cs, fs->instrlen);

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_FS_CNTL, 1);
   tu_cs_emit(cs, A6XX_HLSQ_FS_CNTL_CONSTLEN(align(fs->constlen, 4)) |
                  A6XX_HLSQ_FS_CNTL_ENABLED);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_IBO_COUNT, 1);
   tu_cs_emit(cs, shader_nibo);
}

static void
tu6_emit_cs_config(struct tu_cs *cs, const struct tu_shader *shader,
                   const struct ir3_shader_variant *v)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 1);
   tu_cs_emit(cs, 0xff);

   unsigned constlen = align(v->constlen, 4);
   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CS_CNTL, 1);
   tu_cs_emit(cs, A6XX_HLSQ_CS_CNTL_CONSTLEN(constlen) |
                  A6XX_HLSQ_CS_CNTL_ENABLED);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_CS_CONFIG, 2);
   tu_cs_emit(cs, A6XX_SP_CS_CONFIG_ENABLED |
                  A6XX_SP_CS_CONFIG_NIBO(tu_shader_nibo(shader)) |
                  A6XX_SP_CS_CONFIG_NTEX(shader->texture_map.num_desc) |
                  A6XX_SP_CS_CONFIG_NSAMP(shader->sampler_map.num_desc));
   tu_cs_emit(cs, v->instrlen);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_CS_CTRL_REG0, 1);
   tu_cs_emit(cs, A6XX_SP_CS_CTRL_REG0_THREADSIZE(FOUR_QUADS) |
                  A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(v->info.max_reg + 1) |
                  A6XX_SP_CS_CTRL_REG0_MERGEDREGS |
                  A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(v->branchstack) |
                  COND(v->need_pixlod, A6XX_SP_CS_CTRL_REG0_PIXLODENABLE) |
                  COND(v->need_fine_derivatives, A6XX_SP_CS_CTRL_REG0_DIFF_FINE));

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_CS_UNKNOWN_A9B1, 1);
   tu_cs_emit(cs, 0x41);

   uint32_t local_invocation_id =
      ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
   uint32_t work_group_id =
      ir3_find_sysval_regid(v, SYSTEM_VALUE_WORK_GROUP_ID);

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CS_CNTL_0, 2);
   tu_cs_emit(cs,
              A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(work_group_id) |
              A6XX_HLSQ_CS_CNTL_0_UNK0(regid(63, 0)) |
              A6XX_HLSQ_CS_CNTL_0_UNK1(regid(63, 0)) |
              A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
   tu_cs_emit(cs, 0x2fc); /* HLSQ_CS_UNKNOWN_B998 */

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_CS_IBO_COUNT, 1);
   tu_cs_emit(cs, tu_shader_nibo(shader));
}

static void
tu6_emit_vs_system_values(struct tu_cs *cs,
                          const struct ir3_shader_variant *vs)
{
   const uint32_t vertexid_regid =
      ir3_find_sysval_regid(vs, SYSTEM_VALUE_VERTEX_ID);
   const uint32_t instanceid_regid =
      ir3_find_sysval_regid(vs, SYSTEM_VALUE_INSTANCE_ID);

   tu_cs_emit_pkt4(cs, REG_A6XX_VFD_CONTROL_1, 6);
   tu_cs_emit(cs, A6XX_VFD_CONTROL_1_REGID4VTX(vertexid_regid) |
                  A6XX_VFD_CONTROL_1_REGID4INST(instanceid_regid) |
                  0xfcfc0000);
   tu_cs_emit(cs, 0x0000fcfc); /* VFD_CONTROL_2 */
   tu_cs_emit(cs, 0xfcfcfcfc); /* VFD_CONTROL_3 */
   tu_cs_emit(cs, 0x000000fc); /* VFD_CONTROL_4 */
   tu_cs_emit(cs, 0x0000fcfc); /* VFD_CONTROL_5 */
   tu_cs_emit(cs, 0x00000000); /* VFD_CONTROL_6 */
}

/* Add any missing varyings needed for stream-out. Otherwise varyings not
 * used by fragment shader will be stripped out.
 */
static void
tu6_link_streamout(struct ir3_shader_linkage *l,
                   const struct ir3_shader_variant *v)
{
   const struct ir3_stream_output_info *info = &v->shader->stream_output;

   /*
    * First, any stream-out varyings not already in linkage map (ie. also
    * consumed by frag shader) need to be added:
    */
   for (unsigned i = 0; i < info->num_outputs; i++) {
      const struct ir3_stream_output *out = &info->output[i];
      unsigned compmask =
         (1 << (out->num_components + out->start_component)) - 1;
      unsigned k = out->register_index;
      unsigned idx, nextloc = 0;

      /* psize/pos need to be the last entries in linkage map, and will
       * get added link_stream_out, so skip over them:
       */
      if (v->outputs[k].slot == VARYING_SLOT_PSIZ ||
          v->outputs[k].slot == VARYING_SLOT_POS)
         continue;

      for (idx = 0; idx < l->cnt; idx++) {
         if (l->var[idx].regid == v->outputs[k].regid)
            break;
         nextloc = MAX2(nextloc, l->var[idx].loc + 4);
      }

      /* add if not already in linkage map: */
      if (idx == l->cnt)
         ir3_link_add(l, v->outputs[k].regid, compmask, nextloc);

      /* expand component-mask if needed, ie streaming out all components
       * but frag shader doesn't consume all components:
       */
      if (compmask & ~l->var[idx].compmask) {
         l->var[idx].compmask |= compmask;
         l->max_loc = MAX2(l->max_loc, l->var[idx].loc +
                           util_last_bit(l->var[idx].compmask));
      }
   }
}

static void
tu6_setup_streamout(const struct ir3_shader_variant *v,
                    struct ir3_shader_linkage *l, struct tu_streamout_state *tf)
{
   const struct ir3_stream_output_info *info = &v->shader->stream_output;

   memset(tf, 0, sizeof(*tf));

   tf->prog_count = align(l->max_loc, 2) / 2;

   debug_assert(tf->prog_count < ARRAY_SIZE(tf->prog));

   /* set stride info to the streamout state */
   for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++)
      tf->stride[i] = info->stride[i];

   for (unsigned i = 0; i < info->num_outputs; i++) {
      const struct ir3_stream_output *out = &info->output[i];
      unsigned k = out->register_index;
      unsigned idx;

      tf->ncomp[out->output_buffer] += out->num_components;

      /* linkage map sorted by order frag shader wants things, so
       * a bit less ideal here..
       */
      for (idx = 0; idx < l->cnt; idx++)
         if (l->var[idx].regid == v->outputs[k].regid)
            break;

      debug_assert(idx < l->cnt);

      for (unsigned j = 0; j < out->num_components; j++) {
         unsigned c   = j + out->start_component;
         unsigned loc = l->var[idx].loc + c;
         unsigned off = j + out->dst_offset; /* in dwords */

         if (loc & 1) {
            tf->prog[loc/2] |= A6XX_VPC_SO_PROG_B_EN |
                               A6XX_VPC_SO_PROG_B_BUF(out->output_buffer) |
                               A6XX_VPC_SO_PROG_B_OFF(off * 4);
         } else {
            tf->prog[loc/2] |= A6XX_VPC_SO_PROG_A_EN |
                               A6XX_VPC_SO_PROG_A_BUF(out->output_buffer) |
                               A6XX_VPC_SO_PROG_A_OFF(off * 4);
         }
      }
   }

   tf->vpc_so_buf_cntl = A6XX_VPC_SO_BUF_CNTL_ENABLE |
                         COND(tf->ncomp[0] > 0, A6XX_VPC_SO_BUF_CNTL_BUF0) |
                         COND(tf->ncomp[1] > 0, A6XX_VPC_SO_BUF_CNTL_BUF1) |
                         COND(tf->ncomp[2] > 0, A6XX_VPC_SO_BUF_CNTL_BUF2) |
                         COND(tf->ncomp[3] > 0, A6XX_VPC_SO_BUF_CNTL_BUF3);
}
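
/* Load `size` dwords of constant data into the given state block with a
 * direct CP_LOAD_STATE6 packet.
 */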
static void
tu6_emit_const(struct tu_cs *cs, uint32_t opcode, uint32_t base,
               enum a6xx_state_block block, uint32_t offset,
               uint32_t size, uint32_t *dwords) {
   assert(size % 4 == 0);

   tu_cs_emit_pkt7(cs, opcode, 3 + size);
   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(base) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(block) |
              CP_LOAD_STATE6_0_NUM_UNIT(size / 4));

   tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
   dwords = (uint32_t *)&((uint8_t *)dwords)[offset];

   tu_cs_emit_array(cs, dwords, size);
}

static void
tu6_emit_link_map(struct tu_cs *cs,
                  const struct ir3_shader_variant *producer,
                  const struct ir3_shader_variant *consumer) {
   const struct ir3_const_state *const_state = &consumer->shader->const_state;
   uint32_t base = const_state->offsets.primitive_map;
   uint32_t patch_locs[MAX_VARYING] = { }, num_loc;
   num_loc = ir3_link_geometry_stages(producer, consumer, patch_locs);
   int size = DIV_ROUND_UP(num_loc, 4);

   size = (MIN2(size + base, consumer->constlen) - base) * 4;

   tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, base, SB6_GS_SHADER, 0, size,
                  patch_locs);
}

static uint16_t
gl_primitive_to_tess(uint16_t primitive) {
   switch (primitive) {
   case GL_POINTS:
      return TESS_POINTS;
   case GL_LINE_STRIP:
      return TESS_LINES;
   case GL_TRIANGLE_STRIP:
      return TESS_CW_TRIS;
   default:
      unreachable("");
   }
}
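
/* Link the outputs of the last geometry stage (GS if present, otherwise VS)
 * against the fragment shader inputs and program the VPC/SP output maps.
 */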
static void
tu6_emit_vpc(struct tu_cs *cs,
             const struct ir3_shader_variant *vs,
             const struct ir3_shader_variant *gs,
             const struct ir3_shader_variant *fs,
             bool binning_pass,
             struct tu_streamout_state *tf)
{
   bool has_gs = gs->type != MESA_SHADER_NONE;
   const struct ir3_shader_variant *last_shader = has_gs ? gs : vs;
   struct ir3_shader_linkage linkage = { 0 };
   ir3_link_shaders(&linkage, last_shader, fs);

   if (last_shader->shader->stream_output.num_outputs)
      tu6_link_streamout(&linkage, last_shader);

   BITSET_DECLARE(vpc_var_enables, 128) = { 0 };
   for (uint32_t i = 0; i < linkage.cnt; i++) {
      const uint32_t comp_count = util_last_bit(linkage.var[i].compmask);
      for (uint32_t j = 0; j < comp_count; j++)
         BITSET_SET(vpc_var_enables, linkage.var[i].loc + j);
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VAR_DISABLE(0), 4);
   tu_cs_emit(cs, ~vpc_var_enables[0]);
   tu_cs_emit(cs, ~vpc_var_enables[1]);
   tu_cs_emit(cs, ~vpc_var_enables[2]);
   tu_cs_emit(cs, ~vpc_var_enables[3]);

   /* a6xx finds position/pointsize at the end */
   const uint32_t position_regid =
      ir3_find_output_regid(last_shader, VARYING_SLOT_POS);
   const uint32_t pointsize_regid =
      ir3_find_output_regid(last_shader, VARYING_SLOT_PSIZ);
   const uint32_t layer_regid = has_gs ?
      ir3_find_output_regid(gs, VARYING_SLOT_LAYER) : regid(63, 0);

   uint32_t pointsize_loc = 0xff, position_loc = 0xff, layer_loc = 0xff;
   if (layer_regid != regid(63, 0)) {
      layer_loc = linkage.max_loc;
      ir3_link_add(&linkage, layer_regid, 0x1, linkage.max_loc);
   }
   if (position_regid != regid(63, 0)) {
      position_loc = linkage.max_loc;
      ir3_link_add(&linkage, position_regid, 0xf, linkage.max_loc);
   }
   if (pointsize_regid != regid(63, 0)) {
      pointsize_loc = linkage.max_loc;
      ir3_link_add(&linkage, pointsize_regid, 0x1, linkage.max_loc);
   }

   if (last_shader->shader->stream_output.num_outputs)
      tu6_setup_streamout(last_shader, &linkage, tf);

   /* map outputs of the last shader to VPC */
   assert(linkage.cnt <= 32);
   const uint32_t sp_out_count = DIV_ROUND_UP(linkage.cnt, 2);
   const uint32_t sp_vpc_dst_count = DIV_ROUND_UP(linkage.cnt, 4);
   uint32_t sp_out[16];
   uint32_t sp_vpc_dst[8];
   for (uint32_t i = 0; i < linkage.cnt; i++) {
      ((uint16_t *) sp_out)[i] =
         A6XX_SP_VS_OUT_REG_A_REGID(linkage.var[i].regid) |
         A6XX_SP_VS_OUT_REG_A_COMPMASK(linkage.var[i].compmask);
      ((uint8_t *) sp_vpc_dst)[i] =
         A6XX_SP_VS_VPC_DST_REG_OUTLOC0(linkage.var[i].loc);
   }

   if (has_gs)
      tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_OUT_REG(0), sp_out_count);
   else
      tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_OUT_REG(0), sp_out_count);
   tu_cs_emit_array(cs, sp_out, sp_out_count);

   if (has_gs)
      tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_VPC_DST_REG(0), sp_vpc_dst_count);
   else
      tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_VPC_DST_REG(0), sp_vpc_dst_count);
   tu_cs_emit_array(cs, sp_vpc_dst, sp_vpc_dst_count);

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_CNTL_0, 1);
   tu_cs_emit(cs, A6XX_VPC_CNTL_0_NUMNONPOSVAR(fs->total_in) |
                  (fs->total_in > 0 ? A6XX_VPC_CNTL_0_VARYING : 0) |
                  0xff00ff00);

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_PACK, 1);
   tu_cs_emit(cs, A6XX_VPC_PACK_POSITIONLOC(position_loc) |
                  A6XX_VPC_PACK_PSIZELOC(pointsize_loc) |
                  A6XX_VPC_PACK_STRIDE_IN_VPC(linkage.max_loc));

   if (has_gs) {
      tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_CTRL_REG0, 1);
      tu_cs_emit(cs, A6XX_SP_GS_CTRL_REG0_THREADSIZE(TWO_QUADS) |
                     A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT(gs->info.max_reg + 1) |
                     A6XX_SP_GS_CTRL_REG0_BRANCHSTACK(gs->branchstack) |
                     COND(gs->need_pixlod, A6XX_SP_GS_CTRL_REG0_PIXLODENABLE));

      tu6_emit_link_map(cs, vs, gs);

      uint32_t primitive_regid =
         ir3_find_sysval_regid(gs, SYSTEM_VALUE_PRIMITIVE_ID);
      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_PACK_GS, 1);
      tu_cs_emit(cs, A6XX_VPC_PACK_GS_POSITIONLOC(position_loc) |
                     A6XX_VPC_PACK_GS_PSIZELOC(pointsize_loc) |
                     A6XX_VPC_PACK_GS_STRIDE_IN_VPC(linkage.max_loc));

      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_UNKNOWN_9105, 1);
      tu_cs_emit(cs, A6XX_VPC_UNKNOWN_9105_LAYERLOC(layer_loc) | 0xff00);

      tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_809C, 1);
      tu_cs_emit(cs, CONDREG(layer_regid,
                             A6XX_GRAS_UNKNOWN_809C_GS_WRITES_LAYER));

      uint32_t flags_regid = ir3_find_output_regid(gs,
            VARYING_SLOT_GS_VERTEX_FLAGS_IR3);

      tu_cs_emit_pkt4(cs, REG_A6XX_SP_PRIMITIVE_CNTL_GS, 1);
      tu_cs_emit(cs, A6XX_SP_PRIMITIVE_CNTL_GS_GSOUT(linkage.cnt) |
                     A6XX_SP_PRIMITIVE_CNTL_GS_FLAGS_REGID(flags_regid));

      tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_2, 1);
      tu_cs_emit(cs, A6XX_PC_PRIMITIVE_CNTL_2_STRIDE_IN_VPC(linkage.max_loc) |
            CONDREG(pointsize_regid, A6XX_PC_PRIMITIVE_CNTL_2_PSIZE) |
            CONDREG(layer_regid, A6XX_PC_PRIMITIVE_CNTL_2_LAYER) |
            CONDREG(primitive_regid, A6XX_PC_PRIMITIVE_CNTL_2_PRIMITIVE_ID));

      uint32_t vertices_out = gs->shader->nir->info.gs.vertices_out - 1;
      uint16_t output =
         gl_primitive_to_tess(gs->shader->nir->info.gs.output_primitive);
      uint32_t invocations = gs->shader->nir->info.gs.invocations - 1;
      tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_5, 1);
      tu_cs_emit(cs,
            A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT(vertices_out) |
            A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT(output) |
            A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS(invocations));

      tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_3, 1);
      tu_cs_emit(cs, 0);

      tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_8003, 1);
      tu_cs_emit(cs, 0);

      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_UNKNOWN_9100, 1);
      tu_cs_emit(cs, 0xff);

      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_UNKNOWN_9102, 1);
      tu_cs_emit(cs, 0xffff00);

      /* Size of per-primitive allocation in ldlw memory in vec4s. */
      uint32_t vec4_size =
         gs->shader->nir->info.gs.vertices_in *
         DIV_ROUND_UP(vs->shader->output_size, 4);
      tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 1);
      tu_cs_emit(cs, A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC(vec4_size));

      tu_cs_emit_pkt4(cs, REG_A6XX_PC_UNKNOWN_9B07, 1);
      tu_cs_emit(cs, 0);

      tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_UNKNOWN_A871, 1);
      tu_cs_emit(cs, vs->shader->output_size);
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_PRIMITIVE_CNTL, 1);
   tu_cs_emit(cs, A6XX_SP_PRIMITIVE_CNTL_VSOUT(linkage.cnt));

   tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_1, 1);
   tu_cs_emit(cs, A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC(linkage.max_loc) |
                  (last_shader->writes_psize ? A6XX_PC_PRIMITIVE_CNTL_1_PSIZE : 0));
}

static int
tu6_vpc_varying_mode(const struct ir3_shader_variant *fs,
                     uint32_t index,
                     uint8_t *interp_mode,
                     uint8_t *ps_repl_mode)
{
   enum
   {
      INTERP_SMOOTH = 0,
      INTERP_FLAT = 1,
      INTERP_ZERO = 2,
      INTERP_ONE = 3,
   };
   enum
   {
      PS_REPL_NONE = 0,
      PS_REPL_S = 1,
      PS_REPL_T = 2,
      PS_REPL_ONE_MINUS_T = 3,
   };

   const uint32_t compmask = fs->inputs[index].compmask;

   /* NOTE: varyings are packed, so if compmask is 0xb then first, second, and
    * fourth component occupy three consecutive varying slots
    */
   int shift = 0;
   *interp_mode = 0;
   *ps_repl_mode = 0;
   if (fs->inputs[index].slot == VARYING_SLOT_PNTC) {
      if (compmask & 0x1) {
         *ps_repl_mode |= PS_REPL_S << shift;
         shift += 2;
      }
      if (compmask & 0x2) {
         *ps_repl_mode |= PS_REPL_T << shift;
         shift += 2;
      }
      if (compmask & 0x4) {
         *interp_mode |= INTERP_ZERO << shift;
         shift += 2;
      }
      if (compmask & 0x8) {
         *interp_mode |= INTERP_ONE << 6;
         shift += 2;
      }
   } else if ((fs->inputs[index].interpolate == INTERP_MODE_FLAT) ||
              fs->inputs[index].rasterflat) {
      for (int i = 0; i < 4; i++) {
         if (compmask & (1 << i)) {
            *interp_mode |= INTERP_FLAT << shift;
            shift += 2;
         }
      }
   }

   return shift;
}

static void
tu6_emit_vpc_varying_modes(struct tu_cs *cs,
                           const struct ir3_shader_variant *fs,
                           bool binning_pass)
{
   uint32_t interp_modes[8] = { 0 };
   uint32_t ps_repl_modes[8] = { 0 };

   if (!binning_pass) {
      for (int i = -1;
           (i = ir3_next_varying(fs, i)) < (int) fs->inputs_count;) {

         /* get the mode for input i */
         uint8_t interp_mode;
         uint8_t ps_repl_mode;
         const int bits =
            tu6_vpc_varying_mode(fs, i, &interp_mode, &ps_repl_mode);

         /* OR the mode into the array */
         const uint32_t inloc = fs->inputs[i].inloc * 2;
         uint32_t n = inloc / 32;
         uint32_t shift = inloc % 32;
         interp_modes[n] |= interp_mode << shift;
         ps_repl_modes[n] |= ps_repl_mode << shift;
         if (shift + bits > 32) {
            n++;
            shift = 32 - shift;

            interp_modes[n] |= interp_mode >> shift;
            ps_repl_modes[n] |= ps_repl_mode >> shift;
         }
      }
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VARYING_INTERP_MODE(0), 8);
   tu_cs_emit_array(cs, interp_modes, 8);

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VARYING_PS_REPL_MODE(0), 8);
   tu_cs_emit_array(cs, ps_repl_modes, 8);
}
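
/* Program the FS input state: sysval register ids (frag coord, face, sample
 * id, barycentrics), sampler prefetch commands, and the HLSQ/GRAS/RB
 * controls derived from them.
 */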
static void
tu6_emit_fs_inputs(struct tu_cs *cs, const struct ir3_shader_variant *fs)
{
   uint32_t face_regid, coord_regid, zwcoord_regid, samp_id_regid;
   uint32_t ij_pix_regid, ij_samp_regid, ij_cent_regid, ij_size_regid;
   uint32_t smask_in_regid;

   bool sample_shading = fs->per_samp; /* TODO | key->sample_shading; */
   bool enable_varyings = fs->total_in > 0;

   samp_id_regid  = ir3_find_sysval_regid(fs, SYSTEM_VALUE_SAMPLE_ID);
   smask_in_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_SAMPLE_MASK_IN);
   face_regid     = ir3_find_sysval_regid(fs, SYSTEM_VALUE_FRONT_FACE);
   coord_regid    = ir3_find_sysval_regid(fs, SYSTEM_VALUE_FRAG_COORD);
   zwcoord_regid  = VALIDREG(coord_regid) ? coord_regid + 2 : regid(63, 0);
   ij_pix_regid   = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL);
   ij_samp_regid  = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE);
   ij_cent_regid  = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID);
   ij_size_regid  = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_PERSP_SIZE);

   if (fs->num_sampler_prefetch > 0) {
      assert(VALIDREG(ij_pix_regid));
      /* also, it seems like ij_pix is *required* to be r0.x */
      assert(ij_pix_regid == regid(0, 0));
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_PREFETCH_CNTL, 1 + fs->num_sampler_prefetch);
   tu_cs_emit(cs, A6XX_SP_FS_PREFETCH_CNTL_COUNT(fs->num_sampler_prefetch) |
                  A6XX_SP_FS_PREFETCH_CNTL_UNK4(regid(63, 0)) |
                  0x7000); /* XXX */
   for (int i = 0; i < fs->num_sampler_prefetch; i++) {
      const struct ir3_sampler_prefetch *prefetch = &fs->sampler_prefetch[i];
      tu_cs_emit(cs, A6XX_SP_FS_PREFETCH_CMD_SRC(prefetch->src) |
                     A6XX_SP_FS_PREFETCH_CMD_SAMP_ID(prefetch->samp_id) |
                     A6XX_SP_FS_PREFETCH_CMD_TEX_ID(prefetch->tex_id) |
                     A6XX_SP_FS_PREFETCH_CMD_DST(prefetch->dst) |
                     A6XX_SP_FS_PREFETCH_CMD_WRMASK(prefetch->wrmask) |
                     COND(prefetch->half_precision, A6XX_SP_FS_PREFETCH_CMD_HALF) |
                     A6XX_SP_FS_PREFETCH_CMD_CMD(prefetch->cmd));
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CONTROL_1_REG, 5);
   tu_cs_emit(cs, 0x7);
   tu_cs_emit(cs, A6XX_HLSQ_CONTROL_2_REG_FACEREGID(face_regid) |
                  A6XX_HLSQ_CONTROL_2_REG_SAMPLEID(samp_id_regid) |
                  A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(smask_in_regid) |
                  A6XX_HLSQ_CONTROL_2_REG_SIZE(ij_size_regid));
   tu_cs_emit(cs, A6XX_HLSQ_CONTROL_3_REG_BARY_IJ_PIXEL(ij_pix_regid) |
                  A6XX_HLSQ_CONTROL_3_REG_BARY_IJ_CENTROID(ij_cent_regid) |
                  0xfc00fc00);
   tu_cs_emit(cs, A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(coord_regid) |
                  A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(zwcoord_regid) |
                  A6XX_HLSQ_CONTROL_4_REG_BARY_IJ_PIXEL_PERSAMP(ij_samp_regid) |
                  0x0000fc00);
   tu_cs_emit(cs, 0xfc);

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_UNKNOWN_B980, 1);
   tu_cs_emit(cs, enable_varyings ? 3 : 1);

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 1);
   tu_cs_emit(cs, 0xff); /* XXX */

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CNTL, 1);
   tu_cs_emit(cs,
         CONDREG(ij_pix_regid, A6XX_GRAS_CNTL_VARYING) |
         CONDREG(ij_cent_regid, A6XX_GRAS_CNTL_CENTROID) |
         CONDREG(ij_samp_regid, A6XX_GRAS_CNTL_PERSAMP_VARYING) |
         COND(VALIDREG(ij_size_regid) && !sample_shading, A6XX_GRAS_CNTL_SIZE) |
         COND(VALIDREG(ij_size_regid) && sample_shading, A6XX_GRAS_CNTL_SIZE_PERSAMP) |
         COND(fs->frag_coord,
              A6XX_GRAS_CNTL_SIZE |
              A6XX_GRAS_CNTL_XCOORD |
              A6XX_GRAS_CNTL_YCOORD |
              A6XX_GRAS_CNTL_ZCOORD |
              A6XX_GRAS_CNTL_WCOORD) |
         COND(fs->frag_face, A6XX_GRAS_CNTL_SIZE));

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_RENDER_CONTROL0, 2);
   tu_cs_emit(cs,
         CONDREG(ij_pix_regid, A6XX_RB_RENDER_CONTROL0_VARYING) |
         CONDREG(ij_cent_regid, A6XX_RB_RENDER_CONTROL0_CENTROID) |
         CONDREG(ij_samp_regid, A6XX_RB_RENDER_CONTROL0_PERSAMP_VARYING) |
         COND(enable_varyings, A6XX_RB_RENDER_CONTROL0_UNK10) |
         COND(VALIDREG(ij_size_regid) && !sample_shading, A6XX_RB_RENDER_CONTROL0_SIZE) |
         COND(VALIDREG(ij_size_regid) && sample_shading, A6XX_RB_RENDER_CONTROL0_SIZE_PERSAMP) |
         COND(fs->frag_coord,
              A6XX_RB_RENDER_CONTROL0_SIZE |
              A6XX_RB_RENDER_CONTROL0_XCOORD |
              A6XX_RB_RENDER_CONTROL0_YCOORD |
              A6XX_RB_RENDER_CONTROL0_ZCOORD |
              A6XX_RB_RENDER_CONTROL0_WCOORD) |
         COND(fs->frag_face, A6XX_RB_RENDER_CONTROL0_SIZE));
   tu_cs_emit(cs,
         CONDREG(smask_in_regid, A6XX_RB_RENDER_CONTROL1_SAMPLEMASK) |
         CONDREG(samp_id_regid, A6XX_RB_RENDER_CONTROL1_SAMPLEID) |
         CONDREG(ij_size_regid, A6XX_RB_RENDER_CONTROL1_SIZE) |
         COND(fs->frag_face, A6XX_RB_RENDER_CONTROL1_FACENESS));

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_SAMPLE_CNTL, 1);
   tu_cs_emit(cs, COND(sample_shading, A6XX_RB_SAMPLE_CNTL_PER_SAMP_MODE));

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_8101, 1);
   tu_cs_emit(cs, COND(sample_shading, 0x6)); // XXX

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SAMPLE_CNTL, 1);
   tu_cs_emit(cs, COND(sample_shading, A6XX_GRAS_SAMPLE_CNTL_PER_SAMP_MODE));
}

static void
tu6_emit_fs_outputs(struct tu_cs *cs,
                    const struct ir3_shader_variant *fs,
                    uint32_t mrt_count)
{
   uint32_t smask_regid, posz_regid;

   posz_regid = ir3_find_output_regid(fs, FRAG_RESULT_DEPTH);
   smask_regid = ir3_find_output_regid(fs, FRAG_RESULT_SAMPLE_MASK);

   uint32_t fragdata_regid[8];
   if (fs->color0_mrt) {
      fragdata_regid[0] = ir3_find_output_regid(fs, FRAG_RESULT_COLOR);
      for (uint32_t i = 1; i < ARRAY_SIZE(fragdata_regid); i++)
         fragdata_regid[i] = fragdata_regid[0];
   } else {
      for (uint32_t i = 0; i < ARRAY_SIZE(fragdata_regid); i++)
         fragdata_regid[i] = ir3_find_output_regid(fs, FRAG_RESULT_DATA0 + i);
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_CNTL0, 2);
   tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(posz_regid) |
                  A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID(smask_regid) |
                  0xfc000000);
   tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL1_MRT(mrt_count));

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_REG(0), 8);
   for (uint32_t i = 0; i < ARRAY_SIZE(fragdata_regid); i++) {
      // TODO we could have a mix of half and full precision outputs,
      // we really need to figure out half-precision from IR3_REG_HALF
      tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_REG_REGID(fragdata_regid[i]) |
                     (false ? A6XX_SP_FS_OUTPUT_REG_HALF_PRECISION : 0));
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 2);
   tu_cs_emit(cs, COND(fs->writes_pos, A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z) |
                  COND(fs->writes_smask, A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_SAMPMASK));
   tu_cs_emit(cs, A6XX_RB_FS_OUTPUT_CNTL1_MRT(mrt_count));

   uint32_t gras_su_depth_plane_cntl = 0;
   uint32_t rb_depth_plane_cntl = 0;
   if (fs->no_earlyz || fs->writes_pos) {
      gras_su_depth_plane_cntl |= A6XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z;
      rb_depth_plane_cntl |= A6XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z;
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_DEPTH_PLANE_CNTL, 1);
   tu_cs_emit(cs, gras_su_depth_plane_cntl);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_PLANE_CNTL, 1);
   tu_cs_emit(cs, rb_depth_plane_cntl);
}
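
/* Point SP_xS_OBJ_START at the uploaded binary and load the shader
 * instructions, either indirectly from the BO or inline in the command
 * stream.
 */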
static void
tu6_emit_shader_object(struct tu_cs *cs,
                       gl_shader_stage stage,
                       const struct ir3_shader_variant *variant,
                       const struct tu_bo *binary_bo,
                       uint32_t binary_offset)
{
   uint16_t reg;
   uint8_t opcode;
   enum a6xx_state_block sb;
   switch (stage) {
   case MESA_SHADER_VERTEX:
      reg = REG_A6XX_SP_VS_OBJ_START_LO;
      opcode = CP_LOAD_STATE6_GEOM;
      sb = SB6_VS_SHADER;
      break;
   case MESA_SHADER_TESS_CTRL:
      reg = REG_A6XX_SP_HS_OBJ_START_LO;
      opcode = CP_LOAD_STATE6_GEOM;
      sb = SB6_HS_SHADER;
      break;
   case MESA_SHADER_TESS_EVAL:
      reg = REG_A6XX_SP_DS_OBJ_START_LO;
      opcode = CP_LOAD_STATE6_GEOM;
      sb = SB6_DS_SHADER;
      break;
   case MESA_SHADER_GEOMETRY:
      reg = REG_A6XX_SP_GS_OBJ_START_LO;
      opcode = CP_LOAD_STATE6_GEOM;
      sb = SB6_GS_SHADER;
      break;
   case MESA_SHADER_FRAGMENT:
      reg = REG_A6XX_SP_FS_OBJ_START_LO;
      opcode = CP_LOAD_STATE6_FRAG;
      sb = SB6_FS_SHADER;
      break;
   case MESA_SHADER_COMPUTE:
      reg = REG_A6XX_SP_CS_OBJ_START_LO;
      opcode = CP_LOAD_STATE6_FRAG;
      sb = SB6_CS_SHADER;
      break;
   default:
      unreachable("invalid gl_shader_stage");
      opcode = CP_LOAD_STATE6_GEOM;
      sb = SB6_VS_SHADER;
      break;
   }

   if (!variant->instrlen) {
      tu_cs_emit_pkt4(cs, reg, 2);
      tu_cs_emit_qw(cs, 0);
      return;
   }

   assert(variant->type == stage);

   const uint64_t binary_iova = binary_bo->iova + binary_offset;
   assert((binary_iova & 0xf) == 0);
   /* note: it looks like HW might try to read a few instructions beyond the instrlen size
    * of the shader. this could be a potential source of problems at some point
    * possibly this doesn't happen if shader iova is aligned enough (to 4k for example)
    */

   tu_cs_emit_pkt4(cs, reg, 2);
   tu_cs_emit_qw(cs, binary_iova);

   /* always indirect */
   const bool indirect = true;
   if (indirect) {
      tu_cs_emit_pkt7(cs, opcode, 3);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
                     CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
                     CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                     CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
                     CP_LOAD_STATE6_0_NUM_UNIT(variant->instrlen));
      tu_cs_emit_qw(cs, binary_iova);
   } else {
      const void *binary = binary_bo->map + binary_offset;

      tu_cs_emit_pkt7(cs, opcode, 3 + variant->info.sizedwords);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
                     CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
                     CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                     CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
                     CP_LOAD_STATE6_0_NUM_UNIT(variant->instrlen));
      tu_cs_emit_qw(cs, 0);
      tu_cs_emit_array(cs, binary, variant->info.sizedwords);
   }
}
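
/* Upload the variant's immediate constants, truncated to what fits within
 * its constlen.
 */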
static void
tu6_emit_immediates(struct tu_cs *cs, const struct ir3_shader_variant *v,
                    uint32_t opcode, enum a6xx_state_block block)
{
   /* dummy variant */
   if (!v->shader)
      return;

   const struct ir3_const_state *const_state = &v->shader->const_state;
   uint32_t base = const_state->offsets.immediate;
   int size = const_state->immediates_count;

   /* truncate size to avoid writing constants that shader
    * does not use:
    */
   size = MIN2(size + base, v->constlen) - base;

   if (size <= 0)
      return;

   tu_cs_emit_pkt7(cs, opcode, 3 + size * 4);
   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(base) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(block) |
              CP_LOAD_STATE6_0_NUM_UNIT(size));
   tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));

   for (unsigned i = 0; i < size; i++) {
      tu_cs_emit(cs, const_state->immediates[i].val[0]);
      tu_cs_emit(cs, const_state->immediates[i].val[1]);
      tu_cs_emit(cs, const_state->immediates[i].val[2]);
      tu_cs_emit(cs, const_state->immediates[i].val[3]);
   }
}

static void
tu6_emit_geometry_consts(struct tu_cs *cs,
                         const struct ir3_shader_variant *vs,
                         const struct ir3_shader_variant *gs) {
   unsigned num_vertices = gs->shader->nir->info.gs.vertices_in;

   uint32_t params[4] = {
      vs->shader->output_size * num_vertices * 4, /* primitive stride */
      vs->shader->output_size * 4,                /* vertex stride */
      0,
      0,
   };
   uint32_t vs_base = vs->shader->const_state.offsets.primitive_param;
   tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, vs_base, SB6_VS_SHADER, 0,
                  ARRAY_SIZE(params), params);

   uint32_t gs_base = gs->shader->const_state.offsets.primitive_param;
   tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, gs_base, SB6_GS_SHADER, 0,
                  ARRAY_SIZE(params), params);
}
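
/* Emit all shader-related state for either the binning or the rendering
 * pass.
 */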
static void
tu6_emit_program(struct tu_cs *cs,
                 const struct tu_pipeline_builder *builder,
                 const struct tu_bo *binary_bo,
                 bool binning_pass,
                 struct tu_streamout_state *tf)
{
   static const struct ir3_shader_variant dummy_variant = {
      .type = MESA_SHADER_NONE
   };
   assert(builder->shaders[MESA_SHADER_VERTEX]);
   const struct ir3_shader_variant *vs =
      &builder->shaders[MESA_SHADER_VERTEX]->variants[0];
   const struct ir3_shader_variant *hs =
      builder->shaders[MESA_SHADER_TESS_CTRL]
         ? &builder->shaders[MESA_SHADER_TESS_CTRL]->variants[0]
         : &dummy_variant;
   const struct ir3_shader_variant *ds =
      builder->shaders[MESA_SHADER_TESS_EVAL]
         ? &builder->shaders[MESA_SHADER_TESS_EVAL]->variants[0]
         : &dummy_variant;
   const struct ir3_shader_variant *gs =
      builder->shaders[MESA_SHADER_GEOMETRY]
         ? &builder->shaders[MESA_SHADER_GEOMETRY]->variants[0]
         : &dummy_variant;
   const struct ir3_shader_variant *fs =
      builder->shaders[MESA_SHADER_FRAGMENT]
         ? &builder->shaders[MESA_SHADER_FRAGMENT]->variants[0]
         : &dummy_variant;
   bool has_gs = gs->type != MESA_SHADER_NONE;

   if (binning_pass) {
      /* if we have streamout, use full VS in binning pass, as the
       * binning pass VS will have outputs on other than position/psize
       * stripped out:
       */
      if (vs->shader->stream_output.num_outputs == 0)
         vs = &builder->shaders[MESA_SHADER_VERTEX]->variants[1];
      fs = &dummy_variant;
   }

   tu6_emit_vs_config(cs, builder->shaders[MESA_SHADER_VERTEX], vs);
   tu6_emit_hs_config(cs, builder->shaders[MESA_SHADER_TESS_CTRL], hs);
   tu6_emit_ds_config(cs, builder->shaders[MESA_SHADER_TESS_EVAL], ds);
   tu6_emit_gs_config(cs, builder->shaders[MESA_SHADER_GEOMETRY], gs);
   tu6_emit_fs_config(cs, builder->shaders[MESA_SHADER_FRAGMENT], fs);

   tu6_emit_vs_system_values(cs, vs);
   tu6_emit_vpc(cs, vs, gs, fs, binning_pass, tf);
   tu6_emit_vpc_varying_modes(cs, fs, binning_pass);
   tu6_emit_fs_inputs(cs, fs);
   tu6_emit_fs_outputs(cs, fs, builder->color_attachment_count);

   tu6_emit_shader_object(cs, MESA_SHADER_VERTEX, vs, binary_bo,
      binning_pass ? builder->binning_vs_offset : builder->shader_offsets[MESA_SHADER_VERTEX]);
   if (has_gs)
      tu6_emit_shader_object(cs, MESA_SHADER_GEOMETRY, gs, binary_bo,
                             builder->shader_offsets[MESA_SHADER_GEOMETRY]);
   tu6_emit_shader_object(cs, MESA_SHADER_FRAGMENT, fs, binary_bo,
                          builder->shader_offsets[MESA_SHADER_FRAGMENT]);

   tu6_emit_immediates(cs, vs, CP_LOAD_STATE6_GEOM, SB6_VS_SHADER);
   if (has_gs) {
      tu6_emit_immediates(cs, gs, CP_LOAD_STATE6_GEOM, SB6_GS_SHADER);
      tu6_emit_geometry_consts(cs, vs, gs);
   }
   if (!binning_pass)
      tu6_emit_immediates(cs, fs, CP_LOAD_STATE6_FRAG, SB6_FS_SHADER);
}

static void
tu6_emit_vertex_input(struct tu_cs *cs,
                      const struct ir3_shader_variant *vs,
                      const VkPipelineVertexInputStateCreateInfo *vi_info,
                      uint8_t bindings[MAX_VERTEX_ATTRIBS],
                      uint16_t strides[MAX_VERTEX_ATTRIBS],
                      uint16_t offsets[MAX_VERTEX_ATTRIBS],
                      uint32_t *count)
{
   uint32_t vfd_decode_idx = 0;

   for (uint32_t i = 0; i < vs->inputs_count; i++) {
      if (vs->inputs[i].sysval || !vs->inputs[i].compmask)
         continue;

      const VkVertexInputAttributeDescription *vi_attr =
         tu_find_vertex_input_attribute(vi_info, vs->inputs[i].slot);
      const VkVertexInputBindingDescription *vi_binding =
         tu_find_vertex_input_binding(vi_info, vi_attr);
      assert(vi_attr && vi_binding);

      const struct tu_native_format format = tu6_format_vtx(vi_attr->format);

      uint32_t vfd_decode = A6XX_VFD_DECODE_INSTR_IDX(vfd_decode_idx) |
                            A6XX_VFD_DECODE_INSTR_FORMAT(format.fmt) |
                            A6XX_VFD_DECODE_INSTR_SWAP(format.swap) |
                            A6XX_VFD_DECODE_INSTR_UNK30;
      if (vi_binding->inputRate == VK_VERTEX_INPUT_RATE_INSTANCE)
         vfd_decode |= A6XX_VFD_DECODE_INSTR_INSTANCED;
      if (!vk_format_is_int(vi_attr->format))
         vfd_decode |= A6XX_VFD_DECODE_INSTR_FLOAT;

      const uint32_t vfd_decode_step_rate = 1;

      const uint32_t vfd_dest_cntl =
         A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK(vs->inputs[i].compmask) |
         A6XX_VFD_DEST_CNTL_INSTR_REGID(vs->inputs[i].regid);

      tu_cs_emit_pkt4(cs, REG_A6XX_VFD_DECODE(vfd_decode_idx), 2);
      tu_cs_emit(cs, vfd_decode);
      tu_cs_emit(cs, vfd_decode_step_rate);

      tu_cs_emit_pkt4(cs, REG_A6XX_VFD_DEST_CNTL(vfd_decode_idx), 1);
      tu_cs_emit(cs, vfd_dest_cntl);

      bindings[vfd_decode_idx] = vi_binding->binding;
      strides[vfd_decode_idx] = vi_binding->stride;
      offsets[vfd_decode_idx] = vi_attr->offset;

      vfd_decode_idx++;
      assert(vfd_decode_idx <= MAX_VERTEX_ATTRIBS);
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_VFD_CONTROL_0, 1);
   tu_cs_emit(
      cs, A6XX_VFD_CONTROL_0_VTXCNT(vfd_decode_idx) | (vfd_decode_idx << 8));

   *count = vfd_decode_idx;
}

static uint32_t
tu6_guardband_adj(uint32_t v)
{
   if (v > 256)
      return (uint32_t)(511.0 - 65.0 * (log2(v) - 8.0));
   else
      return 511;
}
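
/* Convert a VkViewport into GRAS_CL_VPORT scale/offset, the viewport
 * scissor, the guardband adjustment and the depth clamp range.
 */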
void
tu6_emit_viewport(struct tu_cs *cs, const VkViewport *viewport)
{
   float scales[3];
   float offsets[3];
   scales[0] = viewport->width / 2.0f;
   scales[1] = viewport->height / 2.0f;
   scales[2] = viewport->maxDepth - viewport->minDepth;
   offsets[0] = viewport->x + scales[0];
   offsets[1] = viewport->y + scales[1];
   offsets[2] = viewport->minDepth;

   VkOffset2D min;
   VkOffset2D max;
   min.x = (int32_t) viewport->x;
   max.x = (int32_t) ceilf(viewport->x + viewport->width);
   if (viewport->height >= 0.0f) {
      min.y = (int32_t) viewport->y;
      max.y = (int32_t) ceilf(viewport->y + viewport->height);
   } else {
      min.y = (int32_t)(viewport->y + viewport->height);
      max.y = (int32_t) ceilf(viewport->y);
   }
   /* the spec allows viewport->height to be 0.0f */
   if (min.y == max.y)
      max.y++;
   assert(min.x >= 0 && min.x < max.x);
   assert(min.y >= 0 && min.y < max.y);

   VkExtent2D guardband_adj;
   guardband_adj.width = tu6_guardband_adj(max.x - min.x);
   guardband_adj.height = tu6_guardband_adj(max.y - min.y);

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CL_VPORT_XOFFSET_0, 6);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_XOFFSET_0(offsets[0]).value);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_XSCALE_0(scales[0]).value);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_YOFFSET_0(offsets[1]).value);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_YSCALE_0(scales[1]).value);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_ZOFFSET_0(offsets[2]).value);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_ZSCALE_0(scales[2]).value);

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0, 2);
   tu_cs_emit(cs, A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(min.x) |
                  A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(min.y));
   tu_cs_emit(cs, A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(max.x - 1) |
                  A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(max.y - 1));

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ, 1);
   tu_cs_emit(cs,
              A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(guardband_adj.width) |
              A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(guardband_adj.height));

   float z_clamp_min = MIN2(viewport->minDepth, viewport->maxDepth);
   float z_clamp_max = MAX2(viewport->minDepth, viewport->maxDepth);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_CL_Z_CLAMP_MIN(z_clamp_min),
                   A6XX_GRAS_CL_Z_CLAMP_MAX(z_clamp_max));

   tu_cs_emit_regs(cs,
                   A6XX_RB_Z_CLAMP_MIN(z_clamp_min),
                   A6XX_RB_Z_CLAMP_MAX(z_clamp_max));
}

void
tu6_emit_scissor(struct tu_cs *cs, const VkRect2D *scissor)
{
   const VkOffset2D min = scissor->offset;
   const VkOffset2D max = {
      scissor->offset.x + scissor->extent.width,
      scissor->offset.y + scissor->extent.height,
   };

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0, 2);
   tu_cs_emit(cs, A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(min.x) |
                  A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(min.y));
   tu_cs_emit(cs, A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(max.x - 1) |
                  A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(max.y - 1));
}

void
tu6_emit_gras_unknowns(struct tu_cs *cs)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_8001, 1);
   tu_cs_emit(cs, 0x0);
   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_LAYER_CNTL, 1);
   tu_cs_emit(cs, 0x0);
}

void
tu6_emit_point_size(struct tu_cs *cs)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_POINT_MINMAX, 2);
   tu_cs_emit(cs, A6XX_GRAS_SU_POINT_MINMAX_MIN(1.0f / 16.0f) |
                  A6XX_GRAS_SU_POINT_MINMAX_MAX(4092.0f));
   tu_cs_emit(cs, A6XX_GRAS_SU_POINT_SIZE(1.0f).value);
}

static uint32_t
tu6_gras_su_cntl(const VkPipelineRasterizationStateCreateInfo *rast_info,
                 VkSampleCountFlagBits samples)
{
   uint32_t gras_su_cntl = 0;

   if (rast_info->cullMode & VK_CULL_MODE_FRONT_BIT)
      gras_su_cntl |= A6XX_GRAS_SU_CNTL_CULL_FRONT;
   if (rast_info->cullMode & VK_CULL_MODE_BACK_BIT)
      gras_su_cntl |= A6XX_GRAS_SU_CNTL_CULL_BACK;

   if (rast_info->frontFace == VK_FRONT_FACE_CLOCKWISE)
      gras_su_cntl |= A6XX_GRAS_SU_CNTL_FRONT_CW;

   /* don't set A6XX_GRAS_SU_CNTL_LINEHALFWIDTH */

   if (rast_info->depthBiasEnable)
      gras_su_cntl |= A6XX_GRAS_SU_CNTL_POLY_OFFSET;

   if (samples > VK_SAMPLE_COUNT_1_BIT)
      gras_su_cntl |= A6XX_GRAS_SU_CNTL_MSAA_ENABLE;

   return gras_su_cntl;
}

void
tu6_emit_gras_su_cntl(struct tu_cs *cs,
                      uint32_t gras_su_cntl,
                      float line_width)
{
   assert((gras_su_cntl & A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK) == 0);
   gras_su_cntl |= A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(line_width / 2.0f);

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_CNTL, 1);
   tu_cs_emit(cs, gras_su_cntl);
}

void
tu6_emit_depth_bias(struct tu_cs *cs,
                    float constant_factor,
                    float clamp,
                    float slope_factor)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE, 3);
   tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_SCALE(slope_factor).value);
   tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_OFFSET(constant_factor).value);
   tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(clamp).value);
}

void
tu6_emit_alpha_control_disable(struct tu_cs *cs)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_RB_ALPHA_CONTROL, 1);
   tu_cs_emit(cs, 0);
}
*cs
,
1594 const VkPipelineDepthStencilStateCreateInfo
*ds_info
,
1595 const VkPipelineRasterizationStateCreateInfo
*rast_info
)
1597 assert(!ds_info
->depthBoundsTestEnable
);
1599 uint32_t rb_depth_cntl
= 0;
1600 if (ds_info
->depthTestEnable
) {
1602 A6XX_RB_DEPTH_CNTL_Z_ENABLE
|
1603 A6XX_RB_DEPTH_CNTL_ZFUNC(tu6_compare_func(ds_info
->depthCompareOp
)) |
1604 A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE
;
1606 if (rast_info
->depthClampEnable
)
1607 rb_depth_cntl
|= A6XX_RB_DEPTH_CNTL_Z_CLAMP_ENABLE
;
1609 if (ds_info
->depthWriteEnable
)
1610 rb_depth_cntl
|= A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE
;
1613 tu_cs_emit_pkt4(cs
, REG_A6XX_RB_DEPTH_CNTL
, 1);
1614 tu_cs_emit(cs
, rb_depth_cntl
);

void
tu6_emit_stencil_control(struct tu_cs *cs,
                         const VkPipelineDepthStencilStateCreateInfo *ds_info)
{
   uint32_t rb_stencil_control = 0;
   if (ds_info->stencilTestEnable) {
      const VkStencilOpState *front = &ds_info->front;
      const VkStencilOpState *back = &ds_info->back;
      rb_stencil_control |=
         A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE |
         A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF |
         A6XX_RB_STENCIL_CONTROL_STENCIL_READ |
         A6XX_RB_STENCIL_CONTROL_FUNC(tu6_compare_func(front->compareOp)) |
         A6XX_RB_STENCIL_CONTROL_FAIL(tu6_stencil_op(front->failOp)) |
         A6XX_RB_STENCIL_CONTROL_ZPASS(tu6_stencil_op(front->passOp)) |
         A6XX_RB_STENCIL_CONTROL_ZFAIL(tu6_stencil_op(front->depthFailOp)) |
         A6XX_RB_STENCIL_CONTROL_FUNC_BF(tu6_compare_func(back->compareOp)) |
         A6XX_RB_STENCIL_CONTROL_FAIL_BF(tu6_stencil_op(back->failOp)) |
         A6XX_RB_STENCIL_CONTROL_ZPASS_BF(tu6_stencil_op(back->passOp)) |
         A6XX_RB_STENCIL_CONTROL_ZFAIL_BF(tu6_stencil_op(back->depthFailOp));
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCIL_CONTROL, 1);
   tu_cs_emit(cs, rb_stencil_control);
}

void
tu6_emit_stencil_compare_mask(struct tu_cs *cs, uint32_t front, uint32_t back)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCILMASK, 1);
   tu_cs_emit(
      cs, A6XX_RB_STENCILMASK_MASK(front) | A6XX_RB_STENCILMASK_BFMASK(back));
}

void
tu6_emit_stencil_write_mask(struct tu_cs *cs, uint32_t front, uint32_t back)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCILWRMASK, 1);
   tu_cs_emit(cs, A6XX_RB_STENCILWRMASK_WRMASK(front) |
                  A6XX_RB_STENCILWRMASK_BFWRMASK(back));
}

void
tu6_emit_stencil_reference(struct tu_cs *cs, uint32_t front, uint32_t back)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCILREF, 1);
   tu_cs_emit(cs,
              A6XX_RB_STENCILREF_REF(front) | A6XX_RB_STENCILREF_BFREF(back));
}

static uint32_t
tu6_rb_mrt_blend_control(const VkPipelineColorBlendAttachmentState *att,
                         bool has_alpha)
{
   const enum a3xx_rb_blend_opcode color_op = tu6_blend_op(att->colorBlendOp);
   const enum adreno_rb_blend_factor src_color_factor = tu6_blend_factor(
      has_alpha ? att->srcColorBlendFactor
                : tu_blend_factor_no_dst_alpha(att->srcColorBlendFactor));
   const enum adreno_rb_blend_factor dst_color_factor = tu6_blend_factor(
      has_alpha ? att->dstColorBlendFactor
                : tu_blend_factor_no_dst_alpha(att->dstColorBlendFactor));
   const enum a3xx_rb_blend_opcode alpha_op = tu6_blend_op(att->alphaBlendOp);
   const enum adreno_rb_blend_factor src_alpha_factor =
      tu6_blend_factor(att->srcAlphaBlendFactor);
   const enum adreno_rb_blend_factor dst_alpha_factor =
      tu6_blend_factor(att->dstAlphaBlendFactor);

   return A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(src_color_factor) |
          A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(color_op) |
          A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(dst_color_factor) |
          A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(src_alpha_factor) |
          A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(alpha_op) |
          A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(dst_alpha_factor);
}

static uint32_t
tu6_rb_mrt_control(const VkPipelineColorBlendAttachmentState *att,
                   uint32_t rb_mrt_control_rop,
                   bool is_int,
                   bool has_alpha)
{
   uint32_t rb_mrt_control =
      A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE(att->colorWriteMask);

   /* ignore blending and logic op for integer attachments */
   if (is_int) {
      rb_mrt_control |= A6XX_RB_MRT_CONTROL_ROP_CODE(ROP_COPY);
      return rb_mrt_control;
   }

   rb_mrt_control |= rb_mrt_control_rop;

   if (att->blendEnable) {
      rb_mrt_control |= A6XX_RB_MRT_CONTROL_BLEND;

      if (has_alpha)
         rb_mrt_control |= A6XX_RB_MRT_CONTROL_BLEND2;
   }

   return rb_mrt_control;
}

static void
tu6_emit_rb_mrt_controls(struct tu_cs *cs,
                         const VkPipelineColorBlendStateCreateInfo *blend_info,
                         const VkFormat attachment_formats[MAX_RTS],
                         uint32_t *blend_enable_mask)
{
   *blend_enable_mask = 0;

   bool rop_reads_dst = false;
   uint32_t rb_mrt_control_rop = 0;
   if (blend_info->logicOpEnable) {
      rop_reads_dst = tu_logic_op_reads_dst(blend_info->logicOp);
      rb_mrt_control_rop =
         A6XX_RB_MRT_CONTROL_ROP_ENABLE |
         A6XX_RB_MRT_CONTROL_ROP_CODE(tu6_rop(blend_info->logicOp));
   }

   for (uint32_t i = 0; i < blend_info->attachmentCount; i++) {
      const VkPipelineColorBlendAttachmentState *att =
         &blend_info->pAttachments[i];
      const VkFormat format = attachment_formats[i];

      uint32_t rb_mrt_control = 0;
      uint32_t rb_mrt_blend_control = 0;
      if (format != VK_FORMAT_UNDEFINED) {
         const bool is_int = vk_format_is_int(format);
         const bool has_alpha = vk_format_has_alpha(format);

         rb_mrt_control =
            tu6_rb_mrt_control(att, rb_mrt_control_rop, is_int, has_alpha);
         rb_mrt_blend_control = tu6_rb_mrt_blend_control(att, has_alpha);

         if (att->blendEnable || rop_reads_dst)
            *blend_enable_mask |= 1 << i;
      }

      tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_CONTROL(i), 2);
      tu_cs_emit(cs, rb_mrt_control);
      tu_cs_emit(cs, rb_mrt_blend_control);
   }
}

static void
tu6_emit_blend_control(struct tu_cs *cs,
                       uint32_t blend_enable_mask,
                       const VkPipelineMultisampleStateCreateInfo *msaa_info)
{
   assert(!msaa_info->alphaToOneEnable);

   uint32_t sp_blend_cntl = A6XX_SP_BLEND_CNTL_UNK8;
   if (blend_enable_mask)
      sp_blend_cntl |= A6XX_SP_BLEND_CNTL_ENABLED;
   if (msaa_info->alphaToCoverageEnable)
      sp_blend_cntl |= A6XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE;

   const uint32_t sample_mask =
      msaa_info->pSampleMask ? *msaa_info->pSampleMask
                             : ((1 << msaa_info->rasterizationSamples) - 1);

   /* set A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND only when enabled? */
   uint32_t rb_blend_cntl =
      A6XX_RB_BLEND_CNTL_ENABLE_BLEND(blend_enable_mask) |
      A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND |
      A6XX_RB_BLEND_CNTL_SAMPLE_MASK(sample_mask);
   if (msaa_info->alphaToCoverageEnable)
      rb_blend_cntl |= A6XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE;

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_BLEND_CNTL, 1);
   tu_cs_emit(cs, sp_blend_cntl);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLEND_CNTL, 1);
   tu_cs_emit(cs, rb_blend_cntl);
}
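
/* Worked example for the default sample mask above: VkSampleCountFlagBits
 * values match the sample count for the supported power-of-two counts, so
 * with rasterizationSamples == VK_SAMPLE_COUNT_4_BIT (4) and no pSampleMask,
 * sample_mask = (1 << 4) - 1 = 0xf, i.e. all four samples enabled.
 */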

static void
tu6_emit_blend_constants(struct tu_cs *cs, const float constants[4])
{
   tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLEND_RED_F32, 4);
   tu_cs_emit_array(cs, (const uint32_t *) constants, 4);
}

static VkResult
tu_pipeline_create(struct tu_device *dev,
                   const VkAllocationCallbacks *pAllocator,
                   struct tu_pipeline **out_pipeline)
{
   struct tu_pipeline *pipeline =
      vk_zalloc2(&dev->alloc, pAllocator, sizeof(*pipeline), 8,
                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!pipeline)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   tu_cs_init(&pipeline->cs, dev, TU_CS_MODE_SUB_STREAM, 2048);

   /* reserve the space now such that tu_cs_begin_sub_stream never fails */
   VkResult result = tu_cs_reserve_space(&pipeline->cs, 2048);
   if (result != VK_SUCCESS) {
      vk_free2(&dev->alloc, pAllocator, pipeline);
      return result;
   }

   *out_pipeline = pipeline;

   return VK_SUCCESS;
}
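
/* The 2048-dword reservation above is what lets every later
 * tu_cs_begin_sub_stream()/tu_cs_end_sub_stream() pair in this file carve
 * its state IB out of the same BO without failing.  The per-state sizes
 * requested below (512 for the shader program, 21 for viewport/scissor,
 * 20 for rasterization, 12 for depth/stencil, MAX_RTS * 3 + 9 for blend,
 * MAX_VERTEX_ATTRIBS * 5 + 2 per vertex-input variant) are expected to fit
 * within that reservation, which is also why tu_pipeline_builder_build()
 * can assert that the CS still consists of a single BO at the end.
 */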

static VkResult
tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder)
{
   const VkPipelineShaderStageCreateInfo *stage_infos[MESA_SHADER_STAGES] = {
      NULL
   };
   for (uint32_t i = 0; i < builder->create_info->stageCount; i++) {
      gl_shader_stage stage =
         tu_shader_stage(builder->create_info->pStages[i].stage);
      stage_infos[stage] = &builder->create_info->pStages[i];
   }

   struct tu_shader_compile_options options;
   tu_shader_compile_options_init(&options, builder->create_info);

   /* compile shaders in reverse order */
   struct tu_shader *next_stage_shader = NULL;
   for (gl_shader_stage stage = MESA_SHADER_STAGES - 1;
        stage > MESA_SHADER_NONE; stage--) {
      const VkPipelineShaderStageCreateInfo *stage_info = stage_infos[stage];
      if (!stage_info)
         continue;

      struct tu_shader *shader =
         tu_shader_create(builder->device, stage, stage_info, builder->layout,
                          builder->alloc);
      if (!shader)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      VkResult result =
         tu_shader_compile(builder->device, shader, next_stage_shader,
                           &options, builder->alloc);
      if (result != VK_SUCCESS)
         return result;

      builder->shaders[stage] = shader;
      builder->shader_offsets[stage] = builder->shader_total_size;
      builder->shader_total_size +=
         sizeof(uint32_t) * shader->variants[0].info.sizedwords;

      next_stage_shader = shader;
   }

   if (builder->shaders[MESA_SHADER_VERTEX]->has_binning_pass) {
      const struct tu_shader *vs = builder->shaders[MESA_SHADER_VERTEX];
      const struct ir3_shader_variant *variant;

      if (vs->ir3_shader.stream_output.num_outputs)
         variant = &vs->variants[0];
      else
         variant = &vs->variants[1];

      builder->binning_vs_offset = builder->shader_total_size;
      builder->shader_total_size +=
         sizeof(uint32_t) * variant->info.sizedwords;
   }

   return VK_SUCCESS;
}
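
/* A note on the reverse-order loop above: tu_shader_compile() is handed
 * next_stage_shader, so walking from the last enabled stage back to the
 * vertex shader means each stage is compiled with its consumer already
 * compiled and available for cross-stage linking.  The shader_offsets[] /
 * shader_total_size bookkeeping lays the variants out back to back so that
 * tu_pipeline_builder_upload_shaders() can copy them into one BO, with the
 * binning VS variant appended at binning_vs_offset.
 */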

static VkResult
tu_pipeline_builder_upload_shaders(struct tu_pipeline_builder *builder,
                                   struct tu_pipeline *pipeline)
{
   struct tu_bo *bo = &pipeline->program.binary_bo;

   VkResult result =
      tu_bo_init_new(builder->device, bo, builder->shader_total_size);
   if (result != VK_SUCCESS)
      return result;

   result = tu_bo_map(builder->device, bo);
   if (result != VK_SUCCESS)
      return result;

   for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++) {
      const struct tu_shader *shader = builder->shaders[i];
      if (!shader)
         continue;

      memcpy(bo->map + builder->shader_offsets[i], shader->binary,
             sizeof(uint32_t) * shader->variants[0].info.sizedwords);
   }

   if (builder->shaders[MESA_SHADER_VERTEX]->has_binning_pass) {
      const struct tu_shader *vs = builder->shaders[MESA_SHADER_VERTEX];
      const struct ir3_shader_variant *variant;
      const void *bin;

      if (vs->ir3_shader.stream_output.num_outputs) {
         variant = &vs->variants[0];
         bin = vs->binary;
      } else {
         variant = &vs->variants[1];
         bin = vs->binning_binary;
      }

      memcpy(bo->map + builder->binning_vs_offset, bin,
             sizeof(uint32_t) * variant->info.sizedwords);
   }

   return VK_SUCCESS;
}

static void
tu_pipeline_builder_parse_dynamic(struct tu_pipeline_builder *builder,
                                  struct tu_pipeline *pipeline)
{
   const VkPipelineDynamicStateCreateInfo *dynamic_info =
      builder->create_info->pDynamicState;

   if (!dynamic_info)
      return;

   for (uint32_t i = 0; i < dynamic_info->dynamicStateCount; i++) {
      pipeline->dynamic_state.mask |=
         tu_dynamic_state_bit(dynamic_info->pDynamicStates[i]);
   }
}

static void
tu_pipeline_set_linkage(struct tu_program_descriptor_linkage *link,
                        struct tu_shader *shader,
                        struct ir3_shader_variant *v)
{
   link->ubo_state = v->shader->ubo_state;
   link->const_state = v->shader->const_state;
   link->constlen = v->constlen;
   link->texture_map = shader->texture_map;
   link->sampler_map = shader->sampler_map;
   link->ubo_map = shader->ubo_map;
   link->ssbo_map = shader->ssbo_map;
   link->image_map = shader->image_map;
}

static void
tu_pipeline_builder_parse_shader_stages(struct tu_pipeline_builder *builder,
                                        struct tu_pipeline *pipeline)
{
   struct tu_cs prog_cs;
   tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
   tu6_emit_program(&prog_cs, builder, &pipeline->program.binary_bo, false, &pipeline->streamout);
   pipeline->program.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);

   tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
   tu6_emit_program(&prog_cs, builder, &pipeline->program.binary_bo, true, &pipeline->streamout);
   pipeline->program.binning_state_ib =
      tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);

   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      if (!builder->shaders[i])
         continue;

      tu_pipeline_set_linkage(&pipeline->program.link[i],
                              builder->shaders[i],
                              &builder->shaders[i]->variants[0]);
   }
}

static void
tu_pipeline_builder_parse_vertex_input(struct tu_pipeline_builder *builder,
                                       struct tu_pipeline *pipeline)
{
   const VkPipelineVertexInputStateCreateInfo *vi_info =
      builder->create_info->pVertexInputState;
   const struct tu_shader *vs = builder->shaders[MESA_SHADER_VERTEX];

   struct tu_cs vi_cs;
   tu_cs_begin_sub_stream(&pipeline->cs,
                          MAX_VERTEX_ATTRIBS * 5 + 2, &vi_cs);
   tu6_emit_vertex_input(&vi_cs, &vs->variants[0], vi_info,
                         pipeline->vi.bindings, pipeline->vi.strides,
                         pipeline->vi.offsets, &pipeline->vi.count);
   pipeline->vi.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &vi_cs);

   if (vs->has_binning_pass) {
      tu_cs_begin_sub_stream(&pipeline->cs,
                             MAX_VERTEX_ATTRIBS * 5 + 2, &vi_cs);
      tu6_emit_vertex_input(
         &vi_cs, &vs->variants[1], vi_info, pipeline->vi.binning_bindings,
         pipeline->vi.binning_strides, pipeline->vi.binning_offsets,
         &pipeline->vi.binning_count);
      pipeline->vi.binning_state_ib =
         tu_cs_end_sub_stream(&pipeline->cs, &vi_cs);
   }
}

static void
tu_pipeline_builder_parse_input_assembly(struct tu_pipeline_builder *builder,
                                         struct tu_pipeline *pipeline)
{
   const VkPipelineInputAssemblyStateCreateInfo *ia_info =
      builder->create_info->pInputAssemblyState;

   pipeline->ia.primtype = tu6_primtype(ia_info->topology);
   pipeline->ia.primitive_restart = ia_info->primitiveRestartEnable;
}

static void
tu_pipeline_builder_parse_viewport(struct tu_pipeline_builder *builder,
                                   struct tu_pipeline *pipeline)
{
   /* The spec says:
    *
    *    pViewportState is a pointer to an instance of the
    *    VkPipelineViewportStateCreateInfo structure, and is ignored if the
    *    pipeline has rasterization disabled.
    *
    * We leave the relevant registers stale in that case.
    */
   if (builder->rasterizer_discard)
      return;

   const VkPipelineViewportStateCreateInfo *vp_info =
      builder->create_info->pViewportState;

   struct tu_cs vp_cs;
   tu_cs_begin_sub_stream(&pipeline->cs, 21, &vp_cs);

   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_VIEWPORT)) {
      assert(vp_info->viewportCount == 1);
      tu6_emit_viewport(&vp_cs, vp_info->pViewports);
   }

   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_SCISSOR)) {
      assert(vp_info->scissorCount == 1);
      tu6_emit_scissor(&vp_cs, vp_info->pScissors);
   }

   pipeline->vp.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &vp_cs);
}

static void
tu_pipeline_builder_parse_rasterization(struct tu_pipeline_builder *builder,
                                        struct tu_pipeline *pipeline)
{
   const VkPipelineRasterizationStateCreateInfo *rast_info =
      builder->create_info->pRasterizationState;

   assert(rast_info->polygonMode == VK_POLYGON_MODE_FILL);

   struct tu_cs rast_cs;
   tu_cs_begin_sub_stream(&pipeline->cs, 20, &rast_cs);

   tu_cs_emit_regs(&rast_cs,
                   A6XX_GRAS_CL_CNTL(
                      .znear_clip_disable = rast_info->depthClampEnable,
                      .zfar_clip_disable = rast_info->depthClampEnable,
                      .unk5 = rast_info->depthClampEnable,
                      .zero_gb_scale_z = 1,
                      .vp_clip_code_ignore = 1));
   /* move to hw ctx init? */
   tu6_emit_gras_unknowns(&rast_cs);
   tu6_emit_point_size(&rast_cs);

   const uint32_t gras_su_cntl =
      tu6_gras_su_cntl(rast_info, builder->samples);

   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_LINE_WIDTH))
      tu6_emit_gras_su_cntl(&rast_cs, gras_su_cntl, rast_info->lineWidth);

   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_DEPTH_BIAS)) {
      tu6_emit_depth_bias(&rast_cs, rast_info->depthBiasConstantFactor,
                          rast_info->depthBiasClamp,
                          rast_info->depthBiasSlopeFactor);
   }

   pipeline->rast.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &rast_cs);

   pipeline->rast.gras_su_cntl = gras_su_cntl;
}
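
/* Note on the GRAS_CL_CNTL programming above: depthClampEnable is expressed
 * by turning off near/far Z clipping (znear_clip_disable / zfar_clip_disable,
 * plus the not-yet-understood unk5 bit), which appears to be how depth clamp
 * is realized on a6xx; zero_gb_scale_z and vp_clip_code_ignore are set
 * unconditionally for every pipeline.
 */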

static void
tu_pipeline_builder_parse_depth_stencil(struct tu_pipeline_builder *builder,
                                        struct tu_pipeline *pipeline)
{
   /* The spec says:
    *
    *    pDepthStencilState is a pointer to an instance of the
    *    VkPipelineDepthStencilStateCreateInfo structure, and is ignored if
    *    the pipeline has rasterization disabled or if the subpass of the
    *    render pass the pipeline is created against does not use a
    *    depth/stencil attachment.
    *
    * We disable both depth and stencil tests in those cases.
    */
   static const VkPipelineDepthStencilStateCreateInfo dummy_ds_info;
   const VkPipelineDepthStencilStateCreateInfo *ds_info =
      builder->use_depth_stencil_attachment
         ? builder->create_info->pDepthStencilState
         : &dummy_ds_info;

   struct tu_cs ds_cs;
   tu_cs_begin_sub_stream(&pipeline->cs, 12, &ds_cs);

   /* move to hw ctx init? */
   tu6_emit_alpha_control_disable(&ds_cs);

   tu6_emit_depth_control(&ds_cs, ds_info, builder->create_info->pRasterizationState);
   tu6_emit_stencil_control(&ds_cs, ds_info);

   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_COMPARE_MASK)) {
      tu6_emit_stencil_compare_mask(&ds_cs, ds_info->front.compareMask,
                                    ds_info->back.compareMask);
   }
   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_WRITE_MASK)) {
      tu6_emit_stencil_write_mask(&ds_cs, ds_info->front.writeMask,
                                  ds_info->back.writeMask);
   }
   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_REFERENCE)) {
      tu6_emit_stencil_reference(&ds_cs, ds_info->front.reference,
                                 ds_info->back.reference);
   }

   pipeline->ds.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &ds_cs);
}
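
/* The zero-initialized dummy_ds_info above is what implements "disable both
 * depth and stencil tests": as a static const structure it is all zeros, so
 * depthTestEnable, depthWriteEnable, depthBoundsTestEnable and
 * stencilTestEnable are all VK_FALSE when it is fed to the emit helpers.
 */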

static void
tu_pipeline_builder_parse_multisample_and_color_blend(
   struct tu_pipeline_builder *builder, struct tu_pipeline *pipeline)
{
   /* The spec says:
    *
    *    pMultisampleState is a pointer to an instance of the
    *    VkPipelineMultisampleStateCreateInfo, and is ignored if the pipeline
    *    has rasterization disabled.
    *
    * Also,
    *
    *    pColorBlendState is a pointer to an instance of the
    *    VkPipelineColorBlendStateCreateInfo structure, and is ignored if the
    *    pipeline has rasterization disabled or if the subpass of the render
    *    pass the pipeline is created against does not use any color
    *    attachments.
    *
    * We leave the relevant registers stale when rasterization is disabled.
    */
   if (builder->rasterizer_discard)
      return;

   static const VkPipelineColorBlendStateCreateInfo dummy_blend_info;
   const VkPipelineMultisampleStateCreateInfo *msaa_info =
      builder->create_info->pMultisampleState;
   const VkPipelineColorBlendStateCreateInfo *blend_info =
      builder->use_color_attachments ? builder->create_info->pColorBlendState
                                     : &dummy_blend_info;

   struct tu_cs blend_cs;
   tu_cs_begin_sub_stream(&pipeline->cs, MAX_RTS * 3 + 9, &blend_cs);

   uint32_t blend_enable_mask;
   tu6_emit_rb_mrt_controls(&blend_cs, blend_info,
                            builder->color_attachment_formats,
                            &blend_enable_mask);

   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_BLEND_CONSTANTS))
      tu6_emit_blend_constants(&blend_cs, blend_info->blendConstants);

   tu6_emit_blend_control(&blend_cs, blend_enable_mask, msaa_info);

   pipeline->blend.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &blend_cs);
}

static void
tu_pipeline_finish(struct tu_pipeline *pipeline,
                   struct tu_device *dev,
                   const VkAllocationCallbacks *alloc)
{
   tu_cs_finish(&pipeline->cs);

   if (pipeline->program.binary_bo.gem_handle)
      tu_bo_finish(dev, &pipeline->program.binary_bo);
}

static VkResult
tu_pipeline_builder_build(struct tu_pipeline_builder *builder,
                          struct tu_pipeline **pipeline)
{
   VkResult result = tu_pipeline_create(builder->device, builder->alloc,
                                        pipeline);
   if (result != VK_SUCCESS)
      return result;

   /* compile and upload shaders */
   result = tu_pipeline_builder_compile_shaders(builder);
   if (result == VK_SUCCESS)
      result = tu_pipeline_builder_upload_shaders(builder, *pipeline);
   if (result != VK_SUCCESS) {
      tu_pipeline_finish(*pipeline, builder->device, builder->alloc);
      vk_free2(&builder->device->alloc, builder->alloc, *pipeline);
      *pipeline = VK_NULL_HANDLE;

      return result;
   }

   tu_pipeline_builder_parse_dynamic(builder, *pipeline);
   tu_pipeline_builder_parse_shader_stages(builder, *pipeline);
   tu_pipeline_builder_parse_vertex_input(builder, *pipeline);
   tu_pipeline_builder_parse_input_assembly(builder, *pipeline);
   tu_pipeline_builder_parse_viewport(builder, *pipeline);
   tu_pipeline_builder_parse_rasterization(builder, *pipeline);
   tu_pipeline_builder_parse_depth_stencil(builder, *pipeline);
   tu_pipeline_builder_parse_multisample_and_color_blend(builder, *pipeline);

   /* we should have reserved enough space upfront such that the CS never
    * grows
    */
   assert((*pipeline)->cs.bo_count == 1);

   return VK_SUCCESS;
}

static void
tu_pipeline_builder_finish(struct tu_pipeline_builder *builder)
{
   for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++) {
      if (!builder->shaders[i])
         continue;
      tu_shader_destroy(builder->device, builder->shaders[i], builder->alloc);
   }
}

static void
tu_pipeline_builder_init_graphics(
   struct tu_pipeline_builder *builder,
   struct tu_device *dev,
   struct tu_pipeline_cache *cache,
   const VkGraphicsPipelineCreateInfo *create_info,
   const VkAllocationCallbacks *alloc)
{
   TU_FROM_HANDLE(tu_pipeline_layout, layout, create_info->layout);

   *builder = (struct tu_pipeline_builder) {
      .device = dev,
      .cache = cache,
      .create_info = create_info,
      .alloc = alloc,
      .layout = layout,
   };

   builder->rasterizer_discard =
      create_info->pRasterizationState->rasterizerDiscardEnable;

   if (builder->rasterizer_discard) {
      builder->samples = VK_SAMPLE_COUNT_1_BIT;
   } else {
      builder->samples = create_info->pMultisampleState->rasterizationSamples;

      const struct tu_render_pass *pass =
         tu_render_pass_from_handle(create_info->renderPass);
      const struct tu_subpass *subpass =
         &pass->subpasses[create_info->subpass];

      builder->use_depth_stencil_attachment =
         subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED;

      assert(subpass->color_count == 0 ||
             !create_info->pColorBlendState ||
             subpass->color_count == create_info->pColorBlendState->attachmentCount);
      builder->color_attachment_count = subpass->color_count;
      for (uint32_t i = 0; i < subpass->color_count; i++) {
         const uint32_t a = subpass->color_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         builder->color_attachment_formats[i] = pass->attachments[a].format;
         builder->use_color_attachments = true;
      }
   }
}

static VkResult
tu_graphics_pipeline_create(VkDevice device,
                            VkPipelineCache pipelineCache,
                            const VkGraphicsPipelineCreateInfo *pCreateInfo,
                            const VkAllocationCallbacks *pAllocator,
                            VkPipeline *pPipeline)
{
   TU_FROM_HANDLE(tu_device, dev, device);
   TU_FROM_HANDLE(tu_pipeline_cache, cache, pipelineCache);

   struct tu_pipeline_builder builder;
   tu_pipeline_builder_init_graphics(&builder, dev, cache,
                                     pCreateInfo, pAllocator);

   struct tu_pipeline *pipeline = NULL;
   VkResult result = tu_pipeline_builder_build(&builder, &pipeline);
   tu_pipeline_builder_finish(&builder);

   if (result == VK_SUCCESS)
      *pPipeline = tu_pipeline_to_handle(pipeline);
   else
      *pPipeline = VK_NULL_HANDLE;

   return result;
}

VkResult
tu_CreateGraphicsPipelines(VkDevice device,
                           VkPipelineCache pipelineCache,
                           uint32_t count,
                           const VkGraphicsPipelineCreateInfo *pCreateInfos,
                           const VkAllocationCallbacks *pAllocator,
                           VkPipeline *pPipelines)
{
   VkResult final_result = VK_SUCCESS;

   for (uint32_t i = 0; i < count; i++) {
      VkResult result = tu_graphics_pipeline_create(device, pipelineCache,
                                                    &pCreateInfos[i], pAllocator,
                                                    &pPipelines[i]);

      if (result != VK_SUCCESS)
         final_result = result;
   }

   return final_result;
}
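
/* Per-entry failure handling: the loop above keeps creating the remaining
 * pipelines after a failure, returns the last failing VkResult, and
 * tu_graphics_pipeline_create() has already stored VK_NULL_HANDLE for the
 * entries that failed, so a caller can inspect each element of pPipelines.
 * Illustrative caller-side sketch (the `infos` array is assumed):
 *
 *    VkPipeline pipelines[2];
 *    VkResult res = vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, 2,
 *                                             infos, NULL, pipelines);
 *    for (int i = 0; i < 2; i++) {
 *       if (res != VK_SUCCESS && pipelines[i] != VK_NULL_HANDLE)
 *          vkDestroyPipeline(device, pipelines[i], NULL);
 *    }
 */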

static void
tu6_emit_compute_program(struct tu_cs *cs,
                         struct tu_shader *shader,
                         const struct tu_bo *binary_bo)
{
   const struct ir3_shader_variant *v = &shader->variants[0];

   tu6_emit_cs_config(cs, shader, v);

   /* The compute program is the only one in the pipeline, so 0 offset. */
   tu6_emit_shader_object(cs, MESA_SHADER_COMPUTE, v, binary_bo, 0);

   tu6_emit_immediates(cs, v, CP_LOAD_STATE6_FRAG, SB6_CS_SHADER);
}

static VkResult
tu_compute_upload_shader(VkDevice device,
                         struct tu_pipeline *pipeline,
                         struct tu_shader *shader)
{
   TU_FROM_HANDLE(tu_device, dev, device);
   struct tu_bo *bo = &pipeline->program.binary_bo;
   struct ir3_shader_variant *v = &shader->variants[0];

   uint32_t shader_size = sizeof(uint32_t) * v->info.sizedwords;
   VkResult result =
      tu_bo_init_new(dev, bo, shader_size);
   if (result != VK_SUCCESS)
      return result;

   result = tu_bo_map(dev, bo);
   if (result != VK_SUCCESS)
      return result;

   memcpy(bo->map, shader->binary, shader_size);

   return VK_SUCCESS;
}

static VkResult
tu_compute_pipeline_create(VkDevice device,
                           VkPipelineCache _cache,
                           const VkComputePipelineCreateInfo *pCreateInfo,
                           const VkAllocationCallbacks *pAllocator,
                           VkPipeline *pPipeline)
{
   TU_FROM_HANDLE(tu_device, dev, device);
   TU_FROM_HANDLE(tu_pipeline_layout, layout, pCreateInfo->layout);
   const VkPipelineShaderStageCreateInfo *stage_info = &pCreateInfo->stage;
   VkResult result;

   struct tu_pipeline *pipeline;

   *pPipeline = VK_NULL_HANDLE;

   result = tu_pipeline_create(dev, pAllocator, &pipeline);
   if (result != VK_SUCCESS)
      return result;

   pipeline->layout = layout;

   struct tu_shader_compile_options options;
   tu_shader_compile_options_init(&options, NULL);

   struct tu_shader *shader =
      tu_shader_create(dev, MESA_SHADER_COMPUTE, stage_info, layout, pAllocator);
   if (!shader) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   result = tu_shader_compile(dev, shader, NULL, &options, pAllocator);
   if (result != VK_SUCCESS)
      goto fail;

   struct ir3_shader_variant *v = &shader->variants[0];

   tu_pipeline_set_linkage(&pipeline->program.link[MESA_SHADER_COMPUTE],
                           shader, v);

   result = tu_compute_upload_shader(device, pipeline, shader);
   if (result != VK_SUCCESS)
      goto fail;

   for (int i = 0; i < 3; i++)
      pipeline->compute.local_size[i] = v->shader->nir->info.cs.local_size[i];

   struct tu_cs prog_cs;
   tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
   tu6_emit_compute_program(&prog_cs, shader, &pipeline->program.binary_bo);
   pipeline->program.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);

   *pPipeline = tu_pipeline_to_handle(pipeline);
   return VK_SUCCESS;

fail:
   if (shader)
      tu_shader_destroy(dev, shader, pAllocator);

   tu_pipeline_finish(pipeline, dev, pAllocator);
   vk_free2(&dev->alloc, pAllocator, pipeline);

   return result;
}

VkResult
tu_CreateComputePipelines(VkDevice device,
                          VkPipelineCache pipelineCache,
                          uint32_t count,
                          const VkComputePipelineCreateInfo *pCreateInfos,
                          const VkAllocationCallbacks *pAllocator,
                          VkPipeline *pPipelines)
{
   VkResult final_result = VK_SUCCESS;

   for (uint32_t i = 0; i < count; i++) {
      VkResult result = tu_compute_pipeline_create(device, pipelineCache,
                                                   &pCreateInfos[i],
                                                   pAllocator, &pPipelines[i]);
      if (result != VK_SUCCESS)
         final_result = result;
   }

   return final_result;
}

void
tu_DestroyPipeline(VkDevice _device,
                   VkPipeline _pipeline,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, dev, _device);
   TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);

   if (!_pipeline)
      return;

   tu_pipeline_finish(pipeline, dev, pAllocator);
   vk_free2(&dev->alloc, pAllocator, pipeline);
}