/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <array>
#include <unordered_map>

#include "aco_ir.h"
#include "nir.h"
#include "nir_control_flow.h"
#include "vulkan/radv_shader.h"
#include "vulkan/radv_descriptor_set.h"
#include "vulkan/radv_shader_args.h"
#include "sid.h"
#include "ac_exp_param.h"
#include "ac_shader_util.h"
#include "util/u_math.h"

#define MAX_INLINE_PUSH_CONSTS 8
struct shader_io_state {
   uint8_t mask[VARYING_SLOT_MAX];
   Temp temps[VARYING_SLOT_MAX * 4u];

   shader_io_state() {
      memset(mask, 0, sizeof(mask));
      std::fill_n(temps, VARYING_SLOT_MAX * 4u, Temp(0, RegClass::v1));
   }
};
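/* Note (inferred from the layout above): mask[slot] records which of a
 * varying slot's four components are in use, and temps[slot * 4u + component]
 * holds the SSA temporary backing each component. */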
enum resource_flags {
   has_glc_vmem_load = 0x1,
   has_nonglc_vmem_load = 0x2,
   has_glc_vmem_store = 0x4,
   has_nonglc_vmem_store = 0x8,

   has_vmem_store = has_glc_vmem_store | has_nonglc_vmem_store,
   has_vmem_loadstore = has_vmem_store | has_glc_vmem_load | has_nonglc_vmem_load,
   has_nonglc_vmem_loadstore = has_nonglc_vmem_load | has_nonglc_vmem_store,

   buffer_is_restrict = 0x10,
};
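/* Illustrative reading of the flags above: a coherent/volatile SSBO load
 * contributes has_glc_vmem_load, a plain load has_nonglc_vmem_load, and the
 * composite values let later code ask "any VMEM store?" or "any non-GLC
 * access?" with a single mask test. buffer_is_restrict marks bindings whose
 * accesses cannot alias other buffers. */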
struct isel_context {
   const struct radv_nir_compiler_options *options;
   struct radv_shader_args *args;
   Program *program;
   nir_shader *shader;
   uint32_t constant_data_offset;
   Block *block;
   std::unique_ptr<Temp[]> allocated;
   std::unordered_map<unsigned, std::array<Temp,NIR_MAX_VEC_COMPONENTS>> allocated_vec;
   Stage stage;
   bool has_gfx10_wave64_bpermute = false;

   struct {
      bool has_branch;
      uint16_t loop_nest_depth = 0;
      struct {
         unsigned header_idx;
         Block* exit;
         bool has_divergent_continue = false;
         bool has_divergent_branch = false;
      } parent_loop;
      struct {
         bool is_divergent = false;
      } parent_if;
      bool exec_potentially_empty_discard = false; /* set to false when loop_nest_depth==0 && parent_if.is_divergent==false */
      uint16_t exec_potentially_empty_break_depth = UINT16_MAX;
      /* Set to false when loop_nest_depth==exec_potentially_empty_break_depth
       * and parent_if.is_divergent==false. Called _break but it's also used for
       * loop continues. */
      bool exec_potentially_empty_break = false;
      std::unique_ptr<unsigned[]> nir_to_aco; /* NIR block index to ACO block index */
   } cf_info;

   uint32_t resource_flag_offsets[MAX_SETS];
   std::vector<uint8_t> buffer_resource_flags;

   Temp arg_temps[AC_MAX_ARGS];

   /* FS inputs */
   Temp persp_centroid, linear_centroid;

   /* GS inputs */
   Temp gs_wave_id;

   /* VS output information */
   bool export_clip_dists;
   unsigned num_clip_distances;
   unsigned num_cull_distances;

   /* tessellation information */
   unsigned tcs_tess_lvl_out_loc;
   unsigned tcs_tess_lvl_in_loc;
   uint64_t tcs_temp_only_inputs;
   uint32_t tcs_num_inputs;
   uint32_t tcs_num_outputs;
   uint32_t tcs_num_patch_outputs;
   uint32_t tcs_num_patches;
   bool tcs_in_out_eq = false;

   /* I/O information */
   shader_io_state inputs;
   shader_io_state outputs;
   uint8_t output_drv_loc_to_var_slot[MESA_SHADER_COMPUTE][VARYING_SLOT_MAX];
   uint8_t output_tcs_patch_drv_loc_to_var_slot[VARYING_SLOT_MAX];
};
Temp
get_arg(isel_context *ctx, struct ac_arg arg)
{
   return ctx->arg_temps[arg.arg_index];
}
unsigned get_interp_input(nir_intrinsic_op intrin, enum glsl_interp_mode interp)
{
   switch (interp) {
   case INTERP_MODE_SMOOTH:
   case INTERP_MODE_NONE:
      if (intrin == nir_intrinsic_load_barycentric_pixel ||
          intrin == nir_intrinsic_load_barycentric_at_sample ||
          intrin == nir_intrinsic_load_barycentric_at_offset)
         return S_0286CC_PERSP_CENTER_ENA(1);
      else if (intrin == nir_intrinsic_load_barycentric_centroid)
         return S_0286CC_PERSP_CENTROID_ENA(1);
      else if (intrin == nir_intrinsic_load_barycentric_sample)
         return S_0286CC_PERSP_SAMPLE_ENA(1);
      break;
   case INTERP_MODE_NOPERSPECTIVE:
      if (intrin == nir_intrinsic_load_barycentric_pixel)
         return S_0286CC_LINEAR_CENTER_ENA(1);
      else if (intrin == nir_intrinsic_load_barycentric_centroid)
         return S_0286CC_LINEAR_CENTROID_ENA(1);
      else if (intrin == nir_intrinsic_load_barycentric_sample)
         return S_0286CC_LINEAR_SAMPLE_ENA(1);
      break;
   default:
      break;
   }
   return 0;
}
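/* Example: a fragment input declared "centroid in vec4 v" reaches this
 * function as nir_intrinsic_load_barycentric_centroid with
 * INTERP_MODE_SMOOTH and enables PERSP_CENTROID_ENA; with "noperspective
 * centroid" it would enable LINEAR_CENTROID_ENA instead. */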
/* If one side of a divergent IF ends in a branch and the other doesn't, we
 * might have to emit the contents of the side without the branch at the merge
 * block instead. This is so that we can use any SGPR live-out of the side
 * without the branch without creating a linear phi in the invert or merge block.
 */
bool
sanitize_if(nir_function_impl *impl, nir_if *nif)
{
   //TODO: skip this if the condition is uniform and there are no divergent breaks/continues?

   nir_block *then_block = nir_if_last_then_block(nif);
   nir_block *else_block = nir_if_last_else_block(nif);
   bool then_jump = nir_block_ends_in_jump(then_block) || nir_block_is_unreachable(then_block);
   bool else_jump = nir_block_ends_in_jump(else_block) || nir_block_is_unreachable(else_block);
   if (then_jump == else_jump)
      return false;

   /* If the continue-from block is empty then return as there is nothing to
    * move. */
   if (nir_cf_list_is_empty_block(else_jump ? &nif->then_list : &nif->else_list))
      return false;

   /* Even though this if statement has a jump on one side, we may still have
    * phis afterwards. Single-source phis can be produced by loop unrolling
    * or dead control-flow passes and are perfectly legal. Run a quick phi
    * removal on the block after the if to clean up any such phis.
    */
   nir_opt_remove_phis_block(nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node)));

   /* Finally, move the continue-from branch after the if-statement. */
   nir_block *last_continue_from_blk = else_jump ? then_block : else_block;
   nir_block *first_continue_from_blk = else_jump ?
      nir_if_first_then_block(nif) : nir_if_first_else_block(nif);

   nir_cf_list tmp;
   nir_cf_extract(&tmp, nir_before_block(first_continue_from_blk),
                        nir_after_block(last_continue_from_blk));
   nir_cf_reinsert(&tmp, nir_after_cf_node(&nif->cf_node));

   /* nir_cf_extract() invalidates dominance metadata, but it should still be
    * correct because of the specific type of transformation we did. Block
    * indices are not valid except for block_0's, which is all we care about for
    * nir_block_is_unreachable(). */
   impl->valid_metadata =
      (nir_metadata)(impl->valid_metadata | nir_metadata_dominance | nir_metadata_block_index);

   return true;
}
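/* A sketch of the transform above (NIR-level pseudocode, illustrative):
 *
 *    if (cond) {              if (cond) {
 *       A; continue;    =>       A; continue;
 *    } else {                 }
 *       B;                    B;
 *    }
 *
 * B is moved to after the if, so SGPRs defined in B can later be used
 * without linear phis in the invert/merge blocks. */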
bool
sanitize_cf_list(nir_function_impl *impl, struct exec_list *cf_list)
{
   bool progress = false;
   foreach_list_typed(nir_cf_node, cf_node, node, cf_list) {
      switch (cf_node->type) {
      case nir_cf_node_block:
         break;
      case nir_cf_node_if: {
         nir_if *nif = nir_cf_node_as_if(cf_node);
         progress |= sanitize_cf_list(impl, &nif->then_list);
         progress |= sanitize_cf_list(impl, &nif->else_list);
         progress |= sanitize_if(impl, nif);
         break;
      }
      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(cf_node);
         progress |= sanitize_cf_list(impl, &loop->body);
         break;
      }
      case nir_cf_node_function:
         unreachable("Invalid cf type");
      }
   }

   return progress;
}
void get_buffer_resource_flags(isel_context *ctx, nir_ssa_def *def, unsigned access,
                               uint8_t **flags, uint32_t *count)
{
   int desc_set = -1;
   unsigned binding = 0;

   if (!def) {
      /* global resources are considered aliasing with all other buffers and
       * buffer images */
      // TODO: only merge flags of resources which can really alias.
   } else if (def->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(def->parent_instr);
      if (intrin->intrinsic == nir_intrinsic_vulkan_resource_index) {
         desc_set = nir_intrinsic_desc_set(intrin);
         binding = nir_intrinsic_binding(intrin);
      }
   } else if (def->parent_instr->type == nir_instr_type_deref) {
      nir_deref_instr *deref = nir_instr_as_deref(def->parent_instr);
      assert(deref->type->is_image());
      if (deref->type->sampler_dimensionality != GLSL_SAMPLER_DIM_BUF) {
         *flags = NULL;
         *count = 0;
         return;
      }

      nir_variable *var = nir_deref_instr_get_variable(deref);
      desc_set = var->data.descriptor_set;
      binding = var->data.binding;
   }

   if (desc_set < 0) {
      *flags = ctx->buffer_resource_flags.data();
      *count = ctx->buffer_resource_flags.size();
      return;
   }

   unsigned set_offset = ctx->resource_flag_offsets[desc_set];

   if (!(ctx->buffer_resource_flags[set_offset + binding] & buffer_is_restrict)) {
      /* Non-restrict buffers alias only with other non-restrict buffers.
       * We reserve flags[0] for these. */
      *flags = ctx->buffer_resource_flags.data();
      *count = 1;
      return;
   }

   *flags = ctx->buffer_resource_flags.data() + set_offset + binding;
   *count = 1;
}
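/* Aliasing model used above, in short: flags[0] aggregates every
 * non-restrict buffer and anything that cannot be traced back to a
 * descriptor binding (e.g. global pointers), so those all alias each other;
 * only bindings marked buffer_is_restrict get their own private slot at
 * resource_flag_offsets[desc_set] + binding. */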
uint8_t get_all_buffer_resource_flags(isel_context *ctx, nir_ssa_def *def, unsigned access)
{
   uint8_t *flags;
   uint32_t count;
   get_buffer_resource_flags(ctx, def, access, &flags, &count);

   uint8_t res = 0;
   for (unsigned i = 0; i < count; i++)
      res |= flags[i];
   return res;
}
bool can_subdword_ssbo_store_use_smem(nir_intrinsic_instr *intrin)
{
   unsigned wrmask = nir_intrinsic_write_mask(intrin);
   if (util_last_bit(wrmask) != util_bitcount(wrmask) ||
       util_bitcount(wrmask) * intrin->src[0].ssa->bit_size % 32 ||
       util_bitcount(wrmask) != intrin->src[0].ssa->num_components)
      return false;

   if (nir_intrinsic_align_mul(intrin) % 4 || nir_intrinsic_align_offset(intrin) % 4)
      return false;

   return true;
}
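/* Example: a store of a 16-bit vec2 with write mask 0b11 covers 32
 * contiguous bits and, if dword-aligned, passes the checks above; write mask
 * 0b10 (not contiguous from bit 0) or a lone 16-bit component (not a
 * multiple of 32 bits) is rejected. */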
void fill_desc_set_info(isel_context *ctx, nir_function_impl *impl)
{
   radv_pipeline_layout *pipeline_layout = ctx->options->layout;

   unsigned resource_flag_count = 1; /* +1 to reserve flags[0] for aliased resources */
   for (unsigned i = 0; i < pipeline_layout->num_sets; i++) {
      radv_descriptor_set_layout *layout = pipeline_layout->set[i].layout;
      ctx->resource_flag_offsets[i] = resource_flag_count;
      resource_flag_count += layout->binding_count;
   }
   ctx->buffer_resource_flags = std::vector<uint8_t>(resource_flag_count);

   nir_foreach_variable_with_modes(var, impl->function->shader, nir_var_mem_ssbo) {
      if (var->data.access & ACCESS_RESTRICT) {
         uint32_t offset = ctx->resource_flag_offsets[var->data.descriptor_set];
         ctx->buffer_resource_flags[offset + var->data.binding] |= buffer_is_restrict;
      }
   }

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         if (!nir_intrinsic_has_access(intrin))
            continue;

         nir_ssa_def *res = NULL;
         unsigned access = nir_intrinsic_access(intrin);
         unsigned flags = 0;
         bool glc = access & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE);
         switch (intrin->intrinsic) {
         case nir_intrinsic_load_ssbo: {
            if (nir_dest_is_divergent(intrin->dest) && (!glc || ctx->program->chip_class >= GFX8))
               flags |= glc ? has_glc_vmem_load : has_nonglc_vmem_load;
            res = intrin->src[0].ssa;
            break;
         }
         case nir_intrinsic_ssbo_atomic_add:
         case nir_intrinsic_ssbo_atomic_imin:
         case nir_intrinsic_ssbo_atomic_umin:
         case nir_intrinsic_ssbo_atomic_imax:
         case nir_intrinsic_ssbo_atomic_umax:
         case nir_intrinsic_ssbo_atomic_and:
         case nir_intrinsic_ssbo_atomic_or:
         case nir_intrinsic_ssbo_atomic_xor:
         case nir_intrinsic_ssbo_atomic_exchange:
         case nir_intrinsic_ssbo_atomic_comp_swap:
            flags |= has_glc_vmem_load | has_glc_vmem_store;
            res = intrin->src[0].ssa;
            break;
         case nir_intrinsic_store_ssbo:
            if (nir_src_is_divergent(intrin->src[2]) ||
                ctx->program->chip_class < GFX8 || ctx->program->chip_class >= GFX10_3 ||
                (intrin->src[0].ssa->bit_size < 32 && !can_subdword_ssbo_store_use_smem(intrin)))
               flags |= glc ? has_glc_vmem_store : has_nonglc_vmem_store;
            res = intrin->src[1].ssa;
            break;
         case nir_intrinsic_load_global:
            if (!(access & ACCESS_NON_WRITEABLE))
               flags |= glc ? has_glc_vmem_load : has_nonglc_vmem_load;
            break;
         case nir_intrinsic_store_global:
            flags |= glc ? has_glc_vmem_store : has_nonglc_vmem_store;
            break;
         case nir_intrinsic_global_atomic_add:
         case nir_intrinsic_global_atomic_imin:
         case nir_intrinsic_global_atomic_umin:
         case nir_intrinsic_global_atomic_imax:
         case nir_intrinsic_global_atomic_umax:
         case nir_intrinsic_global_atomic_and:
         case nir_intrinsic_global_atomic_or:
         case nir_intrinsic_global_atomic_xor:
         case nir_intrinsic_global_atomic_exchange:
         case nir_intrinsic_global_atomic_comp_swap:
            flags |= has_glc_vmem_load | has_glc_vmem_store;
            break;
         case nir_intrinsic_image_deref_load:
            res = intrin->src[0].ssa;
            flags |= glc ? has_glc_vmem_load : has_nonglc_vmem_load;
            break;
         case nir_intrinsic_image_deref_store:
            res = intrin->src[0].ssa;
            flags |= (glc || ctx->program->chip_class == GFX6) ? has_glc_vmem_store : has_nonglc_vmem_store;
            break;
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_umin:
         case nir_intrinsic_image_deref_atomic_imin:
         case nir_intrinsic_image_deref_atomic_umax:
         case nir_intrinsic_image_deref_atomic_imax:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
            res = intrin->src[0].ssa;
            flags |= has_glc_vmem_load | has_glc_vmem_store;
            break;
         default:
            continue;
         }

         uint8_t *flags_ptr;
         uint32_t count;
         get_buffer_resource_flags(ctx, res, access, &flags_ptr, &count);

         for (unsigned i = 0; i < count; i++)
            flags_ptr[i] |= flags;
      }
   }
}
void apply_nuw_to_ssa(nir_shader *shader, struct hash_table *range_ht, nir_ssa_def *ssa,
                      const nir_unsigned_upper_bound_config *config)
{
   nir_ssa_scalar scalar;
   scalar.def = ssa;
   scalar.comp = 0;

   if (!nir_ssa_scalar_is_alu(scalar) || nir_ssa_scalar_alu_op(scalar) != nir_op_iadd)
      return;

   nir_alu_instr *add = nir_instr_as_alu(ssa->parent_instr);

   if (add->no_unsigned_wrap)
      return;

   nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(scalar, 0);
   nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(scalar, 1);

   if (nir_ssa_scalar_is_const(src0)) {
      nir_ssa_scalar tmp = src0;
      src0 = src1;
      src1 = tmp;
   }

   uint32_t src1_ub = nir_unsigned_upper_bound(shader, range_ht, src1, config);
   add->no_unsigned_wrap = !nir_addition_might_overflow(shader, range_ht, src0, src1_ub, config);
}
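/* Example: for an offset computed as "base + 16", if the range analysis can
 * bound "base" so that the addition provably cannot wrap around 32 bits, the
 * iadd is marked no_unsigned_wrap; instruction selection can then fold the
 * constant into the memory instruction's immediate offset. */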
void apply_nuw_to_offsets(isel_context *ctx, nir_function_impl *impl)
{
   nir_unsigned_upper_bound_config config;
   config.min_subgroup_size = 64;
   config.max_subgroup_size = 64;
   if (ctx->shader->info.stage == MESA_SHADER_COMPUTE && ctx->options->key.cs.subgroup_size) {
      config.min_subgroup_size = ctx->options->key.cs.subgroup_size;
      config.max_subgroup_size = ctx->options->key.cs.subgroup_size;
   }
   config.max_work_group_invocations = 2048;
   config.max_work_group_count[0] = 65535;
   config.max_work_group_count[1] = 65535;
   config.max_work_group_count[2] = 65535;
   config.max_work_group_size[0] = 2048;
   config.max_work_group_size[1] = 2048;
   config.max_work_group_size[2] = 2048;
   for (unsigned i = 0; i < MAX_VERTEX_ATTRIBS; i++) {
      unsigned attrib_format = ctx->options->key.vs.vertex_attribute_formats[i];
      unsigned dfmt = attrib_format & 0xf;
      unsigned nfmt = (attrib_format >> 4) & 0x7;

      uint32_t max = UINT32_MAX;
      if (nfmt == V_008F0C_BUF_NUM_FORMAT_UNORM) {
         max = 0x3f800000u; /* 1.0 */
      } else if (nfmt == V_008F0C_BUF_NUM_FORMAT_UINT ||
                 nfmt == V_008F0C_BUF_NUM_FORMAT_USCALED) {
         bool uscaled = nfmt == V_008F0C_BUF_NUM_FORMAT_USCALED;
         switch (dfmt) {
         case V_008F0C_BUF_DATA_FORMAT_8:
         case V_008F0C_BUF_DATA_FORMAT_8_8:
         case V_008F0C_BUF_DATA_FORMAT_8_8_8_8:
            max = uscaled ? 0x437f0000u : UINT8_MAX;
            break;
         case V_008F0C_BUF_DATA_FORMAT_10_10_10_2:
         case V_008F0C_BUF_DATA_FORMAT_2_10_10_10:
            max = uscaled ? 0x447fc000u : 1023;
            break;
         case V_008F0C_BUF_DATA_FORMAT_10_11_11:
         case V_008F0C_BUF_DATA_FORMAT_11_11_10:
            max = uscaled ? 0x44ffe000u : 2047;
            break;
         case V_008F0C_BUF_DATA_FORMAT_16:
         case V_008F0C_BUF_DATA_FORMAT_16_16:
         case V_008F0C_BUF_DATA_FORMAT_16_16_16_16:
            max = uscaled ? 0x477fff00u : UINT16_MAX;
            break;
         case V_008F0C_BUF_DATA_FORMAT_32:
         case V_008F0C_BUF_DATA_FORMAT_32_32:
         case V_008F0C_BUF_DATA_FORMAT_32_32_32:
         case V_008F0C_BUF_DATA_FORMAT_32_32_32_32:
            max = uscaled ? 0x4f800000u : UINT32_MAX;
            break;
         }
      }
      config.vertex_attrib_max[i] = max;
   }

   struct hash_table *range_ht = _mesa_pointer_hash_table_create(NULL);

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

         switch (intrin->intrinsic) {
         case nir_intrinsic_load_constant:
         case nir_intrinsic_load_uniform:
         case nir_intrinsic_load_push_constant:
            if (!nir_src_is_divergent(intrin->src[0]))
               apply_nuw_to_ssa(ctx->shader, range_ht, intrin->src[0].ssa, &config);
            break;
         case nir_intrinsic_load_ubo:
         case nir_intrinsic_load_ssbo:
            if (!nir_src_is_divergent(intrin->src[1]))
               apply_nuw_to_ssa(ctx->shader, range_ht, intrin->src[1].ssa, &config);
            break;
         case nir_intrinsic_store_ssbo:
            if (!nir_src_is_divergent(intrin->src[2]))
               apply_nuw_to_ssa(ctx->shader, range_ht, intrin->src[2].ssa, &config);
            break;
         default:
            break;
         }
      }
   }

   _mesa_hash_table_destroy(range_ht, NULL);
}
RegClass
get_reg_class(isel_context *ctx, RegType type, unsigned components, unsigned bitsize)
{
   if (bitsize == 1)
      return RegClass(RegType::sgpr, ctx->program->lane_mask.size() * components);
   else
      return RegClass::get(type, components * bitsize / 8u);
}
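/* Illustrative examples, assuming a wave64 lane mask (s2): a 1-bit boolean
 * becomes one s2 per component, a divergent 32-bit vec3 becomes v3, and a
 * uniform 3x32-bit value becomes s3 via RegClass::get(). */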
void init_context(isel_context *ctx, nir_shader *shader)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(shader);
   unsigned lane_mask_size = ctx->program->lane_mask.size();

   ctx->shader = shader;
   nir_divergence_analysis(shader, nir_divergence_view_index_uniform);

   fill_desc_set_info(ctx, impl);

   apply_nuw_to_offsets(ctx, impl);

   /* sanitize control flow */
   nir_metadata_require(impl, nir_metadata_dominance);
   sanitize_cf_list(impl, &impl->body);
   nir_metadata_preserve(impl, (nir_metadata)~nir_metadata_block_index);

   /* we'll need this for isel */
   nir_metadata_require(impl, nir_metadata_block_index);

   if (!(ctx->stage & sw_gs_copy) && ctx->options->dump_preoptir) {
      fprintf(stderr, "NIR shader before instruction selection:\n");
      nir_print_shader(shader, stderr);
   }

   std::unique_ptr<Temp[]> allocated{new Temp[impl->ssa_alloc]()};

   unsigned spi_ps_inputs = 0;

   std::unique_ptr<unsigned[]> nir_to_aco{new unsigned[impl->num_blocks]()};

   /* TODO: make this recursive to improve compile times and merge with fill_desc_set_info() */
   bool done = false;
   while (!done) {
      done = true;
      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            switch(instr->type) {
            case nir_instr_type_alu: {
               nir_alu_instr *alu_instr = nir_instr_as_alu(instr);
               RegType type = RegType::sgpr;
               switch(alu_instr->op) {
               case nir_op_fround_even:
               case nir_op_f2f16_rtz:
               case nir_op_f2f16_rtne:
               case nir_op_pack_half_2x16:
               case nir_op_unpack_half_2x16_split_x:
               case nir_op_unpack_half_2x16_split_y:
               case nir_op_fddx_fine:
               case nir_op_fddy_fine:
               case nir_op_fddx_coarse:
               case nir_op_fddy_coarse:
               case nir_op_fquantize2f16:
               case nir_op_frexp_sig:
               case nir_op_frexp_exp:
               case nir_op_cube_face_index:
               case nir_op_cube_face_coord:
                  type = RegType::vgpr;
                  break;
               case nir_op_f2i16:
               case nir_op_f2u16:
                  type = nir_dest_is_divergent(alu_instr->dest.dest) ? RegType::vgpr : RegType::sgpr;
                  break;
               case nir_op_bcsel:
                  type = nir_dest_is_divergent(alu_instr->dest.dest) ? RegType::vgpr : RegType::sgpr;
                  /* fallthrough */
               default:
                  for (unsigned i = 0; i < nir_op_infos[alu_instr->op].num_inputs; i++) {
                     if (allocated[alu_instr->src[i].src.ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                  }
                  break;
               }

               RegClass rc = get_reg_class(ctx, type, alu_instr->dest.dest.ssa.num_components, alu_instr->dest.dest.ssa.bit_size);
               allocated[alu_instr->dest.dest.ssa.index] = Temp(0, rc);
               break;
            }
            case nir_instr_type_load_const: {
               unsigned num_components = nir_instr_as_load_const(instr)->def.num_components;
               unsigned bit_size = nir_instr_as_load_const(instr)->def.bit_size;
               RegClass rc = get_reg_class(ctx, RegType::sgpr, num_components, bit_size);
               allocated[nir_instr_as_load_const(instr)->def.index] = Temp(0, rc);
               break;
            }
            case nir_instr_type_intrinsic: {
               nir_intrinsic_instr *intrinsic = nir_instr_as_intrinsic(instr);
               if (!nir_intrinsic_infos[intrinsic->intrinsic].has_dest)
                  break;
               RegType type = RegType::sgpr;
               switch(intrinsic->intrinsic) {
               case nir_intrinsic_load_push_constant:
               case nir_intrinsic_load_work_group_id:
               case nir_intrinsic_load_num_work_groups:
               case nir_intrinsic_load_subgroup_id:
               case nir_intrinsic_load_num_subgroups:
               case nir_intrinsic_load_first_vertex:
               case nir_intrinsic_load_base_instance:
               case nir_intrinsic_get_buffer_size:
               case nir_intrinsic_vote_all:
               case nir_intrinsic_vote_any:
               case nir_intrinsic_read_first_invocation:
               case nir_intrinsic_read_invocation:
               case nir_intrinsic_first_invocation:
               case nir_intrinsic_ballot:
                  type = RegType::sgpr;
                  break;
               case nir_intrinsic_load_sample_id:
               case nir_intrinsic_load_sample_mask_in:
               case nir_intrinsic_load_input:
               case nir_intrinsic_load_output:
               case nir_intrinsic_load_input_vertex:
               case nir_intrinsic_load_per_vertex_input:
               case nir_intrinsic_load_per_vertex_output:
               case nir_intrinsic_load_vertex_id:
               case nir_intrinsic_load_vertex_id_zero_base:
               case nir_intrinsic_load_barycentric_sample:
               case nir_intrinsic_load_barycentric_pixel:
               case nir_intrinsic_load_barycentric_model:
               case nir_intrinsic_load_barycentric_centroid:
               case nir_intrinsic_load_barycentric_at_sample:
               case nir_intrinsic_load_barycentric_at_offset:
               case nir_intrinsic_load_interpolated_input:
               case nir_intrinsic_load_frag_coord:
               case nir_intrinsic_load_sample_pos:
               case nir_intrinsic_load_layer_id:
               case nir_intrinsic_load_local_invocation_id:
               case nir_intrinsic_load_local_invocation_index:
               case nir_intrinsic_load_subgroup_invocation:
               case nir_intrinsic_load_tess_coord:
               case nir_intrinsic_write_invocation_amd:
               case nir_intrinsic_mbcnt_amd:
               case nir_intrinsic_load_instance_id:
               case nir_intrinsic_ssbo_atomic_add:
               case nir_intrinsic_ssbo_atomic_imin:
               case nir_intrinsic_ssbo_atomic_umin:
               case nir_intrinsic_ssbo_atomic_imax:
               case nir_intrinsic_ssbo_atomic_umax:
               case nir_intrinsic_ssbo_atomic_and:
               case nir_intrinsic_ssbo_atomic_or:
               case nir_intrinsic_ssbo_atomic_xor:
               case nir_intrinsic_ssbo_atomic_exchange:
               case nir_intrinsic_ssbo_atomic_comp_swap:
               case nir_intrinsic_global_atomic_add:
               case nir_intrinsic_global_atomic_imin:
               case nir_intrinsic_global_atomic_umin:
               case nir_intrinsic_global_atomic_imax:
               case nir_intrinsic_global_atomic_umax:
               case nir_intrinsic_global_atomic_and:
               case nir_intrinsic_global_atomic_or:
               case nir_intrinsic_global_atomic_xor:
               case nir_intrinsic_global_atomic_exchange:
               case nir_intrinsic_global_atomic_comp_swap:
               case nir_intrinsic_image_deref_atomic_add:
               case nir_intrinsic_image_deref_atomic_umin:
               case nir_intrinsic_image_deref_atomic_imin:
               case nir_intrinsic_image_deref_atomic_umax:
               case nir_intrinsic_image_deref_atomic_imax:
               case nir_intrinsic_image_deref_atomic_and:
               case nir_intrinsic_image_deref_atomic_or:
               case nir_intrinsic_image_deref_atomic_xor:
               case nir_intrinsic_image_deref_atomic_exchange:
               case nir_intrinsic_image_deref_atomic_comp_swap:
               case nir_intrinsic_image_deref_size:
               case nir_intrinsic_shared_atomic_add:
               case nir_intrinsic_shared_atomic_imin:
               case nir_intrinsic_shared_atomic_umin:
               case nir_intrinsic_shared_atomic_imax:
               case nir_intrinsic_shared_atomic_umax:
               case nir_intrinsic_shared_atomic_and:
               case nir_intrinsic_shared_atomic_or:
               case nir_intrinsic_shared_atomic_xor:
               case nir_intrinsic_shared_atomic_exchange:
               case nir_intrinsic_shared_atomic_comp_swap:
               case nir_intrinsic_shared_atomic_fadd:
               case nir_intrinsic_load_scratch:
               case nir_intrinsic_load_invocation_id:
               case nir_intrinsic_load_primitive_id:
                  type = RegType::vgpr;
                  break;
               case nir_intrinsic_shuffle:
               case nir_intrinsic_quad_broadcast:
               case nir_intrinsic_quad_swap_horizontal:
               case nir_intrinsic_quad_swap_vertical:
               case nir_intrinsic_quad_swap_diagonal:
               case nir_intrinsic_quad_swizzle_amd:
               case nir_intrinsic_masked_swizzle_amd:
               case nir_intrinsic_inclusive_scan:
               case nir_intrinsic_exclusive_scan:
               case nir_intrinsic_reduce:
               case nir_intrinsic_load_ubo:
               case nir_intrinsic_load_ssbo:
               case nir_intrinsic_load_global:
               case nir_intrinsic_vulkan_resource_index:
               case nir_intrinsic_load_shared:
                  type = nir_dest_is_divergent(intrinsic->dest) ? RegType::vgpr : RegType::sgpr;
                  break;
               case nir_intrinsic_load_view_index:
                  type = ctx->stage == fragment_fs ? RegType::vgpr : RegType::sgpr;
                  break;
               default:
                  for (unsigned i = 0; i < nir_intrinsic_infos[intrinsic->intrinsic].num_srcs; i++) {
                     if (allocated[intrinsic->src[i].ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                  }
                  break;
               }

               RegClass rc = get_reg_class(ctx, type, intrinsic->dest.ssa.num_components, intrinsic->dest.ssa.bit_size);
               allocated[intrinsic->dest.ssa.index] = Temp(0, rc);

               switch(intrinsic->intrinsic) {
               case nir_intrinsic_load_barycentric_sample:
               case nir_intrinsic_load_barycentric_pixel:
               case nir_intrinsic_load_barycentric_centroid:
               case nir_intrinsic_load_barycentric_at_sample:
               case nir_intrinsic_load_barycentric_at_offset: {
                  glsl_interp_mode mode = (glsl_interp_mode)nir_intrinsic_interp_mode(intrinsic);
                  spi_ps_inputs |= get_interp_input(intrinsic->intrinsic, mode);
                  break;
               }
               case nir_intrinsic_load_barycentric_model:
                  spi_ps_inputs |= S_0286CC_PERSP_PULL_MODEL_ENA(1);
                  break;
               case nir_intrinsic_load_front_face:
                  spi_ps_inputs |= S_0286CC_FRONT_FACE_ENA(1);
                  break;
               case nir_intrinsic_load_frag_coord:
               case nir_intrinsic_load_sample_pos: {
                  uint8_t mask = nir_ssa_def_components_read(&intrinsic->dest.ssa);
                  for (unsigned i = 0; i < 4; i++) {
                     if (mask & (1 << i))
                        spi_ps_inputs |= S_0286CC_POS_X_FLOAT_ENA(1) << i;
                  }
                  break;
               }
               case nir_intrinsic_load_sample_id:
                  spi_ps_inputs |= S_0286CC_ANCILLARY_ENA(1);
                  break;
               case nir_intrinsic_load_sample_mask_in:
                  spi_ps_inputs |= S_0286CC_ANCILLARY_ENA(1);
                  spi_ps_inputs |= S_0286CC_SAMPLE_COVERAGE_ENA(1);
                  break;
               default:
                  break;
               }
               break;
            }
            case nir_instr_type_tex: {
               nir_tex_instr* tex = nir_instr_as_tex(instr);
               unsigned size = tex->dest.ssa.num_components;

               if (tex->dest.ssa.bit_size == 64)
                  size *= 2;
               if (tex->op == nir_texop_texture_samples)
                  assert(!tex->dest.ssa.divergent);
               if (nir_dest_is_divergent(tex->dest))
                  allocated[tex->dest.ssa.index] = Temp(0, RegClass(RegType::vgpr, size));
               else
                  allocated[tex->dest.ssa.index] = Temp(0, RegClass(RegType::sgpr, size));
               break;
            }
            case nir_instr_type_parallel_copy: {
               nir_foreach_parallel_copy_entry(entry, nir_instr_as_parallel_copy(instr)) {
                  allocated[entry->dest.ssa.index] = allocated[entry->src.ssa->index];
               }
               break;
            }
            case nir_instr_type_ssa_undef: {
               unsigned num_components = nir_instr_as_ssa_undef(instr)->def.num_components;
               unsigned bit_size = nir_instr_as_ssa_undef(instr)->def.bit_size;
               RegClass rc = get_reg_class(ctx, RegType::sgpr, num_components, bit_size);
               allocated[nir_instr_as_ssa_undef(instr)->def.index] = Temp(0, rc);
               break;
            }
            case nir_instr_type_phi: {
               nir_phi_instr* phi = nir_instr_as_phi(instr);
               RegType type;
               unsigned size = phi->dest.ssa.num_components;

               if (phi->dest.ssa.bit_size == 1) {
                  assert(size == 1 && "multiple components not yet supported on boolean phis.");
                  type = RegType::sgpr;
                  size *= lane_mask_size;
                  allocated[phi->dest.ssa.index] = Temp(0, RegClass(type, size));
                  break;
               }

               if (nir_dest_is_divergent(phi->dest)) {
                  type = RegType::vgpr;
               } else {
                  type = RegType::sgpr;
                  nir_foreach_phi_src (src, phi) {
                     if (allocated[src->src.ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                     if (allocated[src->src.ssa->index].type() == RegType::none)
                        done = false;
                  }
               }

               RegClass rc = get_reg_class(ctx, type, phi->dest.ssa.num_components, phi->dest.ssa.bit_size);
               if (rc != allocated[phi->dest.ssa.index].regClass()) {
                  done = false;
               } else {
                  nir_foreach_phi_src(src, phi)
                     assert(allocated[src->src.ssa->index].size() == rc.size());
               }
               allocated[phi->dest.ssa.index] = Temp(0, rc);
               break;
            }
            default:
               break;
            }
         }
      }
   }

   if (G_0286CC_POS_W_FLOAT_ENA(spi_ps_inputs)) {
      /* If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be enabled too */
      spi_ps_inputs |= S_0286CC_PERSP_CENTER_ENA(1);
   }

   if (!(spi_ps_inputs & 0x7F)) {
      /* At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled */
      spi_ps_inputs |= S_0286CC_PERSP_CENTER_ENA(1);
   }

   ctx->program->config->spi_ps_input_ena = spi_ps_inputs;
   ctx->program->config->spi_ps_input_addr = spi_ps_inputs;

   for (unsigned i = 0; i < impl->ssa_alloc; i++)
      allocated[i] = Temp(ctx->program->allocateId(), allocated[i].regClass());

   ctx->allocated.reset(allocated.release());
   ctx->cf_info.nir_to_aco.reset(nir_to_aco.release());

   /* align and copy constant data */
   while (ctx->program->constant_data.size() % 4u)
      ctx->program->constant_data.push_back(0);
   ctx->constant_data_offset = ctx->program->constant_data.size();
   ctx->program->constant_data.insert(ctx->program->constant_data.end(),
                                      (uint8_t*)shader->constant_data,
                                      (uint8_t*)shader->constant_data + shader->constant_data_size);
}
Pseudo_instruction *add_startpgm(struct isel_context *ctx)
{
   unsigned arg_count = ctx->args->ac.arg_count;
   if (ctx->stage == fragment_fs) {
      /* LLVM optimizes away unused FS inputs and computes spi_ps_input_addr
       * itself and then communicates the results back via the ELF binary.
       * Mirror what LLVM does by re-mapping the VGPR arguments here.
       *
       * TODO: If we made the FS input scanning code into a separate pass that
       * could run before argument setup, then this wouldn't be necessary
       * anymore.
       */
      struct ac_shader_args *args = &ctx->args->ac;
      arg_count = 0;
      for (unsigned i = 0, vgpr_arg = 0, vgpr_reg = 0; i < args->arg_count; i++) {
         if (args->args[i].file != AC_ARG_VGPR) {
            arg_count++;
            continue;
         }

         if (!(ctx->program->config->spi_ps_input_addr & (1 << vgpr_arg))) {
            args->args[i].skip = true;
         } else {
            args->args[i].offset = vgpr_reg;
            vgpr_reg += args->args[i].size;
            arg_count++;
         }
         vgpr_arg++;
      }
   }

   aco_ptr<Pseudo_instruction> startpgm{create_instruction<Pseudo_instruction>(aco_opcode::p_startpgm, Format::PSEUDO, 0, arg_count + 1)};
   for (unsigned i = 0, arg = 0; i < ctx->args->ac.arg_count; i++) {
      if (ctx->args->ac.args[i].skip)
         continue;

      enum ac_arg_regfile file = ctx->args->ac.args[i].file;
      unsigned size = ctx->args->ac.args[i].size;
      unsigned reg = ctx->args->ac.args[i].offset;
      RegClass type = RegClass(file == AC_ARG_SGPR ? RegType::sgpr : RegType::vgpr, size);
      Temp dst = Temp{ctx->program->allocateId(), type};
      ctx->arg_temps[i] = dst;
      startpgm->definitions[arg] = Definition(dst);
      startpgm->definitions[arg].setFixed(PhysReg{file == AC_ARG_SGPR ? reg : reg + 256});
      arg++;
   }
   startpgm->definitions[arg_count] = Definition{ctx->program->allocateId(), exec, ctx->program->lane_mask};
   Pseudo_instruction *instr = startpgm.get();
   ctx->block->instructions.push_back(std::move(startpgm));

   /* Stash these in the program so that they can be accessed later when
    * handling spilling. */
   ctx->program->private_segment_buffer = get_arg(ctx, ctx->args->ring_offsets);
   ctx->program->scratch_offset = get_arg(ctx, ctx->args->scratch_offset);

   return instr;
}
int
type_size(const struct glsl_type *type, bool bindless)
{
   // TODO: don't we need type->std430_base_alignment() here?
   return glsl_count_attribute_slots(type, false);
}
void
shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   uint32_t comp_size = glsl_type_is_boolean(type)
      ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);
   *size = comp_size * length;
   *align = comp_size;
}
bool
mem_vectorize_callback(unsigned align, unsigned bit_size,
                       unsigned num_components, unsigned high_offset,
                       nir_intrinsic_instr *low, nir_intrinsic_instr *high)
{
   if (num_components > 4)
      return false;

   /* >128 bit loads are split except with SMEM */
   if (bit_size * num_components > 128)
      return false;

   switch (low->intrinsic) {
   case nir_intrinsic_load_global:
   case nir_intrinsic_store_global:
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_push_constant:
      return align % (bit_size == 8 ? 2 : 4) == 0;
   case nir_intrinsic_load_deref:
   case nir_intrinsic_store_deref:
      assert(nir_src_as_deref(low->src[0])->mode == nir_var_mem_shared);
      /* fallthrough */
   case nir_intrinsic_load_shared:
   case nir_intrinsic_store_shared:
      if (bit_size * num_components > 64) /* 96 and 128 bit loads require 128 bit alignment and are split otherwise */
         return align % 16 == 0;
      else
         return align % (bit_size == 8 ? 2 : 4) == 0;
   default:
      return false;
   }
}
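/* Example: two adjacent dword-aligned 32-bit SSBO loads (64 bits total) pass
 * the checks above and can be merged into one 2-component load, while
 * anything over 128 bits or more than 4 components is always split. */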
void
setup_vs_output_info(isel_context *ctx, nir_shader *nir,
                     bool export_prim_id, bool export_clip_dists,
                     radv_vs_output_info *outinfo)
{
   memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
          sizeof(outinfo->vs_output_param_offset));

   outinfo->param_exports = 0;
   int pos_written = 0x1;
   if (outinfo->writes_pointsize || outinfo->writes_viewport_index || outinfo->writes_layer)
      pos_written |= 1 << 1;

   uint64_t mask = nir->info.outputs_written;
   while (mask) {
      int idx = u_bit_scan64(&mask);
      if (idx >= VARYING_SLOT_VAR0 || idx == VARYING_SLOT_LAYER ||
          idx == VARYING_SLOT_PRIMITIVE_ID || idx == VARYING_SLOT_VIEWPORT ||
          ((idx == VARYING_SLOT_CLIP_DIST0 || idx == VARYING_SLOT_CLIP_DIST1) && export_clip_dists)) {
         if (outinfo->vs_output_param_offset[idx] == AC_EXP_PARAM_UNDEFINED)
            outinfo->vs_output_param_offset[idx] = outinfo->param_exports++;
      }
   }
   if (outinfo->writes_layer &&
       outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] == AC_EXP_PARAM_UNDEFINED) {
      /* when ctx->options->key.has_multiview_view_index = true, the layer
       * variable isn't declared in NIR and it's isel's job to get the layer */
      outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] = outinfo->param_exports++;
   }

   if (export_prim_id) {
      assert(outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] == AC_EXP_PARAM_UNDEFINED);
      outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = outinfo->param_exports++;
   }

   ctx->export_clip_dists = export_clip_dists;
   ctx->num_clip_distances = util_bitcount(outinfo->clip_dist_mask);
   ctx->num_cull_distances = util_bitcount(outinfo->cull_dist_mask);

   assert(ctx->num_clip_distances + ctx->num_cull_distances <= 8);

   if (ctx->num_clip_distances + ctx->num_cull_distances > 0)
      pos_written |= 1 << 2;
   if (ctx->num_clip_distances + ctx->num_cull_distances > 4)
      pos_written |= 1 << 3;

   outinfo->pos_exports = util_bitcount(pos_written);
}
void
setup_vs_variables(isel_context *ctx, nir_shader *nir)
{
   nir_foreach_shader_in_variable(variable, nir)
   {
      variable->data.driver_location = variable->data.location * 4;
   }
   nir_foreach_shader_out_variable(variable, nir)
   {
      if (ctx->stage == vertex_vs || ctx->stage == ngg_vertex_gs)
         variable->data.driver_location = variable->data.location * 4;

      assert(variable->data.location >= 0 && variable->data.location <= UINT8_MAX);
      ctx->output_drv_loc_to_var_slot[MESA_SHADER_VERTEX][variable->data.driver_location / 4] = variable->data.location;
   }

   if (ctx->stage == vertex_vs || ctx->stage == ngg_vertex_gs) {
      radv_vs_output_info *outinfo = &ctx->program->info->vs.outinfo;
      setup_vs_output_info(ctx, nir, outinfo->export_prim_id,
                           ctx->options->key.vs_common_out.export_clip_dists, outinfo);
   } else if (ctx->stage == vertex_ls) {
      ctx->tcs_num_inputs = ctx->program->info->vs.num_linked_outputs;
   }

   if (ctx->stage == ngg_vertex_gs && ctx->args->options->key.vs_common_out.export_prim_id) {
      /* We need to store the primitive IDs in LDS */
      unsigned lds_size = ctx->program->info->ngg_info.esgs_ring_size;
      ctx->program->config->lds_size = (lds_size + ctx->program->lds_alloc_granule - 1) /
                                       ctx->program->lds_alloc_granule;
   }
}
void setup_gs_variables(isel_context *ctx, nir_shader *nir)
{
   if (ctx->stage == vertex_geometry_gs || ctx->stage == tess_eval_geometry_gs)
      ctx->program->config->lds_size = ctx->program->info->gs_ring_info.lds_size; /* Already in units of the alloc granularity */

   nir_foreach_shader_out_variable(variable, nir) {
      variable->data.driver_location = variable->data.location * 4;
   }

   if (ctx->stage == vertex_geometry_gs)
      ctx->program->info->gs.es_type = MESA_SHADER_VERTEX;
   else if (ctx->stage == tess_eval_geometry_gs)
      ctx->program->info->gs.es_type = MESA_SHADER_TESS_EVAL;
}
void
setup_tcs_info(isel_context *ctx, nir_shader *nir, nir_shader *vs)
{
   /* When the number of TCS input and output vertices are the same (typically 3):
    * - There is an equal amount of LS and HS invocations
    * - In case of merged LSHS shaders, the LS and HS halves of the shader
    *   always process the exact same vertex. We can use this knowledge to optimize them.
    *
    * We don't set tcs_in_out_eq if the float controls differ because that might
    * involve different float modes for the same block and our optimizer
    * doesn't handle an instruction dominating another with a different mode.
    */
   ctx->tcs_in_out_eq =
      ctx->stage == vertex_tess_control_hs &&
      ctx->args->options->key.tcs.input_vertices == nir->info.tess.tcs_vertices_out &&
      vs->info.float_controls_execution_mode == nir->info.float_controls_execution_mode;

   if (ctx->tcs_in_out_eq) {
      ctx->tcs_temp_only_inputs = ~nir->info.tess.tcs_cross_invocation_inputs_read &
                                  ~nir->info.inputs_read_indirectly &
                                  nir->info.inputs_read;
   }

   ctx->tcs_num_inputs = ctx->program->info->tcs.num_linked_inputs;
   ctx->tcs_num_outputs = ctx->program->info->tcs.num_linked_outputs;
   ctx->tcs_num_patch_outputs = ctx->program->info->tcs.num_linked_patch_outputs;

   ctx->tcs_num_patches = get_tcs_num_patches(
                             ctx->args->options->key.tcs.input_vertices,
                             nir->info.tess.tcs_vertices_out,
                             ctx->tcs_num_inputs,
                             ctx->tcs_num_outputs,
                             ctx->tcs_num_patch_outputs,
                             ctx->args->options->tess_offchip_block_dw_size,
                             ctx->args->options->chip_class,
                             ctx->args->options->family);
   unsigned lds_size = calculate_tess_lds_size(
                          ctx->args->options->chip_class,
                          ctx->args->options->key.tcs.input_vertices,
                          nir->info.tess.tcs_vertices_out,
                          ctx->tcs_num_inputs,
                          ctx->tcs_num_patches,
                          ctx->tcs_num_outputs,
                          ctx->tcs_num_patch_outputs);

   ctx->args->shader_info->tcs.num_patches = ctx->tcs_num_patches;
   ctx->args->shader_info->tcs.num_lds_blocks = lds_size;
   ctx->program->config->lds_size = (lds_size + ctx->program->lds_alloc_granule - 1) /
                                    ctx->program->lds_alloc_granule;
}
void
setup_tcs_variables(isel_context *ctx, nir_shader *nir)
{
   nir_foreach_shader_out_variable(variable, nir) {
      assert(variable->data.location >= 0 && variable->data.location <= UINT8_MAX);

      if (variable->data.location == VARYING_SLOT_TESS_LEVEL_OUTER)
         ctx->tcs_tess_lvl_out_loc = variable->data.driver_location * 4u;
      else if (variable->data.location == VARYING_SLOT_TESS_LEVEL_INNER)
         ctx->tcs_tess_lvl_in_loc = variable->data.driver_location * 4u;

      if (variable->data.patch)
         ctx->output_tcs_patch_drv_loc_to_var_slot[variable->data.driver_location / 4] = variable->data.location;
      else
         ctx->output_drv_loc_to_var_slot[MESA_SHADER_TESS_CTRL][variable->data.driver_location / 4] = variable->data.location;
   }
}
void
setup_tes_variables(isel_context *ctx, nir_shader *nir)
{
   ctx->tcs_num_patches = ctx->args->options->key.tes.num_patches;
   ctx->tcs_num_outputs = ctx->program->info->tes.num_linked_inputs;

   nir_foreach_shader_out_variable(variable, nir) {
      if (ctx->stage == tess_eval_vs || ctx->stage == ngg_tess_eval_gs)
         variable->data.driver_location = variable->data.location * 4;
   }

   if (ctx->stage == tess_eval_vs || ctx->stage == ngg_tess_eval_gs) {
      radv_vs_output_info *outinfo = &ctx->program->info->tes.outinfo;
      setup_vs_output_info(ctx, nir, outinfo->export_prim_id,
                           ctx->options->key.vs_common_out.export_clip_dists, outinfo);
   }
}
void
setup_variables(isel_context *ctx, nir_shader *nir)
{
   switch (nir->info.stage) {
   case MESA_SHADER_FRAGMENT: {
      nir_foreach_shader_out_variable(variable, nir)
      {
         int idx = variable->data.location + variable->data.index;
         variable->data.driver_location = idx * 4;
      }
      break;
   }
   case MESA_SHADER_COMPUTE: {
      ctx->program->config->lds_size = (nir->info.cs.shared_size + ctx->program->lds_alloc_granule - 1) /
                                       ctx->program->lds_alloc_granule;
      break;
   }
   case MESA_SHADER_VERTEX: {
      setup_vs_variables(ctx, nir);
      break;
   }
   case MESA_SHADER_GEOMETRY: {
      setup_gs_variables(ctx, nir);
      break;
   }
   case MESA_SHADER_TESS_CTRL: {
      setup_tcs_variables(ctx, nir);
      break;
   }
   case MESA_SHADER_TESS_EVAL: {
      setup_tes_variables(ctx, nir);
      break;
   }
   default:
      unreachable("Unhandled shader stage.");
   }
}
unsigned
lower_bit_size_callback(const nir_alu_instr *alu, void *_)
{
   if (nir_op_is_vec(alu->op))
      return 0;

   unsigned bit_size = alu->dest.dest.ssa.bit_size;
   if (nir_alu_instr_is_comparison(alu))
      bit_size = nir_src_bit_size(alu->src[0].src);

   if (bit_size >= 32 || bit_size == 1)
      return 0;

   if (alu->op == nir_op_bcsel)
      return 0;

   const nir_op_info *info = &nir_op_infos[alu->op];

   if (info->is_conversion)
      return 0;

   bool is_integer = info->output_type & (nir_type_uint | nir_type_int);
   for (unsigned i = 0; is_integer && (i < info->num_inputs); i++)
      is_integer = info->input_types[i] & (nir_type_uint | nir_type_int);

   return is_integer ? 32 : 0;
}
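/* Example: a 16-bit iadd returns 32 here and gets widened to a 32-bit add
 * (the scalar ALU has no sub-dword operations), while 16-bit float ops,
 * conversions and bcsel return 0 and keep their original bit size. */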
void
setup_nir(isel_context *ctx, nir_shader *nir)
{
   /* the variable setup has to be done before lower_io / CSE */
   setup_variables(ctx, nir);

   /* optimize and lower memory operations */
   if (nir_lower_explicit_io(nir, nir_var_mem_global, nir_address_format_64bit_global)) {
      nir_opt_constant_folding(nir);
      nir_opt_cse(nir);
   }

   bool lower_to_scalar = false;
   bool lower_pack = false;
   nir_variable_mode robust_modes = (nir_variable_mode)0;

   if (ctx->options->robust_buffer_access) {
      robust_modes = (nir_variable_mode)(nir_var_mem_ubo |
                                         nir_var_mem_ssbo |
                                         nir_var_mem_global |
                                         nir_var_mem_push_const);
   }

   if (nir_opt_load_store_vectorize(nir,
                                    (nir_variable_mode)(nir_var_mem_ssbo | nir_var_mem_ubo |
                                                        nir_var_mem_push_const | nir_var_mem_shared |
                                                        nir_var_mem_global),
                                    mem_vectorize_callback, robust_modes)) {
      lower_to_scalar = true;
      lower_pack = true;
   }
   if (nir->info.stage != MESA_SHADER_COMPUTE)
      nir_lower_io(nir, (nir_variable_mode)(nir_var_shader_in | nir_var_shader_out), type_size, (nir_lower_io_options)0);

   lower_to_scalar |= nir_opt_shrink_vectors(nir);

   if (lower_to_scalar)
      nir_lower_alu_to_scalar(nir, NULL, NULL);
   if (lower_pack)
      nir_lower_pack(nir);

   /* lower ALU operations */
   nir_lower_int64(nir);

   if (nir_lower_bit_size(nir, lower_bit_size_callback, NULL))
      nir_copy_prop(nir); /* allow nir_opt_idiv_const() to optimize lowered divisions */

   nir_opt_idiv_const(nir, 32);
   nir_lower_idiv(nir, nir_lower_idiv_precise);

   /* optimize the lowered ALU operations */
   bool more_algebraic = true;
   while (more_algebraic) {
      more_algebraic = false;
      NIR_PASS_V(nir, nir_copy_prop);
      NIR_PASS_V(nir, nir_opt_dce);
      NIR_PASS_V(nir, nir_opt_constant_folding);
      NIR_PASS(more_algebraic, nir, nir_opt_algebraic);
   }

   /* Do late algebraic optimization to turn add(a, neg(b)) back into
    * subs, then the mandatory cleanup after algebraic. Note that it may
    * produce fnegs, and if so then we need to keep running to squash
    * fneg(fneg(a)).
    */
   bool more_late_algebraic = true;
   while (more_late_algebraic) {
      more_late_algebraic = false;
      NIR_PASS(more_late_algebraic, nir, nir_opt_algebraic_late);
      NIR_PASS_V(nir, nir_opt_constant_folding);
      NIR_PASS_V(nir, nir_copy_prop);
      NIR_PASS_V(nir, nir_opt_dce);
      NIR_PASS_V(nir, nir_opt_cse);
   }

   /* cleanup passes */
   nir_lower_load_const_to_scalar(nir);
   nir_move_options move_opts = (nir_move_options)(
      nir_move_const_undef | nir_move_load_ubo | nir_move_load_input |
      nir_move_comparisons | nir_move_copies);
   nir_opt_sink(nir, move_opts);
   nir_opt_move(nir, move_opts);
   nir_convert_to_lcssa(nir, true, false);
   nir_lower_phis_to_scalar(nir);

   nir_function_impl *func = nir_shader_get_entrypoint(nir);
   nir_index_ssa_defs(func);
}
void
setup_xnack(Program *program)
{
   switch (program->family) {
   /* GFX8 APUs */
   case CHIP_CARRIZO:
   case CHIP_STONEY:
   /* GFX9 APUs */
   case CHIP_RAVEN:
   case CHIP_RAVEN2:
   case CHIP_RENOIR:
      program->xnack_enabled = true;
      break;
   default:
      break;
   }
}
isel_context
setup_isel_context(Program* program,
                   unsigned shader_count,
                   struct nir_shader *const *shaders,
                   ac_shader_config* config,
                   struct radv_shader_args *args,
                   bool is_gs_copy_shader)
{
   Stage stage = 0;
   for (unsigned i = 0; i < shader_count; i++) {
      switch (shaders[i]->info.stage) {
      case MESA_SHADER_VERTEX:
         stage |= sw_vs;
         break;
      case MESA_SHADER_TESS_CTRL:
         stage |= sw_tcs;
         break;
      case MESA_SHADER_TESS_EVAL:
         stage |= sw_tes;
         break;
      case MESA_SHADER_GEOMETRY:
         stage |= is_gs_copy_shader ? sw_gs_copy : sw_gs;
         break;
      case MESA_SHADER_FRAGMENT:
         stage |= sw_fs;
         break;
      case MESA_SHADER_COMPUTE:
         stage |= sw_cs;
         break;
      default:
         unreachable("Shader stage not implemented");
      }
   }

   bool gfx9_plus = args->options->chip_class >= GFX9;
   bool ngg = args->shader_info->is_ngg && args->options->chip_class >= GFX10;
   if (stage == sw_vs && args->shader_info->vs.as_es && !ngg)
      stage |= hw_es;
   else if (stage == sw_vs && !args->shader_info->vs.as_ls && !ngg)
      stage |= hw_vs;
   else if (stage == sw_vs && ngg)
      stage |= hw_ngg_gs; /* GFX10/NGG: VS without GS uses the HW GS stage */
   else if (stage == sw_gs)
      stage |= hw_gs;
   else if (stage == sw_fs)
      stage |= hw_fs;
   else if (stage == sw_cs)
      stage |= hw_cs;
   else if (stage == sw_gs_copy)
      stage |= hw_vs;
   else if (stage == (sw_vs | sw_gs) && gfx9_plus && !ngg)
      stage |= hw_gs; /* GFX9: VS+GS merged into a GS (and GFX10/legacy) */
   else if (stage == sw_vs && args->shader_info->vs.as_ls)
      stage |= hw_ls; /* GFX6-8: VS is a Local Shader, when tessellation is used */
   else if (stage == sw_tcs)
      stage |= hw_hs; /* GFX6-8: TCS is a Hull Shader */
   else if (stage == (sw_vs | sw_tcs))
      stage |= hw_hs; /* GFX9-10: VS+TCS merged into a Hull Shader */
   else if (stage == sw_tes && !args->shader_info->tes.as_es && !ngg)
      stage |= hw_vs; /* GFX6-9: TES without GS uses the HW VS stage (and GFX10/legacy) */
   else if (stage == sw_tes && !args->shader_info->tes.as_es && ngg)
      stage |= hw_ngg_gs; /* GFX10/NGG: TES without GS uses the HW GS stage */
   else if (stage == sw_tes && args->shader_info->tes.as_es && !ngg)
      stage |= hw_es; /* GFX6-8: TES is an Export Shader */
   else if (stage == (sw_tes | sw_gs) && gfx9_plus && !ngg)
      stage |= hw_gs; /* GFX9: TES+GS merged into a GS (and GFX10/legacy) */
   else
      unreachable("Shader stage not implemented");

   init_program(program, stage, args->shader_info,
                args->options->chip_class, args->options->family, config);

   isel_context ctx = {};
   ctx.program = program;
   ctx.args = args;
   ctx.options = args->options;
   ctx.stage = program->stage;

   /* TODO: Check if we need to adjust min_waves for unknown workgroup sizes. */
   if (program->stage & (hw_vs | hw_fs)) {
      /* PS and legacy VS have separate waves, no workgroups */
      program->workgroup_size = program->wave_size;
   } else if (program->stage == compute_cs) {
      /* CS sets the workgroup size explicitly */
      unsigned* bsize = program->info->cs.block_size;
      program->workgroup_size = bsize[0] * bsize[1] * bsize[2];
   } else if ((program->stage & hw_es) || program->stage == geometry_gs) {
      /* Unmerged ESGS operate in workgroups if on-chip GS (LDS rings) are enabled on GFX7-8 (not implemented in Mesa) */
      program->workgroup_size = program->wave_size;
   } else if (program->stage & hw_gs) {
      /* If on-chip GS (LDS rings) are enabled on GFX9 or later, merged GS operates in workgroups */
      assert(program->chip_class >= GFX9);
      uint32_t es_verts_per_subgrp = G_028A44_ES_VERTS_PER_SUBGRP(program->info->gs_ring_info.vgt_gs_onchip_cntl);
      uint32_t gs_instr_prims_in_subgrp = G_028A44_GS_INST_PRIMS_IN_SUBGRP(program->info->gs_ring_info.vgt_gs_onchip_cntl);
      uint32_t workgroup_size = MAX2(es_verts_per_subgrp, gs_instr_prims_in_subgrp);
      program->workgroup_size = MAX2(MIN2(workgroup_size, 256), 1);
   } else if (program->stage == vertex_ls) {
      /* Unmerged LS operates in workgroups */
      program->workgroup_size = UINT_MAX; /* TODO: probably tcs_num_patches * tcs_vertices_in, but those are not plumbed to ACO for LS */
   } else if (program->stage == tess_control_hs) {
      /* Unmerged HS operates in workgroups, size is determined by the output vertices */
      setup_tcs_info(&ctx, shaders[0], NULL);
      program->workgroup_size = ctx.tcs_num_patches * shaders[0]->info.tess.tcs_vertices_out;
   } else if (program->stage == vertex_tess_control_hs) {
      /* Merged LSHS operates in workgroups, but can still have a different number of LS and HS invocations */
      setup_tcs_info(&ctx, shaders[1], shaders[0]);
      program->workgroup_size = ctx.tcs_num_patches * MAX2(shaders[1]->info.tess.tcs_vertices_out, ctx.args->options->key.tcs.input_vertices);
   } else if (program->stage & hw_ngg_gs) {
      /* TODO: Calculate workgroup size of NGG shaders. */
      program->workgroup_size = UINT_MAX;
   } else {
      unreachable("Unsupported shader stage.");
   }

   calc_min_waves(program);
   program->vgpr_limit = get_addr_vgpr_from_waves(program, program->min_waves);
   program->sgpr_limit = get_addr_sgpr_from_waves(program, program->min_waves);

   unsigned scratch_size = 0;
   if (program->stage == gs_copy_vs) {
      assert(shader_count == 1);
      setup_vs_output_info(&ctx, shaders[0], false, true, &args->shader_info->vs.outinfo);
   } else {
      for (unsigned i = 0; i < shader_count; i++) {
         nir_shader *nir = shaders[i];
         setup_nir(&ctx, nir);
      }

      for (unsigned i = 0; i < shader_count; i++)
         scratch_size = std::max(scratch_size, shaders[i]->scratch_size);
   }

   ctx.program->config->scratch_bytes_per_wave = align(scratch_size * ctx.program->wave_size, 1024);

   ctx.block = ctx.program->create_and_insert_block();
   ctx.block->loop_nest_depth = 0;
   ctx.block->kind = block_kind_top_level;

   setup_xnack(program);
   program->sram_ecc_enabled = args->options->family == CHIP_ARCTURUS;
   /* apparently gfx702 also has fast v_fma_f32 but I can't find a family for that */
   program->has_fast_fma32 = program->chip_class >= GFX9;
   if (args->options->family == CHIP_TAHITI || args->options->family == CHIP_CARRIZO || args->options->family == CHIP_HAWAII)
      program->has_fast_fma32 = true;