/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <array>
#include <unordered_map>

#include "aco_ir.h"
#include "nir.h"
#include "nir_control_flow.h"
#include "vulkan/radv_shader.h"
#include "vulkan/radv_descriptor_set.h"
#include "vulkan/radv_shader_args.h"
#include "sid.h"
#include "ac_exp_param.h"
#include "ac_shader_util.h"

#include "util/u_math.h"

#define MAX_INLINE_PUSH_CONSTS 8

namespace aco {
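/* Pre-isel setup for ACO: this file builds the isel_context that the
 * instruction selection pass consumes. It runs divergence analysis,
 * sanitizes NIR control flow, assigns a register class to every SSA
 * definition and configures per-stage program state. */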
struct shader_io_state {
   uint8_t mask[VARYING_SLOT_VAR31 + 1];
   Temp temps[VARYING_SLOT_VAR31 + 1][4];
};

struct isel_context {
   const struct radv_nir_compiler_options *options;
   struct radv_shader_args *args;
   Program *program;
   nir_shader *shader;
   uint32_t constant_data_offset;
   Block *block;
   bool *divergent_vals;
   std::unique_ptr<Temp[]> allocated;
   std::unordered_map<unsigned, std::array<Temp, NIR_MAX_VEC_COMPONENTS>> allocated_vec;
   Stage stage;
   bool has_gfx10_wave64_bpermute = false;

   struct {
      bool has_branch;
      uint16_t loop_nest_depth = 0;
      struct {
         unsigned header_idx;
         Block* exit;
         bool has_divergent_continue = false;
         bool has_divergent_branch = false;
      } parent_loop;
      struct {
         bool is_divergent = false;
      } parent_if;
      bool exec_potentially_empty_discard = false; /* set to false when loop_nest_depth==0 && parent_if.is_divergent==false */
      uint16_t exec_potentially_empty_break_depth = UINT16_MAX;
      /* Set to false when loop_nest_depth==exec_potentially_empty_break_depth
       * and parent_if.is_divergent==false. Called _break but it's also used for
       * loop continues. */
      bool exec_potentially_empty_break = false;
      std::unique_ptr<unsigned[]> nir_to_aco; /* NIR block index to ACO block index */
   } cf_info;

   Temp arg_temps[AC_MAX_ARGS];

   /* FS inputs */
   Temp persp_centroid, linear_centroid;

   /* GS inputs */
   Temp gs_wave_id;

   /* gathered information */
   uint64_t input_masks[MESA_SHADER_COMPUTE];
   uint64_t output_masks[MESA_SHADER_COMPUTE];

   /* VS output information */
   bool export_clip_dists;
   unsigned num_clip_distances;
   unsigned num_cull_distances;

   /* tessellation information */
   unsigned tcs_tess_lvl_out_loc;
   unsigned tcs_tess_lvl_in_loc;
   uint32_t tcs_num_inputs;
   uint32_t tcs_num_patches;
   bool tcs_in_out_eq = false;

   /* I/O information */
   shader_io_state inputs;
   shader_io_state outputs;
};
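/* Arguments are materialized by the p_startpgm pseudo-instruction (see
 * add_startpgm() below); get_arg() merely looks up the temporary that was
 * created for a given ac_arg. */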
Temp
get_arg(isel_context *ctx, struct ac_arg arg)
{
   return ctx->arg_temps[arg.arg_index];
}
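/* Map a NIR barycentric intrinsic to the SPI_PS_INPUT_ENA bit that makes the
 * hardware load the corresponding barycentric (i, j) pair into input VGPRs:
 * smooth/none interpolation uses the perspective-correct coordinates,
 * noperspective uses the linear ones. */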
unsigned get_interp_input(nir_intrinsic_op intrin, enum glsl_interp_mode interp)
{
   switch (interp) {
   case INTERP_MODE_SMOOTH:
   case INTERP_MODE_NONE:
      if (intrin == nir_intrinsic_load_barycentric_pixel ||
          intrin == nir_intrinsic_load_barycentric_at_sample ||
          intrin == nir_intrinsic_load_barycentric_at_offset)
         return S_0286CC_PERSP_CENTER_ENA(1);
      else if (intrin == nir_intrinsic_load_barycentric_centroid)
         return S_0286CC_PERSP_CENTROID_ENA(1);
      else if (intrin == nir_intrinsic_load_barycentric_sample)
         return S_0286CC_PERSP_SAMPLE_ENA(1);
      break;
   case INTERP_MODE_NOPERSPECTIVE:
      if (intrin == nir_intrinsic_load_barycentric_pixel)
         return S_0286CC_LINEAR_CENTER_ENA(1);
      else if (intrin == nir_intrinsic_load_barycentric_centroid)
         return S_0286CC_LINEAR_CENTROID_ENA(1);
      else if (intrin == nir_intrinsic_load_barycentric_sample)
         return S_0286CC_LINEAR_SAMPLE_ENA(1);
      break;
   default:
      break;
   }
   return 0;
}
/* If one side of a divergent IF ends in a branch and the other doesn't, we
 * might have to emit the contents of the side without the branch at the merge
 * block instead. This is so that we can use any SGPR live-out of the side
 * without the branch without creating a linear phi in the invert or merge block. */
bool
sanitize_if(nir_function_impl *impl, bool *divergent, nir_if *nif)
{
   if (!divergent[nif->condition.ssa->index])
      return false;

   nir_block *then_block = nir_if_last_then_block(nif);
   nir_block *else_block = nir_if_last_else_block(nif);
   bool then_jump = nir_block_ends_in_jump(then_block) || nir_block_is_unreachable(then_block);
   bool else_jump = nir_block_ends_in_jump(else_block) || nir_block_is_unreachable(else_block);
   if (then_jump == else_jump)
      return false;

   /* If the continue-from block is empty then return as there is nothing to
    * move. */
   if (nir_cf_list_is_empty_block(else_jump ? &nif->then_list : &nif->else_list))
      return false;

   /* Even though this if statement has a jump on one side, we may still have
    * phis afterwards. Single-source phis can be produced by loop unrolling
    * or dead control-flow passes and are perfectly legal. Run a quick phi
    * removal on the block after the if to clean up any such phis. */
   nir_opt_remove_phis_block(nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node)));

   /* Finally, move the continue-from branch after the if-statement. */
   nir_block *last_continue_from_blk = else_jump ? then_block : else_block;
   nir_block *first_continue_from_blk = else_jump ?
      nir_if_first_then_block(nif) : nir_if_first_else_block(nif);

   nir_cf_list tmp;
   nir_cf_extract(&tmp, nir_before_block(first_continue_from_blk),
                        nir_after_block(last_continue_from_blk));
   nir_cf_reinsert(&tmp, nir_after_cf_node(&nif->cf_node));

   /* nir_cf_extract() invalidates dominance metadata, but it should still be
    * correct because of the specific type of transformation we did. Block
    * indices are not valid except for block_0's, which is all we care about for
    * nir_block_is_unreachable(). */
   impl->valid_metadata =
      (nir_metadata)(impl->valid_metadata | nir_metadata_dominance | nir_metadata_block_index);

   return true;
}
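/* Recursively apply sanitize_if() to every if in the CF list, innermost
 * first, so that a moved branch cannot invalidate an enclosing if that has
 * yet to be visited. */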
bool
sanitize_cf_list(nir_function_impl *impl, bool *divergent, struct exec_list *cf_list)
{
   bool progress = false;
   foreach_list_typed(nir_cf_node, cf_node, node, cf_list) {
      switch (cf_node->type) {
      case nir_cf_node_block:
         break;
      case nir_cf_node_if: {
         nir_if *nif = nir_cf_node_as_if(cf_node);
         progress |= sanitize_cf_list(impl, divergent, &nif->then_list);
         progress |= sanitize_cf_list(impl, divergent, &nif->else_list);
         progress |= sanitize_if(impl, divergent, nif);
         break;
      }
      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(cf_node);
         progress |= sanitize_cf_list(impl, divergent, &loop->body);
         break;
      }
      case nir_cf_node_function:
         unreachable("Invalid cf type");
      }
   }

   return progress;
}
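/* Assign a register class (SGPR or VGPR, sized in dwords, or in lane-mask
 * units for booleans) to every NIR SSA definition. Uniform values go to
 * SGPRs, divergent ones to VGPRs; the choice is driven by
 * nir_divergence_analysis() plus the per-opcode rules below. */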
void init_context(isel_context *ctx, nir_shader *shader)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(shader);
   unsigned lane_mask_size = ctx->program->lane_mask.size();

   ctx->shader = shader;
   ctx->divergent_vals = nir_divergence_analysis(shader, nir_divergence_view_index_uniform);

   /* sanitize control flow */
   nir_metadata_require(impl, nir_metadata_dominance);
   sanitize_cf_list(impl, ctx->divergent_vals, &impl->body);
   nir_metadata_preserve(impl, (nir_metadata)~nir_metadata_block_index);

   /* we'll need this for isel */
   nir_metadata_require(impl, nir_metadata_block_index);

   if (!(ctx->stage & sw_gs_copy) && ctx->options->dump_preoptir) {
      fprintf(stderr, "NIR shader before instruction selection:\n");
      nir_print_shader(shader, stderr);
   }

   std::unique_ptr<Temp[]> allocated{new Temp[impl->ssa_alloc]()};

   unsigned spi_ps_inputs = 0;

   std::unique_ptr<unsigned[]> nir_to_aco{new unsigned[impl->num_blocks]()};
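   /* Iterate to a fixed point: a phi whose sources are not all known yet may
    * get a preliminary class, so repeat the walk until no allocation
    * changes. */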
   bool done = false;
   while (!done) {
      done = true;
      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            switch(instr->type) {
            case nir_instr_type_alu: {
               nir_alu_instr *alu_instr = nir_instr_as_alu(instr);
               unsigned size = alu_instr->dest.dest.ssa.num_components;
               if (alu_instr->dest.dest.ssa.bit_size == 64)
                  size *= 2;
               RegType type = RegType::sgpr;
               switch(alu_instr->op) {
               case nir_op_fmul:
               case nir_op_fadd:
               case nir_op_fsub:
               case nir_op_fmax:
               case nir_op_fmin:
               case nir_op_fmax3:
               case nir_op_fmin3:
               case nir_op_fmed3:
               case nir_op_fneg:
               case nir_op_fabs:
               case nir_op_fsat:
               case nir_op_fsign:
               case nir_op_frcp:
               case nir_op_frsq:
               case nir_op_fsqrt:
               case nir_op_fexp2:
               case nir_op_flog2:
               case nir_op_ffract:
               case nir_op_ffloor:
               case nir_op_fceil:
               case nir_op_ftrunc:
               case nir_op_fround_even:
               case nir_op_fsin:
               case nir_op_fcos:
               case nir_op_f2f32:
               case nir_op_f2f64:
               case nir_op_u2f32:
               case nir_op_u2f64:
               case nir_op_i2f32:
               case nir_op_i2f64:
               case nir_op_pack_half_2x16:
               case nir_op_unpack_half_2x16_split_x:
               case nir_op_unpack_half_2x16_split_y:
               case nir_op_fddx:
               case nir_op_fddy:
               case nir_op_fddx_fine:
               case nir_op_fddy_fine:
               case nir_op_fddx_coarse:
               case nir_op_fddy_coarse:
               case nir_op_fquantize2f16:
               case nir_op_ldexp:
               case nir_op_frexp_sig:
               case nir_op_frexp_exp:
               case nir_op_cube_face_index:
               case nir_op_cube_face_coord:
                  type = RegType::vgpr;
                  break;
               case nir_op_flt:
               case nir_op_fge:
               case nir_op_feq:
               case nir_op_fne:
               case nir_op_ilt:
               case nir_op_ige:
               case nir_op_ult:
               case nir_op_uge:
               case nir_op_ieq:
               case nir_op_ine:
               case nir_op_i2b1:
                  size = lane_mask_size;
                  break;
               case nir_op_f2i64:
               case nir_op_f2u64:
               case nir_op_b2i32:
               case nir_op_b2f32:
               case nir_op_f2i32:
               case nir_op_f2u32:
                  type = ctx->divergent_vals[alu_instr->dest.dest.ssa.index] ? RegType::vgpr : RegType::sgpr;
                  break;
               case nir_op_bcsel:
                  if (alu_instr->dest.dest.ssa.bit_size == 1) {
                     size = lane_mask_size;
                  } else {
                     if (ctx->divergent_vals[alu_instr->dest.dest.ssa.index]) {
                        type = RegType::vgpr;
                     } else {
                        if (allocated[alu_instr->src[1].src.ssa->index].type() == RegType::vgpr ||
                            allocated[alu_instr->src[2].src.ssa->index].type() == RegType::vgpr) {
                           type = RegType::vgpr;
                        }
                     }
                     if (alu_instr->src[1].src.ssa->num_components == 1 && alu_instr->src[2].src.ssa->num_components == 1) {
                        assert(allocated[alu_instr->src[1].src.ssa->index].size() == allocated[alu_instr->src[2].src.ssa->index].size());
                        size = allocated[alu_instr->src[1].src.ssa->index].size();
                     }
                  }
                  break;
               case nir_op_mov:
                  if (alu_instr->dest.dest.ssa.bit_size == 1) {
                     size = lane_mask_size;
                  } else {
                     type = ctx->divergent_vals[alu_instr->dest.dest.ssa.index] ? RegType::vgpr : RegType::sgpr;
                  }
                  break;
               default:
                  if (alu_instr->dest.dest.ssa.bit_size == 1) {
                     size = lane_mask_size;
                  } else {
                     for (unsigned i = 0; i < nir_op_infos[alu_instr->op].num_inputs; i++) {
                        if (allocated[alu_instr->src[i].src.ssa->index].type() == RegType::vgpr)
                           type = RegType::vgpr;
                     }
                  }
                  break;
               }
               allocated[alu_instr->dest.dest.ssa.index] = Temp(0, RegClass(type, size));
               break;
            }
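            /* Constants are uniform by definition, so they start out in
             * SGPRs; 1-bit values occupy a full lane mask. */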
            case nir_instr_type_load_const: {
               unsigned size = nir_instr_as_load_const(instr)->def.num_components;
               if (nir_instr_as_load_const(instr)->def.bit_size == 64)
                  size *= 2;
               else if (nir_instr_as_load_const(instr)->def.bit_size == 1)
                  size *= lane_mask_size;
               allocated[nir_instr_as_load_const(instr)->def.index] = Temp(0, RegClass(RegType::sgpr, size));
               break;
            }
            case nir_instr_type_intrinsic: {
               nir_intrinsic_instr *intrinsic = nir_instr_as_intrinsic(instr);
               if (!nir_intrinsic_infos[intrinsic->intrinsic].has_dest)
                  break;
               unsigned size = intrinsic->dest.ssa.num_components;
               if (intrinsic->dest.ssa.bit_size == 64)
                  size *= 2;
               RegType type = RegType::sgpr;
               switch(intrinsic->intrinsic) {
               case nir_intrinsic_load_push_constant:
               case nir_intrinsic_load_work_group_id:
               case nir_intrinsic_load_num_work_groups:
               case nir_intrinsic_load_subgroup_id:
               case nir_intrinsic_load_num_subgroups:
               case nir_intrinsic_load_first_vertex:
               case nir_intrinsic_load_base_instance:
               case nir_intrinsic_get_buffer_size:
               case nir_intrinsic_vote_all:
               case nir_intrinsic_vote_any:
               case nir_intrinsic_read_first_invocation:
               case nir_intrinsic_read_invocation:
               case nir_intrinsic_first_invocation:
                  type = RegType::sgpr;
                  if (intrinsic->dest.ssa.bit_size == 1)
                     size = lane_mask_size;
                  break;
               case nir_intrinsic_ballot:
                  type = RegType::sgpr;
                  break;
               case nir_intrinsic_load_sample_id:
               case nir_intrinsic_load_sample_mask_in:
               case nir_intrinsic_load_input:
               case nir_intrinsic_load_output:
               case nir_intrinsic_load_input_vertex:
               case nir_intrinsic_load_per_vertex_input:
               case nir_intrinsic_load_per_vertex_output:
               case nir_intrinsic_load_vertex_id:
               case nir_intrinsic_load_vertex_id_zero_base:
               case nir_intrinsic_load_barycentric_sample:
               case nir_intrinsic_load_barycentric_pixel:
               case nir_intrinsic_load_barycentric_model:
               case nir_intrinsic_load_barycentric_centroid:
               case nir_intrinsic_load_barycentric_at_sample:
               case nir_intrinsic_load_barycentric_at_offset:
               case nir_intrinsic_load_interpolated_input:
               case nir_intrinsic_load_frag_coord:
               case nir_intrinsic_load_sample_pos:
               case nir_intrinsic_load_layer_id:
               case nir_intrinsic_load_local_invocation_id:
               case nir_intrinsic_load_local_invocation_index:
               case nir_intrinsic_load_subgroup_invocation:
               case nir_intrinsic_load_tess_coord:
               case nir_intrinsic_write_invocation_amd:
               case nir_intrinsic_mbcnt_amd:
               case nir_intrinsic_load_instance_id:
               case nir_intrinsic_ssbo_atomic_add:
               case nir_intrinsic_ssbo_atomic_imin:
               case nir_intrinsic_ssbo_atomic_umin:
               case nir_intrinsic_ssbo_atomic_imax:
               case nir_intrinsic_ssbo_atomic_umax:
               case nir_intrinsic_ssbo_atomic_and:
               case nir_intrinsic_ssbo_atomic_or:
               case nir_intrinsic_ssbo_atomic_xor:
               case nir_intrinsic_ssbo_atomic_exchange:
               case nir_intrinsic_ssbo_atomic_comp_swap:
               case nir_intrinsic_global_atomic_add:
               case nir_intrinsic_global_atomic_imin:
               case nir_intrinsic_global_atomic_umin:
               case nir_intrinsic_global_atomic_imax:
               case nir_intrinsic_global_atomic_umax:
               case nir_intrinsic_global_atomic_and:
               case nir_intrinsic_global_atomic_or:
               case nir_intrinsic_global_atomic_xor:
               case nir_intrinsic_global_atomic_exchange:
               case nir_intrinsic_global_atomic_comp_swap:
               case nir_intrinsic_image_deref_atomic_add:
               case nir_intrinsic_image_deref_atomic_umin:
               case nir_intrinsic_image_deref_atomic_imin:
               case nir_intrinsic_image_deref_atomic_umax:
               case nir_intrinsic_image_deref_atomic_imax:
               case nir_intrinsic_image_deref_atomic_and:
               case nir_intrinsic_image_deref_atomic_or:
               case nir_intrinsic_image_deref_atomic_xor:
               case nir_intrinsic_image_deref_atomic_exchange:
               case nir_intrinsic_image_deref_atomic_comp_swap:
               case nir_intrinsic_image_deref_size:
               case nir_intrinsic_shared_atomic_add:
               case nir_intrinsic_shared_atomic_imin:
               case nir_intrinsic_shared_atomic_umin:
               case nir_intrinsic_shared_atomic_imax:
               case nir_intrinsic_shared_atomic_umax:
               case nir_intrinsic_shared_atomic_and:
               case nir_intrinsic_shared_atomic_or:
               case nir_intrinsic_shared_atomic_xor:
               case nir_intrinsic_shared_atomic_exchange:
               case nir_intrinsic_shared_atomic_comp_swap:
               case nir_intrinsic_load_scratch:
               case nir_intrinsic_load_invocation_id:
               case nir_intrinsic_load_primitive_id:
                  type = RegType::vgpr;
                  break;
               case nir_intrinsic_shuffle:
               case nir_intrinsic_quad_broadcast:
               case nir_intrinsic_quad_swap_horizontal:
               case nir_intrinsic_quad_swap_vertical:
               case nir_intrinsic_quad_swap_diagonal:
               case nir_intrinsic_quad_swizzle_amd:
               case nir_intrinsic_masked_swizzle_amd:
               case nir_intrinsic_inclusive_scan:
               case nir_intrinsic_exclusive_scan:
                  if (intrinsic->dest.ssa.bit_size == 1) {
                     size = lane_mask_size;
                     type = RegType::sgpr;
                  } else if (!ctx->divergent_vals[intrinsic->dest.ssa.index]) {
                     type = RegType::sgpr;
                  } else {
                     type = RegType::vgpr;
                  }
                  break;
               case nir_intrinsic_load_view_index:
                  type = ctx->stage == fragment_fs ? RegType::vgpr : RegType::sgpr;
                  break;
               case nir_intrinsic_load_front_face:
               case nir_intrinsic_load_helper_invocation:
               case nir_intrinsic_is_helper_invocation:
                  type = RegType::sgpr;
                  size = lane_mask_size;
                  break;
               case nir_intrinsic_reduce:
                  if (intrinsic->dest.ssa.bit_size == 1) {
                     size = lane_mask_size;
                     type = RegType::sgpr;
                  } else if (!ctx->divergent_vals[intrinsic->dest.ssa.index]) {
                     type = RegType::sgpr;
                  } else {
                     type = RegType::vgpr;
                  }
                  break;
               case nir_intrinsic_load_ubo:
               case nir_intrinsic_load_ssbo:
               case nir_intrinsic_load_global:
               case nir_intrinsic_vulkan_resource_index:
                  type = ctx->divergent_vals[intrinsic->dest.ssa.index] ? RegType::vgpr : RegType::sgpr;
                  break;
               /* due to copy propagation, the swizzled imov is removed if num dest components == 1 */
               case nir_intrinsic_load_shared:
                  if (ctx->divergent_vals[intrinsic->dest.ssa.index])
                     type = RegType::vgpr;
                  else
                     type = RegType::sgpr;
                  break;
               default:
                  for (unsigned i = 0; i < nir_intrinsic_infos[intrinsic->intrinsic].num_srcs; i++) {
                     if (allocated[intrinsic->src[i].ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                  }
                  break;
               }
               allocated[intrinsic->dest.ssa.index] = Temp(0, RegClass(type, size));
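               /* Record which PS inputs this intrinsic needs, so that the
                * SPI_PS_INPUT_ENA/ADDR register fields can be derived below. */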
               switch(intrinsic->intrinsic) {
               case nir_intrinsic_load_barycentric_sample:
               case nir_intrinsic_load_barycentric_pixel:
               case nir_intrinsic_load_barycentric_centroid:
               case nir_intrinsic_load_barycentric_at_sample:
               case nir_intrinsic_load_barycentric_at_offset: {
                  glsl_interp_mode mode = (glsl_interp_mode)nir_intrinsic_interp_mode(intrinsic);
                  spi_ps_inputs |= get_interp_input(intrinsic->intrinsic, mode);
                  break;
               }
               case nir_intrinsic_load_barycentric_model:
                  spi_ps_inputs |= S_0286CC_PERSP_PULL_MODEL_ENA(1);
                  break;
               case nir_intrinsic_load_front_face:
                  spi_ps_inputs |= S_0286CC_FRONT_FACE_ENA(1);
                  break;
               case nir_intrinsic_load_frag_coord:
               case nir_intrinsic_load_sample_pos: {
                  uint8_t mask = nir_ssa_def_components_read(&intrinsic->dest.ssa);
                  for (unsigned i = 0; i < 4; i++) {
                     if (mask & (1 << i))
                        spi_ps_inputs |= S_0286CC_POS_X_FLOAT_ENA(1) << i;
                  }
                  break;
               }
               case nir_intrinsic_load_sample_id:
                  spi_ps_inputs |= S_0286CC_ANCILLARY_ENA(1);
                  break;
               case nir_intrinsic_load_sample_mask_in:
                  spi_ps_inputs |= S_0286CC_ANCILLARY_ENA(1);
                  spi_ps_inputs |= S_0286CC_SAMPLE_COVERAGE_ENA(1);
                  break;
               default:
                  break;
               }
               break;
            }
            case nir_instr_type_tex: {
               nir_tex_instr* tex = nir_instr_as_tex(instr);
               unsigned size = tex->dest.ssa.num_components;

               if (tex->dest.ssa.bit_size == 64)
                  size *= 2;
               if (tex->op == nir_texop_texture_samples)
                  assert(!ctx->divergent_vals[tex->dest.ssa.index]);
               if (ctx->divergent_vals[tex->dest.ssa.index])
                  allocated[tex->dest.ssa.index] = Temp(0, RegClass(RegType::vgpr, size));
               else
                  allocated[tex->dest.ssa.index] = Temp(0, RegClass(RegType::sgpr, size));
               break;
            }
            case nir_instr_type_parallel_copy: {
               nir_foreach_parallel_copy_entry(entry, nir_instr_as_parallel_copy(instr)) {
                  allocated[entry->dest.ssa.index] = allocated[entry->src.ssa->index];
               }
               break;
            }
            case nir_instr_type_ssa_undef: {
               unsigned size = nir_instr_as_ssa_undef(instr)->def.num_components;
               if (nir_instr_as_ssa_undef(instr)->def.bit_size == 64)
                  size *= 2;
               else if (nir_instr_as_ssa_undef(instr)->def.bit_size == 1)
                  size *= lane_mask_size;
               allocated[nir_instr_as_ssa_undef(instr)->def.index] = Temp(0, RegClass(RegType::sgpr, size));
               break;
            }
            case nir_instr_type_phi: {
               nir_phi_instr* phi = nir_instr_as_phi(instr);
               RegType type;
               unsigned size = phi->dest.ssa.num_components;

               if (phi->dest.ssa.bit_size == 1) {
                  assert(size == 1 && "multiple components not yet supported on boolean phis.");
                  type = RegType::sgpr;
                  size *= lane_mask_size;
                  allocated[phi->dest.ssa.index] = Temp(0, RegClass(type, size));
                  break;
               }

               if (ctx->divergent_vals[phi->dest.ssa.index]) {
                  type = RegType::vgpr;
               } else {
                  type = RegType::sgpr;
                  nir_foreach_phi_src (src, phi) {
                     if (allocated[src->src.ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                     if (allocated[src->src.ssa->index].type() == RegType::none)
                        done = false;
                  }
               }

               size *= phi->dest.ssa.bit_size == 64 ? 2 : 1;
               RegClass rc = RegClass(type, size);
               if (rc != allocated[phi->dest.ssa.index].regClass()) {
                  done = false;
               } else {
                  nir_foreach_phi_src(src, phi)
                     assert(allocated[src->src.ssa->index].size() == rc.size());
               }
               allocated[phi->dest.ssa.index] = Temp(0, rc);
               break;
            }
            default:
               break;
            }
         }
      }
   }
   if (G_0286CC_POS_W_FLOAT_ENA(spi_ps_inputs)) {
      /* If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be enabled too */
      spi_ps_inputs |= S_0286CC_PERSP_CENTER_ENA(1);
   }

   if (!(spi_ps_inputs & 0x7F)) {
      /* At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled */
      spi_ps_inputs |= S_0286CC_PERSP_CENTER_ENA(1);
   }

   ctx->program->config->spi_ps_input_ena = spi_ps_inputs;
   ctx->program->config->spi_ps_input_addr = spi_ps_inputs;

   for (unsigned i = 0; i < impl->ssa_alloc; i++)
      allocated[i] = Temp(ctx->program->allocateId(), allocated[i].regClass());

   ctx->allocated.reset(allocated.release());
   ctx->cf_info.nir_to_aco.reset(nir_to_aco.release());
}
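/* Emit the p_startpgm pseudo-instruction, which defines one temporary per
 * (non-skipped) shader argument plus the initial exec mask, each fixed to
 * the physical register where the hardware ABI places it. */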
Pseudo_instruction *add_startpgm(struct isel_context *ctx)
{
   unsigned arg_count = ctx->args->ac.arg_count;
   if (ctx->stage == fragment_fs) {
      /* LLVM optimizes away unused FS inputs and computes spi_ps_input_addr
       * itself and then communicates the results back via the ELF binary.
       * Mirror what LLVM does by re-mapping the VGPR arguments here.
       *
       * TODO: If we made the FS input scanning code into a separate pass that
       * could run before argument setup, then this wouldn't be necessary
       * anymore.
       */
      struct ac_shader_args *args = &ctx->args->ac;
      arg_count = 0;
      for (unsigned i = 0, vgpr_arg = 0, vgpr_reg = 0; i < args->arg_count; i++) {
         if (args->args[i].file != AC_ARG_VGPR) {
            arg_count++;
            continue;
         }

         if (!(ctx->program->config->spi_ps_input_addr & (1 << vgpr_arg))) {
            args->args[i].skip = true;
         } else {
            args->args[i].offset = vgpr_reg;
            vgpr_reg += args->args[i].size;
            arg_count++;
         }
         vgpr_arg++;
      }
   }

   aco_ptr<Pseudo_instruction> startpgm{create_instruction<Pseudo_instruction>(aco_opcode::p_startpgm, Format::PSEUDO, 0, arg_count + 1)};
   for (unsigned i = 0, arg = 0; i < ctx->args->ac.arg_count; i++) {
      if (ctx->args->ac.args[i].skip)
         continue;

      enum ac_arg_regfile file = ctx->args->ac.args[i].file;
      unsigned size = ctx->args->ac.args[i].size;
      unsigned reg = ctx->args->ac.args[i].offset;
      RegClass type = RegClass(file == AC_ARG_SGPR ? RegType::sgpr : RegType::vgpr, size);
      Temp dst = Temp{ctx->program->allocateId(), type};
      ctx->arg_temps[i] = dst;
      startpgm->definitions[arg] = Definition(dst);
      startpgm->definitions[arg].setFixed(PhysReg{file == AC_ARG_SGPR ? reg : reg + 256});
      arg++;
   }
   startpgm->definitions[arg_count] = Definition{ctx->program->allocateId(), exec, ctx->program->lane_mask};
   Pseudo_instruction *instr = startpgm.get();
   ctx->block->instructions.push_back(std::move(startpgm));

   /* Stash these in the program so that they can be accessed later when
    * handling spilling. */
   ctx->program->private_segment_buffer = get_arg(ctx, ctx->args->ring_offsets);
   ctx->program->scratch_offset = get_arg(ctx, ctx->args->scratch_offset);

   return instr;
}
int
type_size(const struct glsl_type *type, bool bindless)
{
   // TODO: don't we need type->std430_base_alignment() here?
   return glsl_count_attribute_slots(type, false);
}
void
shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   uint32_t comp_size = glsl_type_is_boolean(type)
      ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);
   *size = comp_size * length;
   *align = comp_size;
}
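/* Callback for nir_opt_load_store_vectorize(): decides whether two adjacent
 * memory accesses may be fused into one, based on the combined size and the
 * alignment guarantees of each memory space. */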
bool
mem_vectorize_callback(unsigned align, unsigned bit_size,
                       unsigned num_components, unsigned high_offset,
                       nir_intrinsic_instr *low, nir_intrinsic_instr *high)
{
   if ((bit_size != 32 && bit_size != 64) || num_components > 4)
      return false;

   /* >128 bit loads are split except with SMEM */
   if (bit_size * num_components > 128)
      return false;

   switch (low->intrinsic) {
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_load_push_constant:
      return align % 4 == 0;
   case nir_intrinsic_load_deref:
   case nir_intrinsic_store_deref:
      assert(nir_src_as_deref(low->src[0])->mode == nir_var_mem_shared);
      /* fallthrough */
   case nir_intrinsic_load_shared:
   case nir_intrinsic_store_shared:
      if (bit_size * num_components > 64) /* 96 and 128 bit loads require 128 bit alignment and are split otherwise */
         return align % 16 == 0;
      else
         return align % 4 == 0;
   default:
      return false;
   }
}
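/* Compute the parameter-export slot for each VS output and the pos_written
 * mask: bit 0 is position, bit 1 the misc vector (point size, layer,
 * viewport index) and bits 2-3 the two clip/cull distance vectors. */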
void
setup_vs_output_info(isel_context *ctx, nir_shader *nir,
                     bool export_prim_id, bool export_clip_dists,
                     radv_vs_output_info *outinfo)
{
   memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
          sizeof(outinfo->vs_output_param_offset));

   outinfo->param_exports = 0;
   int pos_written = 0x1;
   if (outinfo->writes_pointsize || outinfo->writes_viewport_index || outinfo->writes_layer)
      pos_written |= 1 << 1;

   uint64_t mask = ctx->output_masks[nir->info.stage];
   while (mask) {
      int idx = u_bit_scan64(&mask);
      if (idx >= VARYING_SLOT_VAR0 || idx == VARYING_SLOT_LAYER || idx == VARYING_SLOT_PRIMITIVE_ID ||
          ((idx == VARYING_SLOT_CLIP_DIST0 || idx == VARYING_SLOT_CLIP_DIST1) && export_clip_dists)) {
         if (outinfo->vs_output_param_offset[idx] == AC_EXP_PARAM_UNDEFINED)
            outinfo->vs_output_param_offset[idx] = outinfo->param_exports++;
      }
   }
   if (outinfo->writes_layer &&
       outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] == AC_EXP_PARAM_UNDEFINED) {
      /* when ctx->options->key.has_multiview_view_index = true, the layer
       * variable isn't declared in NIR and it's isel's job to get the layer */
      outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] = outinfo->param_exports++;
   }

   if (export_prim_id) {
      assert(outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] == AC_EXP_PARAM_UNDEFINED);
      outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = outinfo->param_exports++;
   }

   ctx->export_clip_dists = export_clip_dists;
   ctx->num_clip_distances = util_bitcount(outinfo->clip_dist_mask);
   ctx->num_cull_distances = util_bitcount(outinfo->cull_dist_mask);

   assert(ctx->num_clip_distances + ctx->num_cull_distances <= 8);

   if (ctx->num_clip_distances + ctx->num_cull_distances > 0)
      pos_written |= 1 << 2;
   if (ctx->num_clip_distances + ctx->num_cull_distances > 4)
      pos_written |= 1 << 3;

   outinfo->pos_exports = util_bitcount(pos_written);
}
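/* driver_location is assigned in units of components (4 per vec4 slot); the
 * layout depends on which hardware stage the VS is merged into. */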
void
setup_vs_variables(isel_context *ctx, nir_shader *nir)
{
   nir_foreach_variable(variable, &nir->inputs)
   {
      variable->data.driver_location = variable->data.location * 4;
   }
   nir_foreach_variable(variable, &nir->outputs)
   {
      if (ctx->stage == vertex_geometry_gs)
         variable->data.driver_location = util_bitcount64(ctx->output_masks[nir->info.stage] & ((1ull << variable->data.location) - 1ull)) * 4;
      else if (ctx->stage == vertex_es ||
               ctx->stage == vertex_ls ||
               ctx->stage == vertex_tess_control_hs)
         // TODO: make this more compact
         variable->data.driver_location = shader_io_get_unique_index((gl_varying_slot) variable->data.location) * 4;
      else if (ctx->stage == vertex_vs)
         variable->data.driver_location = variable->data.location * 4;
      else
         unreachable("Unsupported VS stage");
   }

   if (ctx->stage == vertex_vs) {
      radv_vs_output_info *outinfo = &ctx->program->info->vs.outinfo;
      setup_vs_output_info(ctx, nir, outinfo->export_prim_id,
                           ctx->options->key.vs_common_out.export_clip_dists, outinfo);
   } else if (ctx->stage == vertex_geometry_gs || ctx->stage == vertex_es) {
      /* TODO: radv_nir_shader_info_pass() already sets this, but it's larger
       * than it needs to be. To set it more accurately, we'd have to improve
       * radv_nir_shader_info_pass(), because gfx9_get_gs_info() uses
       * esgs_itemsize and runs before compilation.
       */
      /* radv_es_output_info *outinfo = &ctx->program->info->vs.es_info;
       * outinfo->esgs_itemsize = util_bitcount64(ctx->output_masks[nir->info.stage]) * 16u; */
   }
}
void setup_gs_variables(isel_context *ctx, nir_shader *nir)
{
   if (ctx->stage == vertex_geometry_gs || ctx->stage == tess_eval_geometry_gs) {
      nir_foreach_variable(variable, &nir->inputs) {
         variable->data.driver_location = util_bitcount64(ctx->input_masks[nir->info.stage] & ((1ull << variable->data.location) - 1ull)) * 4;
      }
   } else if (ctx->stage == geometry_gs) {
      //TODO: make this more compact
      nir_foreach_variable(variable, &nir->inputs) {
         variable->data.driver_location = shader_io_get_unique_index((gl_varying_slot)variable->data.location) * 4;
      }
   } else {
      unreachable("Unsupported GS stage.");
   }

   nir_foreach_variable(variable, &nir->outputs) {
      variable->data.driver_location = variable->data.location * 4;
   }

   if (ctx->stage == vertex_geometry_gs)
      ctx->program->info->gs.es_type = MESA_SHADER_VERTEX;
   else if (ctx->stage == tess_eval_geometry_gs)
      ctx->program->info->gs.es_type = MESA_SHADER_TESS_EVAL;
}
void
setup_tcs_variables(isel_context *ctx, nir_shader *nir)
{
   switch (ctx->stage) {
   case tess_control_hs:
      ctx->tcs_num_inputs = ctx->args->options->key.tcs.num_inputs;
      break;
   case vertex_tess_control_hs:
      ctx->tcs_num_inputs = util_last_bit64(ctx->args->shader_info->vs.ls_outputs_written);
      break;
   default:
      unreachable("Unsupported TCS shader stage");
   }

   /* When the number of TCS input and output vertices are the same (typically 3):
    * - There is an equal amount of LS and HS invocations
    * - In case of merged LSHS shaders, the LS and HS halves of the shader
    *   always process the exact same vertex. We can use this knowledge to optimize them.
    */
   ctx->tcs_in_out_eq =
      ctx->stage == vertex_tess_control_hs &&
      ctx->args->options->key.tcs.input_vertices == nir->info.tess.tcs_vertices_out;

   ctx->tcs_num_patches = get_tcs_num_patches(
                             ctx->args->options->key.tcs.input_vertices,
                             nir->info.tess.tcs_vertices_out,
                             ctx->tcs_num_inputs,
                             ctx->args->shader_info->tcs.outputs_written,
                             ctx->args->shader_info->tcs.patch_outputs_written,
                             ctx->args->options->tess_offchip_block_dw_size,
                             ctx->args->options->chip_class,
                             ctx->args->options->family);
   unsigned lds_size = calculate_tess_lds_size(
                             ctx->args->options->key.tcs.input_vertices,
                             nir->info.tess.tcs_vertices_out,
                             ctx->tcs_num_inputs,
                             ctx->tcs_num_patches,
                             ctx->args->shader_info->tcs.outputs_written,
                             ctx->args->shader_info->tcs.patch_outputs_written);

   ctx->args->shader_info->tcs.num_patches = ctx->tcs_num_patches;
   ctx->args->shader_info->tcs.lds_size = lds_size;
   ctx->program->config->lds_size = (lds_size + ctx->program->lds_alloc_granule - 1) /
                                    ctx->program->lds_alloc_granule;

   nir_foreach_variable(variable, &nir->inputs) {
      variable->data.driver_location = shader_io_get_unique_index((gl_varying_slot) variable->data.location) * 4;
   }

   nir_foreach_variable(variable, &nir->outputs) {
      variable->data.driver_location = shader_io_get_unique_index((gl_varying_slot) variable->data.location) * 4;
   }

   ctx->tcs_tess_lvl_out_loc = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER) * 16u;
   ctx->tcs_tess_lvl_in_loc = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER) * 16u;
}
void
setup_tes_variables(isel_context *ctx, nir_shader *nir)
{
   ctx->tcs_num_patches = ctx->args->options->key.tes.num_patches;

   nir_foreach_variable(variable, &nir->inputs) {
      variable->data.driver_location = shader_io_get_unique_index((gl_varying_slot) variable->data.location) * 4;
   }

   nir_foreach_variable(variable, &nir->outputs) {
      if (ctx->stage == tess_eval_vs)
         variable->data.driver_location = variable->data.location * 4;
      else if (ctx->stage == tess_eval_es)
         variable->data.driver_location = shader_io_get_unique_index((gl_varying_slot) variable->data.location) * 4;
      else if (ctx->stage == tess_eval_geometry_gs)
         variable->data.driver_location = util_bitcount64(ctx->output_masks[nir->info.stage] & ((1ull << variable->data.location) - 1ull)) * 4;
      else
         unreachable("Unsupported TES shader stage");
   }

   if (ctx->stage == tess_eval_vs) {
      radv_vs_output_info *outinfo = &ctx->program->info->tes.outinfo;
      setup_vs_output_info(ctx, nir, outinfo->export_prim_id,
                           ctx->options->key.vs_common_out.export_clip_dists, outinfo);
   }
}
void
setup_variables(isel_context *ctx, nir_shader *nir)
{
   switch (nir->info.stage) {
   case MESA_SHADER_FRAGMENT: {
      nir_foreach_variable(variable, &nir->outputs)
      {
         int idx = variable->data.location + variable->data.index;
         variable->data.driver_location = idx * 4;
      }
      break;
   }
   case MESA_SHADER_COMPUTE: {
      ctx->program->config->lds_size = (nir->info.cs.shared_size + ctx->program->lds_alloc_granule - 1) /
                                       ctx->program->lds_alloc_granule;
      break;
   }
   case MESA_SHADER_VERTEX: {
      setup_vs_variables(ctx, nir);
      break;
   }
   case MESA_SHADER_GEOMETRY: {
      setup_gs_variables(ctx, nir);
      break;
   }
   case MESA_SHADER_TESS_CTRL: {
      setup_tcs_variables(ctx, nir);
      break;
   }
   case MESA_SHADER_TESS_EVAL: {
      setup_tes_variables(ctx, nir);
      break;
   }
   default:
      unreachable("Unhandled shader stage.");
   }
}
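/* Gather per-stage input/output slot masks. The outputs of one shader are
 * also recorded as inputs of the next stage (and vice versa) so that merged
 * stages agree on the I/O layout. */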
void
get_io_masks(isel_context *ctx, unsigned shader_count, struct nir_shader *const *shaders)
{
   for (unsigned i = 0; i < shader_count; i++) {
      nir_shader *nir = shaders[i];
      if (nir->info.stage == MESA_SHADER_COMPUTE)
         continue;

      uint64_t output_mask = 0;
      nir_foreach_variable(variable, &nir->outputs) {
         const glsl_type *type = variable->type;
         if (nir_is_per_vertex_io(variable, nir->info.stage))
            type = type->fields.array;
         unsigned slots = type->count_attribute_slots(false);
         if (variable->data.compact) {
            unsigned component_count = variable->data.location_frac + type->length;
            slots = (component_count + 3) / 4;
         }
         output_mask |= ((1ull << slots) - 1) << variable->data.location;
      }

      uint64_t input_mask = 0;
      nir_foreach_variable(variable, &nir->inputs) {
         const glsl_type *type = variable->type;
         if (nir_is_per_vertex_io(variable, nir->info.stage))
            type = type->fields.array;
         unsigned slots = type->count_attribute_slots(false);
         if (variable->data.compact) {
            unsigned component_count = variable->data.location_frac + type->length;
            slots = (component_count + 3) / 4;
         }
         input_mask |= ((1ull << slots) - 1) << variable->data.location;
      }

      ctx->output_masks[nir->info.stage] |= output_mask;
      if (i + 1 < shader_count)
         ctx->input_masks[shaders[i + 1]->info.stage] |= output_mask;

      ctx->input_masks[nir->info.stage] |= input_mask;
      if (i)
         ctx->output_masks[shaders[i - 1]->info.stage] |= input_mask;
   }
}
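/* Per-shader NIR preparation: copy constant data into the program, assign
 * driver locations, vectorize and lower memory access, lower 64-bit integer
 * and division ops, then run the algebraic optimization loops to a fixed
 * point before isel. */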
void
setup_nir(isel_context *ctx, nir_shader *nir)
{
   Program *program = ctx->program;

   /* align and copy constant data */
   while (program->constant_data.size() % 4u)
      program->constant_data.push_back(0);
   ctx->constant_data_offset = program->constant_data.size();
   program->constant_data.insert(program->constant_data.end(),
                                 (uint8_t*)nir->constant_data,
                                 (uint8_t*)nir->constant_data + nir->constant_data_size);

   /* the variable setup has to be done before lower_io / CSE */
   setup_variables(ctx, nir);

   /* optimize and lower memory operations */
   bool lower_to_scalar = false;
   bool lower_pack = false;
   if (nir_opt_load_store_vectorize(nir,
                                    (nir_variable_mode)(nir_var_mem_ssbo | nir_var_mem_ubo |
                                                        nir_var_mem_push_const | nir_var_mem_shared),
                                    mem_vectorize_callback)) {
      lower_to_scalar = true;
      lower_pack = true;
   }
   if (nir->info.stage != MESA_SHADER_COMPUTE)
      nir_lower_io(nir, (nir_variable_mode)(nir_var_shader_in | nir_var_shader_out), type_size, (nir_lower_io_options)0);
   nir_lower_explicit_io(nir, nir_var_mem_global, nir_address_format_64bit_global);

   if (lower_to_scalar)
      nir_lower_alu_to_scalar(nir, NULL, NULL);
   if (lower_pack)
      nir_lower_pack(nir);

   /* lower ALU operations */
   // TODO: implement logic64 in aco, it's more effective for sgprs
   nir_lower_int64(nir, nir->options->lower_int64_options);

   nir_opt_idiv_const(nir, 32);
   nir_lower_idiv(nir, nir_lower_idiv_precise);

   /* optimize the lowered ALU operations */
   bool more_algebraic = true;
   while (more_algebraic) {
      more_algebraic = false;
      NIR_PASS_V(nir, nir_copy_prop);
      NIR_PASS_V(nir, nir_opt_dce);
      NIR_PASS_V(nir, nir_opt_constant_folding);
      NIR_PASS(more_algebraic, nir, nir_opt_algebraic);
   }

   /* Do late algebraic optimization to turn add(a, neg(b)) back into
    * subs, then the mandatory cleanup after algebraic. Note that it may
    * produce fnegs, and if so then we need to keep running to squash
    * fneg(fneg(a)).
    */
   bool more_late_algebraic = true;
   while (more_late_algebraic) {
      more_late_algebraic = false;
      NIR_PASS(more_late_algebraic, nir, nir_opt_algebraic_late);
      NIR_PASS_V(nir, nir_opt_constant_folding);
      NIR_PASS_V(nir, nir_copy_prop);
      NIR_PASS_V(nir, nir_opt_dce);
      NIR_PASS_V(nir, nir_opt_cse);
   }

   /* cleanup passes */
   nir_lower_load_const_to_scalar(nir);
   nir_opt_shrink_load(nir);
   nir_move_options move_opts = (nir_move_options)(
      nir_move_const_undef | nir_move_load_ubo | nir_move_load_input |
      nir_move_comparisons | nir_move_copies);
   nir_opt_sink(nir, move_opts);
   nir_opt_move(nir, move_opts);
   nir_convert_to_lcssa(nir, true, false);
   nir_lower_phis_to_scalar(nir);

   nir_function_impl *func = nir_shader_get_entrypoint(nir);
   nir_index_ssa_defs(func);
}
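/* Determine the combination of software stages (sw_*) contained in this
 * pipeline part and map it to the hardware stage (hw_*) it executes as,
 * which differs between GFX6-8, GFX9+ merged shaders and GFX10 NGG. Also
 * initializes the chip-specific register-file limits. */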
isel_context
setup_isel_context(Program* program,
                   unsigned shader_count,
                   struct nir_shader *const *shaders,
                   ac_shader_config* config,
                   struct radv_shader_args *args,
                   bool is_gs_copy_shader)
{
   program->stage = 0;
   for (unsigned i = 0; i < shader_count; i++) {
      switch (shaders[i]->info.stage) {
      case MESA_SHADER_VERTEX:
         program->stage |= sw_vs;
         break;
      case MESA_SHADER_TESS_CTRL:
         program->stage |= sw_tcs;
         break;
      case MESA_SHADER_TESS_EVAL:
         program->stage |= sw_tes;
         break;
      case MESA_SHADER_GEOMETRY:
         program->stage |= is_gs_copy_shader ? sw_gs_copy : sw_gs;
         break;
      case MESA_SHADER_FRAGMENT:
         program->stage |= sw_fs;
         break;
      case MESA_SHADER_COMPUTE:
         program->stage |= sw_cs;
         break;
      default:
         unreachable("Shader stage not implemented");
      }
   }
   bool gfx9_plus = args->options->chip_class >= GFX9;
   bool ngg = args->shader_info->is_ngg && args->options->chip_class >= GFX10;
   if (program->stage == sw_vs && args->shader_info->vs.as_es)
      program->stage |= hw_es;
   else if (program->stage == sw_vs && !args->shader_info->vs.as_ls)
      program->stage |= hw_vs;
   else if (program->stage == sw_gs)
      program->stage |= hw_gs;
   else if (program->stage == sw_fs)
      program->stage |= hw_fs;
   else if (program->stage == sw_cs)
      program->stage |= hw_cs;
   else if (program->stage == sw_gs_copy)
      program->stage |= hw_vs;
   else if (program->stage == (sw_vs | sw_gs) && gfx9_plus && !ngg)
      program->stage |= hw_gs;
   else if (program->stage == sw_vs && args->shader_info->vs.as_ls)
      program->stage |= hw_ls; /* GFX6-8: VS is a Local Shader, when tessellation is used */
   else if (program->stage == sw_tcs)
      program->stage |= hw_hs; /* GFX6-8: TCS is a Hull Shader */
   else if (program->stage == (sw_vs | sw_tcs))
      program->stage |= hw_hs; /* GFX9-10: VS+TCS merged into a Hull Shader */
   else if (program->stage == sw_tes && !args->shader_info->tes.as_es && !ngg)
      program->stage |= hw_vs; /* GFX6-9: TES without GS uses the HW VS stage (and GFX10/legacy) */
   else if (program->stage == sw_tes && args->shader_info->tes.as_es && !ngg)
      program->stage |= hw_es; /* GFX6-8: TES is an Export Shader */
   else if (program->stage == (sw_tes | sw_gs) && gfx9_plus && !ngg)
      program->stage |= hw_gs; /* GFX9: TES+GS merged into a GS (and GFX10/legacy) */
   else
      unreachable("Shader stage not implemented");

   program->config = config;
   program->info = args->shader_info;
   program->chip_class = args->options->chip_class;
   program->family = args->options->family;
   program->wave_size = args->shader_info->wave_size;
   program->lane_mask = program->wave_size == 32 ? s1 : s2;

   program->lds_alloc_granule = args->options->chip_class >= GFX7 ? 512 : 256;
   program->lds_limit = args->options->chip_class >= GFX7 ? 65536 : 32768;
   /* apparently gfx702 also has 16-bank LDS but I can't find a family for that */
   program->has_16bank_lds = args->options->family == CHIP_KABINI || args->options->family == CHIP_STONEY;

   program->vgpr_limit = 256;
   program->vgpr_alloc_granule = 3;

   if (args->options->chip_class >= GFX10) {
      program->physical_sgprs = 2560; /* doesn't matter as long as it's at least 128 * 20 */
      program->sgpr_alloc_granule = 127;
      program->sgpr_limit = 106;
      program->vgpr_alloc_granule = program->wave_size == 32 ? 7 : 3;
   } else if (program->chip_class >= GFX8) {
      program->physical_sgprs = 800;
      program->sgpr_alloc_granule = 15;
      if (args->options->family == CHIP_TONGA || args->options->family == CHIP_ICELAND)
         program->sgpr_limit = 94; /* workaround hardware bug */
      else
         program->sgpr_limit = 102;
   } else {
      program->physical_sgprs = 512;
      program->sgpr_alloc_granule = 7;
      program->sgpr_limit = 104;
   }

   calc_min_waves(program);
   program->vgpr_limit = get_addr_vgpr_from_waves(program, program->min_waves);
   program->sgpr_limit = get_addr_sgpr_from_waves(program, program->min_waves);

   isel_context ctx = {};
   ctx.program = program;
   ctx.args = args;
   ctx.options = args->options;
   ctx.stage = program->stage;

   get_io_masks(&ctx, shader_count, shaders);

   unsigned scratch_size = 0;
   if (program->stage == gs_copy_vs) {
      assert(shader_count == 1);
      setup_vs_output_info(&ctx, shaders[0], false, true, &args->shader_info->vs.outinfo);
   } else {
      for (unsigned i = 0; i < shader_count; i++) {
         nir_shader *nir = shaders[i];
         setup_nir(&ctx, nir);
      }

      for (unsigned i = 0; i < shader_count; i++)
         scratch_size = std::max(scratch_size, shaders[i]->scratch_size);
   }

   ctx.program->config->scratch_bytes_per_wave = align(scratch_size * ctx.program->wave_size, 1024);

   ctx.block = ctx.program->create_and_insert_block();
   ctx.block->loop_nest_depth = 0;
   ctx.block->kind = block_kind_top_level;

   return ctx;
}

} /* namespace aco */