/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <array>
#include <memory>
#include <unordered_map>

#include "aco_ir.h"
#include "nir.h"
#include "vulkan/radv_shader.h"
#include "vulkan/radv_descriptor_set.h"
#include "sid.h"
#include "ac_exp_param.h"

#include "util/u_math.h"

#define MAX_INLINE_PUSH_CONSTS 8
/* Fragment shader input VGPRs. Barycentric coordinates occupy two
 * consecutive VGPRs (the _p1/_p2 pairs), so the second one can be addressed
 * as input + 1; persp_pull_model takes three VGPRs (v3), and the four
 * frag_pos entries are consecutive so frag_pos_0 + i works. */
enum fs_input {
   persp_sample_p1,
   persp_sample_p2,
   persp_center_p1,
   persp_center_p2,
   persp_centroid_p1,
   persp_centroid_p2,
   persp_pull_model,
   linear_sample_p1,
   linear_sample_p2,
   linear_center_p1,
   linear_center_p2,
   linear_centroid_p1,
   linear_centroid_p2,
   line_stipple,
   frag_pos_0,
   frag_pos_1,
   frag_pos_2,
   frag_pos_3,
   front_face,
   ancillary,
   sample_coverage,
   fixed_pt,
   max_inputs
};

struct vs_output_state {
   uint8_t mask[VARYING_SLOT_VAR31 + 1];
   Temp outputs[VARYING_SLOT_VAR31 + 1][4];
};
struct isel_context {
   struct radv_nir_compiler_options *options;
   Program *program;
   nir_shader *shader;
   uint32_t constant_data_offset;
   Block *block;
   bool *divergent_vals;
   std::unique_ptr<Temp[]> allocated;
   std::unordered_map<unsigned, std::array<Temp,4>> allocated_vec;
   Stage stage; /* Stage */

   /* control-flow state */
   uint16_t loop_nest_depth = 0;
   bool has_divergent_continue = false;
   bool has_divergent_branch = false;
   bool is_divergent = false;
   bool exec_potentially_empty = false;

   /* scratch */
   bool scratch_enabled = false;
   Temp private_segment_buffer = Temp(0, s2); /* also the part of the scratch descriptor on compute */
   Temp scratch_offset = Temp(0, s1);

   /* inputs common for merged stages */
   Temp merged_wave_info = Temp(0, s1);

   /* FS inputs */
   bool fs_vgpr_args[fs_input::max_inputs];
   Temp fs_inputs[fs_input::max_inputs];
   Temp prim_mask = Temp(0, s1);
   Temp descriptor_sets[MAX_SETS];
   Temp push_constants = Temp(0, s1);
   Temp inline_push_consts[MAX_INLINE_PUSH_CONSTS];
   unsigned num_inline_push_consts = 0;
   unsigned base_inline_push_consts = 0;

   /* VS inputs */
   Temp vertex_buffers = Temp(0, s1);
   Temp base_vertex = Temp(0, s1);
   Temp start_instance = Temp(0, s1);
   Temp draw_id = Temp(0, s1);
   Temp view_index = Temp(0, s1);
   Temp es2gs_offset = Temp(0, s1);
   Temp vertex_id = Temp(0, v1);
   Temp rel_auto_id = Temp(0, v1);
   Temp instance_id = Temp(0, v1);
   Temp vs_prim_id = Temp(0, v1);
   bool needs_instance_id;

   /* CS inputs */
   Temp num_workgroups[3] = {Temp(0, s1), Temp(0, s1), Temp(0, s1)};
   Temp workgroup_ids[3] = {Temp(0, s1), Temp(0, s1), Temp(0, s1)};
   Temp tg_size = Temp(0, s1);
   Temp local_invocation_ids[3] = {Temp(0, v1), Temp(0, v1), Temp(0, v1)};

   /* VS output information */
   unsigned num_clip_distances;
   unsigned num_cull_distances;
   vs_output_state vs_output;

   /* streamout */
   Temp streamout_buffers = Temp(0, s1);
   Temp streamout_write_idx = Temp(0, s1);
   Temp streamout_config = Temp(0, s1);
   Temp streamout_offset[4] = {Temp(0, s1), Temp(0, s1), Temp(0, s1), Temp(0, s1)};
};
fs_input
get_interp_input(nir_intrinsic_op intrin, enum glsl_interp_mode interp)
{
   switch (interp) {
   case INTERP_MODE_SMOOTH:
   case INTERP_MODE_NONE:
      if (intrin == nir_intrinsic_load_barycentric_pixel ||
          intrin == nir_intrinsic_load_barycentric_at_sample ||
          intrin == nir_intrinsic_load_barycentric_at_offset)
         return fs_input::persp_center_p1;
      else if (intrin == nir_intrinsic_load_barycentric_centroid)
         return fs_input::persp_centroid_p1;
      else if (intrin == nir_intrinsic_load_barycentric_sample)
         return fs_input::persp_sample_p1;
      break;
   case INTERP_MODE_NOPERSPECTIVE:
      if (intrin == nir_intrinsic_load_barycentric_pixel)
         return fs_input::linear_center_p1;
      else if (intrin == nir_intrinsic_load_barycentric_centroid)
         return fs_input::linear_centroid_p1;
      else if (intrin == nir_intrinsic_load_barycentric_sample)
         return fs_input::linear_sample_p1;
      break;
   default:
      break;
   }
   return fs_input::max_inputs;
}
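
/* init_context() runs NIR divergence analysis and then assigns every SSA
 * definition a register class up front: uniform values go to SGPRs,
 * divergent values to VGPRs, and divergent booleans become lane masks (s2).
 * Because the class of a phi depends on sources that may only be visited
 * later, the walk below is repeated until the assignment reaches a fixed
 * point. */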
void init_context(isel_context *ctx, nir_shader *shader)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   ctx->shader = shader;
   ctx->divergent_vals = nir_divergence_analysis(shader, nir_divergence_view_index_uniform);

   std::unique_ptr<Temp[]> allocated{new Temp[impl->ssa_alloc]()};
   memset(&ctx->fs_vgpr_args, false, sizeof(ctx->fs_vgpr_args));

   bool done = false;
   while (!done) {
      done = true;
      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            switch(instr->type) {
            case nir_instr_type_alu: {
               nir_alu_instr *alu_instr = nir_instr_as_alu(instr);
               unsigned size = alu_instr->dest.dest.ssa.num_components;
               if (alu_instr->dest.dest.ssa.bit_size == 64)
                  size *= 2;
               RegType type = RegType::sgpr;
               switch(alu_instr->op) {
               case nir_op_fmul:
               case nir_op_fadd:
               case nir_op_fsub:
               case nir_op_fmax:
               case nir_op_fmin:
               case nir_op_fmax3:
               case nir_op_fmin3:
               case nir_op_fmed3:
               case nir_op_fneg:
               case nir_op_fabs:
               case nir_op_fsat:
               case nir_op_fsign:
               case nir_op_frcp:
               case nir_op_frsq:
               case nir_op_fsqrt:
               case nir_op_fexp2:
               case nir_op_flog2:
               case nir_op_ffract:
               case nir_op_ffloor:
               case nir_op_fceil:
               case nir_op_ftrunc:
               case nir_op_fround_even:
               case nir_op_fsin:
               case nir_op_fcos:
               case nir_op_f2f32:
               case nir_op_f2f64:
               case nir_op_u2f32:
               case nir_op_u2f64:
               case nir_op_i2f32:
               case nir_op_i2f64:
               case nir_op_pack_half_2x16:
               case nir_op_unpack_half_2x16_split_x:
               case nir_op_unpack_half_2x16_split_y:
               case nir_op_fddx:
               case nir_op_fddy:
               case nir_op_fddx_fine:
               case nir_op_fddy_fine:
               case nir_op_fddx_coarse:
               case nir_op_fddy_coarse:
               case nir_op_fquantize2f16:
               case nir_op_ldexp:
               case nir_op_frexp_sig:
               case nir_op_frexp_exp:
               case nir_op_cube_face_index:
               case nir_op_cube_face_coord:
                  /* floating-point ALU is always executed on the VALU */
                  type = RegType::vgpr;
                  break;
               case nir_op_flt:
               case nir_op_fge:
               case nir_op_feq:
               case nir_op_fne:
               case nir_op_ilt:
               case nir_op_ige:
               case nir_op_ieq:
               case nir_op_ine:
               case nir_op_ult:
               case nir_op_uge:
                  size = alu_instr->src[0].src.ssa->bit_size == 64 ? 2 : 1;
                  if (ctx->divergent_vals[alu_instr->dest.dest.ssa.index]) {
                     size = 2;
                  } else {
                     for (unsigned i = 0; i < nir_op_infos[alu_instr->op].num_inputs; i++) {
                        if (allocated[alu_instr->src[i].src.ssa->index].type() == RegType::vgpr)
                           size = 2;
                     }
                  }
                  break;
               case nir_op_b2i32:
               case nir_op_b2f32:
               case nir_op_f2i32:
               case nir_op_f2u32:
               case nir_op_imov:
               case nir_op_fmov:
                  type = ctx->divergent_vals[alu_instr->dest.dest.ssa.index] ? RegType::vgpr : RegType::sgpr;
                  break;
               case nir_op_bcsel:
                  if (alu_instr->dest.dest.ssa.bit_size == 1) {
                     if (ctx->divergent_vals[alu_instr->dest.dest.ssa.index])
                        size = 2;
                     else if (allocated[alu_instr->src[1].src.ssa->index].regClass() == s2 &&
                              allocated[alu_instr->src[2].src.ssa->index].regClass() == s2)
                        size = 2;
                     else
                        size = 1;
                  } else {
                     if (ctx->divergent_vals[alu_instr->dest.dest.ssa.index]) {
                        type = RegType::vgpr;
                     } else {
                        if (allocated[alu_instr->src[1].src.ssa->index].type() == RegType::vgpr ||
                            allocated[alu_instr->src[2].src.ssa->index].type() == RegType::vgpr) {
                           type = RegType::vgpr;
                        }
                     }
                     if (alu_instr->src[1].src.ssa->num_components == 1 && alu_instr->src[2].src.ssa->num_components == 1) {
                        assert(allocated[alu_instr->src[1].src.ssa->index].size() == allocated[alu_instr->src[2].src.ssa->index].size());
                        size = allocated[alu_instr->src[1].src.ssa->index].size();
                     }
                  }
                  break;
               case nir_op_inot:
                  if (alu_instr->dest.dest.ssa.bit_size == 1) {
                     /* booleans keep the size of their source lane mask */
                     size = allocated[alu_instr->src[0].src.ssa->index].size();
                  } else {
                     type = ctx->divergent_vals[alu_instr->dest.dest.ssa.index] ? RegType::vgpr : RegType::sgpr;
                  }
                  break;
               case nir_op_i2b1:
               case nir_op_f2b1:
                  if (alu_instr->dest.dest.ssa.bit_size == 1)
                     size = ctx->divergent_vals[alu_instr->dest.dest.ssa.index] ? 2 : 1;
                  break;
               default:
                  if (alu_instr->dest.dest.ssa.bit_size == 1) {
                     if (ctx->divergent_vals[alu_instr->dest.dest.ssa.index]) {
                        size = 2;
                     } else {
                        size = 2;
                        for (unsigned i = 0; i < nir_op_infos[alu_instr->op].num_inputs; i++) {
                           if (allocated[alu_instr->src[i].src.ssa->index].regClass() == s1) {
                              size = 1;
                              break;
                           }
                        }
                     }
                  } else {
                     for (unsigned i = 0; i < nir_op_infos[alu_instr->op].num_inputs; i++) {
                        if (allocated[alu_instr->src[i].src.ssa->index].type() == RegType::vgpr)
                           type = RegType::vgpr;
                     }
                  }
                  break;
               }
               allocated[alu_instr->dest.dest.ssa.index] = Temp(0, RegClass(type, size));
               break;
            }
            case nir_instr_type_load_const: {
               unsigned size = nir_instr_as_load_const(instr)->def.num_components;
               if (nir_instr_as_load_const(instr)->def.bit_size == 64)
                  size *= 2;
               allocated[nir_instr_as_load_const(instr)->def.index] = Temp(0, RegClass(RegType::sgpr, size));
               break;
            }
            case nir_instr_type_intrinsic: {
               nir_intrinsic_instr *intrinsic = nir_instr_as_intrinsic(instr);
               if (!nir_intrinsic_infos[intrinsic->intrinsic].has_dest)
                  break;
               unsigned size = intrinsic->dest.ssa.num_components;
               if (intrinsic->dest.ssa.bit_size == 64)
                  size *= 2;
               RegType type = RegType::sgpr;
               switch(intrinsic->intrinsic) {
               case nir_intrinsic_load_push_constant:
               case nir_intrinsic_load_work_group_id:
               case nir_intrinsic_load_num_work_groups:
               case nir_intrinsic_load_subgroup_id:
               case nir_intrinsic_load_num_subgroups:
               case nir_intrinsic_load_first_vertex:
               case nir_intrinsic_load_base_instance:
               case nir_intrinsic_get_buffer_size:
               case nir_intrinsic_vote_all:
               case nir_intrinsic_vote_any:
               case nir_intrinsic_read_first_invocation:
               case nir_intrinsic_read_invocation:
               case nir_intrinsic_first_invocation:
               case nir_intrinsic_vulkan_resource_index:
                  type = RegType::sgpr;
                  break;
               case nir_intrinsic_ballot:
                  type = RegType::sgpr;
                  size = 2;
                  break;
               case nir_intrinsic_load_sample_id:
               case nir_intrinsic_load_sample_mask_in:
               case nir_intrinsic_load_input:
               case nir_intrinsic_load_vertex_id:
               case nir_intrinsic_load_vertex_id_zero_base:
               case nir_intrinsic_load_barycentric_sample:
               case nir_intrinsic_load_barycentric_pixel:
               case nir_intrinsic_load_barycentric_centroid:
               case nir_intrinsic_load_barycentric_at_sample:
               case nir_intrinsic_load_barycentric_at_offset:
               case nir_intrinsic_load_interpolated_input:
               case nir_intrinsic_load_frag_coord:
               case nir_intrinsic_load_sample_pos:
               case nir_intrinsic_load_layer_id:
               case nir_intrinsic_load_local_invocation_id:
               case nir_intrinsic_load_local_invocation_index:
               case nir_intrinsic_load_subgroup_invocation:
               case nir_intrinsic_write_invocation_amd:
               case nir_intrinsic_mbcnt_amd:
               case nir_intrinsic_load_instance_id:
               case nir_intrinsic_ssbo_atomic_add:
               case nir_intrinsic_ssbo_atomic_imin:
               case nir_intrinsic_ssbo_atomic_umin:
               case nir_intrinsic_ssbo_atomic_imax:
               case nir_intrinsic_ssbo_atomic_umax:
               case nir_intrinsic_ssbo_atomic_and:
               case nir_intrinsic_ssbo_atomic_or:
               case nir_intrinsic_ssbo_atomic_xor:
               case nir_intrinsic_ssbo_atomic_exchange:
               case nir_intrinsic_ssbo_atomic_comp_swap:
               case nir_intrinsic_image_deref_atomic_add:
               case nir_intrinsic_image_deref_atomic_umin:
               case nir_intrinsic_image_deref_atomic_imin:
               case nir_intrinsic_image_deref_atomic_umax:
               case nir_intrinsic_image_deref_atomic_imax:
               case nir_intrinsic_image_deref_atomic_and:
               case nir_intrinsic_image_deref_atomic_or:
               case nir_intrinsic_image_deref_atomic_xor:
               case nir_intrinsic_image_deref_atomic_exchange:
               case nir_intrinsic_image_deref_atomic_comp_swap:
               case nir_intrinsic_image_deref_size:
               case nir_intrinsic_shared_atomic_add:
               case nir_intrinsic_shared_atomic_imin:
               case nir_intrinsic_shared_atomic_umin:
               case nir_intrinsic_shared_atomic_imax:
               case nir_intrinsic_shared_atomic_umax:
               case nir_intrinsic_shared_atomic_and:
               case nir_intrinsic_shared_atomic_or:
               case nir_intrinsic_shared_atomic_xor:
               case nir_intrinsic_shared_atomic_exchange:
               case nir_intrinsic_shared_atomic_comp_swap:
               case nir_intrinsic_load_scratch:
                  type = RegType::vgpr;
                  break;
               case nir_intrinsic_shuffle:
               case nir_intrinsic_quad_broadcast:
               case nir_intrinsic_quad_swap_horizontal:
               case nir_intrinsic_quad_swap_vertical:
               case nir_intrinsic_quad_swap_diagonal:
               case nir_intrinsic_quad_swizzle_amd:
               case nir_intrinsic_masked_swizzle_amd:
               case nir_intrinsic_inclusive_scan:
               case nir_intrinsic_exclusive_scan:
                  if (!ctx->divergent_vals[intrinsic->dest.ssa.index]) {
                     type = RegType::sgpr;
                  } else if (intrinsic->src[0].ssa->bit_size == 1) {
                     type = RegType::sgpr;
                     size = 2;
                  } else {
                     type = RegType::vgpr;
                  }
                  break;
               case nir_intrinsic_load_view_index:
                  type = ctx->stage == fragment_fs ? RegType::vgpr : RegType::sgpr;
                  break;
               case nir_intrinsic_load_front_face:
               case nir_intrinsic_load_helper_invocation:
               case nir_intrinsic_is_helper_invocation:
                  type = RegType::sgpr;
                  size = 2;
                  break;
               case nir_intrinsic_reduce:
                  if (nir_intrinsic_cluster_size(intrinsic) == 0 ||
                      !ctx->divergent_vals[intrinsic->dest.ssa.index]) {
                     type = RegType::sgpr;
                  } else if (intrinsic->src[0].ssa->bit_size == 1) {
                     type = RegType::sgpr;
                     size = 2;
                  } else {
                     type = RegType::vgpr;
                  }
                  break;
               case nir_intrinsic_load_ubo:
               case nir_intrinsic_load_ssbo:
               case nir_intrinsic_load_global:
                  type = ctx->divergent_vals[intrinsic->dest.ssa.index] ? RegType::vgpr : RegType::sgpr;
                  break;
               /* due to copy propagation, the swizzled imov is removed if num dest components == 1 */
               case nir_intrinsic_load_shared:
                  if (ctx->divergent_vals[intrinsic->dest.ssa.index])
                     type = RegType::vgpr;
                  else
                     type = RegType::sgpr;
                  break;
               default:
                  for (unsigned i = 0; i < nir_intrinsic_infos[intrinsic->intrinsic].num_srcs; i++) {
                     if (allocated[intrinsic->src[i].ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                  }
                  break;
               }
               allocated[intrinsic->dest.ssa.index] = Temp(0, RegClass(type, size));
               switch(intrinsic->intrinsic) {
               case nir_intrinsic_load_barycentric_sample:
               case nir_intrinsic_load_barycentric_pixel:
               case nir_intrinsic_load_barycentric_centroid:
               case nir_intrinsic_load_barycentric_at_sample:
               case nir_intrinsic_load_barycentric_at_offset: {
                  glsl_interp_mode mode = (glsl_interp_mode)nir_intrinsic_interp_mode(intrinsic);
                  ctx->fs_vgpr_args[get_interp_input(intrinsic->intrinsic, mode)] = true;
                  break;
               }
               case nir_intrinsic_load_front_face:
                  ctx->fs_vgpr_args[fs_input::front_face] = true;
                  break;
               case nir_intrinsic_load_frag_coord:
               case nir_intrinsic_load_sample_pos: {
                  uint8_t mask = nir_ssa_def_components_read(&intrinsic->dest.ssa);
                  for (unsigned i = 0; i < 4; i++) {
                     if (mask & (1 << i))
                        ctx->fs_vgpr_args[fs_input::frag_pos_0 + i] = true;
                  }
                  break;
               }
               case nir_intrinsic_load_sample_id:
                  ctx->fs_vgpr_args[fs_input::ancillary] = true;
                  break;
               case nir_intrinsic_load_sample_mask_in:
                  ctx->fs_vgpr_args[fs_input::ancillary] = true;
                  ctx->fs_vgpr_args[fs_input::sample_coverage] = true;
                  break;
               default:
                  break;
               }
               break;
            }
            case nir_instr_type_tex: {
               nir_tex_instr* tex = nir_instr_as_tex(instr);
               unsigned size = tex->dest.ssa.num_components;

               if (tex->dest.ssa.bit_size == 64)
                  size *= 2;
               if (tex->op == nir_texop_texture_samples)
                  assert(!ctx->divergent_vals[tex->dest.ssa.index]);
               if (ctx->divergent_vals[tex->dest.ssa.index])
                  allocated[tex->dest.ssa.index] = Temp(0, RegClass(RegType::vgpr, size));
               else
                  allocated[tex->dest.ssa.index] = Temp(0, RegClass(RegType::sgpr, size));
               break;
            }
            case nir_instr_type_parallel_copy: {
               nir_foreach_parallel_copy_entry(entry, nir_instr_as_parallel_copy(instr)) {
                  allocated[entry->dest.ssa.index] = allocated[entry->src.ssa->index];
               }
               break;
            }
            case nir_instr_type_ssa_undef: {
               unsigned size = nir_instr_as_ssa_undef(instr)->def.num_components;
               if (nir_instr_as_ssa_undef(instr)->def.bit_size == 64)
                  size *= 2;
               allocated[nir_instr_as_ssa_undef(instr)->def.index] = Temp(0, RegClass(RegType::sgpr, size));
               break;
            }
            case nir_instr_type_phi: {
               nir_phi_instr* phi = nir_instr_as_phi(instr);
               RegType type;
               unsigned size = phi->dest.ssa.num_components;

               if (phi->dest.ssa.bit_size == 1) {
                  assert(size == 1 && "multiple components not yet supported on boolean phis.");
                  type = RegType::sgpr;
                  size *= ctx->divergent_vals[phi->dest.ssa.index] ? 2 : 1;
                  allocated[phi->dest.ssa.index] = Temp(0, RegClass(type, size));
                  break;
               }

               if (ctx->divergent_vals[phi->dest.ssa.index]) {
                  type = RegType::vgpr;
               } else {
                  type = RegType::sgpr;
                  nir_foreach_phi_src (src, phi) {
                     if (allocated[src->src.ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                     if (allocated[src->src.ssa->index].type() == RegType::none)
                        done = false;
                  }
               }

               size *= phi->dest.ssa.bit_size == 64 ? 2 : 1;
               RegClass rc = RegClass(type, size);
               if (rc != allocated[phi->dest.ssa.index].regClass()) {
                  done = false;
               } else {
                  nir_foreach_phi_src(src, phi)
                     assert(allocated[src->src.ssa->index].size() == rc.size());
               }
               allocated[phi->dest.ssa.index] = Temp(0, rc);
               break;
            }
            default:
               break;
            }
         }
      }
   }
   for (unsigned i = 0; i < impl->ssa_alloc; i++)
      allocated[i] = Temp(ctx->program->allocateId(), allocated[i].regClass());

   ctx->allocated.reset(allocated.release());
}
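
/* Bookkeeping for user SGPRs, the registers the driver preloads before a
 * wave launches: num_sgpr counts the SGPRs handed out so far,
 * remaining_sgprs how many are still free (e.g. for inlined push
 * constants), and user_sgpr_idx is the running cursor used while the
 * arguments are declared. */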
struct user_sgpr_info {
   uint8_t num_sgpr;
   uint8_t remaining_sgprs;
   uint8_t user_sgpr_idx;
   bool need_ring_offsets;
   bool indirect_all_descriptor_sets;
};
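
/* If the push constants the shader actually reads fit into the spare user
 * SGPRs, they can be preloaded into registers, removing the scalar loads
 * from the push-constant buffer (and sometimes the buffer pointer itself). */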
static void allocate_inline_push_consts(isel_context *ctx,
                                        user_sgpr_info& user_sgpr_info)
{
   uint8_t remaining_sgprs = user_sgpr_info.remaining_sgprs;

   /* Only supported if shaders use push constants. */
   if (ctx->program->info->min_push_constant_used == UINT8_MAX)
      return;

   /* Only supported if shaders don't have indirect push constants. */
   if (ctx->program->info->has_indirect_push_constants)
      return;

   /* Only supported for 32-bit push constants. */
   //TODO: it's possible that some day, the load/store vectorization could make this inaccurate
   if (!ctx->program->info->has_only_32bit_push_constants)
      return;

   uint8_t num_push_consts =
      (ctx->program->info->max_push_constant_used -
       ctx->program->info->min_push_constant_used) / 4;

   /* Check if the number of user SGPRs is large enough. */
   if (num_push_consts < remaining_sgprs) {
      ctx->program->info->num_inline_push_consts = num_push_consts;
   } else {
      ctx->program->info->num_inline_push_consts = remaining_sgprs;
   }

   /* Clamp to the maximum number of allowed inlined push constants. */
   if (ctx->program->info->num_inline_push_consts > MAX_INLINE_PUSH_CONSTS)
      ctx->program->info->num_inline_push_consts = MAX_INLINE_PUSH_CONSTS;

   if (ctx->program->info->num_inline_push_consts == num_push_consts &&
       !ctx->program->info->loads_dynamic_offsets) {
      /* Disable the default push constants path if all constants are
       * inlined and if shaders don't use dynamic descriptors.
       */
      ctx->program->info->loads_push_constants = false;
      user_sgpr_info.num_sgpr--;
      user_sgpr_info.remaining_sgprs++;
   }

   ctx->program->info->base_inline_push_consts =
      ctx->program->info->min_push_constant_used / 4;

   user_sgpr_info.num_sgpr += ctx->program->info->num_inline_push_consts;
   user_sgpr_info.remaining_sgprs -= ctx->program->info->num_inline_push_consts;
}
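
/* Counts the user SGPRs needed by this stage (ring offsets, descriptor
 * sets, push constants, view index, streamout buffers) and decides whether
 * all descriptor sets still fit, or have to be reached indirectly through
 * a single pointer. */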
static void allocate_user_sgprs(isel_context *ctx,
                                bool needs_view_index, user_sgpr_info& user_sgpr_info)
{
   memset(&user_sgpr_info, 0, sizeof(struct user_sgpr_info));
   uint32_t user_sgpr_count = 0;

   /* until we sort out scratch/global buffers always assign ring offsets for gs/vs/es */
   if (ctx->stage != fragment_fs &&
       ctx->stage != compute_cs
       /*|| ctx->is_gs_copy_shader */)
      user_sgpr_info.need_ring_offsets = true;

   if (ctx->stage == fragment_fs &&
       ctx->program->info->ps.needs_sample_positions)
      user_sgpr_info.need_ring_offsets = true;

   /* 2 user sgprs will nearly always be allocated for scratch/rings */
   if (ctx->options->supports_spill || user_sgpr_info.need_ring_offsets || ctx->scratch_enabled)
      user_sgpr_count += 2;

   switch (ctx->stage) {
   case vertex_vs:
      /* if (!ctx->is_gs_copy_shader) */ {
         if (ctx->program->info->vs.has_vertex_buffers)
            user_sgpr_count++;
         user_sgpr_count += ctx->program->info->vs.needs_draw_id ? 3 : 2;
      }
      break;
   case fragment_fs:
      //user_sgpr_count += ctx->program->info->ps.needs_sample_positions;
      break;
   case compute_cs:
      if (ctx->program->info->cs.uses_grid_size)
         user_sgpr_count += 3;
      break;
   default:
      unreachable("Shader stage not implemented");
   }

   if (needs_view_index)
      user_sgpr_count++;

   if (ctx->program->info->loads_push_constants)
      user_sgpr_count += 1; /* we use 32bit pointers */

   if (ctx->program->info->so.num_outputs)
      user_sgpr_count += 1; /* we use 32bit pointers */

   uint32_t available_sgprs = ctx->options->chip_class >= GFX9 && !(ctx->stage & hw_cs) ? 32 : 16;
   uint32_t remaining_sgprs = available_sgprs - user_sgpr_count;
   uint32_t num_desc_set = util_bitcount(ctx->program->info->desc_set_used_mask);

   if (available_sgprs < user_sgpr_count + num_desc_set) {
      user_sgpr_info.indirect_all_descriptor_sets = true;
      user_sgpr_info.num_sgpr = user_sgpr_count + 1;
      user_sgpr_info.remaining_sgprs = remaining_sgprs - 1;
   } else {
      user_sgpr_info.num_sgpr = user_sgpr_count + num_desc_set;
      user_sgpr_info.remaining_sgprs = remaining_sgprs - num_desc_set;
   }

   allocate_inline_push_consts(ctx, user_sgpr_info);
}
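
/* Scratch pad for declaring the shader arguments before p_startpgm is
 * built: for every argument we record its register class, the Temp* that
 * should receive the newly allocated id, and the fixed physical register
 * (VGPRs are encoded with an offset of 256 in PhysReg). */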
struct arg_info {
   RegClass types[MAX_ARGS];
   Temp *assign[MAX_ARGS];
   PhysReg reg[MAX_ARGS];
   unsigned array_params_mask;
   uint8_t count;
   uint8_t sgpr_count;
   uint8_t num_sgprs_used;
   uint8_t num_vgprs_used;
};
static void
add_arg(arg_info *info, RegClass rc, Temp *param_ptr, unsigned reg)
{
   assert(info->count < MAX_ARGS);

   info->assign[info->count] = param_ptr;
   info->types[info->count] = rc;

   if (rc.type() == RegType::sgpr) {
      info->num_sgprs_used += rc.size();
      info->sgpr_count++;
      info->reg[info->count] = PhysReg{reg};
   } else {
      assert(rc.type() == RegType::vgpr);
      info->num_vgprs_used += rc.size();
      info->reg[info->count] = PhysReg{reg + 256};
   }
   info->count++;
}
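
/* The set_loc* helpers fill radv's user-SGPR location table, which records
 * at which SGPR index each piece of shader metadata (scratch rings,
 * descriptor set pointers, push constants, ...) is expected, so the driver
 * can upload it there at bind time. */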
static void
set_loc(struct radv_userdata_info *ud_info, uint8_t *sgpr_idx, uint8_t num_sgprs)
{
   ud_info->sgpr_idx = *sgpr_idx;
   ud_info->num_sgprs = num_sgprs;
   *sgpr_idx += num_sgprs;
}
static void
set_loc_shader(isel_context *ctx, int idx, uint8_t *sgpr_idx,
               uint8_t num_sgprs)
{
   struct radv_userdata_info *ud_info = &ctx->program->info->user_sgprs_locs.shader_data[idx];
   assert(ud_info);

   set_loc(ud_info, sgpr_idx, num_sgprs);
}
static void
set_loc_shader_ptr(isel_context *ctx, int idx, uint8_t *sgpr_idx)
{
   bool use_32bit_pointers = idx != AC_UD_SCRATCH_RING_OFFSETS;

   set_loc_shader(ctx, idx, sgpr_idx, use_32bit_pointers ? 1 : 2);
}
static void
set_loc_desc(isel_context *ctx, int idx, uint8_t *sgpr_idx)
{
   struct radv_userdata_locations *locs = &ctx->program->info->user_sgprs_locs;
   struct radv_userdata_info *ud_info = &locs->descriptor_sets[idx];
   assert(ud_info);

   set_loc(ud_info, sgpr_idx, 1);
   locs->descriptor_sets_enabled |= 1 << idx;
}
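
/* Declares the SGPR arguments common to all stages: one pointer per
 * descriptor set (or a single indirection pointer if they don't all fit),
 * the push-constant pointer, any inlined push constants, and the streamout
 * buffer pointer. */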
static void
declare_global_input_sgprs(isel_context *ctx,
                           /* bool has_previous_stage, gl_shader_stage previous_stage, */
                           user_sgpr_info *user_sgpr_info,
                           struct arg_info *args,
                           Temp *desc_sets)
{
   /* 1 for each descriptor set */
   if (!user_sgpr_info->indirect_all_descriptor_sets) {
      uint32_t mask = ctx->program->info->desc_set_used_mask;
      while (mask) {
         int i = u_bit_scan(&mask);
         add_arg(args, s1, &desc_sets[i], user_sgpr_info->user_sgpr_idx);
         set_loc_desc(ctx, i, &user_sgpr_info->user_sgpr_idx);
      }
      /* NIR->LLVM might have set this to true if RADV_DEBUG=compiletime */
      ctx->program->info->need_indirect_descriptor_sets = false;
   } else {
      add_arg(args, s1, desc_sets, user_sgpr_info->user_sgpr_idx);
      set_loc_shader_ptr(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS, &user_sgpr_info->user_sgpr_idx);
      ctx->program->info->need_indirect_descriptor_sets = true;
   }

   if (ctx->program->info->loads_push_constants) {
      /* 1 for push constants and dynamic descriptors */
      add_arg(args, s1, &ctx->push_constants, user_sgpr_info->user_sgpr_idx);
      set_loc_shader_ptr(ctx, AC_UD_PUSH_CONSTANTS, &user_sgpr_info->user_sgpr_idx);
   }

   if (ctx->program->info->num_inline_push_consts) {
      unsigned count = ctx->program->info->num_inline_push_consts;
      for (unsigned i = 0; i < count; i++)
         add_arg(args, s1, &ctx->inline_push_consts[i], user_sgpr_info->user_sgpr_idx + i);
      set_loc_shader(ctx, AC_UD_INLINE_PUSH_CONSTANTS, &user_sgpr_info->user_sgpr_idx, count);

      ctx->num_inline_push_consts = ctx->program->info->num_inline_push_consts;
      ctx->base_inline_push_consts = ctx->program->info->base_inline_push_consts;
   }

   if (ctx->program->info->so.num_outputs) {
      add_arg(args, s1, &ctx->streamout_buffers, user_sgpr_info->user_sgpr_idx);
      set_loc_shader_ptr(ctx, AC_UD_STREAMOUT_BUFFERS, &user_sgpr_info->user_sgpr_idx);
   }
}
static void
declare_vs_input_vgprs(isel_context *ctx, struct arg_info *args)
{
   unsigned vgpr_idx = 0;
   add_arg(args, v1, &ctx->vertex_id, vgpr_idx++);
   /* if (!ctx->is_gs_copy_shader) */ {
      if (ctx->options->key.vs.out.as_ls) {
         add_arg(args, v1, &ctx->rel_auto_id, vgpr_idx++);
         add_arg(args, v1, &ctx->instance_id, vgpr_idx++);
      } else {
         add_arg(args, v1, &ctx->instance_id, vgpr_idx++);
         add_arg(args, v1, &ctx->vs_prim_id, vgpr_idx++);
      }
      add_arg(args, v1, NULL, vgpr_idx); /* unused */
   }
}
static void
declare_streamout_sgprs(isel_context *ctx, struct arg_info *args, unsigned *idx)
{
   /* Streamout SGPRs. */
   if (ctx->program->info->so.num_outputs) {
      assert(ctx->stage & hw_vs);

      if (ctx->stage != tess_eval_vs) {
         add_arg(args, s1, &ctx->streamout_config, (*idx)++);
      } else {
         args->assign[args->count - 1] = &ctx->streamout_config;
         args->types[args->count - 1] = s1;
      }

      add_arg(args, s1, &ctx->streamout_write_idx, (*idx)++);
   }

   /* A streamout buffer offset is loaded if the stride is non-zero. */
   for (unsigned i = 0; i < 4; i++) {
      if (!ctx->program->info->so.strides[i])
         continue;

      add_arg(args, s1, &ctx->streamout_offset[i], (*idx)++);
   }
}
static bool needs_view_index_sgpr(isel_context *ctx)
{
   switch (ctx->stage) {
   case vertex_vs:
      return ctx->program->info->needs_multiview_view_index || ctx->options->key.has_multiview_view_index;
   case tess_eval_vs:
      return ctx->program->info->needs_multiview_view_index && ctx->options->key.has_multiview_view_index;
   case vertex_tess_control_ls:
   case vertex_geometry_es:
   case tess_control_hs:
   case tess_eval_es:
   case tess_eval_geometry_es:
   case geometry_gs:
      return ctx->program->info->needs_multiview_view_index;
   default:
      return false;
   }
}
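
/* Declares one fragment shader input VGPR (or two consecutive ones when
 * enable_next is set, e.g. for the i/j barycentric pairs) and sets the
 * matching enable bit in SPI_PS_INPUT_ADDR/ENA. Returns false if the
 * shader never reads that input. */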
static bool
add_fs_arg(isel_context *ctx, arg_info *args, unsigned &vgpr_idx, fs_input input, unsigned value, bool enable_next = false, RegClass rc = v1)
{
   if (!ctx->fs_vgpr_args[input])
      return false;

   add_arg(args, rc, &ctx->fs_inputs[input], vgpr_idx);
   vgpr_idx += rc.size();

   if (enable_next) {
      add_arg(args, rc, &ctx->fs_inputs[input + 1], vgpr_idx);
      vgpr_idx += rc.size();
   }

   ctx->program->config->spi_ps_input_addr |= value;
   ctx->program->config->spi_ps_input_ena |= value;
   return true;
}
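
/* Builds the p_startpgm pseudo instruction: every SGPR/VGPR argument the
 * hardware stage receives becomes a definition fixed to its physical
 * register, plus one trailing definition for the initial exec mask. */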
void add_startpgm(struct isel_context *ctx)
{
   user_sgpr_info user_sgpr_info;
   bool needs_view_index = needs_view_index_sgpr(ctx);
   allocate_user_sgprs(ctx, needs_view_index, user_sgpr_info);
   arg_info args = {};

   /* this needs to be in sgprs 0 and 1 */
   if (ctx->options->supports_spill || user_sgpr_info.need_ring_offsets || ctx->scratch_enabled) {
      add_arg(&args, s2, &ctx->private_segment_buffer, 0);
      set_loc_shader_ptr(ctx, AC_UD_SCRATCH_RING_OFFSETS, &user_sgpr_info.user_sgpr_idx);
   }

   unsigned vgpr_idx = 0;
   switch (ctx->stage) {
   case vertex_vs: {
      declare_global_input_sgprs(ctx, &user_sgpr_info, &args, ctx->descriptor_sets);
      if (ctx->program->info->vs.has_vertex_buffers) {
         add_arg(&args, s1, &ctx->vertex_buffers, user_sgpr_info.user_sgpr_idx);
         set_loc_shader_ptr(ctx, AC_UD_VS_VERTEX_BUFFERS, &user_sgpr_info.user_sgpr_idx);
      }
      add_arg(&args, s1, &ctx->base_vertex, user_sgpr_info.user_sgpr_idx);
      add_arg(&args, s1, &ctx->start_instance, user_sgpr_info.user_sgpr_idx + 1);
      if (ctx->program->info->vs.needs_draw_id) {
         add_arg(&args, s1, &ctx->draw_id, user_sgpr_info.user_sgpr_idx + 2);
         set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE, &user_sgpr_info.user_sgpr_idx, 3);
      } else {
         set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE, &user_sgpr_info.user_sgpr_idx, 2);
      }
      if (needs_view_index) {
         add_arg(&args, s1, &ctx->view_index, user_sgpr_info.user_sgpr_idx);
         set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_info.user_sgpr_idx, 1);
      }

      assert(user_sgpr_info.user_sgpr_idx == user_sgpr_info.num_sgpr);
      unsigned idx = user_sgpr_info.user_sgpr_idx;
      if (ctx->options->key.vs.out.as_es)
         add_arg(&args, s1, &ctx->es2gs_offset, idx++);
      else
         declare_streamout_sgprs(ctx, &args, &idx);

      if (ctx->scratch_enabled)
         add_arg(&args, s1, &ctx->scratch_offset, idx++);

      declare_vs_input_vgprs(ctx, &args);
      break;
   }
   case fragment_fs: {
      declare_global_input_sgprs(ctx, &user_sgpr_info, &args, ctx->descriptor_sets);

      assert(user_sgpr_info.user_sgpr_idx == user_sgpr_info.num_sgpr);
      add_arg(&args, s1, &ctx->prim_mask, user_sgpr_info.user_sgpr_idx);

      if (ctx->scratch_enabled)
         add_arg(&args, s1, &ctx->scratch_offset, user_sgpr_info.user_sgpr_idx + 1);

      ctx->program->config->spi_ps_input_addr = 0;
      ctx->program->config->spi_ps_input_ena = 0;

      bool has_interp_mode = false;

      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::persp_sample_p1, S_0286CC_PERSP_SAMPLE_ENA(1), true);
      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::persp_center_p1, S_0286CC_PERSP_CENTER_ENA(1), true);
      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::persp_centroid_p1, S_0286CC_PERSP_CENTROID_ENA(1), true);
      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::persp_pull_model, S_0286CC_PERSP_PULL_MODEL_ENA(1), false, v3);

      if (!has_interp_mode && ctx->fs_vgpr_args[fs_input::frag_pos_3]) {
         /* If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be enabled too */
         ctx->fs_vgpr_args[fs_input::persp_center_p1] = true;
         has_interp_mode = add_fs_arg(ctx, &args, vgpr_idx, fs_input::persp_center_p1, S_0286CC_PERSP_CENTER_ENA(1), true);
      }

      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::linear_sample_p1, S_0286CC_LINEAR_SAMPLE_ENA(1), true);
      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::linear_center_p1, S_0286CC_LINEAR_CENTER_ENA(1), true);
      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::linear_centroid_p1, S_0286CC_LINEAR_CENTROID_ENA(1), true);
      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::line_stipple, S_0286CC_LINE_STIPPLE_TEX_ENA(1));

      if (!has_interp_mode) {
         /* At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled */
         ctx->fs_vgpr_args[fs_input::persp_center_p1] = true;
         has_interp_mode = add_fs_arg(ctx, &args, vgpr_idx, fs_input::persp_center_p1, S_0286CC_PERSP_CENTER_ENA(1), true);
      }

      add_fs_arg(ctx, &args, vgpr_idx, fs_input::frag_pos_0, S_0286CC_POS_X_FLOAT_ENA(1));
      add_fs_arg(ctx, &args, vgpr_idx, fs_input::frag_pos_1, S_0286CC_POS_Y_FLOAT_ENA(1));
      add_fs_arg(ctx, &args, vgpr_idx, fs_input::frag_pos_2, S_0286CC_POS_Z_FLOAT_ENA(1));
      add_fs_arg(ctx, &args, vgpr_idx, fs_input::frag_pos_3, S_0286CC_POS_W_FLOAT_ENA(1));

      add_fs_arg(ctx, &args, vgpr_idx, fs_input::front_face, S_0286CC_FRONT_FACE_ENA(1));
      add_fs_arg(ctx, &args, vgpr_idx, fs_input::ancillary, S_0286CC_ANCILLARY_ENA(1));
      add_fs_arg(ctx, &args, vgpr_idx, fs_input::sample_coverage, S_0286CC_SAMPLE_COVERAGE_ENA(1));
      add_fs_arg(ctx, &args, vgpr_idx, fs_input::fixed_pt, S_0286CC_POS_FIXED_PT_ENA(1));

      ASSERTED bool unset_interp_mode = !(ctx->program->config->spi_ps_input_addr & 0x7F) ||
                                        (G_0286CC_POS_W_FLOAT_ENA(ctx->program->config->spi_ps_input_addr)
                                         && !(ctx->program->config->spi_ps_input_addr & 0xF));

      assert(has_interp_mode);
      assert(!unset_interp_mode);
      break;
   }
   case compute_cs: {
      declare_global_input_sgprs(ctx, &user_sgpr_info, &args, ctx->descriptor_sets);

      if (ctx->program->info->cs.uses_grid_size) {
         add_arg(&args, s1, &ctx->num_workgroups[0], user_sgpr_info.user_sgpr_idx);
         add_arg(&args, s1, &ctx->num_workgroups[1], user_sgpr_info.user_sgpr_idx + 1);
         add_arg(&args, s1, &ctx->num_workgroups[2], user_sgpr_info.user_sgpr_idx + 2);
         set_loc_shader(ctx, AC_UD_CS_GRID_SIZE, &user_sgpr_info.user_sgpr_idx, 3);
      }
      assert(user_sgpr_info.user_sgpr_idx == user_sgpr_info.num_sgpr);
      unsigned idx = user_sgpr_info.user_sgpr_idx;
      for (unsigned i = 0; i < 3; i++) {
         if (ctx->program->info->cs.uses_block_id[i])
            add_arg(&args, s1, &ctx->workgroup_ids[i], idx++);
      }

      if (ctx->program->info->cs.uses_local_invocation_idx)
         add_arg(&args, s1, &ctx->tg_size, idx++);
      if (ctx->scratch_enabled)
         add_arg(&args, s1, &ctx->scratch_offset, idx++);

      add_arg(&args, v1, &ctx->local_invocation_ids[0], vgpr_idx++);
      add_arg(&args, v1, &ctx->local_invocation_ids[1], vgpr_idx++);
      add_arg(&args, v1, &ctx->local_invocation_ids[2], vgpr_idx++);
      break;
   }
   default:
      unreachable("Shader stage not implemented");
   }

   ctx->program->info->num_input_vgprs = 0;
   ctx->program->info->num_input_sgprs = args.num_sgprs_used;
   ctx->program->info->num_user_sgprs = user_sgpr_info.num_sgpr;
   ctx->program->info->num_input_vgprs = args.num_vgprs_used;

   aco_ptr<Pseudo_instruction> startpgm{create_instruction<Pseudo_instruction>(aco_opcode::p_startpgm, Format::PSEUDO, 0, args.count + 1)};
   for (unsigned i = 0; i < args.count; i++) {
      if (args.assign[i]) {
         *args.assign[i] = Temp{ctx->program->allocateId(), args.types[i]};
         startpgm->definitions[i] = Definition(*args.assign[i]);
         startpgm->definitions[i].setFixed(args.reg[i]);
      }
   }
   startpgm->definitions[args.count] = Definition{ctx->program->allocateId(), exec, s2};
   ctx->block->instructions.push_back(std::move(startpgm));
}
static unsigned
type_size(const struct glsl_type *type, bool bindless)
{
   // TODO: don't we need type->std430_base_alignment() here?
   return glsl_count_attribute_slots(type, false);
}
static void
shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   uint32_t comp_size = glsl_type_is_boolean(type)
      ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);
   *size = comp_size * length;
   *align = comp_size;
}
static int
get_align(nir_variable_mode mode, bool is_store, unsigned bit_size, unsigned num_components)
{
   /* TODO: ACO doesn't have good support for non-32-bit reads/writes yet */
   if (bit_size != 32)
      return -1;

   switch (mode) {
   case nir_var_mem_ubo:
   case nir_var_mem_ssbo:
   //case nir_var_mem_push_const: enable with 1240!
   case nir_var_mem_shared:
      /* TODO: what are the alignment requirements for LDS? */
      return num_components <= 4 ? 4 : -1;
   default:
      return -1;
   }
}
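
/* Assigns driver locations to the VS inputs/outputs and builds the
 * parameter export table: every exported varying gets a slot in
 * vs_output_param_offset, while pos_written collects which position
 * exports (pointsize/layer/viewport and clip/cull distances) are needed. */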
void
setup_vs_variables(isel_context *ctx, nir_shader *nir)
{
   nir_foreach_variable(variable, &nir->inputs)
   {
      variable->data.driver_location = variable->data.location * 4;
   }
   nir_foreach_variable(variable, &nir->outputs)
   {
      variable->data.driver_location = variable->data.location * 4;
   }

   radv_vs_output_info *outinfo = &ctx->program->info->vs.outinfo;

   memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
          sizeof(outinfo->vs_output_param_offset));

   ctx->needs_instance_id = ctx->program->info->vs.needs_instance_id;

   bool export_clip_dists = ctx->options->key.vs_common_out.export_clip_dists;

   outinfo->param_exports = 0;
   int pos_written = 0x1;
   if (outinfo->writes_pointsize || outinfo->writes_viewport_index || outinfo->writes_layer)
      pos_written |= 1 << 1;

   nir_foreach_variable(variable, &nir->outputs)
   {
      int idx = variable->data.location;
      unsigned slots = variable->type->count_attribute_slots(false);
      if (variable->data.compact) {
         unsigned component_count = variable->data.location_frac + variable->type->length;
         slots = (component_count + 3) / 4;
      }

      if (idx >= VARYING_SLOT_VAR0 || idx == VARYING_SLOT_LAYER || idx == VARYING_SLOT_PRIMITIVE_ID ||
          ((idx == VARYING_SLOT_CLIP_DIST0 || idx == VARYING_SLOT_CLIP_DIST1) && export_clip_dists)) {
         for (unsigned i = 0; i < slots; i++) {
            if (outinfo->vs_output_param_offset[idx + i] == AC_EXP_PARAM_UNDEFINED)
               outinfo->vs_output_param_offset[idx + i] = outinfo->param_exports++;
         }
      }
   }
   if (outinfo->writes_layer &&
       outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] == AC_EXP_PARAM_UNDEFINED) {
      /* when ctx->options->key.has_multiview_view_index = true, the layer
       * variable isn't declared in NIR and it's isel's job to get the layer */
      outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] = outinfo->param_exports++;
   }

   if (outinfo->export_prim_id) {
      assert(outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] == AC_EXP_PARAM_UNDEFINED);
      outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = outinfo->param_exports++;
   }

   ctx->num_clip_distances = util_bitcount(outinfo->clip_dist_mask);
   ctx->num_cull_distances = util_bitcount(outinfo->cull_dist_mask);

   assert(ctx->num_clip_distances + ctx->num_cull_distances <= 8);

   if (ctx->num_clip_distances + ctx->num_cull_distances > 0)
      pos_written |= 1 << 2;
   if (ctx->num_clip_distances + ctx->num_cull_distances > 4)
      pos_written |= 1 << 3;

   outinfo->pos_exports = util_bitcount(pos_written);
}
void
setup_variables(isel_context *ctx, nir_shader *nir)
{
   switch (nir->info.stage) {
   case MESA_SHADER_FRAGMENT: {
      nir_foreach_variable(variable, &nir->outputs)
      {
         int idx = variable->data.location + variable->data.index;
         variable->data.driver_location = idx * 4;
      }
      break;
   }
   case MESA_SHADER_COMPUTE: {
      unsigned lds_allocation_size_unit = 4 * 64;
      if (ctx->program->chip_class >= GFX7)
         lds_allocation_size_unit = 4 * 128;
      ctx->program->config->lds_size = (nir->info.cs.shared_size + lds_allocation_size_unit - 1) / lds_allocation_size_unit;
      break;
   }
   case MESA_SHADER_VERTEX: {
      setup_vs_variables(ctx, nir);
      break;
   }
   default:
      unreachable("Unhandled shader stage.");
   }
}
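
/* Top-level setup: maps the set of software stages onto a hardware stage,
 * fills in the Program fields, runs the NIR lowering and optimization
 * chain on every shader, and creates the top-level block that instruction
 * selection starts in. */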
isel_context
setup_isel_context(Program* program,
                   unsigned shader_count,
                   struct nir_shader *const *shaders,
                   ac_shader_config* config,
                   radv_shader_info *info,
                   radv_nir_compiler_options *options)
{
   for (unsigned i = 0; i < shader_count; i++) {
      switch (shaders[i]->info.stage) {
      case MESA_SHADER_VERTEX:
         program->stage |= sw_vs;
         break;
      case MESA_SHADER_TESS_CTRL:
         program->stage |= sw_tcs;
         break;
      case MESA_SHADER_TESS_EVAL:
         program->stage |= sw_tes;
         break;
      case MESA_SHADER_GEOMETRY:
         program->stage |= sw_gs;
         break;
      case MESA_SHADER_FRAGMENT:
         program->stage |= sw_fs;
         break;
      case MESA_SHADER_COMPUTE:
         program->stage |= sw_cs;
         break;
      default:
         unreachable("Shader stage not implemented");
      }
   }

   if (program->stage == sw_vs)
      program->stage |= hw_vs;
   else if (program->stage == sw_fs)
      program->stage |= hw_fs;
   else if (program->stage == sw_cs)
      program->stage |= hw_cs;
   else
      unreachable("Shader stage not implemented");

   program->config = config;
   program->info = info;
   program->chip_class = options->chip_class;
   program->family = options->family;
   program->wave_size = options->wave_size;
   program->sgpr_limit = options->chip_class >= GFX8 ? 102 : 104;
   if (options->family == CHIP_TONGA || options->family == CHIP_ICELAND)
      program->sgpr_limit = 94; /* workaround hardware bug */

   for (unsigned i = 0; i < MAX_SETS; ++i)
      program->info->user_sgprs_locs.descriptor_sets[i].sgpr_idx = -1;
   for (unsigned i = 0; i < AC_UD_MAX_UD; ++i)
      program->info->user_sgprs_locs.shader_data[i].sgpr_idx = -1;

   isel_context ctx = {};
   ctx.program = program;
   ctx.options = options;
   ctx.stage = program->stage;

   for (unsigned i = 0; i < fs_input::max_inputs; ++i)
      ctx.fs_inputs[i] = Temp(0, v1);
   ctx.fs_inputs[fs_input::persp_pull_model] = Temp(0, v3);
   for (unsigned i = 0; i < MAX_SETS; ++i)
      ctx.descriptor_sets[i] = Temp(0, s1);
   for (unsigned i = 0; i < MAX_INLINE_PUSH_CONSTS; ++i)
      ctx.inline_push_consts[i] = Temp(0, s1);
   for (unsigned i = 0; i <= VARYING_SLOT_VAR31; ++i) {
      for (unsigned j = 0; j < 4; ++j)
         ctx.vs_output.outputs[i][j] = Temp(0, v1);
   }
   for (unsigned i = 0; i < shader_count; i++) {
      nir_shader *nir = shaders[i];

      /* align and copy constant data */
      while (program->constant_data.size() % 4u)
         program->constant_data.push_back(0);
      ctx.constant_data_offset = program->constant_data.size();
      program->constant_data.insert(program->constant_data.end(),
                                    (uint8_t*)nir->constant_data,
                                    (uint8_t*)nir->constant_data + nir->constant_data_size);

      /* the variable setup has to be done before lower_io / CSE */
      if (nir->info.stage == MESA_SHADER_COMPUTE)
         nir_lower_vars_to_explicit_types(nir, nir_var_mem_shared, shared_var_info);
      setup_variables(&ctx, nir);

      /* optimize and lower memory operations */
      bool lower_to_scalar = false;
      bool lower_pack = false;
      // TODO: uncomment this once !1240 is merged
      /*if (nir_opt_load_store_vectorize(nir,
                                       (nir_variable_mode)(nir_var_mem_ssbo | nir_var_mem_ubo |
                                                           nir_var_mem_push_const | nir_var_mem_shared),
                                       get_align)) {
         lower_to_scalar = true;
         lower_pack = true;
      }*/
      if (nir->info.stage == MESA_SHADER_COMPUTE)
         lower_to_scalar |= nir_lower_explicit_io(nir, nir_var_mem_shared, nir_address_format_32bit_offset);
      else
         nir_lower_io(nir, (nir_variable_mode)(nir_var_shader_in | nir_var_shader_out), type_size, (nir_lower_io_options)0);
      nir_lower_explicit_io(nir, nir_var_mem_global, nir_address_format_64bit_global);

      if (lower_to_scalar)
         nir_lower_alu_to_scalar(nir, NULL, NULL);
      if (lower_pack)
         nir_lower_pack(nir);

      /* lower ALU operations */
      // TODO: implement logic64 in aco, it's more effective for sgprs
      nir_lower_int64(nir, (nir_lower_int64_options) (nir_lower_imul64 |
                                                      nir_lower_imul_high64 |
                                                      nir_lower_imul_2x32_64 |
                                                      nir_lower_divmod64 |
                                                      nir_lower_logic64 |
                                                      nir_lower_minmax64 |
                                                      nir_lower_iabs64));

      nir_opt_idiv_const(nir, 32);
      nir_lower_idiv(nir); // TODO: use the LLVM path once !1239 is merged

      /* optimize the lowered ALU operations */
      nir_copy_prop(nir);
      nir_opt_constant_folding(nir);
      nir_opt_algebraic(nir);

      /* Do late algebraic optimization to turn add(a, neg(b)) back into
       * subs, then the mandatory cleanup after algebraic. Note that it may
       * produce fnegs, and if so then we need to keep running to squash
       * fneg(fneg(a)).
       */
      bool more_late_algebraic = true;
      while (more_late_algebraic) {
         more_late_algebraic = false;
         NIR_PASS(more_late_algebraic, nir, nir_opt_algebraic_late);
         NIR_PASS_V(nir, nir_opt_constant_folding);
         NIR_PASS_V(nir, nir_copy_prop);
         NIR_PASS_V(nir, nir_opt_dce);
         NIR_PASS_V(nir, nir_opt_cse);
      }

      /* cleanup passes */
      nir_lower_load_const_to_scalar(nir);
      nir_opt_shrink_load(nir);
      nir_move_options move_opts = (nir_move_options)(
         nir_move_const_undef | nir_move_load_ubo | nir_move_load_input | nir_move_comparisons);
      nir_opt_sink(nir, move_opts);
      nir_opt_move(nir, move_opts);
      nir_convert_to_lcssa(nir, true, false);
      nir_lower_phis_to_scalar(nir);

      nir_function_impl *func = nir_shader_get_entrypoint(nir);
      nir_index_ssa_defs(func);

      if (options->dump_preoptir) {
         fprintf(stderr, "NIR shader before instruction selection:\n");
         nir_print_shader(nir, stderr);
      }
   }

   unsigned scratch_size = 0;
   for (unsigned i = 0; i < shader_count; i++)
      scratch_size = std::max(scratch_size, shaders[i]->scratch_size);
   ctx.scratch_enabled = scratch_size > 0;
   ctx.program->config->scratch_bytes_per_wave = align(scratch_size * ctx.options->wave_size, 1024);
   ctx.program->config->float_mode = V_00B028_FP_64_DENORMS;
   ctx.program->info->wave_size = ctx.options->wave_size;

   ctx.block = ctx.program->create_and_insert_block();
   ctx.block->loop_nest_depth = 0;
   ctx.block->kind = block_kind_top_level;

   return ctx;
}