/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <array>
#include <unordered_map>

#include "aco_ir.h"
#include "nir.h"
#include "vulkan/radv_shader.h"
#include "vulkan/radv_descriptor_set.h"
#include "sid.h"
#include "ac_exp_param.h"
#include "ac_shader_util.h"
#include "util/u_math.h"

#define MAX_INLINE_PUSH_CONSTS 8
struct vs_output_state {
   uint8_t mask[VARYING_SLOT_VAR31 + 1];
   Temp outputs[VARYING_SLOT_VAR31 + 1][4];
};
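/* Instruction selection context: per-shader state threaded through isel.
 * The Temp members start out with id 0 (i.e. unset); add_startpgm() later
 * replaces the ones that are actually used with freshly allocated ids. */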
struct isel_context {
   struct radv_nir_compiler_options *options;
   Program *program;
   nir_shader *shader;
   uint32_t constant_data_offset;
   Block *block;
   bool *divergent_vals;
   std::unique_ptr<Temp[]> allocated;
   std::unordered_map<unsigned, std::array<Temp,4>> allocated_vec;
   Stage stage; /* Stage */
   uint16_t loop_nest_depth = 0;
   bool has_divergent_continue = false;
   bool has_divergent_branch = false;
   bool is_divergent = false;
   bool exec_potentially_empty = false;

   /* scratch */
   bool scratch_enabled = false;
   Temp private_segment_buffer = Temp(0, s2); /* also the part of the scratch descriptor on compute */
   Temp scratch_offset = Temp(0, s1);

   /* inputs common for merged stages */
   Temp merged_wave_info = Temp(0, s1);

   /* FS inputs */
   bool fs_vgpr_args[fs_input::max_inputs];
   Temp fs_inputs[fs_input::max_inputs];
   Temp prim_mask = Temp(0, s1);
   Temp descriptor_sets[MAX_SETS];
   Temp push_constants = Temp(0, s1);
   Temp inline_push_consts[MAX_INLINE_PUSH_CONSTS];
   unsigned num_inline_push_consts = 0;
   unsigned base_inline_push_consts = 0;

   /* VS inputs */
   Temp vertex_buffers = Temp(0, s1);
   Temp base_vertex = Temp(0, s1);
   Temp start_instance = Temp(0, s1);
   Temp draw_id = Temp(0, s1);
   Temp view_index = Temp(0, s1);
   Temp es2gs_offset = Temp(0, s1);
   Temp vertex_id = Temp(0, v1);
   Temp rel_auto_id = Temp(0, v1);
   Temp instance_id = Temp(0, v1);
   Temp vs_prim_id = Temp(0, v1);
   bool needs_instance_id;

   /* CS inputs */
   Temp num_workgroups[3] = {Temp(0, s1), Temp(0, s1), Temp(0, s1)};
   Temp workgroup_ids[3] = {Temp(0, s1), Temp(0, s1), Temp(0, s1)};
   Temp tg_size = Temp(0, s1);
   Temp local_invocation_ids[3] = {Temp(0, v1), Temp(0, v1), Temp(0, v1)};

   /* VS output information */
   unsigned num_clip_distances;
   unsigned num_cull_distances;
   vs_output_state vs_output;

   /* Streamout */
   Temp streamout_buffers = Temp(0, s1);
   Temp streamout_write_idx = Temp(0, s1);
   Temp streamout_config = Temp(0, s1);
   Temp streamout_offset[4] = {Temp(0, s1), Temp(0, s1), Temp(0, s1), Temp(0, s1)};
};
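/* Maps a load_barycentric_* intrinsic plus its interpolation mode to the
 * fixed-function FS input (PERSP_* or LINEAR_*) that must be enabled for it.
 * Returns fs_input::max_inputs if no barycentric VGPR input is needed. */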
fs_input
get_interp_input(nir_intrinsic_op intrin, enum glsl_interp_mode interp)
{
   switch (interp) {
   case INTERP_MODE_SMOOTH:
   case INTERP_MODE_NONE:
      if (intrin == nir_intrinsic_load_barycentric_pixel ||
          intrin == nir_intrinsic_load_barycentric_at_sample ||
          intrin == nir_intrinsic_load_barycentric_at_offset)
         return fs_input::persp_center_p1;
      else if (intrin == nir_intrinsic_load_barycentric_centroid)
         return fs_input::persp_centroid_p1;
      else if (intrin == nir_intrinsic_load_barycentric_sample)
         return fs_input::persp_sample_p1;
      break;
   case INTERP_MODE_NOPERSPECTIVE:
      if (intrin == nir_intrinsic_load_barycentric_pixel)
         return fs_input::linear_center_p1;
      else if (intrin == nir_intrinsic_load_barycentric_centroid)
         return fs_input::linear_centroid_p1;
      else if (intrin == nir_intrinsic_load_barycentric_sample)
         return fs_input::linear_sample_p1;
      break;
   default:
      break;
   }
   return fs_input::max_inputs;
}
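/* Pre-pass over the NIR shader: picks an ACO register class (sgpr vs. vgpr,
 * and a size in dwords) for every SSA definition, based on the divergence
 * analysis and on the classes already chosen for the operands. It iterates
 * to a fixed point because phis may reference defs that are not classified
 * yet on the first sweep. */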
void init_context(isel_context *ctx, nir_shader *shader)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   ctx->shader = shader;
   ctx->divergent_vals = nir_divergence_analysis(shader, nir_divergence_view_index_uniform);

   std::unique_ptr<Temp[]> allocated{new Temp[impl->ssa_alloc]()};
   memset(&ctx->fs_vgpr_args, false, sizeof(ctx->fs_vgpr_args));
   bool done = false;
   while (!done) {
      done = true;
      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            switch(instr->type) {
            case nir_instr_type_alu: {
               nir_alu_instr *alu_instr = nir_instr_as_alu(instr);
               unsigned size = alu_instr->dest.dest.ssa.num_components;
               if (alu_instr->dest.dest.ssa.bit_size == 64)
                  size *= 2;
               RegType type = RegType::sgpr;
               switch(alu_instr->op) {
               case nir_op_fround_even:
               case nir_op_pack_half_2x16:
               case nir_op_unpack_half_2x16_split_x:
               case nir_op_unpack_half_2x16_split_y:
               case nir_op_fddx_fine:
               case nir_op_fddy_fine:
               case nir_op_fddx_coarse:
               case nir_op_fddy_coarse:
               case nir_op_fquantize2f16:
               case nir_op_frexp_sig:
               case nir_op_frexp_exp:
               case nir_op_cube_face_index:
               case nir_op_cube_face_coord:
                  type = RegType::vgpr;
                  break;
               case nir_op_ilt:
               case nir_op_ige:
               case nir_op_ult:
               case nir_op_uge:
                  size = alu_instr->src[0].src.ssa->bit_size == 64 ? 2 : 1;
                  /* fallthrough */
               case nir_op_ieq:
               case nir_op_ine:
                  if (ctx->divergent_vals[alu_instr->dest.dest.ssa.index]) {
                     size = 2;
                  } else {
                     for (unsigned i = 0; i < nir_op_infos[alu_instr->op].num_inputs; i++) {
                        if (allocated[alu_instr->src[i].src.ssa->index].type() == RegType::vgpr)
                           size = 2;
                     }
                  }
                  break;
               case nir_op_f2i32:
               case nir_op_f2u32:
                  type = ctx->divergent_vals[alu_instr->dest.dest.ssa.index] ? RegType::vgpr : RegType::sgpr;
                  break;
               case nir_op_bcsel:
                  if (alu_instr->dest.dest.ssa.bit_size == 1) {
                     if (ctx->divergent_vals[alu_instr->dest.dest.ssa.index])
                        size = 2;
                     else if (allocated[alu_instr->src[1].src.ssa->index].regClass() == s2 &&
                              allocated[alu_instr->src[2].src.ssa->index].regClass() == s2)
                        size = 2;
                     else
                        size = 1;
                  } else {
                     if (ctx->divergent_vals[alu_instr->dest.dest.ssa.index]) {
                        type = RegType::vgpr;
                     } else {
                        if (allocated[alu_instr->src[1].src.ssa->index].type() == RegType::vgpr ||
                            allocated[alu_instr->src[2].src.ssa->index].type() == RegType::vgpr) {
                           type = RegType::vgpr;
                        }
                     }
                     if (alu_instr->src[1].src.ssa->num_components == 1 && alu_instr->src[2].src.ssa->num_components == 1) {
                        assert(allocated[alu_instr->src[1].src.ssa->index].size() == allocated[alu_instr->src[2].src.ssa->index].size());
                        size = allocated[alu_instr->src[1].src.ssa->index].size();
                     }
                  }
                  break;
               case nir_op_mov:
                  if (alu_instr->dest.dest.ssa.bit_size == 1) {
                     size = allocated[alu_instr->src[0].src.ssa->index].size();
                  } else {
                     type = ctx->divergent_vals[alu_instr->dest.dest.ssa.index] ? RegType::vgpr : RegType::sgpr;
                  }
                  break;
               case nir_op_inot:
               case nir_op_ixor:
                  if (alu_instr->dest.dest.ssa.bit_size == 1) {
                     size = ctx->divergent_vals[alu_instr->dest.dest.ssa.index] ? 2 : 1;
                     break;
                  }
                  /* fallthrough */
               default:
                  if (alu_instr->dest.dest.ssa.bit_size == 1) {
                     if (ctx->divergent_vals[alu_instr->dest.dest.ssa.index]) {
                        size = 2;
                     } else {
                        size = 2;
                        for (unsigned i = 0; i < nir_op_infos[alu_instr->op].num_inputs; i++) {
                           if (allocated[alu_instr->src[i].src.ssa->index].regClass() == s1) {
                              size = 1;
                              break;
                           }
                        }
                     }
                  } else {
                     for (unsigned i = 0; i < nir_op_infos[alu_instr->op].num_inputs; i++) {
                        if (allocated[alu_instr->src[i].src.ssa->index].type() == RegType::vgpr)
                           type = RegType::vgpr;
                     }
                  }
                  break;
               }
               allocated[alu_instr->dest.dest.ssa.index] = Temp(0, RegClass(type, size));
               break;
            }
            case nir_instr_type_load_const: {
               unsigned size = nir_instr_as_load_const(instr)->def.num_components;
               if (nir_instr_as_load_const(instr)->def.bit_size == 64)
                  size *= 2;
               allocated[nir_instr_as_load_const(instr)->def.index] = Temp(0, RegClass(RegType::sgpr, size));
               break;
            }
            case nir_instr_type_intrinsic: {
               nir_intrinsic_instr *intrinsic = nir_instr_as_intrinsic(instr);
               if (!nir_intrinsic_infos[intrinsic->intrinsic].has_dest)
                  break;
               unsigned size = intrinsic->dest.ssa.num_components;
               if (intrinsic->dest.ssa.bit_size == 64)
                  size *= 2;
               RegType type = RegType::sgpr;
               switch(intrinsic->intrinsic) {
               case nir_intrinsic_load_push_constant:
               case nir_intrinsic_load_work_group_id:
               case nir_intrinsic_load_num_work_groups:
               case nir_intrinsic_load_subgroup_id:
               case nir_intrinsic_load_num_subgroups:
               case nir_intrinsic_load_first_vertex:
               case nir_intrinsic_load_base_instance:
               case nir_intrinsic_get_buffer_size:
               case nir_intrinsic_vote_all:
               case nir_intrinsic_vote_any:
               case nir_intrinsic_read_first_invocation:
               case nir_intrinsic_read_invocation:
               case nir_intrinsic_first_invocation:
               case nir_intrinsic_vulkan_resource_index:
                  type = RegType::sgpr;
                  break;
               case nir_intrinsic_ballot:
                  type = RegType::sgpr;
                  size = 2;
                  break;
               case nir_intrinsic_load_sample_id:
               case nir_intrinsic_load_sample_mask_in:
               case nir_intrinsic_load_input:
               case nir_intrinsic_load_vertex_id:
               case nir_intrinsic_load_vertex_id_zero_base:
               case nir_intrinsic_load_barycentric_sample:
               case nir_intrinsic_load_barycentric_pixel:
               case nir_intrinsic_load_barycentric_centroid:
               case nir_intrinsic_load_barycentric_at_sample:
               case nir_intrinsic_load_barycentric_at_offset:
               case nir_intrinsic_load_interpolated_input:
               case nir_intrinsic_load_frag_coord:
               case nir_intrinsic_load_sample_pos:
               case nir_intrinsic_load_layer_id:
               case nir_intrinsic_load_local_invocation_id:
               case nir_intrinsic_load_local_invocation_index:
               case nir_intrinsic_load_subgroup_invocation:
               case nir_intrinsic_write_invocation_amd:
               case nir_intrinsic_mbcnt_amd:
               case nir_intrinsic_load_instance_id:
               case nir_intrinsic_ssbo_atomic_add:
               case nir_intrinsic_ssbo_atomic_imin:
               case nir_intrinsic_ssbo_atomic_umin:
               case nir_intrinsic_ssbo_atomic_imax:
               case nir_intrinsic_ssbo_atomic_umax:
               case nir_intrinsic_ssbo_atomic_and:
               case nir_intrinsic_ssbo_atomic_or:
               case nir_intrinsic_ssbo_atomic_xor:
               case nir_intrinsic_ssbo_atomic_exchange:
               case nir_intrinsic_ssbo_atomic_comp_swap:
               case nir_intrinsic_image_deref_atomic_add:
               case nir_intrinsic_image_deref_atomic_umin:
               case nir_intrinsic_image_deref_atomic_imin:
               case nir_intrinsic_image_deref_atomic_umax:
               case nir_intrinsic_image_deref_atomic_imax:
               case nir_intrinsic_image_deref_atomic_and:
               case nir_intrinsic_image_deref_atomic_or:
               case nir_intrinsic_image_deref_atomic_xor:
               case nir_intrinsic_image_deref_atomic_exchange:
               case nir_intrinsic_image_deref_atomic_comp_swap:
               case nir_intrinsic_image_deref_size:
               case nir_intrinsic_shared_atomic_add:
               case nir_intrinsic_shared_atomic_imin:
               case nir_intrinsic_shared_atomic_umin:
               case nir_intrinsic_shared_atomic_imax:
               case nir_intrinsic_shared_atomic_umax:
               case nir_intrinsic_shared_atomic_and:
               case nir_intrinsic_shared_atomic_or:
               case nir_intrinsic_shared_atomic_xor:
               case nir_intrinsic_shared_atomic_exchange:
               case nir_intrinsic_shared_atomic_comp_swap:
               case nir_intrinsic_load_scratch:
                  type = RegType::vgpr;
                  break;
               case nir_intrinsic_shuffle:
               case nir_intrinsic_quad_broadcast:
               case nir_intrinsic_quad_swap_horizontal:
               case nir_intrinsic_quad_swap_vertical:
               case nir_intrinsic_quad_swap_diagonal:
               case nir_intrinsic_quad_swizzle_amd:
               case nir_intrinsic_masked_swizzle_amd:
               case nir_intrinsic_inclusive_scan:
               case nir_intrinsic_exclusive_scan:
                  if (!ctx->divergent_vals[intrinsic->dest.ssa.index]) {
                     type = RegType::sgpr;
                  } else if (intrinsic->src[0].ssa->bit_size == 1) {
                     type = RegType::sgpr;
                     size = 2;
                  } else {
                     type = RegType::vgpr;
                  }
                  break;
               case nir_intrinsic_load_view_index:
                  type = ctx->stage == fragment_fs ? RegType::vgpr : RegType::sgpr;
                  break;
               case nir_intrinsic_load_front_face:
               case nir_intrinsic_load_helper_invocation:
               case nir_intrinsic_is_helper_invocation:
                  type = RegType::sgpr;
                  size = 2;
                  break;
               case nir_intrinsic_reduce:
                  if (nir_intrinsic_cluster_size(intrinsic) == 0 ||
                      !ctx->divergent_vals[intrinsic->dest.ssa.index]) {
                     type = RegType::sgpr;
                  } else if (intrinsic->src[0].ssa->bit_size == 1) {
                     type = RegType::sgpr;
                     size = 2;
                  } else {
                     type = RegType::vgpr;
                  }
                  break;
               case nir_intrinsic_load_ubo:
               case nir_intrinsic_load_ssbo:
               case nir_intrinsic_load_global:
                  type = ctx->divergent_vals[intrinsic->dest.ssa.index] ? RegType::vgpr : RegType::sgpr;
                  break;
               /* due to copy propagation, the swizzled imov is removed if num dest components == 1 */
               case nir_intrinsic_load_shared:
                  if (ctx->divergent_vals[intrinsic->dest.ssa.index])
                     type = RegType::vgpr;
                  else
                     type = RegType::sgpr;
                  break;
               default:
                  for (unsigned i = 0; i < nir_intrinsic_infos[intrinsic->intrinsic].num_srcs; i++) {
                     if (allocated[intrinsic->src[i].ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                  }
                  break;
               }
               allocated[intrinsic->dest.ssa.index] = Temp(0, RegClass(type, size));

               switch(intrinsic->intrinsic) {
               case nir_intrinsic_load_barycentric_sample:
               case nir_intrinsic_load_barycentric_pixel:
               case nir_intrinsic_load_barycentric_centroid:
               case nir_intrinsic_load_barycentric_at_sample:
               case nir_intrinsic_load_barycentric_at_offset: {
                  glsl_interp_mode mode = (glsl_interp_mode)nir_intrinsic_interp_mode(intrinsic);
                  ctx->fs_vgpr_args[get_interp_input(intrinsic->intrinsic, mode)] = true;
                  break;
               }
               case nir_intrinsic_load_front_face:
                  ctx->fs_vgpr_args[fs_input::front_face] = true;
                  break;
               case nir_intrinsic_load_frag_coord:
               case nir_intrinsic_load_sample_pos: {
                  uint8_t mask = nir_ssa_def_components_read(&intrinsic->dest.ssa);
                  for (unsigned i = 0; i < 4; i++) {
                     if (mask & (1 << i))
                        ctx->fs_vgpr_args[fs_input::frag_pos_0 + i] = true;
                  }
                  break;
               }
               case nir_intrinsic_load_sample_id:
                  ctx->fs_vgpr_args[fs_input::ancillary] = true;
                  break;
               case nir_intrinsic_load_sample_mask_in:
                  ctx->fs_vgpr_args[fs_input::ancillary] = true;
                  ctx->fs_vgpr_args[fs_input::sample_coverage] = true;
                  break;
               default:
                  break;
               }
               break;
            }
            case nir_instr_type_tex: {
               nir_tex_instr* tex = nir_instr_as_tex(instr);
               unsigned size = tex->dest.ssa.num_components;

               if (tex->dest.ssa.bit_size == 64)
                  size *= 2;
               if (tex->op == nir_texop_texture_samples)
                  assert(!ctx->divergent_vals[tex->dest.ssa.index]);
               if (ctx->divergent_vals[tex->dest.ssa.index])
                  allocated[tex->dest.ssa.index] = Temp(0, RegClass(RegType::vgpr, size));
               else
                  allocated[tex->dest.ssa.index] = Temp(0, RegClass(RegType::sgpr, size));
               break;
            }
            case nir_instr_type_parallel_copy: {
               nir_foreach_parallel_copy_entry(entry, nir_instr_as_parallel_copy(instr)) {
                  allocated[entry->dest.ssa.index] = allocated[entry->src.ssa->index];
               }
               break;
            }
            case nir_instr_type_ssa_undef: {
               unsigned size = nir_instr_as_ssa_undef(instr)->def.num_components;
               if (nir_instr_as_ssa_undef(instr)->def.bit_size == 64)
                  size *= 2;
               allocated[nir_instr_as_ssa_undef(instr)->def.index] = Temp(0, RegClass(RegType::sgpr, size));
               break;
            }
            case nir_instr_type_phi: {
               nir_phi_instr* phi = nir_instr_as_phi(instr);
               RegType type;
               unsigned size = phi->dest.ssa.num_components;

               if (phi->dest.ssa.bit_size == 1) {
                  assert(size == 1 && "multiple components not yet supported on boolean phis.");
                  type = RegType::sgpr;
                  size *= ctx->divergent_vals[phi->dest.ssa.index] ? 2 : 1;
                  allocated[phi->dest.ssa.index] = Temp(0, RegClass(type, size));
                  break;
               }

               if (ctx->divergent_vals[phi->dest.ssa.index]) {
                  type = RegType::vgpr;
               } else {
                  type = RegType::sgpr;
                  nir_foreach_phi_src (src, phi) {
                     if (allocated[src->src.ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                     if (allocated[src->src.ssa->index].type() == RegType::none)
                        done = false;
                  }
               }

               size *= phi->dest.ssa.bit_size == 64 ? 2 : 1;
               RegClass rc = RegClass(type, size);
               if (rc != allocated[phi->dest.ssa.index].regClass()) {
                  done = false;
               } else {
                  nir_foreach_phi_src(src, phi)
                     assert(allocated[src->src.ssa->index].size() == rc.size());
               }
               allocated[phi->dest.ssa.index] = Temp(0, rc);
               break;
            }
            default:
               break;
            }
         }
      }
   }

   for (unsigned i = 0; i < impl->ssa_alloc; i++)
      allocated[i] = Temp(ctx->program->allocateId(), allocated[i].regClass());

   ctx->allocated.reset(allocated.release());
}
struct user_sgpr_info {
   uint8_t num_sgpr;
   uint8_t remaining_sgprs;
   uint8_t user_sgpr_idx;
   bool need_ring_offsets;
   bool indirect_all_descriptor_sets;
};
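/* Tries to pass a contiguous range of 32-bit push constants directly in user
 * SGPRs instead of loading them from the push-constant buffer at runtime. */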
static void allocate_inline_push_consts(isel_context *ctx,
                                        user_sgpr_info& user_sgpr_info)
{
   uint8_t remaining_sgprs = user_sgpr_info.remaining_sgprs;

   /* Only supported if shaders use push constants. */
   if (ctx->program->info->min_push_constant_used == UINT8_MAX)
      return;

   /* Only supported if shaders don't have indirect push constants. */
   if (ctx->program->info->has_indirect_push_constants)
      return;

   /* Only supported for 32-bit push constants. */
   //TODO: it's possible that some day, the load/store vectorization could make this inaccurate
   if (!ctx->program->info->has_only_32bit_push_constants)
      return;

   uint8_t num_push_consts =
      (ctx->program->info->max_push_constant_used -
       ctx->program->info->min_push_constant_used) / 4;

   /* Check if the number of user SGPRs is large enough. */
   if (num_push_consts < remaining_sgprs) {
      ctx->program->info->num_inline_push_consts = num_push_consts;
   } else {
      ctx->program->info->num_inline_push_consts = remaining_sgprs;
   }

   /* Clamp to the maximum number of allowed inlined push constants. */
   if (ctx->program->info->num_inline_push_consts > MAX_INLINE_PUSH_CONSTS)
      ctx->program->info->num_inline_push_consts = MAX_INLINE_PUSH_CONSTS;

   if (ctx->program->info->num_inline_push_consts == num_push_consts &&
       !ctx->program->info->loads_dynamic_offsets) {
      /* Disable the default push constants path if all constants are
       * inlined and if shaders don't use dynamic descriptors.
       */
      ctx->program->info->loads_push_constants = false;
      user_sgpr_info.num_sgpr--;
      user_sgpr_info.remaining_sgprs++;
   }

   ctx->program->info->base_inline_push_consts =
      ctx->program->info->min_push_constant_used / 4;

   user_sgpr_info.num_sgpr += ctx->program->info->num_inline_push_consts;
   user_sgpr_info.remaining_sgprs -= ctx->program->info->num_inline_push_consts;
}
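/* Counts the user SGPRs needed by the current stage and decides whether all
 * descriptor sets still fit, or have to be passed through a single indirect
 * pointer instead. */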
static void allocate_user_sgprs(isel_context *ctx,
                                bool needs_view_index, user_sgpr_info& user_sgpr_info)
{
   memset(&user_sgpr_info, 0, sizeof(struct user_sgpr_info));
   uint32_t user_sgpr_count = 0;

   /* until we sort out scratch/global buffers always assign ring offsets for gs/vs/es */
   if (ctx->stage != fragment_fs &&
       ctx->stage != compute_cs
       /*|| ctx->is_gs_copy_shader */)
      user_sgpr_info.need_ring_offsets = true;

   if (ctx->stage == fragment_fs &&
       ctx->program->info->ps.needs_sample_positions)
      user_sgpr_info.need_ring_offsets = true;

   /* 2 user sgprs will nearly always be allocated for scratch/rings */
   if (ctx->options->supports_spill || user_sgpr_info.need_ring_offsets || ctx->scratch_enabled)
      user_sgpr_count += 2;

   switch (ctx->stage) {
   case vertex_vs:
      /* if (!ctx->is_gs_copy_shader) */ {
         if (ctx->program->info->vs.has_vertex_buffers)
            user_sgpr_count++;
         user_sgpr_count += ctx->program->info->vs.needs_draw_id ? 3 : 2;
      }
      break;
   case fragment_fs:
      //user_sgpr_count += ctx->program->info->ps.needs_sample_positions;
      break;
   case compute_cs:
      if (ctx->program->info->cs.uses_grid_size)
         user_sgpr_count += 3;
      break;
   default:
      unreachable("Shader stage not implemented");
   }

   if (needs_view_index)
      user_sgpr_count++;

   if (ctx->program->info->loads_push_constants)
      user_sgpr_count += 1; /* we use 32bit pointers */

   if (ctx->program->info->so.num_outputs)
      user_sgpr_count += 1; /* we use 32bit pointers */

   uint32_t available_sgprs = ctx->options->chip_class >= GFX9 && !(ctx->stage & hw_cs) ? 32 : 16;
   uint32_t remaining_sgprs = available_sgprs - user_sgpr_count;
   uint32_t num_desc_set = util_bitcount(ctx->program->info->desc_set_used_mask);

   if (available_sgprs < user_sgpr_count + num_desc_set) {
      user_sgpr_info.indirect_all_descriptor_sets = true;
      user_sgpr_info.num_sgpr = user_sgpr_count + 1;
      user_sgpr_info.remaining_sgprs = remaining_sgprs - 1;
   } else {
      user_sgpr_info.num_sgpr = user_sgpr_count + num_desc_set;
      user_sgpr_info.remaining_sgprs = remaining_sgprs - num_desc_set;
   }

   allocate_inline_push_consts(ctx, user_sgpr_info);
}
struct arg_info {
   RegClass types[MAX_ARGS];
   Temp *assign[MAX_ARGS];
   PhysReg reg[MAX_ARGS];
   unsigned array_params_mask;
   uint8_t count;
   uint8_t num_sgprs_used;
   uint8_t num_vgprs_used;
};
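/* Appends one shader argument: records its register class, where to store the
 * resulting Temp, and the fixed physical register (VGPRs start at reg 256). */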
static void
add_arg(arg_info *info, RegClass rc, Temp *param_ptr, unsigned reg)
{
   assert(info->count < MAX_ARGS);

   info->assign[info->count] = param_ptr;
   info->types[info->count] = rc;

   if (rc.type() == RegType::sgpr) {
      info->num_sgprs_used += rc.size();
      info->reg[info->count] = PhysReg{reg};
   } else {
      assert(rc.type() == RegType::vgpr);
      info->num_vgprs_used += rc.size();
      info->reg[info->count] = PhysReg{reg + 256};
   }
   info->count++;
}
static void
set_loc(struct radv_userdata_info *ud_info, uint8_t *sgpr_idx, uint8_t num_sgprs)
{
   ud_info->sgpr_idx = *sgpr_idx;
   ud_info->num_sgprs = num_sgprs;
   *sgpr_idx += num_sgprs;
}
static void
set_loc_shader(isel_context *ctx, int idx, uint8_t *sgpr_idx,
               uint8_t num_sgprs)
{
   struct radv_userdata_info *ud_info = &ctx->program->info->user_sgprs_locs.shader_data[idx];

   set_loc(ud_info, sgpr_idx, num_sgprs);
}
static void
set_loc_shader_ptr(isel_context *ctx, int idx, uint8_t *sgpr_idx)
{
   bool use_32bit_pointers = idx != AC_UD_SCRATCH_RING_OFFSETS;

   set_loc_shader(ctx, idx, sgpr_idx, use_32bit_pointers ? 1 : 2);
}
static void
set_loc_desc(isel_context *ctx, int idx, uint8_t *sgpr_idx)
{
   struct radv_userdata_locations *locs = &ctx->program->info->user_sgprs_locs;
   struct radv_userdata_info *ud_info = &locs->descriptor_sets[idx];

   set_loc(ud_info, sgpr_idx, 1);
   locs->descriptor_sets_enabled |= 1 << idx;
}
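/* Declares the SGPR arguments shared by all stages: descriptor sets (direct
 * or one indirect pointer), the push-constant pointer, inlined push constants
 * and the streamout buffer pointer. */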
static void
declare_global_input_sgprs(isel_context *ctx,
                           /* bool has_previous_stage, gl_shader_stage previous_stage, */
                           user_sgpr_info *user_sgpr_info,
                           struct arg_info *args,
                           Temp *desc_sets)
{
   /* 1 for each descriptor set */
   if (!user_sgpr_info->indirect_all_descriptor_sets) {
      uint32_t mask = ctx->program->info->desc_set_used_mask;
      while (mask) {
         int i = u_bit_scan(&mask);
         add_arg(args, s1, &desc_sets[i], user_sgpr_info->user_sgpr_idx);
         set_loc_desc(ctx, i, &user_sgpr_info->user_sgpr_idx);
      }
      /* NIR->LLVM might have set this to true if RADV_DEBUG=compiletime */
      ctx->program->info->need_indirect_descriptor_sets = false;
   } else {
      add_arg(args, s1, desc_sets, user_sgpr_info->user_sgpr_idx);
      set_loc_shader_ptr(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS, &user_sgpr_info->user_sgpr_idx);
      ctx->program->info->need_indirect_descriptor_sets = true;
   }

   if (ctx->program->info->loads_push_constants) {
      /* 1 for push constants and dynamic descriptors */
      add_arg(args, s1, &ctx->push_constants, user_sgpr_info->user_sgpr_idx);
      set_loc_shader_ptr(ctx, AC_UD_PUSH_CONSTANTS, &user_sgpr_info->user_sgpr_idx);
   }

   if (ctx->program->info->num_inline_push_consts) {
      unsigned count = ctx->program->info->num_inline_push_consts;
      for (unsigned i = 0; i < count; i++)
         add_arg(args, s1, &ctx->inline_push_consts[i], user_sgpr_info->user_sgpr_idx + i);
      set_loc_shader(ctx, AC_UD_INLINE_PUSH_CONSTANTS, &user_sgpr_info->user_sgpr_idx, count);

      ctx->num_inline_push_consts = ctx->program->info->num_inline_push_consts;
      ctx->base_inline_push_consts = ctx->program->info->base_inline_push_consts;
   }

   if (ctx->program->info->so.num_outputs) {
      add_arg(args, s1, &ctx->streamout_buffers, user_sgpr_info->user_sgpr_idx);
      set_loc_shader_ptr(ctx, AC_UD_STREAMOUT_BUFFERS, &user_sgpr_info->user_sgpr_idx);
   }
}
static void
declare_vs_input_vgprs(isel_context *ctx, struct arg_info *args)
{
   unsigned vgpr_idx = 0;
   add_arg(args, v1, &ctx->vertex_id, vgpr_idx++);
   if (ctx->options->chip_class >= GFX10) {
      add_arg(args, v1, NULL, vgpr_idx++); /* unused */
      add_arg(args, v1, &ctx->vs_prim_id, vgpr_idx++);
      add_arg(args, v1, &ctx->instance_id, vgpr_idx++);
   } else {
      if (ctx->options->key.vs.out.as_ls) {
         add_arg(args, v1, &ctx->rel_auto_id, vgpr_idx++);
         add_arg(args, v1, &ctx->instance_id, vgpr_idx++);
      } else {
         add_arg(args, v1, &ctx->instance_id, vgpr_idx++);
         add_arg(args, v1, &ctx->vs_prim_id, vgpr_idx++);
      }
      add_arg(args, v1, NULL, vgpr_idx); /* unused */
   }
}
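/* Declares the streamout-related SGPRs for hw_vs stages; buffer offsets are
 * only loaded for buffers with a non-zero stride. */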
static void
declare_streamout_sgprs(isel_context *ctx, struct arg_info *args, unsigned *idx)
{
   /* Streamout SGPRs. */
   if (ctx->program->info->so.num_outputs) {
      assert(ctx->stage & hw_vs);

      if (ctx->stage != tess_eval_vs) {
         add_arg(args, s1, &ctx->streamout_config, (*idx)++);
      } else {
         args->assign[args->count - 1] = &ctx->streamout_config;
         args->types[args->count - 1] = s1;
      }

      add_arg(args, s1, &ctx->streamout_write_idx, (*idx)++);
   }

   /* A streamout buffer offset is loaded if the stride is non-zero. */
   for (unsigned i = 0; i < 4; i++) {
      if (!ctx->program->info->so.strides[i])
         continue;

      add_arg(args, s1, &ctx->streamout_offset[i], (*idx)++);
   }
}
static bool needs_view_index_sgpr(isel_context *ctx)
{
   switch (ctx->stage) {
   case vertex_vs:
      return ctx->program->info->needs_multiview_view_index || ctx->options->key.has_multiview_view_index;
   case tess_eval_vs:
      return ctx->program->info->needs_multiview_view_index && ctx->options->key.has_multiview_view_index;
   case vertex_tess_control_ls:
   case vertex_geometry_es:
   case tess_control_hs:
   case tess_eval_es:
   case tess_eval_geometry_es:
   case geometry_gs:
      return ctx->program->info->needs_multiview_view_index;
   default:
      return false;
   }
}
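/* Declares one FS input VGPR if the shader actually reads it, and mirrors the
 * enable bit into SPI_PS_INPUT_ADDR/ENA. Returns true if the input was added. */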
static bool
add_fs_arg(isel_context *ctx, arg_info *args, unsigned &vgpr_idx, fs_input input,
           unsigned value, bool enable_next = false, RegClass rc = v1)
{
   if (!ctx->fs_vgpr_args[input])
      return false;

   add_arg(args, rc, &ctx->fs_inputs[input], vgpr_idx);
   vgpr_idx += rc.size();

   if (enable_next) {
      add_arg(args, rc, &ctx->fs_inputs[input + 1], vgpr_idx);
      vgpr_idx += rc.size();
   }

   ctx->program->config->spi_ps_input_addr |= value;
   ctx->program->config->spi_ps_input_ena |= value;

   return true;
}
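/* Emits the p_startpgm pseudo instruction: declares all input SGPRs/VGPRs of
 * the stage as fixed definitions, plus the exec mask as the last definition. */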
void add_startpgm(struct isel_context *ctx)
{
   user_sgpr_info user_sgpr_info;
   bool needs_view_index = needs_view_index_sgpr(ctx);
   allocate_user_sgprs(ctx, needs_view_index, user_sgpr_info);
   arg_info args = {};

   /* this needs to be in sgprs 0 and 1 */
   if (ctx->options->supports_spill || user_sgpr_info.need_ring_offsets || ctx->scratch_enabled) {
      add_arg(&args, s2, &ctx->private_segment_buffer, 0);
      set_loc_shader_ptr(ctx, AC_UD_SCRATCH_RING_OFFSETS, &user_sgpr_info.user_sgpr_idx);
   }

   unsigned vgpr_idx = 0;
   switch (ctx->stage) {
   case vertex_vs: {
      declare_global_input_sgprs(ctx, &user_sgpr_info, &args, ctx->descriptor_sets);
      if (ctx->program->info->vs.has_vertex_buffers) {
         add_arg(&args, s1, &ctx->vertex_buffers, user_sgpr_info.user_sgpr_idx);
         set_loc_shader_ptr(ctx, AC_UD_VS_VERTEX_BUFFERS, &user_sgpr_info.user_sgpr_idx);
      }
      add_arg(&args, s1, &ctx->base_vertex, user_sgpr_info.user_sgpr_idx);
      add_arg(&args, s1, &ctx->start_instance, user_sgpr_info.user_sgpr_idx + 1);
      if (ctx->program->info->vs.needs_draw_id) {
         add_arg(&args, s1, &ctx->draw_id, user_sgpr_info.user_sgpr_idx + 2);
         set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE, &user_sgpr_info.user_sgpr_idx, 3);
      } else {
         set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE, &user_sgpr_info.user_sgpr_idx, 2);
      }
      if (needs_view_index) {
         add_arg(&args, s1, &ctx->view_index, user_sgpr_info.user_sgpr_idx);
         set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_info.user_sgpr_idx, 1);
      }

      assert(user_sgpr_info.user_sgpr_idx == user_sgpr_info.num_sgpr);
      unsigned idx = user_sgpr_info.user_sgpr_idx;
      if (ctx->options->key.vs.out.as_es)
         add_arg(&args, s1, &ctx->es2gs_offset, idx++);
      else
         declare_streamout_sgprs(ctx, &args, &idx);

      if (ctx->scratch_enabled)
         add_arg(&args, s1, &ctx->scratch_offset, idx++);

      declare_vs_input_vgprs(ctx, &args);
      break;
   }
   case fragment_fs: {
      declare_global_input_sgprs(ctx, &user_sgpr_info, &args, ctx->descriptor_sets);

      assert(user_sgpr_info.user_sgpr_idx == user_sgpr_info.num_sgpr);
      add_arg(&args, s1, &ctx->prim_mask, user_sgpr_info.user_sgpr_idx);

      if (ctx->scratch_enabled)
         add_arg(&args, s1, &ctx->scratch_offset, user_sgpr_info.user_sgpr_idx + 1);

      ctx->program->config->spi_ps_input_addr = 0;
      ctx->program->config->spi_ps_input_ena = 0;

      bool has_interp_mode = false;

      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::persp_sample_p1, S_0286CC_PERSP_SAMPLE_ENA(1), true);
      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::persp_center_p1, S_0286CC_PERSP_CENTER_ENA(1), true);
      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::persp_centroid_p1, S_0286CC_PERSP_CENTROID_ENA(1), true);
      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::persp_pull_model, S_0286CC_PERSP_PULL_MODEL_ENA(1), false, v3);

      if (!has_interp_mode && ctx->fs_vgpr_args[fs_input::frag_pos_3]) {
         /* If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be enabled too */
         ctx->fs_vgpr_args[fs_input::persp_center_p1] = true;
         has_interp_mode = add_fs_arg(ctx, &args, vgpr_idx, fs_input::persp_center_p1, S_0286CC_PERSP_CENTER_ENA(1), true);
      }

      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::linear_sample_p1, S_0286CC_LINEAR_SAMPLE_ENA(1), true);
      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::linear_center_p1, S_0286CC_LINEAR_CENTER_ENA(1), true);
      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::linear_centroid_p1, S_0286CC_LINEAR_CENTROID_ENA(1), true);
      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::line_stipple, S_0286CC_LINE_STIPPLE_TEX_ENA(1));

      if (!has_interp_mode) {
         /* At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled */
         ctx->fs_vgpr_args[fs_input::persp_center_p1] = true;
         has_interp_mode = add_fs_arg(ctx, &args, vgpr_idx, fs_input::persp_center_p1, S_0286CC_PERSP_CENTER_ENA(1), true);
      }

      add_fs_arg(ctx, &args, vgpr_idx, fs_input::frag_pos_0, S_0286CC_POS_X_FLOAT_ENA(1));
      add_fs_arg(ctx, &args, vgpr_idx, fs_input::frag_pos_1, S_0286CC_POS_Y_FLOAT_ENA(1));
      add_fs_arg(ctx, &args, vgpr_idx, fs_input::frag_pos_2, S_0286CC_POS_Z_FLOAT_ENA(1));
      add_fs_arg(ctx, &args, vgpr_idx, fs_input::frag_pos_3, S_0286CC_POS_W_FLOAT_ENA(1));

      add_fs_arg(ctx, &args, vgpr_idx, fs_input::front_face, S_0286CC_FRONT_FACE_ENA(1));
      add_fs_arg(ctx, &args, vgpr_idx, fs_input::ancillary, S_0286CC_ANCILLARY_ENA(1));
      add_fs_arg(ctx, &args, vgpr_idx, fs_input::sample_coverage, S_0286CC_SAMPLE_COVERAGE_ENA(1));
      add_fs_arg(ctx, &args, vgpr_idx, fs_input::fixed_pt, S_0286CC_POS_FIXED_PT_ENA(1));

      ASSERTED bool unset_interp_mode = !(ctx->program->config->spi_ps_input_addr & 0x7F) ||
                                        (G_0286CC_POS_W_FLOAT_ENA(ctx->program->config->spi_ps_input_addr)
                                         && !(ctx->program->config->spi_ps_input_addr & 0xF));

      assert(has_interp_mode);
      assert(!unset_interp_mode);
      break;
   }
   case compute_cs: {
      declare_global_input_sgprs(ctx, &user_sgpr_info, &args, ctx->descriptor_sets);

      if (ctx->program->info->cs.uses_grid_size) {
         add_arg(&args, s1, &ctx->num_workgroups[0], user_sgpr_info.user_sgpr_idx);
         add_arg(&args, s1, &ctx->num_workgroups[1], user_sgpr_info.user_sgpr_idx + 1);
         add_arg(&args, s1, &ctx->num_workgroups[2], user_sgpr_info.user_sgpr_idx + 2);
         set_loc_shader(ctx, AC_UD_CS_GRID_SIZE, &user_sgpr_info.user_sgpr_idx, 3);
      }

      assert(user_sgpr_info.user_sgpr_idx == user_sgpr_info.num_sgpr);
      unsigned idx = user_sgpr_info.user_sgpr_idx;
      for (unsigned i = 0; i < 3; i++) {
         if (ctx->program->info->cs.uses_block_id[i])
            add_arg(&args, s1, &ctx->workgroup_ids[i], idx++);
      }

      if (ctx->program->info->cs.uses_local_invocation_idx)
         add_arg(&args, s1, &ctx->tg_size, idx++);
      if (ctx->scratch_enabled)
         add_arg(&args, s1, &ctx->scratch_offset, idx++);

      add_arg(&args, v1, &ctx->local_invocation_ids[0], vgpr_idx++);
      add_arg(&args, v1, &ctx->local_invocation_ids[1], vgpr_idx++);
      add_arg(&args, v1, &ctx->local_invocation_ids[2], vgpr_idx++);
      break;
   }
   default:
      unreachable("Shader stage not implemented");
   }

   ctx->program->info->num_input_vgprs = 0;
   ctx->program->info->num_input_sgprs = args.num_sgprs_used;
   ctx->program->info->num_user_sgprs = user_sgpr_info.num_sgpr;
   ctx->program->info->num_input_vgprs = args.num_vgprs_used;

   if (ctx->stage == fragment_fs) {
      /* Verify that we have a correct assumption about input VGPR count */
      ASSERTED unsigned input_vgpr_cnt = ac_get_fs_input_vgpr_cnt(ctx->program->config, nullptr, nullptr);
      assert(input_vgpr_cnt == ctx->program->info->num_input_vgprs);
   }

   aco_ptr<Pseudo_instruction> startpgm{create_instruction<Pseudo_instruction>(aco_opcode::p_startpgm, Format::PSEUDO, 0, args.count + 1)};
   for (unsigned i = 0; i < args.count; i++) {
      if (args.assign[i]) {
         *args.assign[i] = Temp{ctx->program->allocateId(), args.types[i]};
         startpgm->definitions[i] = Definition(*args.assign[i]);
         startpgm->definitions[i].setFixed(args.reg[i]);
      }
   }
   startpgm->definitions[args.count] = Definition{ctx->program->allocateId(), exec, s2};
   ctx->block->instructions.push_back(std::move(startpgm));
}
static int
type_size(const struct glsl_type *type, bool bindless)
{
   // TODO: don't we need type->std430_base_alignment() here?
   return glsl_count_attribute_slots(type, false);
}
static void
shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   uint32_t comp_size = glsl_type_is_boolean(type)
      ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);
   *size = comp_size * length;
   *align = comp_size;
}
static unsigned
get_align(nir_variable_mode mode, bool is_store, unsigned bit_size, unsigned num_components)
{
   /* TODO: ACO doesn't have good support for non-32-bit reads/writes yet */
   if (bit_size != 32)
      return -1;

   switch (mode) {
   case nir_var_mem_ubo:
   case nir_var_mem_ssbo:
   //case nir_var_mem_push_const: enable with 1240!
   case nir_var_mem_shared:
      /* TODO: what are the alignment requirements for LDS? */
      return num_components <= 4 ? 4 : -1;
   default:
      return -1;
   }
}
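/* Assigns driver locations for VS inputs/outputs and fills radv_vs_output_info:
 * which position components are written and which outputs get param exports. */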
void
setup_vs_variables(isel_context *ctx, nir_shader *nir)
{
   nir_foreach_variable(variable, &nir->inputs)
   {
      variable->data.driver_location = variable->data.location * 4;
   }
   nir_foreach_variable(variable, &nir->outputs)
   {
      variable->data.driver_location = variable->data.location * 4;
   }

   radv_vs_output_info *outinfo = &ctx->program->info->vs.outinfo;

   memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
          sizeof(outinfo->vs_output_param_offset));

   ctx->needs_instance_id = ctx->program->info->vs.needs_instance_id;

   bool export_clip_dists = ctx->options->key.vs_common_out.export_clip_dists;

   outinfo->param_exports = 0;
   int pos_written = 0x1;
   if (outinfo->writes_pointsize || outinfo->writes_viewport_index || outinfo->writes_layer)
      pos_written |= 1 << 1;

   nir_foreach_variable(variable, &nir->outputs)
   {
      int idx = variable->data.location;
      unsigned slots = variable->type->count_attribute_slots(false);
      if (variable->data.compact) {
         unsigned component_count = variable->data.location_frac + variable->type->length;
         slots = (component_count + 3) / 4;
      }

      if (idx >= VARYING_SLOT_VAR0 || idx == VARYING_SLOT_LAYER || idx == VARYING_SLOT_PRIMITIVE_ID ||
          ((idx == VARYING_SLOT_CLIP_DIST0 || idx == VARYING_SLOT_CLIP_DIST1) && export_clip_dists)) {
         for (unsigned i = 0; i < slots; i++) {
            if (outinfo->vs_output_param_offset[idx + i] == AC_EXP_PARAM_UNDEFINED)
               outinfo->vs_output_param_offset[idx + i] = outinfo->param_exports++;
         }
      }
   }

   if (outinfo->writes_layer &&
       outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] == AC_EXP_PARAM_UNDEFINED) {
      /* when ctx->options->key.has_multiview_view_index = true, the layer
       * variable isn't declared in NIR and it's isel's job to get the layer */
      outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] = outinfo->param_exports++;
   }

   if (outinfo->export_prim_id) {
      assert(outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] == AC_EXP_PARAM_UNDEFINED);
      outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = outinfo->param_exports++;
   }

   ctx->num_clip_distances = util_bitcount(outinfo->clip_dist_mask);
   ctx->num_cull_distances = util_bitcount(outinfo->cull_dist_mask);

   assert(ctx->num_clip_distances + ctx->num_cull_distances <= 8);

   if (ctx->num_clip_distances + ctx->num_cull_distances > 0)
      pos_written |= 1 << 2;
   if (ctx->num_clip_distances + ctx->num_cull_distances > 4)
      pos_written |= 1 << 3;

   outinfo->pos_exports = util_bitcount(pos_written);
}
void
setup_variables(isel_context *ctx, nir_shader *nir)
{
   switch (nir->info.stage) {
   case MESA_SHADER_FRAGMENT: {
      nir_foreach_variable(variable, &nir->outputs)
      {
         int idx = variable->data.location + variable->data.index;
         variable->data.driver_location = idx * 4;
      }
      break;
   }
   case MESA_SHADER_COMPUTE: {
      unsigned lds_allocation_size_unit = 4 * 64;
      if (ctx->program->chip_class >= GFX7)
         lds_allocation_size_unit = 4 * 128;
      ctx->program->config->lds_size = (nir->info.cs.shared_size + lds_allocation_size_unit - 1) / lds_allocation_size_unit;
      break;
   }
   case MESA_SHADER_VERTEX: {
      setup_vs_variables(ctx, nir);
      break;
   }
   default:
      unreachable("Unhandled shader stage.");
   }
}
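/* Entry point of this file: determines the hw stage, initializes the Program
 * and the isel_context, and runs the NIR lowering/optimization pipeline that
 * instruction selection relies on. */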
isel_context
setup_isel_context(Program* program,
                   unsigned shader_count,
                   struct nir_shader *const *shaders,
                   ac_shader_config* config,
                   radv_shader_info *info,
                   radv_nir_compiler_options *options)
{
   program->stage = 0;
   for (unsigned i = 0; i < shader_count; i++) {
      switch (shaders[i]->info.stage) {
      case MESA_SHADER_VERTEX:
         program->stage |= sw_vs;
         break;
      case MESA_SHADER_TESS_CTRL:
         program->stage |= sw_tcs;
         break;
      case MESA_SHADER_TESS_EVAL:
         program->stage |= sw_tes;
         break;
      case MESA_SHADER_GEOMETRY:
         program->stage |= sw_gs;
         break;
      case MESA_SHADER_FRAGMENT:
         program->stage |= sw_fs;
         break;
      case MESA_SHADER_COMPUTE:
         program->stage |= sw_cs;
         break;
      default:
         unreachable("Shader stage not implemented");
      }
   }

   if (program->stage == sw_vs)
      program->stage |= hw_vs;
   else if (program->stage == sw_fs)
      program->stage |= hw_fs;
   else if (program->stage == sw_cs)
      program->stage |= hw_cs;
   else
      unreachable("Shader stage not implemented");

   program->config = config;
   program->info = info;
   program->chip_class = options->chip_class;
   program->family = options->family;
   program->wave_size = options->wave_size;
   program->sgpr_limit = options->chip_class >= GFX8 ? 102 : 104;
   if (options->family == CHIP_TONGA || options->family == CHIP_ICELAND)
      program->sgpr_limit = 94; /* workaround hardware bug */

   for (unsigned i = 0; i < MAX_SETS; ++i)
      program->info->user_sgprs_locs.descriptor_sets[i].sgpr_idx = -1;
   for (unsigned i = 0; i < AC_UD_MAX_UD; ++i)
      program->info->user_sgprs_locs.shader_data[i].sgpr_idx = -1;

   isel_context ctx = {};
   ctx.program = program;
   ctx.options = options;
   ctx.stage = program->stage;

   for (unsigned i = 0; i < fs_input::max_inputs; ++i)
      ctx.fs_inputs[i] = Temp(0, v1);
   ctx.fs_inputs[fs_input::persp_pull_model] = Temp(0, v3);
   for (unsigned i = 0; i < MAX_SETS; ++i)
      ctx.descriptor_sets[i] = Temp(0, s1);
   for (unsigned i = 0; i < MAX_INLINE_PUSH_CONSTS; ++i)
      ctx.inline_push_consts[i] = Temp(0, s1);
   for (unsigned i = 0; i <= VARYING_SLOT_VAR31; ++i) {
      for (unsigned j = 0; j < 4; ++j)
         ctx.vs_output.outputs[i][j] = Temp(0, v1);
   }

   for (unsigned i = 0; i < shader_count; i++) {
      nir_shader *nir = shaders[i];

      /* align and copy constant data */
      while (program->constant_data.size() % 4u)
         program->constant_data.push_back(0);
      ctx.constant_data_offset = program->constant_data.size();
      program->constant_data.insert(program->constant_data.end(),
                                    (uint8_t*)nir->constant_data,
                                    (uint8_t*)nir->constant_data + nir->constant_data_size);

      /* the variable setup has to be done before lower_io / CSE */
      if (nir->info.stage == MESA_SHADER_COMPUTE)
         nir_lower_vars_to_explicit_types(nir, nir_var_mem_shared, shared_var_info);
      setup_variables(&ctx, nir);

      /* optimize and lower memory operations */
      bool lower_to_scalar = false;
      bool lower_pack = false;
      // TODO: uncomment this once !1240 is merged
      /*if (nir_opt_load_store_vectorize(nir,
                                         (nir_variable_mode)(nir_var_mem_ssbo | nir_var_mem_ubo |
                                                             nir_var_mem_push_const | nir_var_mem_shared),
                                         get_align)) {
         lower_to_scalar = true;
         lower_pack = true;
      }*/
      if (nir->info.stage == MESA_SHADER_COMPUTE)
         lower_to_scalar |= nir_lower_explicit_io(nir, nir_var_mem_shared, nir_address_format_32bit_offset);
      else
         nir_lower_io(nir, (nir_variable_mode)(nir_var_shader_in | nir_var_shader_out), type_size, (nir_lower_io_options)0);
      nir_lower_explicit_io(nir, nir_var_mem_global, nir_address_format_64bit_global);

      if (lower_to_scalar)
         nir_lower_alu_to_scalar(nir, NULL, NULL);
      if (lower_pack)
         nir_lower_pack(nir);

      /* lower ALU operations */
      // TODO: implement logic64 in aco, it's more effective for sgprs
      nir_lower_int64(nir, (nir_lower_int64_options) (nir_lower_imul64 |
                                                      nir_lower_imul_high64 |
                                                      nir_lower_imul_2x32_64 |
                                                      nir_lower_divmod64 |
                                                      nir_lower_logic64 |
                                                      nir_lower_minmax64 |
                                                      nir_lower_iabs64));

      nir_opt_idiv_const(nir, 32);
      nir_lower_idiv(nir); // TODO: use the LLVM path once !1239 is merged

      /* optimize the lowered ALU operations */
      nir_opt_constant_folding(nir);
      nir_opt_algebraic(nir);

      /* Do late algebraic optimization to turn add(a, neg(b)) back into
       * subs, then the mandatory cleanup after algebraic. Note that it may
       * produce fnegs, and if so then we need to keep running to squash
       * fneg(fneg(a)).
       */
      bool more_late_algebraic = true;
      while (more_late_algebraic) {
         more_late_algebraic = false;
         NIR_PASS(more_late_algebraic, nir, nir_opt_algebraic_late);
         NIR_PASS_V(nir, nir_opt_constant_folding);
         NIR_PASS_V(nir, nir_copy_prop);
         NIR_PASS_V(nir, nir_opt_dce);
         NIR_PASS_V(nir, nir_opt_cse);
      }

      /* cleanup passes */
      nir_lower_load_const_to_scalar(nir);
      nir_opt_shrink_load(nir);
      nir_move_options move_opts = (nir_move_options)(
         nir_move_const_undef | nir_move_load_ubo | nir_move_load_input | nir_move_comparisons);
      nir_opt_sink(nir, move_opts);
      nir_opt_move(nir, move_opts);
      nir_convert_to_lcssa(nir, true, false);
      nir_lower_phis_to_scalar(nir);

      nir_function_impl *func = nir_shader_get_entrypoint(nir);
      nir_index_ssa_defs(func);

      if (options->dump_preoptir) {
         fprintf(stderr, "NIR shader before instruction selection:\n");
         nir_print_shader(nir, stderr);
      }
   }

   unsigned scratch_size = 0;
   for (unsigned i = 0; i < shader_count; i++)
      scratch_size = std::max(scratch_size, shaders[i]->scratch_size);
   ctx.scratch_enabled = scratch_size > 0;
   ctx.program->config->scratch_bytes_per_wave = align(scratch_size * ctx.options->wave_size, 1024);
   ctx.program->config->float_mode = V_00B028_FP_64_DENORMS;
   ctx.program->info->wave_size = ctx.options->wave_size;

   ctx.block = ctx.program->create_and_insert_block();
   ctx.block->loop_nest_depth = 0;
   ctx.block->kind = block_kind_top_level;

   return ctx;
}