/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <array>
#include <unordered_map>

/* aco_ir.h, nir.h and sid.h are needed by the code below (Temp/RegClass/Program,
 * the NIR helpers and the S_0286CC_* register macros). */
#include "aco_ir.h"
#include "nir.h"
#include "vulkan/radv_shader.h"
#include "vulkan/radv_descriptor_set.h"
#include "sid.h"
#include "ac_exp_param.h"
#include "ac_shader_util.h"

#include "util/u_math.h"

#define MAX_INLINE_PUSH_CONSTS 8

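/* Fragment shader input VGPRs. The *_p2 entries are assumed here: add_fs_arg()
 * below registers "input + 1" for the second half of each barycentric pair, so
 * every *_p1 input is expected to be immediately followed by its *_p2
 * counterpart. The order mirrors the SPI_PS_INPUT enable bits set in
 * add_startpgm(). */
enum fs_input {
   persp_sample_p1,
   persp_sample_p2,
   persp_center_p1,
   persp_center_p2,
   persp_centroid_p1,
   persp_centroid_p2,
   persp_pull_model,
   linear_sample_p1,
   linear_sample_p2,
   linear_center_p1,
   linear_center_p2,
   linear_centroid_p1,
   linear_centroid_p2,
   line_stipple,
   frag_pos_0,
   frag_pos_1,
   frag_pos_2,
   frag_pos_3,
   front_face,
   ancillary,
   sample_coverage,
   fixed_pt,
   max_inputs,
};
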
struct vs_output_state {
   uint8_t mask[VARYING_SLOT_VAR31 + 1];
   Temp outputs[VARYING_SLOT_VAR31 + 1][4];
};

struct isel_context {
   const struct radv_nir_compiler_options *options;
   Program *program;
   nir_shader *shader;
   Block *block;
   bool *divergent_vals;
   uint32_t constant_data_offset;
   std::unique_ptr<Temp[]> allocated;
   std::unordered_map<unsigned, std::array<Temp,4>> allocated_vec;
   Stage stage;
   bool has_gfx10_wave64_bpermute = false;

   uint16_t loop_nest_depth = 0;

   bool has_divergent_continue = false;
   bool has_divergent_branch = false;
   bool is_divergent = false;
   bool exec_potentially_empty = false;

   bool scratch_enabled = false;

   /* inputs common for merged stages */
   Temp merged_wave_info = Temp(0, s1);

   /* FS inputs */
   bool fs_vgpr_args[fs_input::max_inputs];
   Temp fs_inputs[fs_input::max_inputs];
   Temp prim_mask = Temp(0, s1);
   Temp descriptor_sets[MAX_SETS];
   Temp push_constants = Temp(0, s1);
   Temp inline_push_consts[MAX_INLINE_PUSH_CONSTS];
   unsigned num_inline_push_consts = 0;
   unsigned base_inline_push_consts = 0;

   /* VS inputs */
   Temp vertex_buffers = Temp(0, s1);
   Temp base_vertex = Temp(0, s1);
   Temp start_instance = Temp(0, s1);
   Temp draw_id = Temp(0, s1);
   Temp view_index = Temp(0, s1);
   Temp es2gs_offset = Temp(0, s1);
   Temp vertex_id = Temp(0, v1);
   Temp rel_auto_id = Temp(0, v1);
   Temp instance_id = Temp(0, v1);
   Temp vs_prim_id = Temp(0, v1);
   bool needs_instance_id;

   /* CS inputs */
   Temp num_workgroups[3] = {Temp(0, s1), Temp(0, s1), Temp(0, s1)};
   Temp workgroup_ids[3] = {Temp(0, s1), Temp(0, s1), Temp(0, s1)};
   Temp tg_size = Temp(0, s1);
   Temp local_invocation_ids[3] = {Temp(0, v1), Temp(0, v1), Temp(0, v1)};

   /* VS output information */
   unsigned num_clip_distances;
   unsigned num_cull_distances;
   vs_output_state vs_output;

   /* Streamout */
   Temp streamout_buffers = Temp(0, s1);
   Temp streamout_write_idx = Temp(0, s1);
   Temp streamout_config = Temp(0, s1);
   Temp streamout_offset[4] = {Temp(0, s1), Temp(0, s1), Temp(0, s1), Temp(0, s1)};
};

fs_input
get_interp_input(nir_intrinsic_op intrin, enum glsl_interp_mode interp)
{
   switch (interp) {
   case INTERP_MODE_SMOOTH:
   case INTERP_MODE_NONE:
      if (intrin == nir_intrinsic_load_barycentric_pixel ||
          intrin == nir_intrinsic_load_barycentric_at_sample ||
          intrin == nir_intrinsic_load_barycentric_at_offset)
         return fs_input::persp_center_p1;
      else if (intrin == nir_intrinsic_load_barycentric_centroid)
         return fs_input::persp_centroid_p1;
      else if (intrin == nir_intrinsic_load_barycentric_sample)
         return fs_input::persp_sample_p1;
      break;
   case INTERP_MODE_NOPERSPECTIVE:
      if (intrin == nir_intrinsic_load_barycentric_pixel)
         return fs_input::linear_center_p1;
      else if (intrin == nir_intrinsic_load_barycentric_centroid)
         return fs_input::linear_centroid_p1;
      else if (intrin == nir_intrinsic_load_barycentric_sample)
         return fs_input::linear_sample_p1;
      break;
   default:
      break;
   }
   return fs_input::max_inputs;
}

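/* Pre-pass over the NIR shader: based on nir_divergence_analysis(), every SSA
 * definition is assigned a register class (SGPRs for uniform values, VGPRs for
 * divergent ones) so that instruction selection knows up front where each
 * value will live. Phis can depend on values that haven't been visited yet,
 * so the walk repeats until the assignment reaches a fixed point. */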
void init_context(isel_context *ctx, nir_shader *shader)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   ctx->shader = shader;
   ctx->divergent_vals = nir_divergence_analysis(shader, nir_divergence_view_index_uniform);

   std::unique_ptr<Temp[]> allocated{new Temp[impl->ssa_alloc]()};

   memset(&ctx->fs_vgpr_args, false, sizeof(ctx->fs_vgpr_args));

   bool done = false;
   while (!done) {
      done = true;
      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            switch(instr->type) {
            case nir_instr_type_alu: {
               nir_alu_instr *alu_instr = nir_instr_as_alu(instr);
               unsigned size = alu_instr->dest.dest.ssa.num_components;
               if (alu_instr->dest.dest.ssa.bit_size == 64)
                  size *= 2;
               RegType type = RegType::sgpr;
               switch(alu_instr->op) {
               /* ... */
               case nir_op_fround_even:
               /* ... */
               case nir_op_pack_half_2x16:
               case nir_op_unpack_half_2x16_split_x:
               case nir_op_unpack_half_2x16_split_y:
               /* ... */
               case nir_op_fddx_fine:
               case nir_op_fddy_fine:
               case nir_op_fddx_coarse:
               case nir_op_fddy_coarse:
               case nir_op_fquantize2f16:
               /* ... */
               case nir_op_frexp_sig:
               case nir_op_frexp_exp:
               case nir_op_cube_face_index:
               case nir_op_cube_face_coord:
                  type = RegType::vgpr;
                  break;
               /* ... */
                  type = ctx->divergent_vals[alu_instr->dest.dest.ssa.index] ? RegType::vgpr : RegType::sgpr;
                  break;
               case nir_op_bcsel:
                  if (alu_instr->dest.dest.ssa.bit_size == 1) {
                     /* ... */
                  } else {
                     if (ctx->divergent_vals[alu_instr->dest.dest.ssa.index]) {
                        type = RegType::vgpr;
                     } else {
                        if (allocated[alu_instr->src[1].src.ssa->index].type() == RegType::vgpr ||
                            allocated[alu_instr->src[2].src.ssa->index].type() == RegType::vgpr) {
                           type = RegType::vgpr;
                        }
                     }
                     if (alu_instr->src[1].src.ssa->num_components == 1 && alu_instr->src[2].src.ssa->num_components == 1) {
                        assert(allocated[alu_instr->src[1].src.ssa->index].size() == allocated[alu_instr->src[2].src.ssa->index].size());
                        size = allocated[alu_instr->src[1].src.ssa->index].size();
                     }
                  }
                  break;
               /* ... */
                  if (alu_instr->dest.dest.ssa.bit_size == 1) {
                     /* ... */
                  } else {
                     type = ctx->divergent_vals[alu_instr->dest.dest.ssa.index] ? RegType::vgpr : RegType::sgpr;
                  }
                  break;
               default:
                  if (alu_instr->dest.dest.ssa.bit_size == 1) {
                     /* ... */
                  } else {
                     for (unsigned i = 0; i < nir_op_infos[alu_instr->op].num_inputs; i++) {
                        if (allocated[alu_instr->src[i].src.ssa->index].type() == RegType::vgpr)
                           type = RegType::vgpr;
                     }
                  }
                  break;
               }
               allocated[alu_instr->dest.dest.ssa.index] = Temp(0, RegClass(type, size));
               break;
            }
            case nir_instr_type_load_const: {
               unsigned size = nir_instr_as_load_const(instr)->def.num_components;
               if (nir_instr_as_load_const(instr)->def.bit_size == 64)
                  size *= 2;
               else if (nir_instr_as_load_const(instr)->def.bit_size == 1) {
                  /* ... */
               }
               allocated[nir_instr_as_load_const(instr)->def.index] = Temp(0, RegClass(RegType::sgpr, size));
               break;
            }
            case nir_instr_type_intrinsic: {
               nir_intrinsic_instr *intrinsic = nir_instr_as_intrinsic(instr);
               if (!nir_intrinsic_infos[intrinsic->intrinsic].has_dest)
                  break;
               unsigned size = intrinsic->dest.ssa.num_components;
               if (intrinsic->dest.ssa.bit_size == 64)
                  size *= 2;
               RegType type = RegType::sgpr;
               switch(intrinsic->intrinsic) {
               case nir_intrinsic_load_push_constant:
               case nir_intrinsic_load_work_group_id:
               case nir_intrinsic_load_num_work_groups:
               case nir_intrinsic_load_subgroup_id:
               case nir_intrinsic_load_num_subgroups:
               case nir_intrinsic_load_first_vertex:
               case nir_intrinsic_load_base_instance:
               case nir_intrinsic_get_buffer_size:
               case nir_intrinsic_vote_all:
               case nir_intrinsic_vote_any:
               case nir_intrinsic_read_first_invocation:
               case nir_intrinsic_read_invocation:
               case nir_intrinsic_first_invocation:
                  type = RegType::sgpr;
                  if (intrinsic->dest.ssa.bit_size == 1) {
                     /* ... */
                  }
                  break;
               case nir_intrinsic_ballot:
                  type = RegType::sgpr;
                  /* ... */
                  break;
               case nir_intrinsic_load_sample_id:
               case nir_intrinsic_load_sample_mask_in:
               case nir_intrinsic_load_input:
               case nir_intrinsic_load_vertex_id:
               case nir_intrinsic_load_vertex_id_zero_base:
               case nir_intrinsic_load_barycentric_sample:
               case nir_intrinsic_load_barycentric_pixel:
               case nir_intrinsic_load_barycentric_centroid:
               case nir_intrinsic_load_barycentric_at_sample:
               case nir_intrinsic_load_barycentric_at_offset:
               case nir_intrinsic_load_interpolated_input:
               case nir_intrinsic_load_frag_coord:
               case nir_intrinsic_load_sample_pos:
               case nir_intrinsic_load_layer_id:
               case nir_intrinsic_load_local_invocation_id:
               case nir_intrinsic_load_local_invocation_index:
               case nir_intrinsic_load_subgroup_invocation:
               case nir_intrinsic_write_invocation_amd:
               case nir_intrinsic_mbcnt_amd:
               case nir_intrinsic_load_instance_id:
               case nir_intrinsic_ssbo_atomic_add:
               case nir_intrinsic_ssbo_atomic_imin:
               case nir_intrinsic_ssbo_atomic_umin:
               case nir_intrinsic_ssbo_atomic_imax:
               case nir_intrinsic_ssbo_atomic_umax:
               case nir_intrinsic_ssbo_atomic_and:
               case nir_intrinsic_ssbo_atomic_or:
               case nir_intrinsic_ssbo_atomic_xor:
               case nir_intrinsic_ssbo_atomic_exchange:
               case nir_intrinsic_ssbo_atomic_comp_swap:
               case nir_intrinsic_image_deref_atomic_add:
               case nir_intrinsic_image_deref_atomic_umin:
               case nir_intrinsic_image_deref_atomic_imin:
               case nir_intrinsic_image_deref_atomic_umax:
               case nir_intrinsic_image_deref_atomic_imax:
               case nir_intrinsic_image_deref_atomic_and:
               case nir_intrinsic_image_deref_atomic_or:
               case nir_intrinsic_image_deref_atomic_xor:
               case nir_intrinsic_image_deref_atomic_exchange:
               case nir_intrinsic_image_deref_atomic_comp_swap:
               case nir_intrinsic_image_deref_size:
               case nir_intrinsic_shared_atomic_add:
               case nir_intrinsic_shared_atomic_imin:
               case nir_intrinsic_shared_atomic_umin:
               case nir_intrinsic_shared_atomic_imax:
               case nir_intrinsic_shared_atomic_umax:
               case nir_intrinsic_shared_atomic_and:
               case nir_intrinsic_shared_atomic_or:
               case nir_intrinsic_shared_atomic_xor:
               case nir_intrinsic_shared_atomic_exchange:
               case nir_intrinsic_shared_atomic_comp_swap:
               case nir_intrinsic_load_scratch:
                  type = RegType::vgpr;
                  break;
               case nir_intrinsic_shuffle:
               case nir_intrinsic_quad_broadcast:
               case nir_intrinsic_quad_swap_horizontal:
               case nir_intrinsic_quad_swap_vertical:
               case nir_intrinsic_quad_swap_diagonal:
               case nir_intrinsic_quad_swizzle_amd:
               case nir_intrinsic_masked_swizzle_amd:
               case nir_intrinsic_inclusive_scan:
               case nir_intrinsic_exclusive_scan:
                  if (intrinsic->dest.ssa.bit_size == 1) {
                     /* ... */
                     type = RegType::sgpr;
                  } else if (!ctx->divergent_vals[intrinsic->dest.ssa.index]) {
                     type = RegType::sgpr;
                  } else {
                     type = RegType::vgpr;
                  }
                  break;
               case nir_intrinsic_load_view_index:
                  type = ctx->stage == fragment_fs ? RegType::vgpr : RegType::sgpr;
                  break;
               case nir_intrinsic_load_front_face:
               case nir_intrinsic_load_helper_invocation:
               case nir_intrinsic_is_helper_invocation:
                  type = RegType::sgpr;
                  /* ... */
                  break;
               case nir_intrinsic_reduce:
                  if (intrinsic->dest.ssa.bit_size == 1) {
                     /* ... */
                     type = RegType::sgpr;
                  } else if (nir_intrinsic_cluster_size(intrinsic) == 0 ||
                             !ctx->divergent_vals[intrinsic->dest.ssa.index]) {
                     type = RegType::sgpr;
                  } else {
                     type = RegType::vgpr;
                  }
                  break;
               case nir_intrinsic_load_ubo:
               case nir_intrinsic_load_ssbo:
               case nir_intrinsic_load_global:
               case nir_intrinsic_vulkan_resource_index:
                  type = ctx->divergent_vals[intrinsic->dest.ssa.index] ? RegType::vgpr : RegType::sgpr;
                  break;
               /* due to copy propagation, the swizzled imov is removed if num dest components == 1 */
               case nir_intrinsic_load_shared:
                  if (ctx->divergent_vals[intrinsic->dest.ssa.index])
                     type = RegType::vgpr;
                  else
                     type = RegType::sgpr;
                  break;
               default:
                  for (unsigned i = 0; i < nir_intrinsic_infos[intrinsic->intrinsic].num_srcs; i++) {
                     if (allocated[intrinsic->src[i].ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                  }
                  break;
               }
               allocated[intrinsic->dest.ssa.index] = Temp(0, RegClass(type, size));

               switch(intrinsic->intrinsic) {
               case nir_intrinsic_load_barycentric_sample:
               case nir_intrinsic_load_barycentric_pixel:
               case nir_intrinsic_load_barycentric_centroid:
               case nir_intrinsic_load_barycentric_at_sample:
               case nir_intrinsic_load_barycentric_at_offset: {
                  glsl_interp_mode mode = (glsl_interp_mode)nir_intrinsic_interp_mode(intrinsic);
                  ctx->fs_vgpr_args[get_interp_input(intrinsic->intrinsic, mode)] = true;
                  break;
               }
               case nir_intrinsic_load_front_face:
                  ctx->fs_vgpr_args[fs_input::front_face] = true;
                  break;
               case nir_intrinsic_load_frag_coord:
               case nir_intrinsic_load_sample_pos: {
                  uint8_t mask = nir_ssa_def_components_read(&intrinsic->dest.ssa);
                  for (unsigned i = 0; i < 4; i++) {
                     if (mask & (1 << i))
                        ctx->fs_vgpr_args[fs_input::frag_pos_0 + i] = true;
                  }
                  break;
               }
               case nir_intrinsic_load_sample_id:
                  ctx->fs_vgpr_args[fs_input::ancillary] = true;
                  break;
               case nir_intrinsic_load_sample_mask_in:
                  ctx->fs_vgpr_args[fs_input::ancillary] = true;
                  ctx->fs_vgpr_args[fs_input::sample_coverage] = true;
                  break;
               default:
                  break;
               }
               break;
            }
            case nir_instr_type_tex: {
               nir_tex_instr* tex = nir_instr_as_tex(instr);
               unsigned size = tex->dest.ssa.num_components;

               if (tex->dest.ssa.bit_size == 64)
                  size *= 2;
               if (tex->op == nir_texop_texture_samples)
                  assert(!ctx->divergent_vals[tex->dest.ssa.index]);
               if (ctx->divergent_vals[tex->dest.ssa.index])
                  allocated[tex->dest.ssa.index] = Temp(0, RegClass(RegType::vgpr, size));
               else
                  allocated[tex->dest.ssa.index] = Temp(0, RegClass(RegType::sgpr, size));
               break;
            }
            case nir_instr_type_parallel_copy: {
               nir_foreach_parallel_copy_entry(entry, nir_instr_as_parallel_copy(instr)) {
                  allocated[entry->dest.ssa.index] = allocated[entry->src.ssa->index];
               }
               break;
            }
            case nir_instr_type_ssa_undef: {
               unsigned size = nir_instr_as_ssa_undef(instr)->def.num_components;
               if (nir_instr_as_ssa_undef(instr)->def.bit_size == 64)
                  size *= 2;
               allocated[nir_instr_as_ssa_undef(instr)->def.index] = Temp(0, RegClass(RegType::sgpr, size));
               break;
            }
            case nir_instr_type_phi: {
               nir_phi_instr* phi = nir_instr_as_phi(instr);
               RegType type;
               unsigned size = phi->dest.ssa.num_components;

               if (phi->dest.ssa.bit_size == 1) {
                  assert(size == 1 && "multiple components not yet supported on boolean phis.");
                  type = RegType::sgpr;
                  allocated[phi->dest.ssa.index] = Temp(0, RegClass(type, size));
                  break;
               }

               if (ctx->divergent_vals[phi->dest.ssa.index]) {
                  type = RegType::vgpr;
               } else {
                  type = RegType::sgpr;
                  nir_foreach_phi_src (src, phi) {
                     if (allocated[src->src.ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                     if (allocated[src->src.ssa->index].type() == RegType::none)
                        done = false;
                  }
               }

               size *= phi->dest.ssa.bit_size == 64 ? 2 : 1;
               RegClass rc = RegClass(type, size);
               if (rc != allocated[phi->dest.ssa.index].regClass()) {
                  done = false;
               } else {
                  nir_foreach_phi_src(src, phi)
                     assert(allocated[src->src.ssa->index].size() == rc.size());
               }
               allocated[phi->dest.ssa.index] = Temp(0, rc);
               break;
            }
            default:
               break;
            }
         }
      }
   }

   for (unsigned i = 0; i < impl->ssa_alloc; i++)
      allocated[i] = Temp(ctx->program->allocateId(), allocated[i].regClass());

   ctx->allocated.reset(allocated.release());
}

struct user_sgpr_info {
   uint8_t num_sgpr;
   uint8_t remaining_sgprs;
   uint8_t user_sgpr_idx;
   bool need_ring_offsets;
   bool indirect_all_descriptor_sets;
};

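/* Tries to pass the push constant range in user SGPRs ("inlined") instead of
 * loading it from memory: only possible when the shader's accessed range is
 * known, consists of 32-bit values only and is not indexed indirectly. If
 * every used constant can be inlined and no dynamic descriptor offsets are
 * loaded, the push constant pointer SGPR itself is freed up as well. */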
static void allocate_inline_push_consts(isel_context *ctx,
                                        user_sgpr_info& user_sgpr_info)
{
   uint8_t remaining_sgprs = user_sgpr_info.remaining_sgprs;

   /* Only supported if shaders use push constants. */
   if (ctx->program->info->min_push_constant_used == UINT8_MAX)
      return;

   /* Only supported if shaders don't have indirect push constants. */
   if (ctx->program->info->has_indirect_push_constants)
      return;

   /* Only supported for 32-bit push constants. */
   //TODO: it's possible that some day, the load/store vectorization could make this inaccurate
   if (!ctx->program->info->has_only_32bit_push_constants)
      return;

   uint8_t num_push_consts =
      (ctx->program->info->max_push_constant_used -
       ctx->program->info->min_push_constant_used) / 4;

   /* Check if the number of user SGPRs is large enough. */
   if (num_push_consts < remaining_sgprs) {
      ctx->program->info->num_inline_push_consts = num_push_consts;
   } else {
      ctx->program->info->num_inline_push_consts = remaining_sgprs;
   }

   /* Clamp to the maximum number of allowed inlined push constants. */
   if (ctx->program->info->num_inline_push_consts > MAX_INLINE_PUSH_CONSTS)
      ctx->program->info->num_inline_push_consts = MAX_INLINE_PUSH_CONSTS;

   if (ctx->program->info->num_inline_push_consts == num_push_consts &&
       !ctx->program->info->loads_dynamic_offsets) {
      /* Disable the default push constants path if all constants are
       * inlined and if shaders don't use dynamic descriptors.
       */
      ctx->program->info->loads_push_constants = false;
      user_sgpr_info.num_sgpr--;
      user_sgpr_info.remaining_sgprs++;
   }

   ctx->program->info->base_inline_push_consts =
      ctx->program->info->min_push_constant_used / 4;

   user_sgpr_info.num_sgpr += ctx->program->info->num_inline_push_consts;
   user_sgpr_info.remaining_sgprs -= ctx->program->info->num_inline_push_consts;
}

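/* Computes how many user SGPRs this stage needs (scratch/ring offsets, vertex
 * buffers, draw id, view index, push constants, streamout buffers and one
 * SGPR per used descriptor set) against the hardware budget: 16 user SGPRs,
 * or 32 on GFX9+ for non-compute stages. If the descriptor sets don't fit,
 * they are instead reached indirectly through a single pointer. */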
static void allocate_user_sgprs(isel_context *ctx,
                                bool needs_view_index, user_sgpr_info& user_sgpr_info)
{
   memset(&user_sgpr_info, 0, sizeof(struct user_sgpr_info));
   uint32_t user_sgpr_count = 0;

   /* until we sort out scratch/global buffers always assign ring offsets for gs/vs/es */
   if (ctx->stage != fragment_fs &&
       ctx->stage != compute_cs
       /*|| ctx->is_gs_copy_shader */)
      user_sgpr_info.need_ring_offsets = true;

   if (ctx->stage == fragment_fs &&
       ctx->program->info->ps.needs_sample_positions)
      user_sgpr_info.need_ring_offsets = true;

   /* 2 user sgprs will nearly always be allocated for scratch/rings */
   if (ctx->options->supports_spill || user_sgpr_info.need_ring_offsets || ctx->scratch_enabled)
      user_sgpr_count += 2;

   switch (ctx->stage) {
   case vertex_vs:
      /* if (!ctx->is_gs_copy_shader) */ {
         if (ctx->program->info->vs.has_vertex_buffers)
            user_sgpr_count++;
         user_sgpr_count += ctx->program->info->vs.needs_draw_id ? 3 : 2;
      }
      break;
   case fragment_fs:
      //user_sgpr_count += ctx->program->info->ps.needs_sample_positions;
      break;
   case compute_cs:
      if (ctx->program->info->cs.uses_grid_size)
         user_sgpr_count += 3;
      break;
   default:
      unreachable("Shader stage not implemented");
   }

   if (needs_view_index)
      user_sgpr_count++;

   if (ctx->program->info->loads_push_constants)
      user_sgpr_count += 1; /* we use 32bit pointers */

   if (ctx->program->info->so.num_outputs)
      user_sgpr_count += 1; /* we use 32bit pointers */

   uint32_t available_sgprs = ctx->options->chip_class >= GFX9 && !(ctx->stage & hw_cs) ? 32 : 16;
   uint32_t remaining_sgprs = available_sgprs - user_sgpr_count;
   uint32_t num_desc_set = util_bitcount(ctx->program->info->desc_set_used_mask);

   if (available_sgprs < user_sgpr_count + num_desc_set) {
      user_sgpr_info.indirect_all_descriptor_sets = true;
      user_sgpr_info.num_sgpr = user_sgpr_count + 1;
      user_sgpr_info.remaining_sgprs = remaining_sgprs - 1;
   } else {
      user_sgpr_info.num_sgpr = user_sgpr_count + num_desc_set;
      user_sgpr_info.remaining_sgprs = remaining_sgprs - num_desc_set;
   }

   allocate_inline_push_consts(ctx, user_sgpr_info);
}

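/* Collects the arguments of the p_startpgm pseudo instruction: for every
 * argument the register class, the fixed physical register the hardware
 * passes it in, and the Temp* that the rest of isel reads the value from. */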
struct arg_info {
   RegClass types[MAX_ARGS];
   Temp *assign[MAX_ARGS];
   PhysReg reg[MAX_ARGS];
   unsigned array_params_mask;
   uint8_t count;
   uint8_t num_sgprs_used;
   uint8_t num_vgprs_used;
};

static void
add_arg(arg_info *info, RegClass rc, Temp *param_ptr, unsigned reg)
{
   assert(info->count < MAX_ARGS);

   info->assign[info->count] = param_ptr;
   info->types[info->count] = rc;

   if (rc.type() == RegType::sgpr) {
      info->num_sgprs_used += rc.size();
      info->reg[info->count] = PhysReg{reg};
   } else {
      assert(rc.type() == RegType::vgpr);
      info->num_vgprs_used += rc.size();
      info->reg[info->count] = PhysReg{reg + 256};
   }
   info->count++;
}

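/* The set_loc* helpers record in radv's user-SGPR location table which user
 * SGPR index holds which piece of metadata (descriptor set pointers, push
 * constants, ...), so the driver knows where to upload them at draw time. */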
static void
set_loc(struct radv_userdata_info *ud_info, uint8_t *sgpr_idx, uint8_t num_sgprs)
{
   ud_info->sgpr_idx = *sgpr_idx;
   ud_info->num_sgprs = num_sgprs;
   *sgpr_idx += num_sgprs;
}

static void
set_loc_shader(isel_context *ctx, int idx, uint8_t *sgpr_idx,
               uint8_t num_sgprs)
{
   struct radv_userdata_info *ud_info = &ctx->program->info->user_sgprs_locs.shader_data[idx];
   assert(ud_info);

   set_loc(ud_info, sgpr_idx, num_sgprs);
}

static void
set_loc_shader_ptr(isel_context *ctx, int idx, uint8_t *sgpr_idx)
{
   bool use_32bit_pointers = idx != AC_UD_SCRATCH_RING_OFFSETS;

   set_loc_shader(ctx, idx, sgpr_idx, use_32bit_pointers ? 1 : 2);
}

static void
set_loc_desc(isel_context *ctx, int idx, uint8_t *sgpr_idx)
{
   struct radv_userdata_locations *locs = &ctx->program->info->user_sgprs_locs;
   struct radv_userdata_info *ud_info = &locs->descriptor_sets[idx];
   assert(ud_info);

   set_loc(ud_info, sgpr_idx, 1);
   locs->descriptor_sets_enabled |= 1 << idx;
}

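/* SGPR arguments shared by all stages: the descriptor sets (either one SGPR
 * per set or a single indirect pointer), the push constant pointer, any
 * inlined push constants, and the streamout buffer descriptor pointer. */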
static void
declare_global_input_sgprs(isel_context *ctx,
                           /* bool has_previous_stage, gl_shader_stage previous_stage, */
                           user_sgpr_info *user_sgpr_info,
                           struct arg_info *args,
                           Temp *desc_sets)
{
   /* 1 for each descriptor set */
   if (!user_sgpr_info->indirect_all_descriptor_sets) {
      uint32_t mask = ctx->program->info->desc_set_used_mask;
      while (mask) {
         int i = u_bit_scan(&mask);
         add_arg(args, s1, &desc_sets[i], user_sgpr_info->user_sgpr_idx);
         set_loc_desc(ctx, i, &user_sgpr_info->user_sgpr_idx);
      }
      /* NIR->LLVM might have set this to true if RADV_DEBUG=compiletime */
      ctx->program->info->need_indirect_descriptor_sets = false;
   } else {
      add_arg(args, s1, desc_sets, user_sgpr_info->user_sgpr_idx);
      set_loc_shader_ptr(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS, &user_sgpr_info->user_sgpr_idx);
      ctx->program->info->need_indirect_descriptor_sets = true;
   }

   if (ctx->program->info->loads_push_constants) {
      /* 1 for push constants and dynamic descriptors */
      add_arg(args, s1, &ctx->push_constants, user_sgpr_info->user_sgpr_idx);
      set_loc_shader_ptr(ctx, AC_UD_PUSH_CONSTANTS, &user_sgpr_info->user_sgpr_idx);
   }

   if (ctx->program->info->num_inline_push_consts) {
      unsigned count = ctx->program->info->num_inline_push_consts;
      for (unsigned i = 0; i < count; i++)
         add_arg(args, s1, &ctx->inline_push_consts[i], user_sgpr_info->user_sgpr_idx + i);
      set_loc_shader(ctx, AC_UD_INLINE_PUSH_CONSTANTS, &user_sgpr_info->user_sgpr_idx, count);

      ctx->num_inline_push_consts = ctx->program->info->num_inline_push_consts;
      ctx->base_inline_push_consts = ctx->program->info->base_inline_push_consts;
   }

   if (ctx->program->info->so.num_outputs) {
      add_arg(args, s1, &ctx->streamout_buffers, user_sgpr_info->user_sgpr_idx);
      set_loc_shader_ptr(ctx, AC_UD_STREAMOUT_BUFFERS, &user_sgpr_info->user_sgpr_idx);
   }
}

static void
declare_vs_input_vgprs(isel_context *ctx, struct arg_info *args)
{
   unsigned vgpr_idx = 0;
   add_arg(args, v1, &ctx->vertex_id, vgpr_idx++);
   if (ctx->options->chip_class >= GFX10) {
      add_arg(args, v1, NULL, vgpr_idx++); /* unused */
      add_arg(args, v1, &ctx->vs_prim_id, vgpr_idx++);
      add_arg(args, v1, &ctx->instance_id, vgpr_idx++);
   } else {
      if (ctx->options->key.vs.out.as_ls) {
         add_arg(args, v1, &ctx->rel_auto_id, vgpr_idx++);
         add_arg(args, v1, &ctx->instance_id, vgpr_idx++);
      } else {
         add_arg(args, v1, &ctx->instance_id, vgpr_idx++);
         add_arg(args, v1, &ctx->vs_prim_id, vgpr_idx++);
      }
      add_arg(args, v1, NULL, vgpr_idx); /* unused */
   }
}

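/* Streamout needs a config and a write-index SGPR, plus one buffer-offset
 * SGPR for each of the four streamout buffers with a non-zero stride. */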
static void
declare_streamout_sgprs(isel_context *ctx, struct arg_info *args, unsigned *idx)
{
   /* Streamout SGPRs. */
   if (ctx->program->info->so.num_outputs) {
      assert(ctx->stage & hw_vs);

      if (ctx->stage != tess_eval_vs) {
         add_arg(args, s1, &ctx->streamout_config, (*idx)++);
      } else {
         args->assign[args->count - 1] = &ctx->streamout_config;
         args->types[args->count - 1] = s1;
      }

      add_arg(args, s1, &ctx->streamout_write_idx, (*idx)++);
   }

   /* A streamout buffer offset is loaded if the stride is non-zero. */
   for (unsigned i = 0; i < 4; i++) {
      if (!ctx->program->info->so.strides[i])
         continue;

      add_arg(args, s1, &ctx->streamout_offset[i], (*idx)++);
   }
}

static bool needs_view_index_sgpr(isel_context *ctx)
{
   switch (ctx->stage) {
   case vertex_vs:
      return ctx->program->info->needs_multiview_view_index || ctx->options->key.has_multiview_view_index;
   case tess_eval_vs:
      return ctx->program->info->needs_multiview_view_index && ctx->options->key.has_multiview_view_index;
   /* ... */
   case vertex_tess_control_hs:
   case vertex_geometry_gs:
   case tess_control_hs:
   /* ... */
   case tess_eval_geometry_gs:
   /* ... */
      return ctx->program->info->needs_multiview_view_index;
   default:
      break;
   }
   return false;
}

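/* Adds one fragment shader input VGPR argument, but only if the shader was
 * found to use it; the corresponding enable bit is also set in
 * SPI_PS_INPUT_ADDR/ENA so the hardware actually loads the input. Returns
 * whether the argument was added; with enable_next, the second half of a
 * barycentric pair (input + 1) is added too. */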
static bool
add_fs_arg(isel_context *ctx, arg_info *args, unsigned &vgpr_idx, fs_input input, unsigned value, bool enable_next = false, RegClass rc = v1)
{
   if (!ctx->fs_vgpr_args[input])
      return false;

   add_arg(args, rc, &ctx->fs_inputs[input], vgpr_idx);
   vgpr_idx += rc.size();

   if (enable_next) {
      add_arg(args, rc, &ctx->fs_inputs[input + 1], vgpr_idx);
      vgpr_idx += rc.size();
   }

   ctx->program->config->spi_ps_input_addr |= value;
   ctx->program->config->spi_ps_input_ena |= value;
   return true;
}

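/* Creates the p_startpgm pseudo instruction that defines all input SGPRs and
 * VGPRs of the current stage in their fixed registers. The fragment shader
 * path additionally enforces the hardware rules that POS_W_FLOAT requires at
 * least one PERSP_* input, and that at least one PERSP_* or LINEAR_* input
 * must always be enabled. */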
Pseudo_instruction *add_startpgm(struct isel_context *ctx)
{
   user_sgpr_info user_sgpr_info;
   bool needs_view_index = needs_view_index_sgpr(ctx);
   allocate_user_sgprs(ctx, needs_view_index, user_sgpr_info);
   arg_info args = {};

   /* this needs to be in sgprs 0 and 1 */
   if (ctx->options->supports_spill || user_sgpr_info.need_ring_offsets || ctx->scratch_enabled) {
      add_arg(&args, s2, &ctx->program->private_segment_buffer, 0);
      set_loc_shader_ptr(ctx, AC_UD_SCRATCH_RING_OFFSETS, &user_sgpr_info.user_sgpr_idx);
   }

   unsigned vgpr_idx = 0;
   switch (ctx->stage) {
   case vertex_vs: {
      declare_global_input_sgprs(ctx, &user_sgpr_info, &args, ctx->descriptor_sets);

      if (ctx->program->info->vs.has_vertex_buffers) {
         add_arg(&args, s1, &ctx->vertex_buffers, user_sgpr_info.user_sgpr_idx);
         set_loc_shader_ptr(ctx, AC_UD_VS_VERTEX_BUFFERS, &user_sgpr_info.user_sgpr_idx);
      }

      add_arg(&args, s1, &ctx->base_vertex, user_sgpr_info.user_sgpr_idx);
      add_arg(&args, s1, &ctx->start_instance, user_sgpr_info.user_sgpr_idx + 1);
      if (ctx->program->info->vs.needs_draw_id) {
         add_arg(&args, s1, &ctx->draw_id, user_sgpr_info.user_sgpr_idx + 2);
         set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE, &user_sgpr_info.user_sgpr_idx, 3);
      } else {
         set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE, &user_sgpr_info.user_sgpr_idx, 2);
      }

      if (needs_view_index) {
         add_arg(&args, s1, &ctx->view_index, user_sgpr_info.user_sgpr_idx);
         set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_info.user_sgpr_idx, 1);
      }

      assert(user_sgpr_info.user_sgpr_idx == user_sgpr_info.num_sgpr);
      unsigned idx = user_sgpr_info.user_sgpr_idx;
      if (ctx->options->key.vs.out.as_es)
         add_arg(&args, s1, &ctx->es2gs_offset, idx++);
      else
         declare_streamout_sgprs(ctx, &args, &idx);

      if (ctx->options->supports_spill || ctx->scratch_enabled)
         add_arg(&args, s1, &ctx->program->scratch_offset, idx++);

      declare_vs_input_vgprs(ctx, &args);
      break;
   }
   case fragment_fs: {
      declare_global_input_sgprs(ctx, &user_sgpr_info, &args, ctx->descriptor_sets);

      assert(user_sgpr_info.user_sgpr_idx == user_sgpr_info.num_sgpr);
      add_arg(&args, s1, &ctx->prim_mask, user_sgpr_info.user_sgpr_idx);

      if (ctx->options->supports_spill || ctx->scratch_enabled)
         add_arg(&args, s1, &ctx->program->scratch_offset, user_sgpr_info.user_sgpr_idx + 1);

      ctx->program->config->spi_ps_input_addr = 0;
      ctx->program->config->spi_ps_input_ena = 0;

      bool has_interp_mode = false;

      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::persp_sample_p1, S_0286CC_PERSP_SAMPLE_ENA(1), true);
      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::persp_center_p1, S_0286CC_PERSP_CENTER_ENA(1), true);
      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::persp_centroid_p1, S_0286CC_PERSP_CENTROID_ENA(1), true);
      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::persp_pull_model, S_0286CC_PERSP_PULL_MODEL_ENA(1), false, v3);

      if (!has_interp_mode && ctx->fs_vgpr_args[fs_input::frag_pos_3]) {
         /* If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be enabled too */
         ctx->fs_vgpr_args[fs_input::persp_center_p1] = true;
         has_interp_mode = add_fs_arg(ctx, &args, vgpr_idx, fs_input::persp_center_p1, S_0286CC_PERSP_CENTER_ENA(1), true);
      }

      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::linear_sample_p1, S_0286CC_LINEAR_SAMPLE_ENA(1), true);
      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::linear_center_p1, S_0286CC_LINEAR_CENTER_ENA(1), true);
      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::linear_centroid_p1, S_0286CC_LINEAR_CENTROID_ENA(1), true);
      has_interp_mode |= add_fs_arg(ctx, &args, vgpr_idx, fs_input::line_stipple, S_0286CC_LINE_STIPPLE_TEX_ENA(1));

      if (!has_interp_mode) {
         /* At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled */
         ctx->fs_vgpr_args[fs_input::persp_center_p1] = true;
         has_interp_mode = add_fs_arg(ctx, &args, vgpr_idx, fs_input::persp_center_p1, S_0286CC_PERSP_CENTER_ENA(1), true);
      }

      add_fs_arg(ctx, &args, vgpr_idx, fs_input::frag_pos_0, S_0286CC_POS_X_FLOAT_ENA(1));
      add_fs_arg(ctx, &args, vgpr_idx, fs_input::frag_pos_1, S_0286CC_POS_Y_FLOAT_ENA(1));
      add_fs_arg(ctx, &args, vgpr_idx, fs_input::frag_pos_2, S_0286CC_POS_Z_FLOAT_ENA(1));
      add_fs_arg(ctx, &args, vgpr_idx, fs_input::frag_pos_3, S_0286CC_POS_W_FLOAT_ENA(1));

      add_fs_arg(ctx, &args, vgpr_idx, fs_input::front_face, S_0286CC_FRONT_FACE_ENA(1));
      add_fs_arg(ctx, &args, vgpr_idx, fs_input::ancillary, S_0286CC_ANCILLARY_ENA(1));
      add_fs_arg(ctx, &args, vgpr_idx, fs_input::sample_coverage, S_0286CC_SAMPLE_COVERAGE_ENA(1));
      add_fs_arg(ctx, &args, vgpr_idx, fs_input::fixed_pt, S_0286CC_POS_FIXED_PT_ENA(1));

      ASSERTED bool unset_interp_mode = !(ctx->program->config->spi_ps_input_addr & 0x7F) ||
                                        (G_0286CC_POS_W_FLOAT_ENA(ctx->program->config->spi_ps_input_addr)
                                         && !(ctx->program->config->spi_ps_input_addr & 0xF));

      assert(has_interp_mode);
      assert(!unset_interp_mode);
      break;
   }
   case compute_cs: {
      declare_global_input_sgprs(ctx, &user_sgpr_info, &args, ctx->descriptor_sets);

      if (ctx->program->info->cs.uses_grid_size) {
         add_arg(&args, s1, &ctx->num_workgroups[0], user_sgpr_info.user_sgpr_idx);
         add_arg(&args, s1, &ctx->num_workgroups[1], user_sgpr_info.user_sgpr_idx + 1);
         add_arg(&args, s1, &ctx->num_workgroups[2], user_sgpr_info.user_sgpr_idx + 2);
         set_loc_shader(ctx, AC_UD_CS_GRID_SIZE, &user_sgpr_info.user_sgpr_idx, 3);
      }

      assert(user_sgpr_info.user_sgpr_idx == user_sgpr_info.num_sgpr);
      unsigned idx = user_sgpr_info.user_sgpr_idx;
      for (unsigned i = 0; i < 3; i++) {
         if (ctx->program->info->cs.uses_block_id[i])
            add_arg(&args, s1, &ctx->workgroup_ids[i], idx++);
      }

      if (ctx->program->info->cs.uses_local_invocation_idx)
         add_arg(&args, s1, &ctx->tg_size, idx++);
      if (ctx->options->supports_spill || ctx->scratch_enabled)
         add_arg(&args, s1, &ctx->program->scratch_offset, idx++);

      add_arg(&args, v1, &ctx->local_invocation_ids[0], vgpr_idx++);
      add_arg(&args, v1, &ctx->local_invocation_ids[1], vgpr_idx++);
      add_arg(&args, v1, &ctx->local_invocation_ids[2], vgpr_idx++);
      break;
   }
   default:
      unreachable("Shader stage not implemented");
   }

   ctx->program->info->num_input_vgprs = 0;
   ctx->program->info->num_input_sgprs = args.num_sgprs_used;
   ctx->program->info->num_user_sgprs = user_sgpr_info.num_sgpr;
   ctx->program->info->num_input_vgprs = args.num_vgprs_used;

   if (ctx->stage == fragment_fs) {
      /* Verify that we have a correct assumption about input VGPR count */
      ASSERTED unsigned input_vgpr_cnt = ac_get_fs_input_vgpr_cnt(ctx->program->config, nullptr, nullptr);
      assert(input_vgpr_cnt == ctx->program->info->num_input_vgprs);
   }

   aco_ptr<Pseudo_instruction> startpgm{create_instruction<Pseudo_instruction>(aco_opcode::p_startpgm, Format::PSEUDO, 0, args.count + 1)};
   for (unsigned i = 0; i < args.count; i++) {
      if (args.assign[i]) {
         *args.assign[i] = Temp{ctx->program->allocateId(), args.types[i]};
         startpgm->definitions[i] = Definition(*args.assign[i]);
         startpgm->definitions[i].setFixed(args.reg[i]);
      }
   }
   startpgm->definitions[args.count] = Definition{ctx->program->allocateId(), exec, s2};
   Pseudo_instruction *instr = startpgm.get();
   ctx->block->instructions.push_back(std::move(startpgm));

   return instr;
}

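/* Callbacks for the NIR passes run in setup_isel_context(): type_size() for
 * nir_lower_io(), shared_var_info() for nir_lower_vars_to_explicit_types()
 * and get_align() for the (still disabled) load/store vectorizer. */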
static int
type_size(const struct glsl_type *type, bool bindless)
{
   // TODO: don't we need type->std430_base_alignment() here?
   return glsl_count_attribute_slots(type, false);
}

static void
shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   uint32_t comp_size = glsl_type_is_boolean(type)
      ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);
   *size = comp_size * length,
   *align = comp_size;
}

static int
get_align(nir_variable_mode mode, bool is_store, unsigned bit_size, unsigned num_components)
{
   /* TODO: ACO doesn't have good support for non-32-bit reads/writes yet */
   if (bit_size != 32)
      return -1;

   switch (mode) {
   case nir_var_mem_ubo:
   case nir_var_mem_ssbo:
   //case nir_var_mem_push_const: enable with 1240!
   case nir_var_mem_shared:
      /* TODO: what are the alignment requirements for LDS? */
      return num_components <= 4 ? 4 : -1;
   default:
      return -1;
   }
}

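/* Assigns io driver locations and precomputes the VS export layout: which
 * outputs receive a param export slot, and which of the up-to-four position
 * exports (position, psize/layer/viewport, clip/cull distance vectors) are
 * written. */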
void
setup_vs_variables(isel_context *ctx, nir_shader *nir)
{
   nir_foreach_variable(variable, &nir->inputs)
   {
      variable->data.driver_location = variable->data.location * 4;
   }
   nir_foreach_variable(variable, &nir->outputs)
   {
      variable->data.driver_location = variable->data.location * 4;
   }

   radv_vs_output_info *outinfo = &ctx->program->info->vs.outinfo;

   memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
          sizeof(outinfo->vs_output_param_offset));

   ctx->needs_instance_id = ctx->program->info->vs.needs_instance_id;

   bool export_clip_dists = ctx->options->key.vs_common_out.export_clip_dists;

   outinfo->param_exports = 0;
   int pos_written = 0x1;
   if (outinfo->writes_pointsize || outinfo->writes_viewport_index || outinfo->writes_layer)
      pos_written |= 1 << 1;

   nir_foreach_variable(variable, &nir->outputs)
   {
      int idx = variable->data.location;
      unsigned slots = variable->type->count_attribute_slots(false);
      if (variable->data.compact) {
         unsigned component_count = variable->data.location_frac + variable->type->length;
         slots = (component_count + 3) / 4;
      }

      if (idx >= VARYING_SLOT_VAR0 || idx == VARYING_SLOT_LAYER || idx == VARYING_SLOT_PRIMITIVE_ID ||
          ((idx == VARYING_SLOT_CLIP_DIST0 || idx == VARYING_SLOT_CLIP_DIST1) && export_clip_dists)) {
         for (unsigned i = 0; i < slots; i++) {
            if (outinfo->vs_output_param_offset[idx + i] == AC_EXP_PARAM_UNDEFINED)
               outinfo->vs_output_param_offset[idx + i] = outinfo->param_exports++;
         }
      }
   }
   if (outinfo->writes_layer &&
       outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] == AC_EXP_PARAM_UNDEFINED) {
      /* when ctx->options->key.has_multiview_view_index = true, the layer
       * variable isn't declared in NIR and it's isel's job to get the layer */
      outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] = outinfo->param_exports++;
   }

   if (outinfo->export_prim_id) {
      assert(outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] == AC_EXP_PARAM_UNDEFINED);
      outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = outinfo->param_exports++;
   }

   ctx->num_clip_distances = util_bitcount(outinfo->clip_dist_mask);
   ctx->num_cull_distances = util_bitcount(outinfo->cull_dist_mask);

   assert(ctx->num_clip_distances + ctx->num_cull_distances <= 8);

   if (ctx->num_clip_distances + ctx->num_cull_distances > 0)
      pos_written |= 1 << 2;
   if (ctx->num_clip_distances + ctx->num_cull_distances > 4)
      pos_written |= 1 << 3;

   outinfo->pos_exports = util_bitcount(pos_written);
}

void
setup_variables(isel_context *ctx, nir_shader *nir)
{
   switch (nir->info.stage) {
   case MESA_SHADER_FRAGMENT: {
      nir_foreach_variable(variable, &nir->outputs)
      {
         int idx = variable->data.location + variable->data.index;
         variable->data.driver_location = idx * 4;
      }
      break;
   }
   case MESA_SHADER_COMPUTE: {
      ctx->program->config->lds_size = (nir->info.cs.shared_size + ctx->program->lds_alloc_granule - 1) /
                                       ctx->program->lds_alloc_granule;
      break;
   }
   case MESA_SHADER_VERTEX: {
      setup_vs_variables(ctx, nir);
      break;
   }
   default:
      unreachable("Unhandled shader stage.");
   }
}

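/* Entry point of isel setup: determines the combined software/hardware stage,
 * fills in per-family register limits, runs the NIR lowering/optimization
 * pipeline on every shader and creates the top-level block of the program.
 * The returned context drives the actual instruction selection. */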
isel_context
setup_isel_context(Program* program,
                   unsigned shader_count,
                   struct nir_shader *const *shaders,
                   ac_shader_config* config,
                   radv_shader_info *info,
                   const radv_nir_compiler_options *options)
{
   program->stage = 0;
   for (unsigned i = 0; i < shader_count; i++) {
      switch (shaders[i]->info.stage) {
      case MESA_SHADER_VERTEX:
         program->stage |= sw_vs;
         break;
      case MESA_SHADER_TESS_CTRL:
         program->stage |= sw_tcs;
         break;
      case MESA_SHADER_TESS_EVAL:
         program->stage |= sw_tes;
         break;
      case MESA_SHADER_GEOMETRY:
         program->stage |= sw_gs;
         break;
      case MESA_SHADER_FRAGMENT:
         program->stage |= sw_fs;
         break;
      case MESA_SHADER_COMPUTE:
         program->stage |= sw_cs;
         break;
      default:
         unreachable("Shader stage not implemented");
      }
   }

   if (program->stage == sw_vs)
      program->stage |= hw_vs;
   else if (program->stage == sw_fs)
      program->stage |= hw_fs;
   else if (program->stage == sw_cs)
      program->stage |= hw_cs;
   else
      unreachable("Shader stage not implemented");

   program->config = config;
   program->info = info;
   program->chip_class = options->chip_class;
   program->family = options->family;
   program->wave_size = info->wave_size;

   program->lds_alloc_granule = options->chip_class >= GFX7 ? 512 : 256;
   program->lds_limit = options->chip_class >= GFX7 ? 65536 : 32768;
   program->vgpr_limit = 256;

   if (options->chip_class >= GFX10) {
      program->physical_sgprs = 2560; /* doesn't matter as long as it's at least 128 * 20 */
      program->sgpr_alloc_granule = 127;
      program->sgpr_limit = 106;
   } else if (program->chip_class >= GFX8) {
      program->physical_sgprs = 800;
      program->sgpr_alloc_granule = 15;
      if (options->family == CHIP_TONGA || options->family == CHIP_ICELAND)
         program->sgpr_limit = 94; /* workaround hardware bug */
      else
         program->sgpr_limit = 102;
   } else {
      program->physical_sgprs = 512;
      program->sgpr_alloc_granule = 7;
      program->sgpr_limit = 104;
   }
   /* TODO: we don't have to allocate VCC if we don't need it */
   program->needs_vcc = true;

   for (unsigned i = 0; i < MAX_SETS; ++i)
      program->info->user_sgprs_locs.descriptor_sets[i].sgpr_idx = -1;
   for (unsigned i = 0; i < AC_UD_MAX_UD; ++i)
      program->info->user_sgprs_locs.shader_data[i].sgpr_idx = -1;

   isel_context ctx = {};
   ctx.program = program;
   ctx.options = options;
   ctx.stage = program->stage;

   for (unsigned i = 0; i < fs_input::max_inputs; ++i)
      ctx.fs_inputs[i] = Temp(0, v1);
   ctx.fs_inputs[fs_input::persp_pull_model] = Temp(0, v3);
   for (unsigned i = 0; i < MAX_SETS; ++i)
      ctx.descriptor_sets[i] = Temp(0, s1);
   for (unsigned i = 0; i < MAX_INLINE_PUSH_CONSTS; ++i)
      ctx.inline_push_consts[i] = Temp(0, s1);
   for (unsigned i = 0; i <= VARYING_SLOT_VAR31; ++i) {
      for (unsigned j = 0; j < 4; ++j)
         ctx.vs_output.outputs[i][j] = Temp(0, v1);
   }

   for (unsigned i = 0; i < shader_count; i++) {
      nir_shader *nir = shaders[i];

      /* align and copy constant data */
      while (program->constant_data.size() % 4u)
         program->constant_data.push_back(0);
      ctx.constant_data_offset = program->constant_data.size();
      program->constant_data.insert(program->constant_data.end(),
                                    (uint8_t*)nir->constant_data,
                                    (uint8_t*)nir->constant_data + nir->constant_data_size);

      /* the variable setup has to be done before lower_io / CSE */
      if (nir->info.stage == MESA_SHADER_COMPUTE)
         nir_lower_vars_to_explicit_types(nir, nir_var_mem_shared, shared_var_info);
      setup_variables(&ctx, nir);

      /* optimize and lower memory operations */
      bool lower_to_scalar = false;
      bool lower_pack = false;
      // TODO: uncomment this once !1240 is merged
      /*if (nir_opt_load_store_vectorize(nir,
                                         (nir_variable_mode)(nir_var_mem_ssbo | nir_var_mem_ubo |
                                                             nir_var_mem_push_const | nir_var_mem_shared),
                                         get_align)) {
         lower_to_scalar = true;
         lower_pack = true;
      }*/
      if (nir->info.stage == MESA_SHADER_COMPUTE)
         lower_to_scalar |= nir_lower_explicit_io(nir, nir_var_mem_shared, nir_address_format_32bit_offset);
      else
         nir_lower_io(nir, (nir_variable_mode)(nir_var_shader_in | nir_var_shader_out), type_size, (nir_lower_io_options)0);
      nir_lower_explicit_io(nir, nir_var_mem_global, nir_address_format_64bit_global);

      if (lower_to_scalar)
         nir_lower_alu_to_scalar(nir, NULL, NULL);
      if (lower_pack)
         nir_lower_pack(nir);

      /* lower ALU operations */
      // TODO: implement logic64 in aco, it's more effective for sgprs
      nir_lower_int64(nir, nir->options->lower_int64_options);

      nir_opt_idiv_const(nir, 32);
      nir_lower_idiv(nir, nir_lower_idiv_precise);

      /* optimize the lowered ALU operations */
      bool more_algebraic = true;
      while (more_algebraic) {
         more_algebraic = false;
         NIR_PASS_V(nir, nir_copy_prop);
         NIR_PASS_V(nir, nir_opt_dce);
         NIR_PASS_V(nir, nir_opt_constant_folding);
         NIR_PASS(more_algebraic, nir, nir_opt_algebraic);
      }

      /* Do late algebraic optimization to turn add(a, neg(b)) back into
       * subs, then the mandatory cleanup after algebraic. Note that it may
       * produce fnegs, and if so then we need to keep running to squash
       * fneg(fneg(a)).
       */
      bool more_late_algebraic = true;
      while (more_late_algebraic) {
         more_late_algebraic = false;
         NIR_PASS(more_late_algebraic, nir, nir_opt_algebraic_late);
         NIR_PASS_V(nir, nir_opt_constant_folding);
         NIR_PASS_V(nir, nir_copy_prop);
         NIR_PASS_V(nir, nir_opt_dce);
         NIR_PASS_V(nir, nir_opt_cse);
      }

      /* cleanup passes */
      nir_lower_load_const_to_scalar(nir);
      nir_opt_shrink_load(nir);
      nir_move_options move_opts = (nir_move_options)(
         nir_move_const_undef | nir_move_load_ubo | nir_move_load_input | nir_move_comparisons);
      nir_opt_sink(nir, move_opts);
      nir_opt_move(nir, move_opts);
      nir_convert_to_lcssa(nir, true, false);
      nir_lower_phis_to_scalar(nir);

      nir_function_impl *func = nir_shader_get_entrypoint(nir);
      nir_index_ssa_defs(func);

      if (options->dump_preoptir) {
         fprintf(stderr, "NIR shader before instruction selection:\n");
         nir_print_shader(nir, stderr);
      }
   }

   unsigned scratch_size = 0;
   for (unsigned i = 0; i < shader_count; i++)
      scratch_size = std::max(scratch_size, shaders[i]->scratch_size);
   ctx.scratch_enabled = scratch_size > 0;
   ctx.program->config->scratch_bytes_per_wave = align(scratch_size * ctx.program->wave_size, 1024);

   ctx.block = ctx.program->create_and_insert_block();
   ctx.block->loop_nest_depth = 0;
   ctx.block->kind = block_kind_top_level;

   return ctx;
}