/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <array>
#include <unordered_map>

#include "aco_ir.h"
#include "nir.h"
#include "vulkan/radv_shader.h"
#include "vulkan/radv_descriptor_set.h"
#include "vulkan/radv_shader_args.h"
#include "sid.h"
#include "ac_exp_param.h"
#include "ac_shader_util.h"

#include "util/u_math.h"

#define MAX_INLINE_PUSH_CONSTS 8

namespace aco {

/* Outputs written by a shader: a per-slot component write mask plus the
 * temporaries holding each written component. */
struct output_state {
   uint8_t mask[VARYING_SLOT_VAR31 + 1];
   Temp outputs[VARYING_SLOT_VAR31 + 1][4];
};
struct isel_context {
   const struct radv_nir_compiler_options *options;
   struct radv_shader_args *args;
   Program *program;
   nir_shader *shader;
   uint32_t constant_data_offset;
   Block *block;
   bool *divergent_vals;
   std::unique_ptr<Temp[]> allocated;
   std::unordered_map<unsigned, std::array<Temp,NIR_MAX_VEC_COMPONENTS>> allocated_vec;
   Stage stage; /* combined software/hardware shader stage */
   bool has_gfx10_wave64_bpermute = false;

   /* control-flow bookkeeping used while translating the NIR CFG */
   struct {
      bool has_branch;
      uint16_t loop_nest_depth = 0;
      struct {
         unsigned header_idx;
         Block* exit;
         bool has_divergent_continue = false;
         bool has_divergent_branch = false;
      } parent_loop;
      struct {
         bool is_divergent = false;
      } parent_if;
      bool exec_potentially_empty_discard = false; /* set to false when loop_nest_depth==0 && parent_if.is_divergent==false */
      uint16_t exec_potentially_empty_break_depth = UINT16_MAX;
      /* Set to false when loop_nest_depth==exec_potentially_empty_break_depth
       * and parent_if.is_divergent==false. Called _break but it's also used for
       * loop continues. */
      bool exec_potentially_empty_break = false;
      std::unique_ptr<unsigned[]> nir_to_aco; /* NIR block index to ACO block index */
   } cf_info;

   Temp arg_temps[AC_MAX_ARGS];

   /* FS inputs */
   Temp persp_centroid, linear_centroid;

   /* gathered information */
   uint64_t input_masks[MESA_SHADER_COMPUTE];
   uint64_t output_masks[MESA_SHADER_COMPUTE];

   /* VS output information */
   bool export_clip_dists;
   unsigned num_clip_distances;
   unsigned num_cull_distances;

   /* VS, FS or GS output information */
   output_state outputs;
};
Temp get_arg(isel_context *ctx, struct ac_arg arg)
{
   assert(arg.used);
   return ctx->arg_temps[arg.arg_index];
}
unsigned get_interp_input(nir_intrinsic_op intrin, enum glsl_interp_mode interp)
{
   switch (interp) {
   case INTERP_MODE_SMOOTH:
   case INTERP_MODE_NONE:
      if (intrin == nir_intrinsic_load_barycentric_pixel ||
          intrin == nir_intrinsic_load_barycentric_at_sample ||
          intrin == nir_intrinsic_load_barycentric_at_offset)
         return S_0286CC_PERSP_CENTER_ENA(1);
      else if (intrin == nir_intrinsic_load_barycentric_centroid)
         return S_0286CC_PERSP_CENTROID_ENA(1);
      else if (intrin == nir_intrinsic_load_barycentric_sample)
         return S_0286CC_PERSP_SAMPLE_ENA(1);
      break;
   case INTERP_MODE_NOPERSPECTIVE:
      if (intrin == nir_intrinsic_load_barycentric_pixel)
         return S_0286CC_LINEAR_CENTER_ENA(1);
      else if (intrin == nir_intrinsic_load_barycentric_centroid)
         return S_0286CC_LINEAR_CENTROID_ENA(1);
      else if (intrin == nir_intrinsic_load_barycentric_sample)
         return S_0286CC_LINEAR_SAMPLE_ENA(1);
      break;
   default:
      break;
   }
   return 0;
}
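/* init_context() below walks every NIR instruction and records a register
 * class for each SSA value: values that are uniform across the wave (per
 * nir_divergence_analysis()) live in SGPRs, divergent values in VGPRs, and
 * 1-bit booleans become lane masks (s1 on wave32, s2 on wave64, hence the
 * lane_mask_size scaling). */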
void init_context(isel_context *ctx, nir_shader *shader)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(shader);
   unsigned lane_mask_size = ctx->program->lane_mask.size();

   ctx->shader = shader;
   ctx->divergent_vals = nir_divergence_analysis(shader, nir_divergence_view_index_uniform);

   std::unique_ptr<Temp[]> allocated{new Temp[impl->ssa_alloc]()};

   unsigned spi_ps_inputs = 0;

   std::unique_ptr<unsigned[]> nir_to_aco{new unsigned[impl->num_blocks]()};
   /* The class of a phi can depend on operands defined later, so iterate
    * until all register classes have settled. */
   bool done = false;
   while (!done) {
      done = true;
      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            switch(instr->type) {
            case nir_instr_type_alu: {
               nir_alu_instr *alu_instr = nir_instr_as_alu(instr);
               unsigned size = alu_instr->dest.dest.ssa.num_components;
               if (alu_instr->dest.dest.ssa.bit_size == 64)
                  size *= 2;
               RegType type = RegType::sgpr;
               switch(alu_instr->op) {
               case nir_op_fround_even:
               case nir_op_pack_half_2x16:
               case nir_op_unpack_half_2x16_split_x:
               case nir_op_unpack_half_2x16_split_y:
               case nir_op_fddx_fine:
               case nir_op_fddy_fine:
               case nir_op_fddx_coarse:
               case nir_op_fddy_coarse:
               case nir_op_fquantize2f16:
               case nir_op_frexp_sig:
               case nir_op_frexp_exp:
               case nir_op_cube_face_index:
               case nir_op_cube_face_coord:
                  /* these only map to VALU instructions */
                  type = RegType::vgpr;
                  break;
               case nir_op_flt:
               case nir_op_fge:
               case nir_op_feq:
               case nir_op_fne:
               case nir_op_ilt:
               case nir_op_ige:
               case nir_op_ult:
               case nir_op_uge:
               case nir_op_ieq:
               case nir_op_ine:
               case nir_op_i2b1:
                  /* comparisons produce a boolean, i.e. a lane mask */
                  size = lane_mask_size;
                  break;
               case nir_op_f2i64:
               case nir_op_f2u64:
               case nir_op_b2i32:
               case nir_op_b2f32:
               case nir_op_f2i32:
               case nir_op_f2u32:
                  type = ctx->divergent_vals[alu_instr->dest.dest.ssa.index] ? RegType::vgpr : RegType::sgpr;
                  break;
               case nir_op_bcsel:
                  if (alu_instr->dest.dest.ssa.bit_size == 1) {
                     size = lane_mask_size;
                  }
                  if (ctx->divergent_vals[alu_instr->dest.dest.ssa.index]) {
                     type = RegType::vgpr;
                  } else {
                     if (allocated[alu_instr->src[1].src.ssa->index].type() == RegType::vgpr ||
                         allocated[alu_instr->src[2].src.ssa->index].type() == RegType::vgpr) {
                        type = RegType::vgpr;
                     }
                  }
                  if (alu_instr->src[1].src.ssa->num_components == 1 && alu_instr->src[2].src.ssa->num_components == 1) {
                     assert(allocated[alu_instr->src[1].src.ssa->index].size() == allocated[alu_instr->src[2].src.ssa->index].size());
                     size = allocated[alu_instr->src[1].src.ssa->index].size();
                  }
                  break;
               case nir_op_mov:
                  if (alu_instr->dest.dest.ssa.bit_size == 1) {
                     size = lane_mask_size;
                  } else {
                     type = ctx->divergent_vals[alu_instr->dest.dest.ssa.index] ? RegType::vgpr : RegType::sgpr;
                  }
                  break;
               default:
                  if (alu_instr->dest.dest.ssa.bit_size == 1) {
                     size = lane_mask_size;
                  } else {
                     for (unsigned i = 0; i < nir_op_infos[alu_instr->op].num_inputs; i++) {
                        if (allocated[alu_instr->src[i].src.ssa->index].type() == RegType::vgpr)
                           type = RegType::vgpr;
                     }
                  }
                  break;
               }
               allocated[alu_instr->dest.dest.ssa.index] = Temp(0, RegClass(type, size));
               break;
            }
            case nir_instr_type_load_const: {
               unsigned size = nir_instr_as_load_const(instr)->def.num_components;
               if (nir_instr_as_load_const(instr)->def.bit_size == 64)
                  size *= 2;
               else if (nir_instr_as_load_const(instr)->def.bit_size == 1)
                  size *= lane_mask_size;
               allocated[nir_instr_as_load_const(instr)->def.index] = Temp(0, RegClass(RegType::sgpr, size));
               break;
            }
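            /* Constants are uniform across the wave by definition, so
             * load_const always gets an SGPR class; 64-bit values occupy two
             * dwords and booleans a full lane mask, mirroring the sizing
             * rules used for ALU results above. */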
            case nir_instr_type_intrinsic: {
               nir_intrinsic_instr *intrinsic = nir_instr_as_intrinsic(instr);
               if (!nir_intrinsic_infos[intrinsic->intrinsic].has_dest)
                  break;
               unsigned size = intrinsic->dest.ssa.num_components;
               if (intrinsic->dest.ssa.bit_size == 64)
                  size *= 2;
               RegType type = RegType::sgpr;
               switch(intrinsic->intrinsic) {
               case nir_intrinsic_load_push_constant:
               case nir_intrinsic_load_work_group_id:
               case nir_intrinsic_load_num_work_groups:
               case nir_intrinsic_load_subgroup_id:
               case nir_intrinsic_load_num_subgroups:
               case nir_intrinsic_load_first_vertex:
               case nir_intrinsic_load_base_instance:
               case nir_intrinsic_get_buffer_size:
               case nir_intrinsic_vote_all:
               case nir_intrinsic_vote_any:
               case nir_intrinsic_read_first_invocation:
               case nir_intrinsic_read_invocation:
               case nir_intrinsic_first_invocation:
                  type = RegType::sgpr;
                  if (intrinsic->dest.ssa.bit_size == 1)
                     size = lane_mask_size;
                  break;
               case nir_intrinsic_ballot:
                  type = RegType::sgpr;
                  break;
               case nir_intrinsic_load_sample_id:
               case nir_intrinsic_load_sample_mask_in:
               case nir_intrinsic_load_input:
               case nir_intrinsic_load_input_vertex:
               case nir_intrinsic_load_per_vertex_input:
               case nir_intrinsic_load_vertex_id:
               case nir_intrinsic_load_vertex_id_zero_base:
               case nir_intrinsic_load_barycentric_sample:
               case nir_intrinsic_load_barycentric_pixel:
               case nir_intrinsic_load_barycentric_model:
               case nir_intrinsic_load_barycentric_centroid:
               case nir_intrinsic_load_barycentric_at_sample:
               case nir_intrinsic_load_barycentric_at_offset:
               case nir_intrinsic_load_interpolated_input:
               case nir_intrinsic_load_frag_coord:
               case nir_intrinsic_load_sample_pos:
               case nir_intrinsic_load_layer_id:
               case nir_intrinsic_load_local_invocation_id:
               case nir_intrinsic_load_local_invocation_index:
               case nir_intrinsic_load_subgroup_invocation:
               case nir_intrinsic_write_invocation_amd:
               case nir_intrinsic_mbcnt_amd:
               case nir_intrinsic_load_instance_id:
               case nir_intrinsic_ssbo_atomic_add:
               case nir_intrinsic_ssbo_atomic_imin:
               case nir_intrinsic_ssbo_atomic_umin:
               case nir_intrinsic_ssbo_atomic_imax:
               case nir_intrinsic_ssbo_atomic_umax:
               case nir_intrinsic_ssbo_atomic_and:
               case nir_intrinsic_ssbo_atomic_or:
               case nir_intrinsic_ssbo_atomic_xor:
               case nir_intrinsic_ssbo_atomic_exchange:
               case nir_intrinsic_ssbo_atomic_comp_swap:
               case nir_intrinsic_global_atomic_add:
               case nir_intrinsic_global_atomic_imin:
               case nir_intrinsic_global_atomic_umin:
               case nir_intrinsic_global_atomic_imax:
               case nir_intrinsic_global_atomic_umax:
               case nir_intrinsic_global_atomic_and:
               case nir_intrinsic_global_atomic_or:
               case nir_intrinsic_global_atomic_xor:
               case nir_intrinsic_global_atomic_exchange:
               case nir_intrinsic_global_atomic_comp_swap:
               case nir_intrinsic_image_deref_atomic_add:
               case nir_intrinsic_image_deref_atomic_umin:
               case nir_intrinsic_image_deref_atomic_imin:
               case nir_intrinsic_image_deref_atomic_umax:
               case nir_intrinsic_image_deref_atomic_imax:
               case nir_intrinsic_image_deref_atomic_and:
               case nir_intrinsic_image_deref_atomic_or:
               case nir_intrinsic_image_deref_atomic_xor:
               case nir_intrinsic_image_deref_atomic_exchange:
               case nir_intrinsic_image_deref_atomic_comp_swap:
               case nir_intrinsic_image_deref_size:
               case nir_intrinsic_shared_atomic_add:
               case nir_intrinsic_shared_atomic_imin:
               case nir_intrinsic_shared_atomic_umin:
               case nir_intrinsic_shared_atomic_imax:
               case nir_intrinsic_shared_atomic_umax:
               case nir_intrinsic_shared_atomic_and:
               case nir_intrinsic_shared_atomic_or:
               case nir_intrinsic_shared_atomic_xor:
               case nir_intrinsic_shared_atomic_exchange:
               case nir_intrinsic_shared_atomic_comp_swap:
               case nir_intrinsic_load_scratch:
               case nir_intrinsic_load_invocation_id:
               case nir_intrinsic_load_primitive_id:
                  type = RegType::vgpr;
                  break;
               case nir_intrinsic_shuffle:
               case nir_intrinsic_quad_broadcast:
               case nir_intrinsic_quad_swap_horizontal:
               case nir_intrinsic_quad_swap_vertical:
               case nir_intrinsic_quad_swap_diagonal:
               case nir_intrinsic_quad_swizzle_amd:
               case nir_intrinsic_masked_swizzle_amd:
               case nir_intrinsic_inclusive_scan:
               case nir_intrinsic_exclusive_scan:
                  if (intrinsic->dest.ssa.bit_size == 1) {
                     size = lane_mask_size;
                     type = RegType::sgpr;
                  } else if (!ctx->divergent_vals[intrinsic->dest.ssa.index]) {
                     type = RegType::sgpr;
                  } else {
                     type = RegType::vgpr;
                  }
                  break;
               case nir_intrinsic_load_view_index:
                  type = ctx->stage == fragment_fs ? RegType::vgpr : RegType::sgpr;
                  break;
               case nir_intrinsic_load_front_face:
               case nir_intrinsic_load_helper_invocation:
               case nir_intrinsic_is_helper_invocation:
                  type = RegType::sgpr;
                  size = lane_mask_size;
                  break;
               case nir_intrinsic_reduce:
                  if (intrinsic->dest.ssa.bit_size == 1) {
                     size = lane_mask_size;
                     type = RegType::sgpr;
                  } else if (!ctx->divergent_vals[intrinsic->dest.ssa.index]) {
                     type = RegType::sgpr;
                  } else {
                     type = RegType::vgpr;
                  }
                  break;
               case nir_intrinsic_load_ubo:
               case nir_intrinsic_load_ssbo:
               case nir_intrinsic_load_global:
               case nir_intrinsic_vulkan_resource_index:
                  type = ctx->divergent_vals[intrinsic->dest.ssa.index] ? RegType::vgpr : RegType::sgpr;
                  break;
               /* due to copy propagation, the swizzled imov is removed if num dest components == 1 */
               case nir_intrinsic_load_shared:
                  if (ctx->divergent_vals[intrinsic->dest.ssa.index])
                     type = RegType::vgpr;
                  else
                     type = RegType::sgpr;
                  break;
               default:
                  for (unsigned i = 0; i < nir_intrinsic_infos[intrinsic->intrinsic].num_srcs; i++) {
                     if (allocated[intrinsic->src[i].ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                  }
                  break;
               }
               allocated[intrinsic->dest.ssa.index] = Temp(0, RegClass(type, size));
               /* gather which SPI_PS_INPUT bits this intrinsic needs */
               switch(intrinsic->intrinsic) {
               case nir_intrinsic_load_barycentric_sample:
               case nir_intrinsic_load_barycentric_pixel:
               case nir_intrinsic_load_barycentric_centroid:
               case nir_intrinsic_load_barycentric_at_sample:
               case nir_intrinsic_load_barycentric_at_offset: {
                  glsl_interp_mode mode = (glsl_interp_mode)nir_intrinsic_interp_mode(intrinsic);
                  spi_ps_inputs |= get_interp_input(intrinsic->intrinsic, mode);
                  break;
               }
               case nir_intrinsic_load_barycentric_model:
                  spi_ps_inputs |= S_0286CC_PERSP_PULL_MODEL_ENA(1);
                  break;
               case nir_intrinsic_load_front_face:
                  spi_ps_inputs |= S_0286CC_FRONT_FACE_ENA(1);
                  break;
               case nir_intrinsic_load_frag_coord:
               case nir_intrinsic_load_sample_pos: {
                  uint8_t mask = nir_ssa_def_components_read(&intrinsic->dest.ssa);
                  for (unsigned i = 0; i < 4; i++) {
                     if (mask & (1 << i))
                        spi_ps_inputs |= S_0286CC_POS_X_FLOAT_ENA(1) << i;
                  }
                  break;
               }
               case nir_intrinsic_load_sample_id:
                  spi_ps_inputs |= S_0286CC_ANCILLARY_ENA(1);
                  break;
               case nir_intrinsic_load_sample_mask_in:
                  spi_ps_inputs |= S_0286CC_ANCILLARY_ENA(1);
                  spi_ps_inputs |= S_0286CC_SAMPLE_COVERAGE_ENA(1);
                  break;
               default:
                  break;
               }
               break;
            }
            case nir_instr_type_tex: {
               nir_tex_instr* tex = nir_instr_as_tex(instr);
               unsigned size = tex->dest.ssa.num_components;

               if (tex->dest.ssa.bit_size == 64)
                  size *= 2;
               if (tex->op == nir_texop_texture_samples)
                  assert(!ctx->divergent_vals[tex->dest.ssa.index]);
               if (ctx->divergent_vals[tex->dest.ssa.index])
                  allocated[tex->dest.ssa.index] = Temp(0, RegClass(RegType::vgpr, size));
               else
                  allocated[tex->dest.ssa.index] = Temp(0, RegClass(RegType::sgpr, size));
               break;
            }
            case nir_instr_type_parallel_copy: {
               nir_foreach_parallel_copy_entry(entry, nir_instr_as_parallel_copy(instr)) {
                  allocated[entry->dest.ssa.index] = allocated[entry->src.ssa->index];
               }
               break;
            }
            case nir_instr_type_ssa_undef: {
               unsigned size = nir_instr_as_ssa_undef(instr)->def.num_components;
               if (nir_instr_as_ssa_undef(instr)->def.bit_size == 64)
                  size *= 2;
               allocated[nir_instr_as_ssa_undef(instr)->def.index] = Temp(0, RegClass(RegType::sgpr, size));
               break;
            }
            case nir_instr_type_phi: {
               nir_phi_instr* phi = nir_instr_as_phi(instr);
               RegType type;
               unsigned size = phi->dest.ssa.num_components;

               if (phi->dest.ssa.bit_size == 1) {
                  assert(size == 1 && "multiple components not yet supported on boolean phis.");
                  type = RegType::sgpr;
                  size *= lane_mask_size;
                  allocated[phi->dest.ssa.index] = Temp(0, RegClass(type, size));
                  break;
               }

               if (ctx->divergent_vals[phi->dest.ssa.index]) {
                  type = RegType::vgpr;
               } else {
                  type = RegType::sgpr;
                  nir_foreach_phi_src (src, phi) {
                     if (allocated[src->src.ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                     if (allocated[src->src.ssa->index].type() == RegType::none)
                        done = false;
                  }
               }

               size *= phi->dest.ssa.bit_size == 64 ? 2 : 1;
               RegClass rc = RegClass(type, size);
               if (rc != allocated[phi->dest.ssa.index].regClass()) {
                  done = false;
               } else {
                  nir_foreach_phi_src(src, phi)
                     assert(allocated[src->src.ssa->index].size() == rc.size());
               }
               allocated[phi->dest.ssa.index] = Temp(0, rc);
               break;
            }
            default:
               break;
            }
         }
      }
   }
   if (G_0286CC_POS_W_FLOAT_ENA(spi_ps_inputs)) {
      /* If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be enabled too */
      spi_ps_inputs |= S_0286CC_PERSP_CENTER_ENA(1);
   }

   if (!(spi_ps_inputs & 0x7F)) {
      /* At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled */
      spi_ps_inputs |= S_0286CC_PERSP_CENTER_ENA(1);
   }
   ctx->program->config->spi_ps_input_ena = spi_ps_inputs;
   ctx->program->config->spi_ps_input_addr = spi_ps_inputs;
   for (unsigned i = 0; i < impl->ssa_alloc; i++)
      allocated[i] = Temp(ctx->program->allocateId(), allocated[i].regClass());

   ctx->allocated.reset(allocated.release());
   ctx->cf_info.nir_to_aco.reset(nir_to_aco.release());
}
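/* Note the two-phase scheme above: the fixpoint walk only decides each
 * value's RegClass (stored in placeholder Temps with id 0); real temporary
 * ids are handed out by allocateId() only once all classes have settled. */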
Pseudo_instruction *add_startpgm(struct isel_context *ctx)
{
   unsigned arg_count = ctx->args->ac.arg_count;
   if (ctx->stage == fragment_fs) {
      /* LLVM optimizes away unused FS inputs and computes spi_ps_input_addr
       * itself and then communicates the results back via the ELF binary.
       * Mirror what LLVM does by re-mapping the VGPR arguments here.
       *
       * TODO: If we made the FS input scanning code into a separate pass that
       * could run before argument setup, then this wouldn't be necessary
       * anymore.
       */
      struct ac_shader_args *args = &ctx->args->ac;
      arg_count = 0;
      for (unsigned i = 0, vgpr_arg = 0, vgpr_reg = 0; i < args->arg_count; i++) {
         if (args->args[i].file != AC_ARG_VGPR) {
            arg_count++;
            continue;
         }

         if (!(ctx->program->config->spi_ps_input_addr & (1 << vgpr_arg))) {
            args->args[i].skip = true;
         } else {
            args->args[i].offset = vgpr_reg;
            vgpr_reg += args->args[i].size;
            arg_count++;
         }
         vgpr_arg++;
      }
   }

   aco_ptr<Pseudo_instruction> startpgm{create_instruction<Pseudo_instruction>(aco_opcode::p_startpgm, Format::PSEUDO, 0, arg_count + 1)};
   for (unsigned i = 0, arg = 0; i < ctx->args->ac.arg_count; i++) {
      if (ctx->args->ac.args[i].skip)
         continue;

      enum ac_arg_regfile file = ctx->args->ac.args[i].file;
      unsigned size = ctx->args->ac.args[i].size;
      unsigned reg = ctx->args->ac.args[i].offset;
      RegClass type = RegClass(file == AC_ARG_SGPR ? RegType::sgpr : RegType::vgpr, size);
      Temp dst = Temp{ctx->program->allocateId(), type};
      ctx->arg_temps[i] = dst;
      startpgm->definitions[arg] = Definition(dst);
      startpgm->definitions[arg].setFixed(PhysReg{file == AC_ARG_SGPR ? reg : reg + 256});
      arg++;
   }
   startpgm->definitions[arg_count] = Definition{ctx->program->allocateId(), exec, ctx->program->lane_mask};
   Pseudo_instruction *instr = startpgm.get();
   ctx->block->instructions.push_back(std::move(startpgm));

   /* Stash these in the program so that they can be accessed later when
    * handling spilling.
    */
   ctx->program->private_segment_buffer = get_arg(ctx, ctx->args->ring_offsets);
   ctx->program->scratch_offset = get_arg(ctx, ctx->args->scratch_offset);

   return instr;
}
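/* PhysReg folds both register files into one numbering: SGPRs start at 0 and
 * VGPRs at 256, which is why VGPR arguments are fixed at reg + 256 above. */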
int
type_size(const struct glsl_type *type, bool bindless)
{
   // TODO: don't we need type->std430_base_alignment() here?
   return glsl_count_attribute_slots(type, false);
}
void
shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   uint32_t comp_size = glsl_type_is_boolean(type)
      ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);
   *size = comp_size * length;
   *align = comp_size;
}
bool
mem_vectorize_callback(unsigned align, unsigned bit_size,
                       unsigned num_components, unsigned high_offset,
                       nir_intrinsic_instr *low, nir_intrinsic_instr *high)
{
   if ((bit_size != 32 && bit_size != 64) || num_components > 4)
      return false;

   /* >128 bit loads are split except with SMEM */
   if (bit_size * num_components > 128)
      return false;

   switch (low->intrinsic) {
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_load_push_constant:
      return align % 4 == 0;
   case nir_intrinsic_load_deref:
   case nir_intrinsic_store_deref:
      assert(nir_src_as_deref(low->src[0])->mode == nir_var_mem_shared);
      /* fallthrough */
   case nir_intrinsic_load_shared:
   case nir_intrinsic_store_shared:
      if (bit_size * num_components > 64) /* 96 and 128 bit loads require 128 bit alignment and are split otherwise */
         return align % 16 == 0;
      else
         return align % 4 == 0;
   default:
      return false;
   }
}
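/* Worked example (illustrative): two adjacent 32-bit SSBO loads at a
 * dword-aligned offset pass every check above (2 * 32 <= 128 bits,
 * align % 4 == 0) and can be merged into a single two-component load, while
 * a candidate pair forming a 3 x 64-bit shared-memory access is rejected
 * because 192 bits exceed the 128-bit limit. */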
void
setup_vs_output_info(isel_context *ctx, nir_shader *nir,
                     bool export_prim_id, bool export_clip_dists,
                     radv_vs_output_info *outinfo)
{
   memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
          sizeof(outinfo->vs_output_param_offset));

   outinfo->param_exports = 0;
   int pos_written = 0x1;
   if (outinfo->writes_pointsize || outinfo->writes_viewport_index || outinfo->writes_layer)
      pos_written |= 1 << 1;

   uint64_t mask = ctx->output_masks[nir->info.stage];
   while (mask) {
      int idx = u_bit_scan64(&mask);
      if (idx >= VARYING_SLOT_VAR0 || idx == VARYING_SLOT_LAYER || idx == VARYING_SLOT_PRIMITIVE_ID ||
          ((idx == VARYING_SLOT_CLIP_DIST0 || idx == VARYING_SLOT_CLIP_DIST1) && export_clip_dists)) {
         if (outinfo->vs_output_param_offset[idx] == AC_EXP_PARAM_UNDEFINED)
            outinfo->vs_output_param_offset[idx] = outinfo->param_exports++;
      }
   }
   if (outinfo->writes_layer &&
       outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] == AC_EXP_PARAM_UNDEFINED) {
      /* when ctx->options->key.has_multiview_view_index = true, the layer
       * variable isn't declared in NIR and it's isel's job to get the layer */
      outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] = outinfo->param_exports++;
   }

   if (export_prim_id) {
      assert(outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] == AC_EXP_PARAM_UNDEFINED);
      outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = outinfo->param_exports++;
   }

   ctx->export_clip_dists = export_clip_dists;
   ctx->num_clip_distances = util_bitcount(outinfo->clip_dist_mask);
   ctx->num_cull_distances = util_bitcount(outinfo->cull_dist_mask);

   assert(ctx->num_clip_distances + ctx->num_cull_distances <= 8);

   if (ctx->num_clip_distances + ctx->num_cull_distances > 0)
      pos_written |= 1 << 2;
   if (ctx->num_clip_distances + ctx->num_cull_distances > 4)
      pos_written |= 1 << 3;

   outinfo->pos_exports = util_bitcount(pos_written);
}
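/* pos_written is a mask of position export vectors: bit 0 the position
 * itself, bit 1 the misc vector (point size / layer / viewport index) and
 * bits 2-3 the first and second clip/cull-distance vectors, so pos_exports
 * is simply the popcount of this mask. */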
void
setup_vs_variables(isel_context *ctx, nir_shader *nir)
{
   nir_foreach_variable(variable, &nir->inputs)
   {
      variable->data.driver_location = variable->data.location * 4;
   }
   nir_foreach_variable(variable, &nir->outputs)
   {
      if (ctx->stage == vertex_geometry_gs)
         variable->data.driver_location = util_bitcount64(ctx->output_masks[nir->info.stage] & ((1ull << variable->data.location) - 1ull)) * 4;
      else if (ctx->stage == vertex_es)
         //TODO: make this more compact
         variable->data.driver_location = shader_io_get_unique_index((gl_varying_slot)variable->data.location) * 4;
      else
         variable->data.driver_location = variable->data.location * 4;
   }

   if (ctx->stage == vertex_vs) {
      radv_vs_output_info *outinfo = &ctx->program->info->vs.outinfo;
      setup_vs_output_info(ctx, nir, outinfo->export_prim_id,
                           ctx->options->key.vs_common_out.export_clip_dists, outinfo);
   } else if (ctx->stage == vertex_geometry_gs || ctx->stage == vertex_es) {
      /* TODO: radv_nir_shader_info_pass() already sets this, but it's larger
       * than it needs to be; in order to set it better we have to improve
       * radv_nir_shader_info_pass(), because gfx9_get_gs_info() uses
       * esgs_itemsize and has to run before compilation.
       */
      /* radv_es_output_info *outinfo = &ctx->program->info->vs.es_info;
      outinfo->esgs_itemsize = util_bitcount64(ctx->output_masks[nir->info.stage]) * 16u; */
   }
}
void
setup_variables(isel_context *ctx, nir_shader *nir)
{
   switch (nir->info.stage) {
   case MESA_SHADER_FRAGMENT: {
      nir_foreach_variable(variable, &nir->outputs)
      {
         int idx = variable->data.location + variable->data.index;
         variable->data.driver_location = idx * 4;
      }
      break;
   }
   case MESA_SHADER_COMPUTE: {
      ctx->program->config->lds_size = (nir->info.cs.shared_size + ctx->program->lds_alloc_granule - 1) /
                                       ctx->program->lds_alloc_granule;
      break;
   }
   case MESA_SHADER_VERTEX: {
      setup_vs_variables(ctx, nir);
      break;
   }
   case MESA_SHADER_GEOMETRY: {
      assert(ctx->stage == vertex_geometry_gs || ctx->stage == geometry_gs);
      if (ctx->stage == vertex_geometry_gs) {
         nir_foreach_variable(variable, &nir->inputs) {
            variable->data.driver_location = util_bitcount64(ctx->input_masks[nir->info.stage] & ((1ull << variable->data.location) - 1ull)) * 4;
         }
      } else {
         //TODO: make this more compact
         nir_foreach_variable(variable, &nir->inputs) {
            variable->data.driver_location = shader_io_get_unique_index((gl_varying_slot)variable->data.location) * 4;
         }
      }
      nir_foreach_variable(variable, &nir->outputs) {
         variable->data.driver_location = variable->data.location * 4;
      }
      if (ctx->stage == vertex_geometry_gs)
         ctx->program->info->gs.es_type = MESA_SHADER_VERTEX; /* tessellation shaders are not yet supported */
      break;
   }
   default:
      unreachable("Unhandled shader stage.");
   }
}
void
get_io_masks(isel_context *ctx, unsigned shader_count, struct nir_shader *const *shaders)
{
   for (unsigned i = 0; i < shader_count; i++) {
      nir_shader *nir = shaders[i];
      if (nir->info.stage == MESA_SHADER_COMPUTE)
         continue;

      uint64_t output_mask = 0;
      nir_foreach_variable(variable, &nir->outputs) {
         const glsl_type *type = variable->type;
         if (nir_is_per_vertex_io(variable, nir->info.stage))
            type = type->fields.array;
         unsigned slots = type->count_attribute_slots(false);
         if (variable->data.compact) {
            unsigned component_count = variable->data.location_frac + type->length;
            slots = (component_count + 3) / 4;
         }
         output_mask |= ((1ull << slots) - 1) << variable->data.location;
      }

      uint64_t input_mask = 0;
      nir_foreach_variable(variable, &nir->inputs) {
         const glsl_type *type = variable->type;
         if (nir_is_per_vertex_io(variable, nir->info.stage))
            type = type->fields.array;
         unsigned slots = type->count_attribute_slots(false);
         if (variable->data.compact) {
            unsigned component_count = variable->data.location_frac + type->length;
            slots = (component_count + 3) / 4;
         }
         input_mask |= ((1ull << slots) - 1) << variable->data.location;
      }

      ctx->output_masks[nir->info.stage] |= output_mask;
      if (i + 1 < shader_count)
         ctx->input_masks[shaders[i + 1]->info.stage] |= output_mask;

      ctx->input_masks[nir->info.stage] |= input_mask;
      if (i)
         ctx->output_masks[shaders[i - 1]->info.stage] |= input_mask;
   }
}
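/* These masks also stitch merged stages together: whatever shader i writes
 * is added to the input mask of shader i+1, and whatever shader i reads is
 * folded back into the output mask of shader i-1. */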
void
setup_nir(isel_context *ctx, nir_shader *nir)
{
   Program *program = ctx->program;

   /* align and copy constant data */
   while (program->constant_data.size() % 4u)
      program->constant_data.push_back(0);
   ctx->constant_data_offset = program->constant_data.size();
   program->constant_data.insert(program->constant_data.end(),
                                 (uint8_t*)nir->constant_data,
                                 (uint8_t*)nir->constant_data + nir->constant_data_size);

   /* the variable setup has to be done before lower_io / CSE */
   setup_variables(ctx, nir);

   /* optimize and lower memory operations */
   bool lower_to_scalar = false;
   bool lower_pack = false;
   if (nir_opt_load_store_vectorize(nir,
                                    (nir_variable_mode)(nir_var_mem_ssbo | nir_var_mem_ubo |
                                                        nir_var_mem_push_const | nir_var_mem_shared),
                                    mem_vectorize_callback)) {
      lower_to_scalar = true;
      lower_pack = true;
   }
   if (nir->info.stage != MESA_SHADER_COMPUTE)
      nir_lower_io(nir, (nir_variable_mode)(nir_var_shader_in | nir_var_shader_out), type_size, (nir_lower_io_options)0);
   nir_lower_explicit_io(nir, nir_var_mem_global, nir_address_format_64bit_global);

   if (lower_to_scalar)
      nir_lower_alu_to_scalar(nir, NULL, NULL);
   if (lower_pack)
      nir_lower_pack(nir);

   /* lower ALU operations */
   // TODO: implement logic64 in aco, it's more effective for sgprs
   nir_lower_int64(nir, nir->options->lower_int64_options);

   nir_opt_idiv_const(nir, 32);
   nir_lower_idiv(nir, nir_lower_idiv_precise);

   /* optimize the lowered ALU operations */
   bool more_algebraic = true;
   while (more_algebraic) {
      more_algebraic = false;
      NIR_PASS_V(nir, nir_copy_prop);
      NIR_PASS_V(nir, nir_opt_dce);
      NIR_PASS_V(nir, nir_opt_constant_folding);
      NIR_PASS(more_algebraic, nir, nir_opt_algebraic);
   }

   /* Do late algebraic optimization to turn add(a, neg(b)) back into
    * subs, then the mandatory cleanup after algebraic. Note that it may
    * produce fnegs, and if so then we need to keep running to squash
    * fneg(fneg(a)).
    */
   bool more_late_algebraic = true;
   while (more_late_algebraic) {
      more_late_algebraic = false;
      NIR_PASS(more_late_algebraic, nir, nir_opt_algebraic_late);
      NIR_PASS_V(nir, nir_opt_constant_folding);
      NIR_PASS_V(nir, nir_copy_prop);
      NIR_PASS_V(nir, nir_opt_dce);
      NIR_PASS_V(nir, nir_opt_cse);
   }

   nir_lower_load_const_to_scalar(nir);
   nir_opt_shrink_load(nir);
   nir_move_options move_opts = (nir_move_options)(
      nir_move_const_undef | nir_move_load_ubo | nir_move_load_input |
      nir_move_comparisons | nir_move_copies);
   nir_opt_sink(nir, move_opts);
   nir_opt_move(nir, move_opts);
   nir_convert_to_lcssa(nir, true, false);
   nir_lower_phis_to_scalar(nir);

   nir_function_impl *func = nir_shader_get_entrypoint(nir);
   nir_index_ssa_defs(func);
   nir_metadata_require(func, nir_metadata_block_index);
}
isel_context
setup_isel_context(Program* program,
                   unsigned shader_count,
                   struct nir_shader *const *shaders,
                   ac_shader_config* config,
                   struct radv_shader_args *args,
                   bool is_gs_copy_shader)
{
   program->stage = 0;
   for (unsigned i = 0; i < shader_count; i++) {
      switch (shaders[i]->info.stage) {
      case MESA_SHADER_VERTEX:
         program->stage |= sw_vs;
         break;
      case MESA_SHADER_TESS_CTRL:
         program->stage |= sw_tcs;
         break;
      case MESA_SHADER_TESS_EVAL:
         program->stage |= sw_tes;
         break;
      case MESA_SHADER_GEOMETRY:
         program->stage |= is_gs_copy_shader ? sw_gs_copy : sw_gs;
         break;
      case MESA_SHADER_FRAGMENT:
         program->stage |= sw_fs;
         break;
      case MESA_SHADER_COMPUTE:
         program->stage |= sw_cs;
         break;
      default:
         unreachable("Shader stage not implemented");
      }
   }
   bool gfx9_plus = args->options->chip_class >= GFX9;
   bool ngg = args->shader_info->is_ngg && args->options->chip_class >= GFX10;
   if (program->stage == sw_vs && args->shader_info->vs.as_es)
      program->stage |= hw_es;
   else if (program->stage == sw_vs && !args->shader_info->vs.as_ls)
      program->stage |= hw_vs;
   else if (program->stage == sw_gs)
      program->stage |= hw_gs;
   else if (program->stage == sw_fs)
      program->stage |= hw_fs;
   else if (program->stage == sw_cs)
      program->stage |= hw_cs;
   else if (program->stage == sw_gs_copy)
      program->stage |= hw_vs;
   else if (program->stage == (sw_vs | sw_gs) && gfx9_plus && !ngg)
      program->stage |= hw_gs;
   else
      unreachable("Shader stage not implemented");
   program->config = config;
   program->info = args->shader_info;
   program->chip_class = args->options->chip_class;
   program->family = args->options->family;
   program->wave_size = args->shader_info->wave_size;
   program->lane_mask = program->wave_size == 32 ? s1 : s2;

   program->lds_alloc_granule = args->options->chip_class >= GFX7 ? 512 : 256;
   program->lds_limit = args->options->chip_class >= GFX7 ? 65536 : 32768;
   program->vgpr_limit = 256;
   program->vgpr_alloc_granule = 3;

   if (args->options->chip_class >= GFX10) {
      program->physical_sgprs = 2560; /* doesn't matter as long as it's at least 128 * 20 */
      program->sgpr_alloc_granule = 127;
      program->sgpr_limit = 106;
      program->vgpr_alloc_granule = program->wave_size == 32 ? 7 : 3;
   } else if (program->chip_class >= GFX8) {
      program->physical_sgprs = 800;
      program->sgpr_alloc_granule = 15;
      if (args->options->family == CHIP_TONGA || args->options->family == CHIP_ICELAND)
         program->sgpr_limit = 94; /* workaround hardware bug */
      else
         program->sgpr_limit = 102;
   } else {
      program->physical_sgprs = 512;
      program->sgpr_alloc_granule = 7;
      program->sgpr_limit = 104;
   }

   calc_min_waves(program);
   program->vgpr_limit = get_addr_vgpr_from_waves(program, program->min_waves);
   program->sgpr_limit = get_addr_sgpr_from_waves(program, program->min_waves);
   isel_context ctx = {};
   ctx.program = program;
   ctx.args = args;
   ctx.options = args->options;
   ctx.stage = program->stage;

   get_io_masks(&ctx, shader_count, shaders);

   unsigned scratch_size = 0;
   if (program->stage == gs_copy_vs) {
      assert(shader_count == 1);
      setup_vs_output_info(&ctx, shaders[0], false, true, &args->shader_info->vs.outinfo);
   } else {
      for (unsigned i = 0; i < shader_count; i++) {
         nir_shader *nir = shaders[i];
         setup_nir(&ctx, nir);

         if (args->options->dump_preoptir) {
            fprintf(stderr, "NIR shader before instruction selection:\n");
            nir_print_shader(nir, stderr);
         }
      }

      for (unsigned i = 0; i < shader_count; i++)
         scratch_size = std::max(scratch_size, shaders[i]->scratch_size);
   }

   ctx.program->config->scratch_bytes_per_wave = align(scratch_size * ctx.program->wave_size, 1024);

   ctx.block = ctx.program->create_and_insert_block();
   ctx.block->loop_nest_depth = 0;
   ctx.block->kind = block_kind_top_level;

   return ctx;
}
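/* Rough usage sketch (assuming the surrounding ACO frontend): the compiler
 * calls setup_isel_context() once for the pipeline, then for each shader runs
 * init_context() followed by add_startpgm() before visiting the NIR
 * instructions for actual instruction selection. */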