2 * Copyright © 2015 Thomas Helland
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 #include "nir_constant_expressions.h"
26 #include "nir_loop_analyze.h"
/* Classification of an ssa_def with respect to the loop being analyzed.
 * (Reconstructed: the enum opening and enumerators were dropped by the
 * extraction; order matters only in that values are compared for equality.)
 */
typedef enum {
   undefined,
   invariant,
   not_invariant,
   basic_induction
} nir_loop_variable_type;
35 typedef struct nir_basic_induction_var
{
36 nir_alu_instr
*alu
; /* The def of the alu-operation */
37 nir_ssa_def
*def_outside_loop
; /* The phi-src outside the loop */
38 } nir_basic_induction_var
;
41 /* A link for the work list */
42 struct list_head process_link
;
46 /* The ssa_def associated with this info */
49 /* The type of this ssa_def */
50 nir_loop_variable_type type
;
52 /* If this is of type basic_induction */
53 struct nir_basic_induction_var
*ind
;
55 /* True if variable is in an if branch */
58 /* True if variable is in a nested loop */
64 /* The loop we store information for */
67 /* Loop_variable for all ssa_defs in function */
68 nir_loop_variable
*loop_vars
;
70 /* A list of the loop_vars to analyze */
71 struct list_head process_list
;
73 nir_variable_mode indirect_mask
;
77 static nir_loop_variable
*
78 get_loop_var(nir_ssa_def
*value
, loop_info_state
*state
)
80 return &(state
->loop_vars
[value
->index
]);
84 loop_info_state
*state
;
/* Per-def callback: record whether the def sits in an if branch or nested
 * loop; only unconditioned top-level defs go on the work list.
 * (Reconstructed: braces, the else branch, in_loop marking and the return
 * were dropped by the extraction.)
 */
static bool
init_loop_def(nir_ssa_def *def, void *void_init_loop_state)
{
   init_loop_state *loop_init_state = void_init_loop_state;
   nir_loop_variable *var = get_loop_var(def, loop_init_state->state);

   if (loop_init_state->in_nested_loop) {
      var->in_nested_loop = true;
   } else if (loop_init_state->in_if_branch) {
      var->in_if_branch = true;
   } else {
      /* Add to the tail of the list. That way we start at the beginning of
       * the defs in the loop instead of the end when walking the list. This
       * means less recursive calls. Only add defs that are not in nested
       * loops or conditional blocks.
       */
      list_addtail(&var->process_link, &loop_init_state->state->process_list);
   }

   var->in_loop = true;

   return true;
}
/** Calculate an estimated cost in number of instructions
 *
 * We do this so that we don't unroll loops which will later get massively
 * inflated due to int64 or fp64 lowering. The estimates provided here don't
 * have to be massively accurate; they just have to be good enough that loop
 * unrolling doesn't cause things to blow up too much.
 *
 * (Reconstructed: return type, return statements and the fp64/int64 cost
 * branches were partially dropped by the extraction.)
 */
static unsigned
instr_cost(nir_instr *instr, const nir_shader_compiler_options *options)
{
   if (instr->type == nir_instr_type_intrinsic ||
       instr->type == nir_instr_type_tex)
      return 1;

   if (instr->type != nir_instr_type_alu)
      return 0;

   nir_alu_instr *alu = nir_instr_as_alu(instr);
   const nir_op_info *info = &nir_op_infos[alu->op];

   /* Assume everything 16 or 32-bit is cheap.
    *
    * There are no 64-bit ops that don't have a 64-bit thing as their
    * destination or first source.
    */
   if (nir_dest_bit_size(alu->dest.dest) < 64 &&
       nir_src_bit_size(alu->src[0].src) < 64)
      return 1;

   bool is_fp64 = nir_dest_bit_size(alu->dest.dest) == 64 &&
      nir_alu_type_get_base_type(info->output_type) == nir_type_float;
   for (unsigned i = 0; i < info->num_inputs; i++) {
      if (nir_src_bit_size(alu->src[i].src) == 64 &&
          nir_alu_type_get_base_type(info->input_types[i]) == nir_type_float)
         is_fp64 = true;
   }

   if (is_fp64) {
      /* If it's something lowered normally, it's expensive. */
      unsigned cost = 1;
      if (options->lower_doubles_options &
          nir_lower_doubles_op_to_options_mask(alu->op))
         cost *= 20;

      /* If it's full software, it's even more expensive */
      if (options->lower_doubles_options & nir_lower_fp64_full_software)
         cost *= 100;

      return cost;
   } else {
      if (options->lower_int64_options &
          nir_lower_int64_op_to_options_mask(alu->op)) {
         /* These require a doing the division algorithm. */
         if (alu->op == nir_op_idiv || alu->op == nir_op_udiv ||
             alu->op == nir_op_imod || alu->op == nir_op_umod ||
             alu->op == nir_op_irem)
            return 100;

         /* Other int64 lowering isn't usually all that expensive */
         return 5;
      }

      return 1;
   }
}
/* Initialize all defs of a block: accumulate the block's instruction cost
 * into the loop info and register each def via init_loop_def.
 * (Reconstructed: return type, braces and the trailing return were dropped
 * by the extraction.)
 */
static bool
init_loop_block(nir_block *block, loop_info_state *state,
                bool in_if_branch, bool in_nested_loop,
                const nir_shader_compiler_options *options)
{
   init_loop_state init_state = {.in_if_branch = in_if_branch,
                                 .in_nested_loop = in_nested_loop,
                                 .state = state };

   nir_foreach_instr(instr, block) {
      state->loop->info->instr_cost += instr_cost(instr, options);
      nir_foreach_ssa_def(instr, init_loop_def, &init_state);
   }

   return true;
}
/* True if the variable's def comes from an ALU instruction. */
static inline bool
is_var_alu(nir_loop_variable *var)
{
   return var->def->parent_instr->type == nir_instr_type_alu;
}
/* True if the variable's def comes from a phi instruction. */
static inline bool
is_var_phi(nir_loop_variable *var)
{
   return var->def->parent_instr->type == nir_instr_type_phi;
}
/* Recursively decide whether a def is loop-invariant, memoizing the result
 * in the variable's type field. Returns true when invariant.
 * (Reconstructed: braces, early returns and the !in_loop base case were
 * dropped by the extraction.)
 */
static bool
mark_invariant(nir_ssa_def *def, loop_info_state *state)
{
   nir_loop_variable *var = get_loop_var(def, state);

   if (var->type == invariant)
      return true;

   if (!var->in_loop) {
      var->type = invariant;
      return true;
   }

   if (var->type == not_invariant)
      return false;

   if (is_var_alu(var)) {
      nir_alu_instr *alu = nir_instr_as_alu(def->parent_instr);

      /* An ALU is invariant exactly when all of its sources are. */
      for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
         if (!mark_invariant(alu->src[i].src.ssa, state)) {
            var->type = not_invariant;
            return false;
         }
      }
      var->type = invariant;
      return true;
   }

   /* Phis shouldn't be invariant except if one operand is invariant, and the
    * other is the phi itself. These should be removed by opt_remove_phis.
    * load_consts are already set to invariant and constant during init,
    * and so should return earlier. Remaining op_codes are set undefined.
    */
   var->type = not_invariant;
   return false;
}
/* Walk the work list marking invariants; invariant entries are removed so
 * the later induction pass only sees candidates.
 */
static void
compute_invariance_information(loop_info_state *state)
{
   /* An expression is invariant in a loop L if:
    *  (base cases)
    *   - it's a constant
    *   - it's a variable use, all of whose single defs are outside of L
    *  (inductive cases)
    *   - it's a pure computation all of whose args are loop invariant
    *   - it's a variable use whose single reaching def, and the
    *     rhs of that def is loop-invariant
    */
   list_for_each_entry_safe(nir_loop_variable, var, &state->process_list,
                            process_link) {
      assert(!var->in_if_branch && !var->in_nested_loop);

      if (mark_invariant(var->def, state))
         list_del(&var->process_link);
   }
}
/* If all of the instruction sources point to identical ALU instructions (as
 * per nir_instrs_equal), return one of the ALU instructions.  Otherwise,
 * return NULL.
 * (Reconstructed: braces, NULL returns and the first/compare branch were
 * dropped by the extraction.)
 */
static nir_alu_instr *
phi_instr_as_alu(nir_phi_instr *phi)
{
   nir_alu_instr *first = NULL;
   nir_foreach_phi_src(src, phi) {
      assert(src->src.is_ssa);
      if (src->src.ssa->parent_instr->type != nir_instr_type_alu)
         return NULL;

      nir_alu_instr *alu = nir_instr_as_alu(src->src.ssa->parent_instr);
      if (first == NULL) {
         first = alu;
      } else {
         if (!nir_instrs_equal(&first->instr, &alu->instr))
            return NULL;
      }
   }

   return first;
}
/* True when the given ALU source reads its components in order (swizzle
 * i -> i) for every destination component, i.e. no real swizzling.
 */
static bool
alu_src_has_identity_swizzle(nir_alu_instr *alu, unsigned src_idx)
{
   assert(nir_op_infos[alu->op].input_sizes[src_idx] == 0);
   assert(alu->dest.dest.is_ssa);
   for (unsigned i = 0; i < alu->dest.dest.ssa.num_components; i++) {
      if (alu->src[src_idx].swizzle[i] != i)
         return false;
   }

   return true;
}
/* Detect basic induction variables: a phi with one source defined outside
 * the loop (the initial value, required to be a load_const) and one source
 * that is a two-source ALU combining the phi with a constant step.
 * (Reconstructed: braces, break statements, the biv->alu assignment and the
 * failure/ralloc_free path were dropped by the extraction.)
 */
static bool
compute_induction_information(loop_info_state *state)
{
   bool found_induction_var = false;
   list_for_each_entry_safe(nir_loop_variable, var, &state->process_list,
                            process_link) {

      /* It can't be an induction variable if it is invariant. Invariants and
       * things in nested loops or conditionals should have been removed from
       * the list by compute_invariance_information().
       */
      assert(!var->in_if_branch && !var->in_nested_loop &&
             var->type != invariant);

      /* We are only interested in checking phis for the basic induction
       * variable case as its simple to detect. All basic induction variables
       * we are interested in will involve a phi.
       */
      if (!is_var_phi(var))
         continue;

      nir_phi_instr *phi = nir_instr_as_phi(var->def->parent_instr);
      nir_basic_induction_var *biv = rzalloc(state, nir_basic_induction_var);

      nir_loop_variable *alu_src_var = NULL;
      nir_foreach_phi_src(src, phi) {
         nir_loop_variable *src_var = get_loop_var(src->src.ssa, state);

         /* If one of the sources is in an if branch or nested loop then don't
          * attempt to go any further.
          */
         if (src_var->in_if_branch || src_var->in_nested_loop)
            break;

         /* Detect inductions variables that are incremented in both branches
          * of an unnested if rather than in a loop block.
          */
         if (is_var_phi(src_var)) {
            nir_phi_instr *src_phi =
               nir_instr_as_phi(src_var->def->parent_instr);
            nir_alu_instr *src_phi_alu = phi_instr_as_alu(src_phi);
            if (src_phi_alu) {
               src_var = get_loop_var(&src_phi_alu->dest.dest.ssa, state);
               if (!src_var->in_if_branch)
                  break;
            }
         }

         if (!src_var->in_loop && !biv->def_outside_loop) {
            biv->def_outside_loop = src_var->def;
         } else if (is_var_alu(src_var) && !biv->alu) {
            alu_src_var = src_var;
            nir_alu_instr *alu = nir_instr_as_alu(src_var->def->parent_instr);

            if (nir_op_infos[alu->op].num_inputs == 2) {
               for (unsigned i = 0; i < 2; i++) {
                  /* Is one of the operands const, and the other the phi. The
                   * phi source can't be swizzled in any way.
                   */
                  if (nir_src_is_const(alu->src[i].src) &&
                      alu->src[1-i].src.ssa == &phi->dest.ssa &&
                      alu_src_has_identity_swizzle(alu, 1 - i))
                     biv->alu = alu;
               }
            }

            if (!biv->alu)
               break;
         } else {
            biv->alu = NULL;
            break;
         }
      }

      if (biv->alu && biv->def_outside_loop &&
          biv->def_outside_loop->parent_instr->type == nir_instr_type_load_const) {
         alu_src_var->type = basic_induction;
         alu_src_var->ind = biv;
         var->type = basic_induction;
         var->ind = biv;

         found_induction_var = true;
      } else {
         ralloc_free(biv);
      }
   }
   return found_induction_var;
}
/* Per-def callback run over the whole function before loop analysis: mark
 * everything as outside the loop, and classify load_consts as invariant.
 * (Reconstructed: braces, var->def assignment, else branch and return were
 * dropped by the extraction.)
 */
static bool
initialize_ssa_def(nir_ssa_def *def, void *void_state)
{
   loop_info_state *state = void_state;
   nir_loop_variable *var = get_loop_var(def, state);

   var->in_loop = false;
   var->def = def;

   if (def->parent_instr->type == nir_instr_type_load_const) {
      var->type = invariant;
   } else {
      var->type = undefined;
   }

   return true;
}
/* Scan the loop body's top-level ifs for "trivial" terminators (one side
 * ends in break). Returns false (and flags the loop complex) on anything we
 * can't analyze. (Reconstructed: braces, the !break_blk continue, the
 * failure returns and success flag were dropped by the extraction.)
 */
static bool
find_loop_terminators(loop_info_state *state)
{
   bool success = false;
   foreach_list_typed_safe(nir_cf_node, node, node, &state->loop->body) {
      if (node->type == nir_cf_node_if) {
         nir_if *nif = nir_cf_node_as_if(node);

         nir_block *break_blk = NULL;
         nir_block *continue_from_blk = NULL;
         bool continue_from_then = true;

         nir_block *last_then = nir_if_last_then_block(nif);
         nir_block *last_else = nir_if_last_else_block(nif);
         if (nir_block_ends_in_break(last_then)) {
            break_blk = last_then;
            continue_from_blk = last_else;
            continue_from_then = false;
         } else if (nir_block_ends_in_break(last_else)) {
            break_blk = last_else;
            continue_from_blk = last_then;
         }

         /* If there is a break then we should find a terminator. If we can
          * not find a loop terminator, but there is a break-statement then
          * we should return false so that we do not try to find trip-count
          */
         if (!nir_is_trivial_loop_if(nif, break_blk)) {
            state->loop->info->complex_loop = true;
            return false;
         }

         /* Continue if the if contained no jumps at all */
         if (!break_blk)
            continue;

         if (nif->condition.ssa->parent_instr->type == nir_instr_type_phi) {
            state->loop->info->complex_loop = true;
            return false;
         }

         nir_loop_terminator *terminator =
            rzalloc(state->loop->info, nir_loop_terminator);

         list_addtail(&terminator->loop_terminator_link,
                      &state->loop->info->loop_terminator_list);

         terminator->nif = nif;
         terminator->break_block = break_blk;
         terminator->continue_from_block = continue_from_blk;
         terminator->continue_from_then = continue_from_then;
         terminator->conditional_instr = nif->condition.ssa->parent_instr;

         success = true;
      }
   }

   return success;
}
/* This function looks for an array access within a loop that uses an
 * induction variable for the array index. If found it returns the size of the
 * array, otherwise 0 is returned. If we find an induction var we pass it back
 * to the caller via array_index_out.
 * (Reconstructed: braces, continue statements, the array_index_out NULL
 * check and the trailing return 0 were dropped by the extraction.)
 */
static unsigned
find_array_access_via_induction(loop_info_state *state,
                                nir_deref_instr *deref,
                                nir_loop_variable **array_index_out)
{
   for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
      if (d->deref_type != nir_deref_type_array)
         continue;

      assert(d->arr.index.is_ssa);
      nir_loop_variable *array_index = get_loop_var(d->arr.index.ssa, state);

      if (array_index->type != basic_induction)
         continue;

      if (array_index_out)
         *array_index_out = array_index;

      nir_deref_instr *parent = nir_deref_instr_parent(d);
      if (glsl_type_is_array_or_matrix(parent->type)) {
         return glsl_get_length(parent->type);
      } else {
         assert(glsl_type_is_vector(parent->type));
         return glsl_get_vector_elements(parent->type);
      }
   }

   return 0;
}
/* Guess a loop limit from the smallest array indexed by the induction
 * variable inside the loop; writes it to limit_val and returns true on
 * success. (Reconstructed: braces, continues, the second array_size
 * assignment and the returns were dropped by the extraction.)
 */
static bool
guess_loop_limit(loop_info_state *state, nir_const_value *limit_val,
                 nir_ssa_scalar basic_ind)
{
   unsigned min_array_size = 0;

   nir_foreach_block_in_cf_node(block, &state->loop->cf_node) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

         /* Check for arrays variably-indexed by a loop induction variable. */
         if (intrin->intrinsic == nir_intrinsic_load_deref ||
             intrin->intrinsic == nir_intrinsic_store_deref ||
             intrin->intrinsic == nir_intrinsic_copy_deref) {

            nir_loop_variable *array_idx = NULL;
            unsigned array_size =
               find_array_access_via_induction(state,
                                               nir_src_as_deref(intrin->src[0]),
                                               &array_idx);
            if (array_idx && basic_ind.def == array_idx->def &&
                (min_array_size == 0 || min_array_size > array_size)) {
               /* Array indices are scalars */
               assert(basic_ind.def->num_components == 1);
               min_array_size = array_size;
            }

            if (intrin->intrinsic != nir_intrinsic_copy_deref)
               continue;

            array_size =
               find_array_access_via_induction(state,
                                               nir_src_as_deref(intrin->src[1]),
                                               &array_idx);
            if (array_idx && basic_ind.def == array_idx->def &&
                (min_array_size == 0 || min_array_size > array_size)) {
               /* Array indices are scalars */
               assert(basic_ind.def->num_components == 1);
               min_array_size = array_size;
            }
         }
      }
   }

   if (min_array_size) {
      *limit_val = nir_const_value_for_uint(min_array_size,
                                            basic_ind.def->bit_size);
      return true;
   }

   return false;
}
/* If the (non-constant) limit is an imin/fmin with a constant operand, use
 * that constant as a conservative limit; exact trip count becomes unknown.
 * (Reconstructed: braces and the returns were dropped by the extraction.)
 */
static bool
try_find_limit_of_alu(nir_ssa_scalar limit, nir_const_value *limit_val,
                      nir_loop_terminator *terminator, loop_info_state *state)
{
   if (!nir_ssa_scalar_is_alu(limit))
      return false;

   nir_op limit_op = nir_ssa_scalar_alu_op(limit);
   if (limit_op == nir_op_imin || limit_op == nir_op_fmin) {
      for (unsigned i = 0; i < 2; i++) {
         nir_ssa_scalar src = nir_ssa_scalar_chase_alu_src(limit, i);
         if (nir_ssa_scalar_is_const(src)) {
            *limit_val = nir_ssa_scalar_as_const_value(src);
            terminator->exact_trip_count_unknown = true;
            return true;
         }
      }
   }

   return false;
}
/* Constant-fold a single-source op on one scalar value.
 * (Reconstructed: the return dest and closing brace were dropped.)
 */
static nir_const_value
eval_const_unop(nir_op op, unsigned bit_size, nir_const_value src0,
                unsigned execution_mode)
{
   assert(nir_op_infos[op].num_inputs == 1);
   nir_const_value dest;
   nir_const_value *src[1] = { &src0 };
   nir_eval_const_opcode(op, &dest, 1, bit_size, src, execution_mode);
   return dest;
}
/* Constant-fold a two-source op on two scalar values.
 * (Reconstructed: the return dest and closing brace were dropped.)
 */
static nir_const_value
eval_const_binop(nir_op op, unsigned bit_size,
                 nir_const_value src0, nir_const_value src1,
                 unsigned execution_mode)
{
   assert(nir_op_infos[op].num_inputs == 2);
   nir_const_value dest;
   nir_const_value *src[2] = { &src0, &src1 };
   nir_eval_const_opcode(op, &dest, 1, bit_size, src, execution_mode);
   return dest;
}
/* Compute (limit - initial) / step with the arithmetic family implied by the
 * comparison op; returns -1 for unsupported ops or counts above INT_MAX.
 * (Reconstructed: the return type line, switch case labels, break
 * statements and the default case were dropped by the extraction.)
 */
static int
get_iteration(nir_op cond_op, nir_const_value initial, nir_const_value step,
              nir_const_value limit, unsigned bit_size,
              unsigned execution_mode)
{
   nir_const_value span, iter;

   switch (cond_op) {
   case nir_op_ige:
   case nir_op_ilt:
   case nir_op_ieq:
   case nir_op_ine:
      span = eval_const_binop(nir_op_isub, bit_size, limit, initial,
                              execution_mode);
      iter = eval_const_binop(nir_op_idiv, bit_size, span, step,
                              execution_mode);
      break;

   case nir_op_uge:
   case nir_op_ult:
      span = eval_const_binop(nir_op_isub, bit_size, limit, initial,
                              execution_mode);
      iter = eval_const_binop(nir_op_udiv, bit_size, span, step,
                              execution_mode);
      break;

   case nir_op_fge:
   case nir_op_flt:
   case nir_op_feq:
   case nir_op_fne:
      span = eval_const_binop(nir_op_fsub, bit_size, limit, initial,
                              execution_mode);
      iter = eval_const_binop(nir_op_fdiv, bit_size, span,
                              step, execution_mode);
      iter = eval_const_unop(nir_op_f2i64, bit_size, iter, execution_mode);
      break;

   default:
      return -1;
   }

   uint64_t iter_u64 = nir_const_value_as_uint(iter, bit_size);
   return iter_u64 > INT_MAX ? -1 : (int)iter_u64;
}
/* Evaluate the exit condition at the first iteration (stepping the initial
 * value once for do-while style loops via trip_offset) and report whether
 * the loop would break immediately.
 * (Reconstructed: braces, the add_op declaration, switch case labels and
 * break statements were dropped by the extraction.)
 */
static bool
will_break_on_first_iteration(nir_const_value step,
                              nir_alu_type induction_base_type,
                              unsigned trip_offset,
                              nir_op cond_op, unsigned bit_size,
                              nir_const_value initial,
                              nir_const_value limit,
                              bool limit_rhs, bool invert_cond,
                              unsigned execution_mode)
{
   if (trip_offset == 1) {
      nir_op add_op;
      switch (induction_base_type) {
      case nir_type_float:
         add_op = nir_op_fadd;
         break;
      case nir_type_int:
      case nir_type_uint:
         add_op = nir_op_iadd;
         break;
      default:
         unreachable("Unhandled induction variable base type!");
      }

      initial = eval_const_binop(add_op, bit_size, initial, step,
                                 execution_mode);
   }

   nir_const_value *src[2];
   src[limit_rhs ? 0 : 1] = &initial;
   src[limit_rhs ? 1 : 0] = &limit;

   /* Evaluate the loop exit condition */
   nir_const_value result;
   nir_eval_const_opcode(cond_op, &result, 1, bit_size, src, execution_mode);

   return invert_cond ? !result.b : result.b;
}
/* Check whether a candidate iteration count satisfies the exit condition:
 * compute initial + iter * step and evaluate the comparison against limit.
 * (Reconstructed: braces, mul_op/add_op declarations, switch case labels
 * and break statements were dropped by the extraction.)
 */
static bool
test_iterations(int32_t iter_int, nir_const_value step,
                nir_const_value limit, nir_op cond_op, unsigned bit_size,
                nir_alu_type induction_base_type,
                nir_const_value initial, bool limit_rhs, bool invert_cond,
                unsigned execution_mode)
{
   assert(nir_op_infos[cond_op].num_inputs == 2);

   nir_const_value iter_src;
   nir_op mul_op;
   nir_op add_op;
   switch (induction_base_type) {
   case nir_type_float:
      iter_src = nir_const_value_for_float(iter_int, bit_size);
      mul_op = nir_op_fmul;
      add_op = nir_op_fadd;
      break;
   case nir_type_int:
   case nir_type_uint:
      iter_src = nir_const_value_for_int(iter_int, bit_size);
      mul_op = nir_op_imul;
      add_op = nir_op_iadd;
      break;
   default:
      unreachable("Unhandled induction variable base type!");
   }

   /* Multiple the iteration count we are testing by the number of times we
    * step the induction variable each iteration.
    */
   nir_const_value mul_result =
      eval_const_binop(mul_op, bit_size, iter_src, step, execution_mode);

   /* Add the initial value to the accumulated induction variable total */
   nir_const_value add_result =
      eval_const_binop(add_op, bit_size, mul_result, initial, execution_mode);

   nir_const_value *src[2];
   src[limit_rhs ? 0 : 1] = &add_result;
   src[limit_rhs ? 1 : 0] = &limit;

   /* Evaluate the loop exit condition */
   nir_const_value result;
   nir_eval_const_opcode(cond_op, &result, 1, bit_size, src, execution_mode);

   return invert_cond ? !result.b : result.b;
}
/* Derive the loop's trip count from the constant initial value, step and
 * limit; returns -1 when the count cannot be determined.
 * (Reconstructed: braces, returns, the trip_offset assignment and the
 * else-branch of the type assertions were dropped by the extraction.)
 */
static int
calculate_iterations(nir_const_value initial, nir_const_value step,
                     nir_const_value limit, nir_alu_instr *alu,
                     nir_ssa_scalar cond, nir_op alu_op, bool limit_rhs,
                     bool invert_cond, unsigned execution_mode)
{
   /* nir_op_isub should have been lowered away by this point */
   assert(alu->op != nir_op_isub);

   /* Make sure the alu type for our induction variable is compatible with the
    * conditional alus input type. If its not something has gone really wrong.
    */
   nir_alu_type induction_base_type =
      nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type);
   if (induction_base_type == nir_type_int || induction_base_type == nir_type_uint) {
      assert(nir_alu_type_get_base_type(nir_op_infos[alu_op].input_types[1]) == nir_type_int ||
             nir_alu_type_get_base_type(nir_op_infos[alu_op].input_types[1]) == nir_type_uint);
   } else {
      assert(nir_alu_type_get_base_type(nir_op_infos[alu_op].input_types[0]) ==
             induction_base_type);
   }

   /* Check for nsupported alu operations */
   if (alu->op != nir_op_iadd && alu->op != nir_op_fadd)
      return -1;

   /* do-while loops can increment the starting value before the condition is
    * checked. e.g.
    *
    *    do {
    *        ndx++;
    *     } while (ndx < 3);
    *
    * Here we check if the induction variable is used directly by the loop
    * condition and if so we assume we need to step the initial value.
    */
   unsigned trip_offset = 0;
   nir_alu_instr *cond_alu = nir_instr_as_alu(cond.def->parent_instr);
   if (cond_alu->src[0].src.ssa == &alu->dest.dest.ssa ||
       cond_alu->src[1].src.ssa == &alu->dest.dest.ssa) {
      trip_offset = 1;
   }

   assert(nir_src_bit_size(alu->src[0].src) ==
          nir_src_bit_size(alu->src[1].src));
   unsigned bit_size = nir_src_bit_size(alu->src[0].src);

   /* get_iteration works under assumption that iterator will be
    * incremented or decremented until it hits the limit,
    * however if the loop condition is false on the first iteration
    * get_iteration's assumption is broken. Handle such loops first.
    */
   if (will_break_on_first_iteration(step, induction_base_type, trip_offset,
                                     alu_op, bit_size, initial,
                                     limit, limit_rhs, invert_cond,
                                     execution_mode)) {
      return 0;
   }

   int iter_int = get_iteration(alu_op, initial, step, limit, bit_size,
                                execution_mode);

   /* If iter_int is negative the loop is ill-formed or is the conditional is
    * unsigned with a huge iteration count so don't bother going any further.
    */
   if (iter_int < 0)
      return -1;

   /* An explanation from the GLSL unrolling pass:
    *
    * Make sure that the calculated number of iterations satisfies the exit
    * condition. This is needed to catch off-by-one errors and some types of
    * ill-formed loops. For example, we need to detect that the following
    * loop does not have a maximum iteration count.
    *
    *    for (float x = 0.0; x != 0.9; x += 0.2);
    */
   for (int bias = -1; bias <= 1; bias++) {
      const int iter_bias = iter_int + bias;

      if (test_iterations(iter_bias, step, limit, alu_op, bit_size,
                          induction_base_type, initial,
                          limit_rhs, invert_cond, execution_mode)) {
         return iter_bias > 0 ? iter_bias - trip_offset : iter_bias;
      }
   }

   return -1;
}
832 inverse_comparison(nir_op alu_op
)
856 unreachable("Unsuported comparison!");
/* A terminator condition we can analyze is a two-source ALU comparison. */
static bool
is_supported_terminator_condition(nir_ssa_scalar cond)
{
   if (!nir_ssa_scalar_is_alu(cond))
      return false;

   nir_alu_instr *alu = nir_instr_as_alu(cond.def->parent_instr);
   return nir_alu_instr_is_comparison(alu) &&
          nir_op_infos[alu->op].num_inputs == 2;
}
/* Split the comparison's operands into the induction variable and the limit;
 * limit_rhs records which side the limit sits on. Returns false when neither
 * side is a basic induction variable.
 * (Reconstructed: the ind/limit_rhs parameters and both assignment branches
 * were dropped by the extraction.)
 */
static bool
get_induction_and_limit_vars(nir_ssa_scalar cond,
                             nir_ssa_scalar *ind,
                             nir_ssa_scalar *limit,
                             bool *limit_rhs,
                             loop_info_state *state)
{
   nir_ssa_scalar rhs, lhs;
   lhs = nir_ssa_scalar_chase_alu_src(cond, 0);
   rhs = nir_ssa_scalar_chase_alu_src(cond, 1);

   if (get_loop_var(lhs.def, state)->type == basic_induction) {
      *ind = lhs;
      *limit = rhs;
      *limit_rhs = true;
      return true;
   } else if (get_loop_var(rhs.def, state)->type == basic_induction) {
      *ind = rhs;
      *limit = lhs;
      *limit_rhs = false;
      return true;
   } else {
      return false;
   }
}
/* Handle loops exiting on (x && y) == 0 (spelled ieq-with-zero or inot):
 * chase into the iand and look for a supported comparison containing a basic
 * induction variable; on success *cond is rewritten to that comparison.
 * (Reconstructed: the ind/limit_rhs parameters, swap assignments, returns
 * and the *cond update were dropped by the extraction.)
 */
static bool
try_find_trip_count_vars_in_iand(nir_ssa_scalar *cond,
                                 nir_ssa_scalar *ind,
                                 nir_ssa_scalar *limit,
                                 bool *limit_rhs,
                                 loop_info_state *state)
{
   const nir_op alu_op = nir_ssa_scalar_alu_op(*cond);
   assert(alu_op == nir_op_ieq || alu_op == nir_op_inot);

   nir_ssa_scalar iand = nir_ssa_scalar_chase_alu_src(*cond, 0);

   if (alu_op == nir_op_ieq) {
      nir_ssa_scalar zero = nir_ssa_scalar_chase_alu_src(*cond, 1);

      if (!nir_ssa_scalar_is_alu(iand) || !nir_ssa_scalar_is_const(zero)) {
         /* Maybe we had it the wrong way, flip things around */
         nir_ssa_scalar tmp = zero;
         zero = iand;
         iand = tmp;

         /* If we still didn't find what we need then return */
         if (!nir_ssa_scalar_is_const(zero))
            return false;
      }

      /* If the loop is not breaking on (x && y) == 0 then return */
      if (nir_ssa_scalar_as_uint(zero) != 0)
         return false;
   }

   if (!nir_ssa_scalar_is_alu(iand))
      return false;

   if (nir_ssa_scalar_alu_op(iand) != nir_op_iand)
      return false;

   /* Check if iand src is a terminator condition and try get induction var
    * and trip limit var.
    */
   bool found_induction_var = false;
   for (unsigned i = 0; i < 2; i++) {
      nir_ssa_scalar src = nir_ssa_scalar_chase_alu_src(iand, i);
      if (is_supported_terminator_condition(src) &&
          get_induction_and_limit_vars(src, ind, limit, limit_rhs, state)) {
         *cond = src;
         found_induction_var = true;

         /* If we've found one with a constant limit, stop. */
         if (nir_ssa_scalar_is_const(*limit))
            break;
      }
   }

   return found_induction_var;
}
/* Run through each of the terminators of the loop and try to infer a possible
 * trip-count. We need to check them all, and set the lowest trip-count as the
 * trip-count of our loop. If one of the terminators has an undecidable
 * trip-count we can not safely assume anything about the duration of the
 * loop.
 * (Reconstructed: braces, continue statements, limit_rhs declaration and
 * several argument lines were dropped by the extraction.)
 */
static void
find_trip_count(loop_info_state *state, unsigned execution_mode)
{
   bool trip_count_known = true;
   bool guessed_trip_count = false;
   nir_loop_terminator *limiting_terminator = NULL;
   int max_trip_count = -1;

   list_for_each_entry(nir_loop_terminator, terminator,
                       &state->loop->info->loop_terminator_list,
                       loop_terminator_link) {
      assert(terminator->nif->condition.is_ssa);
      nir_ssa_scalar cond = { terminator->nif->condition.ssa, 0 };

      if (!nir_ssa_scalar_is_alu(cond)) {
         /* If we get here the loop is dead and will get cleaned up by the
          * nir_opt_dead_cf pass.
          */
         trip_count_known = false;
         continue;
      }

      nir_op alu_op = nir_ssa_scalar_alu_op(cond);

      bool limit_rhs;
      nir_ssa_scalar basic_ind = { NULL, 0 };
      nir_ssa_scalar limit;
      if ((alu_op == nir_op_inot || alu_op == nir_op_ieq) &&
          try_find_trip_count_vars_in_iand(&cond, &basic_ind, &limit,
                                           &limit_rhs, state)) {

         /* The loop is exiting on (x && y) == 0 so we need to get the
          * inverse of x or y (i.e. which ever contained the induction var) in
          * order to compute the trip count.
          */
         alu_op = inverse_comparison(nir_ssa_scalar_alu_op(cond));
         trip_count_known = false;
         terminator->exact_trip_count_unknown = true;
      }

      if (!basic_ind.def) {
         if (is_supported_terminator_condition(cond)) {
            get_induction_and_limit_vars(cond, &basic_ind,
                                         &limit, &limit_rhs, state);
         }
      }

      /* The comparison has to have a basic induction variable for us to be
       * able to find trip counts.
       */
      if (!basic_ind.def) {
         trip_count_known = false;
         continue;
      }

      terminator->induction_rhs = !limit_rhs;

      /* Attempt to find a constant limit for the loop */
      nir_const_value limit_val;
      if (nir_ssa_scalar_is_const(limit)) {
         limit_val = nir_ssa_scalar_as_const_value(limit);
      } else {
         trip_count_known = false;

         if (!try_find_limit_of_alu(limit, &limit_val, terminator, state)) {
            /* Guess loop limit based on array access */
            if (!guess_loop_limit(state, &limit_val, basic_ind)) {
               continue;
            }

            guessed_trip_count = true;
         }
      }

      /* We have determined that we have the following constants:
       * (With the typical int i = 0; i < x; i++; as an example)
       *    - Upper limit.
       *    - Starting value
       *    - Step / iteration size
       * Thats all thats needed to calculate the trip-count
       */

      nir_basic_induction_var *ind_var =
         get_loop_var(basic_ind.def, state)->ind;

      /* The basic induction var might be a vector but, because we guarantee
       * earlier that the phi source has a scalar swizzle, we can take the
       * component from basic_ind.
       */
      nir_ssa_scalar initial_s = { ind_var->def_outside_loop, basic_ind.comp };
      nir_ssa_scalar alu_s = { &ind_var->alu->dest.dest.ssa, basic_ind.comp };

      nir_const_value initial_val = nir_ssa_scalar_as_const_value(initial_s);

      /* We are guaranteed by earlier code that at least one of these sources
       * is a constant but we don't know which.
       */
      nir_const_value step_val;
      memset(&step_val, 0, sizeof(step_val));
      UNUSED bool found_step_value = false;
      assert(nir_op_infos[ind_var->alu->op].num_inputs == 2);
      for (unsigned i = 0; i < 2; i++) {
         nir_ssa_scalar alu_src = nir_ssa_scalar_chase_alu_src(alu_s, i);
         if (nir_ssa_scalar_is_const(alu_src)) {
            found_step_value = true;
            step_val = nir_ssa_scalar_as_const_value(alu_src);
            break;
         }
      }
      assert(found_step_value);

      int iterations = calculate_iterations(initial_val, step_val, limit_val,
                                            ind_var->alu, cond, alu_op,
                                            limit_rhs,
                                            terminator->continue_from_then,
                                            execution_mode);

      /* Where we not able to calculate the iteration count */
      if (iterations == -1) {
         trip_count_known = false;
         guessed_trip_count = false;
         continue;
      }

      if (guessed_trip_count) {
         guessed_trip_count = false;
         if (state->loop->info->guessed_trip_count == 0 ||
             state->loop->info->guessed_trip_count > iterations)
            state->loop->info->guessed_trip_count = iterations;

         continue;
      }

      /* If this is the first run or we have found a smaller amount of
       * iterations than previously (we have identified a more limiting
       * terminator) set the trip count and limiting terminator.
       */
      if (max_trip_count == -1 || iterations < max_trip_count) {
         max_trip_count = iterations;
         limiting_terminator = terminator;
      }
   }

   state->loop->info->exact_trip_count_known = trip_count_known;
   if (max_trip_count > -1)
      state->loop->info->max_trip_count = max_trip_count;
   state->loop->info->limiting_terminator = limiting_terminator;
}
/* Force unrolling when an induction-indexed array is exactly trip-count
 * sized, or when its mode is in the indirect-indexing mask.
 * (Reconstructed: braces, the array_size guard and returns were dropped.)
 */
static bool
force_unroll_array_access(loop_info_state *state, nir_deref_instr *deref)
{
   unsigned array_size = find_array_access_via_induction(state, deref, NULL);
   if (array_size) {
      if (array_size == state->loop->info->max_trip_count)
         return true;

      if (deref->mode & state->indirect_mask)
         return true;
   }

   return false;
}
/* Scan a block for deref intrinsics whose array access justifies forcing
 * the loop to unroll.
 * (Reconstructed: braces, continue and the returns were dropped.)
 */
static bool
force_unroll_heuristics(loop_info_state *state, nir_block *block)
{
   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      /* Check for arrays variably-indexed by a loop induction variable.
       * Unrolling the loop may convert that access into constant-indexing.
       */
      if (intrin->intrinsic == nir_intrinsic_load_deref ||
          intrin->intrinsic == nir_intrinsic_store_deref ||
          intrin->intrinsic == nir_intrinsic_copy_deref) {
         if (force_unroll_array_access(state,
                                       nir_src_as_deref(intrin->src[0])))
            return true;

         if (intrin->intrinsic == nir_intrinsic_copy_deref &&
             force_unroll_array_access(state,
                                       nir_src_as_deref(intrin->src[1])))
            return true;
      }
   }

   return false;
}
/* Top-level analysis driver for one loop: initialize per-def records, find
 * terminators, compute invariance and induction info, then the trip count
 * and force-unroll heuristics.
 * (Reconstructed: braces, break statements, early returns and the switch
 * scaffolding were dropped by the extraction.)
 */
static void
get_loop_info(loop_info_state *state, nir_function_impl *impl)
{
   nir_shader *shader = impl->function->shader;
   const nir_shader_compiler_options *options = shader->options;

   /* Initialize all variables to "outside_loop". This also marks defs
    * invariant and constant if they are nir_instr_type_load_consts
    */
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         nir_foreach_ssa_def(instr, initialize_ssa_def, state);
   }

   /* Add all entries in the outermost part of the loop to the processing list
    * Mark the entries in conditionals or in nested loops accordingly
    */
   foreach_list_typed_safe(nir_cf_node, node, node, &state->loop->body) {
      switch (node->type) {

      case nir_cf_node_block:
         init_loop_block(nir_cf_node_as_block(node), state,
                         false, false, options);
         break;

      case nir_cf_node_if:
         nir_foreach_block_in_cf_node(block, node)
            init_loop_block(block, state, true, false, options);
         break;

      case nir_cf_node_loop:
         nir_foreach_block_in_cf_node(block, node) {
            init_loop_block(block, state, false, true, options);
         }
         break;

      case nir_cf_node_function:
         break;
      }
   }

   /* Try to find all simple terminators of the loop. If we can't find any,
    * or we find possible terminators that have side effects then bail.
    */
   if (!find_loop_terminators(state)) {
      list_for_each_entry_safe(nir_loop_terminator, terminator,
                               &state->loop->info->loop_terminator_list,
                               loop_terminator_link) {
         list_del(&terminator->loop_terminator_link);
         ralloc_free(terminator);
      }
      return;
   }

   /* Induction analysis needs invariance information so get that first */
   compute_invariance_information(state);

   /* We have invariance information so try to find induction variables */
   if (!compute_induction_information(state))
      return;

   /* Run through each of the terminators and try to compute a trip-count */
   find_trip_count(state,
                   impl->function->shader->info.float_controls_execution_mode);

   nir_foreach_block_in_cf_node(block, &state->loop->cf_node) {
      if (force_unroll_heuristics(state, block)) {
         state->loop->info->force_unroll = true;
         break;
      }
   }
}
1225 static loop_info_state
*
1226 initialize_loop_info_state(nir_loop
*loop
, void *mem_ctx
,
1227 nir_function_impl
*impl
)
1229 loop_info_state
*state
= rzalloc(mem_ctx
, loop_info_state
);
1230 state
->loop_vars
= rzalloc_array(mem_ctx
, nir_loop_variable
,
1234 list_inithead(&state
->process_list
);
1237 ralloc_free(loop
->info
);
1239 loop
->info
= rzalloc(loop
, nir_loop_info
);
1241 list_inithead(&loop
->info
->loop_terminator_list
);
/* Recurse through the CF tree; innermost loops are analyzed first, then the
 * current loop node itself.
 * (Reconstructed: braces, returns and the break after the loop case were
 * dropped by the extraction.)
 */
static void
process_loops(nir_cf_node *cf_node, nir_variable_mode indirect_mask)
{
   switch (cf_node->type) {
   case nir_cf_node_block:
      return;
   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(cf_node);
      foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->then_list)
         process_loops(nested_node, indirect_mask);
      foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->else_list)
         process_loops(nested_node, indirect_mask);
      return;
   }
   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(cf_node);
      foreach_list_typed(nir_cf_node, nested_node, node, &loop->body)
         process_loops(nested_node, indirect_mask);
      break;
   }
   default:
      unreachable("unknown cf node type");
   }

   nir_loop *loop = nir_cf_node_as_loop(cf_node);
   nir_function_impl *impl = nir_cf_node_get_function(cf_node);
   void *mem_ctx = ralloc_context(NULL);

   loop_info_state *state = initialize_loop_info_state(loop, mem_ctx, impl);
   state->indirect_mask = indirect_mask;

   get_loop_info(state, impl);

   ralloc_free(mem_ctx);
}
1283 nir_loop_analyze_impl(nir_function_impl
*impl
,
1284 nir_variable_mode indirect_mask
)
1286 nir_index_ssa_defs(impl
);
1287 foreach_list_typed(nir_cf_node
, node
, node
, &impl
->body
)
1288 process_loops(node
, indirect_mask
);