2 * Copyright © 2015 Thomas Helland
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 #include "nir_constant_expressions.h"
26 #include "nir_loop_analyze.h"
33 } nir_loop_variable_type
;
/* Describes a basic induction variable: the in-loop ALU instruction that
 * steps it each iteration, and the phi source defined outside the loop
 * (the initial value).
 */
35 typedef struct nir_basic_induction_var
{
36 nir_alu_instr
*alu
; /* The def of the alu-operation */
37 nir_ssa_def
*def_outside_loop
; /* The phi-src outside the loop */
38 } nir_basic_induction_var
;
41 /* A link for the work list */
42 struct list_head process_link
;
46 /* The ssa_def associated with this info */
49 /* The type of this ssa_def */
50 nir_loop_variable_type type
;
52 /* If this is of type basic_induction */
53 struct nir_basic_induction_var
*ind
;
55 /* True if variable is in an if branch */
58 /* True if variable is in a nested loop */
64 /* The loop we store information for */
67 /* Loop_variable for all ssa_defs in function */
68 nir_loop_variable
*loop_vars
;
70 /* A list of the loop_vars to analyze */
71 struct list_head process_list
;
73 nir_variable_mode indirect_mask
;
/* Return the per-SSA-def loop_variable record for \p value, looked up by
 * ssa_def->index into the state's loop_vars array (indices are assigned by
 * nir_index_ssa_defs() before analysis).
 */
77 static nir_loop_variable
*
78 get_loop_var(nir_ssa_def
*value
, loop_info_state
*state
)
80 return &(state
->loop_vars
[value
->index
]);
84 loop_info_state
*state
;
/* Per-SSA-def callback used during loop-block initialization: tags the def's
 * loop_variable as being inside a nested loop or an if branch, and otherwise
 * appends it to the work list for invariance/induction analysis.
 *
 * NOTE(review): this view is missing physical lines (e.g. the else branch
 * that should precede list_addtail) — confirm against the full file before
 * editing.
 */
90 init_loop_def(nir_ssa_def
*def
, void *void_init_loop_state
)
92 init_loop_state
*loop_init_state
= void_init_loop_state
;
93 nir_loop_variable
*var
= get_loop_var(def
, loop_init_state
->state
);
95 if (loop_init_state
->in_nested_loop
) {
96 var
->in_nested_loop
= true;
97 } else if (loop_init_state
->in_if_branch
) {
98 var
->in_if_branch
= true;
100 /* Add to the tail of the list. That way we start at the beginning of
101 * the defs in the loop instead of the end when walking the list. This
102 * means less recursive calls. Only add defs that are not in nested
103 * loops or conditional blocks.
105 list_addtail(&var
->process_link
, &loop_init_state
->state
->process_list
);
113 /** Calculate an estimated cost in number of instructions
115 * We do this so that we don't unroll loops which will later get massively
116 * inflated due to int64 or fp64 lowering. The estimates provided here don't
117 * have to be massively accurate; they just have to be good enough that loop
118 * unrolling doesn't cause things to blow up too much.
/* NOTE(review): the concrete `return <cost>;` lines for each branch are
 * missing from this view — only the classification conditions are visible.
 */
121 instr_cost(nir_instr
*instr
, const nir_shader_compiler_options
*options
)
/* Intrinsics and texture ops get a fixed (non-ALU) cost. */
123 if (instr
->type
== nir_instr_type_intrinsic
||
124 instr
->type
== nir_instr_type_tex
)
127 if (instr
->type
!= nir_instr_type_alu
)
130 nir_alu_instr
*alu
= nir_instr_as_alu(instr
);
131 const nir_op_info
*info
= &nir_op_infos
[alu
->op
];
133 /* Assume everything 16 or 32-bit is cheap.
135 * There are no 64-bit ops that don't have a 64-bit thing as their
136 * destination or first source.
138 if (nir_dest_bit_size(alu
->dest
.dest
) < 64 &&
139 nir_src_bit_size(alu
->src
[0].src
) < 64)
/* fp64 if the 64-bit destination or any 64-bit source is float-typed. */
142 bool is_fp64
= nir_dest_bit_size(alu
->dest
.dest
) == 64 &&
143 nir_alu_type_get_base_type(info
->output_type
) == nir_type_float
;
144 for (unsigned i
= 0; i
< info
->num_inputs
; i
++) {
145 if (nir_src_bit_size(alu
->src
[i
].src
) == 64 &&
146 nir_alu_type_get_base_type(info
->input_types
[i
]) == nir_type_float
)
151 /* If it's something lowered normally, it's expensive. */
153 if (options
->lower_doubles_options
&
154 nir_lower_doubles_op_to_options_mask(alu
->op
))
157 /* If it's full software, it's even more expensive */
158 if (options
->lower_doubles_options
& nir_lower_fp64_full_software
)
/* int64 ops that will be lowered: division-family ops are the worst. */
163 if (options
->lower_int64_options
&
164 nir_lower_int64_op_to_options_mask(alu
->op
)) {
165 /* These require a doing the division algorithm. */
166 if (alu
->op
== nir_op_idiv
|| alu
->op
== nir_op_udiv
||
167 alu
->op
== nir_op_imod
|| alu
->op
== nir_op_umod
||
168 alu
->op
== nir_op_irem
)
171 /* Other int64 lowering isn't usually all that expensive */
/* Accumulate the block's estimated instruction cost into the loop info and
 * initialize a loop_variable for every SSA def in the block via
 * init_loop_def, tagging defs with the given in_if_branch/in_nested_loop
 * context flags.
 */
180 init_loop_block(nir_block
*block
, loop_info_state
*state
,
181 bool in_if_branch
, bool in_nested_loop
,
182 const nir_shader_compiler_options
*options
)
184 init_loop_state init_state
= {.in_if_branch
= in_if_branch
,
185 .in_nested_loop
= in_nested_loop
,
188 nir_foreach_instr(instr
, block
) {
189 state
->loop
->info
->instr_cost
+= instr_cost(instr
, options
);
190 nir_foreach_ssa_def(instr
, init_loop_def
, &init_state
);
/* True if the variable's defining instruction is an ALU operation. */
197 is_var_alu(nir_loop_variable
*var
)
199 return var
->def
->parent_instr
->type
== nir_instr_type_alu
;
/* True if the variable's defining instruction is a load_const. */
203 is_var_constant(nir_loop_variable
*var
)
205 return var
->def
->parent_instr
->type
== nir_instr_type_load_const
;
/* True if the variable's defining instruction is a phi. */
209 is_var_phi(nir_loop_variable
*var
)
211 return var
->def
->parent_instr
->type
== nir_instr_type_phi
;
/* Recursively classify \p def as loop-invariant or not, caching the result
 * in the loop_variable's type field. An ALU def is invariant iff all of its
 * sources are invariant; phis and other opcodes are marked not_invariant
 * (see comment near the end). Returns whether the def is invariant —
 * NOTE(review): several return/brace lines are missing from this view.
 */
215 mark_invariant(nir_ssa_def
*def
, loop_info_state
*state
)
217 nir_loop_variable
*var
= get_loop_var(def
, state
);
/* Already classified (or outside the loop)? Use the cached answer. */
219 if (var
->type
== invariant
)
223 var
->type
= invariant
;
227 if (var
->type
== not_invariant
)
230 if (is_var_alu(var
)) {
231 nir_alu_instr
*alu
= nir_instr_as_alu(def
->parent_instr
);
/* An ALU result is invariant only if every source is invariant. */
233 for (unsigned i
= 0; i
< nir_op_infos
[alu
->op
].num_inputs
; i
++) {
234 if (!mark_invariant(alu
->src
[i
].src
.ssa
, state
)) {
235 var
->type
= not_invariant
;
239 var
->type
= invariant
;
243 /* Phis shouldn't be invariant except if one operand is invariant, and the
244 * other is the phi itself. These should be removed by opt_remove_phis.
245 * load_consts are already set to invariant and constant during init,
246 * and so should return earlier. Remaining op_codes are set undefined.
248 var
->type
= not_invariant
;
/* Walk the work list and mark each variable invariant/not-invariant via
 * mark_invariant(); invariant entries are removed from the process list so
 * only induction candidates remain.
 */
253 compute_invariance_information(loop_info_state
*state
)
255 /* An expression is invariant in a loop L if:
258 * – it’s a variable use, all of whose single defs are outside of L
260 * – it’s a pure computation all of whose args are loop invariant
261 * – it’s a variable use whose single reaching def, and the
262 * rhs of that def is loop-invariant
264 list_for_each_entry_safe(nir_loop_variable
, var
, &state
->process_list
,
/* Nested-loop / if-branch defs were never added to the list. */
266 assert(!var
->in_if_branch
&& !var
->in_nested_loop
);
268 if (mark_invariant(var
->def
, state
))
269 list_del(&var
->process_link
);
273 /* If all of the instruction sources point to identical ALU instructions (as
274 * per nir_instrs_equal), return one of the ALU instructions. Otherwise,
 * return NULL — NOTE(review): the NULL-return and "first = alu" lines are
 * missing from this view; only the comparison skeleton is visible.
277 static nir_alu_instr
*
278 phi_instr_as_alu(nir_phi_instr
*phi
)
280 nir_alu_instr
*first
= NULL
;
281 nir_foreach_phi_src(src
, phi
) {
282 assert(src
->src
.is_ssa
);
/* Every phi source must itself be an ALU instruction. */
283 if (src
->src
.ssa
->parent_instr
->type
!= nir_instr_type_alu
)
286 nir_alu_instr
*alu
= nir_instr_as_alu(src
->src
.ssa
->parent_instr
);
/* All sources must be structurally identical to the first one seen. */
290 if (!nir_instrs_equal(&first
->instr
, &alu
->instr
))
/* True if ALU source \p src_idx uses the identity swizzle (component i reads
 * component i) across all destination components. Only valid for sources
 * whose size follows the destination (input_sizes[src_idx] == 0).
 */
299 alu_src_has_identity_swizzle(nir_alu_instr
*alu
, unsigned src_idx
)
301 assert(nir_op_infos
[alu
->op
].input_sizes
[src_idx
] == 0);
302 assert(alu
->dest
.dest
.is_ssa
);
303 for (unsigned i
= 0; i
< alu
->dest
.dest
.ssa
.num_components
; i
++) {
304 if (alu
->src
[src_idx
].swizzle
[i
] != i
)
/* Detect basic induction variables: phis in the loop header whose two
 * sources are (a) a load_const defined outside the loop (the initial value)
 * and (b) an in-loop two-source ALU op combining a constant step with the
 * phi itself (unswizzled). Matching variables get type == basic_induction
 * and an attached nir_basic_induction_var. Returns whether any were found.
 * NOTE(review): several brace/continue lines are missing from this view.
 */
312 compute_induction_information(loop_info_state
*state
)
314 bool found_induction_var
= false;
315 list_for_each_entry_safe(nir_loop_variable
, var
, &state
->process_list
,
318 /* It can't be an induction variable if it is invariant. Invariants and
319 * things in nested loops or conditionals should have been removed from
320 * the list by compute_invariance_information().
322 assert(!var
->in_if_branch
&& !var
->in_nested_loop
&&
323 var
->type
!= invariant
);
325 /* We are only interested in checking phis for the basic induction
326 * variable case as its simple to detect. All basic induction variables
329 if (!is_var_phi(var
))
332 nir_phi_instr
*phi
= nir_instr_as_phi(var
->def
->parent_instr
);
333 nir_basic_induction_var
*biv
= rzalloc(state
, nir_basic_induction_var
);
335 nir_loop_variable
*alu_src_var
= NULL
;
336 nir_foreach_phi_src(src
, phi
) {
337 nir_loop_variable
*src_var
= get_loop_var(src
->src
.ssa
, state
);
339 /* If one of the sources is in an if branch or nested loop then don't
340 * attempt to go any further.
342 if (src_var
->in_if_branch
|| src_var
->in_nested_loop
)
345 /* Detect induction variables that are incremented in both branches
346 * of an unnested if rather than in a loop block.
348 if (is_var_phi(src_var
)) {
349 nir_phi_instr
*src_phi
=
350 nir_instr_as_phi(src_var
->def
->parent_instr
);
351 nir_alu_instr
*src_phi_alu
= phi_instr_as_alu(src_phi
);
/* Treat the (identical) ALU feeding the inner phi as the step op. */
353 src_var
= get_loop_var(&src_phi_alu
->dest
.dest
.ssa
, state
);
354 if (!src_var
->in_if_branch
)
/* First out-of-loop source becomes the initial value. */
359 if (!src_var
->in_loop
&& !biv
->def_outside_loop
) {
360 biv
->def_outside_loop
= src_var
->def
;
361 } else if (is_var_alu(src_var
) && !biv
->alu
) {
362 alu_src_var
= src_var
;
363 nir_alu_instr
*alu
= nir_instr_as_alu(src_var
->def
->parent_instr
);
365 if (nir_op_infos
[alu
->op
].num_inputs
== 2) {
366 for (unsigned i
= 0; i
< 2; i
++) {
367 /* Is one of the operands const, and the other the phi. The
368 * phi source can't be swizzled in any way.
370 if (nir_src_is_const(alu
->src
[i
].src
) &&
371 alu
->src
[1-i
].src
.ssa
== &phi
->dest
.ssa
&&
372 alu_src_has_identity_swizzle(alu
, 1 - i
))
/* Both pieces found and the initial value is a constant: record it. */
385 if (biv
->alu
&& biv
->def_outside_loop
&&
386 biv
->def_outside_loop
->parent_instr
->type
== nir_instr_type_load_const
) {
387 alu_src_var
->type
= basic_induction
;
388 alu_src_var
->ind
= biv
;
389 var
->type
= basic_induction
;
392 found_induction_var
= true;
397 return found_induction_var
;
/* Per-SSA-def callback run over the whole impl before loop analysis: marks
 * every def as outside the loop (in_loop = false) and pre-classifies
 * load_consts as invariant; everything else starts as undefined.
 */
401 initialize_ssa_def(nir_ssa_def
*def
, void *void_state
)
403 loop_info_state
*state
= void_state
;
404 nir_loop_variable
*var
= get_loop_var(def
, state
);
406 var
->in_loop
= false;
409 if (def
->parent_instr
->type
== nir_instr_type_load_const
) {
410 var
->type
= invariant
;
412 var
->type
= undefined
;
/* Scan the loop body's top-level ifs for "trivial" loop terminators — an if
 * whose then or else block ends in a break — and record each as a
 * nir_loop_terminator on the loop info. Sets complex_loop (and presumably
 * bails) when a break exists inside a non-trivial if or the condition is a
 * phi. NOTE(review): the return statements and some closing braces are
 * missing from this view.
 */
419 find_loop_terminators(loop_info_state
*state
)
421 bool success
= false;
422 foreach_list_typed_safe(nir_cf_node
, node
, node
, &state
->loop
->body
) {
423 if (node
->type
== nir_cf_node_if
) {
424 nir_if
*nif
= nir_cf_node_as_if(node
);
426 nir_block
*break_blk
= NULL
;
427 nir_block
*continue_from_blk
= NULL
;
428 bool continue_from_then
= true;
/* Identify which side of the if breaks out of the loop. */
430 nir_block
*last_then
= nir_if_last_then_block(nif
);
431 nir_block
*last_else
= nir_if_last_else_block(nif
);
432 if (nir_block_ends_in_break(last_then
)) {
433 break_blk
= last_then
;
434 continue_from_blk
= last_else
;
435 continue_from_then
= false;
436 } else if (nir_block_ends_in_break(last_else
)) {
437 break_blk
= last_else
;
438 continue_from_blk
= last_then
;
441 /* If there is a break then we should find a terminator. If we can
442 * not find a loop terminator, but there is a break-statement then
443 * we should return false so that we do not try to find trip-count
445 if (!nir_is_trivial_loop_if(nif
, break_blk
)) {
446 state
->loop
->info
->complex_loop
= true;
450 /* Continue if the if contained no jumps at all */
/* A phi condition means the exit condition varies per iteration path —
 * treat the loop as complex. */
454 if (nif
->condition
.ssa
->parent_instr
->type
== nir_instr_type_phi
) {
455 state
->loop
->info
->complex_loop
= true;
459 nir_loop_terminator
*terminator
=
460 rzalloc(state
->loop
->info
, nir_loop_terminator
);
462 list_addtail(&terminator
->loop_terminator_link
,
463 &state
->loop
->info
->loop_terminator_list
);
465 terminator
->nif
= nif
;
466 terminator
->break_block
= break_blk
;
467 terminator
->continue_from_block
= continue_from_blk
;
468 terminator
->continue_from_then
= continue_from_then
;
469 terminator
->conditional_instr
= nif
->condition
.ssa
->parent_instr
;
478 /* This function looks for an array access within a loop that uses an
479 * induction variable for the array index. If found it returns the size of the
480 * array, otherwise 0 is returned. If we find an induction var we pass it back
481 * to the caller via array_index_out.
484 find_array_access_via_induction(loop_info_state
*state
,
485 nir_deref_instr
*deref
,
486 nir_loop_variable
**array_index_out
)
/* Walk up the deref chain looking for an array deref indexed by a
 * basic-induction variable. */
488 for (nir_deref_instr
*d
= deref
; d
; d
= nir_deref_instr_parent(d
)) {
489 if (d
->deref_type
!= nir_deref_type_array
)
492 assert(d
->arr
.index
.is_ssa
);
493 nir_loop_variable
*array_index
= get_loop_var(d
->arr
.index
.ssa
, state
);
495 if (array_index
->type
!= basic_induction
)
/* NOTE(review): array_index_out appears to be optional (callers pass
 * NULL) — the NULL guard line is not visible in this view; confirm. */
499 *array_index_out
= array_index
;
/* Size comes from the parent type: array/matrix length, else vector. */
501 nir_deref_instr
*parent
= nir_deref_instr_parent(d
);
502 if (glsl_type_is_array_or_matrix(parent
->type
)) {
503 return glsl_get_length(parent
->type
);
505 assert(glsl_type_is_vector(parent
->type
));
506 return glsl_get_vector_elements(parent
->type
);
/* Guess an upper bound for the loop limit from array accesses indexed by the
 * given basic induction variable: the smallest such array size becomes the
 * guessed limit (written to *limit_val). Returns whether a guess was made —
 * NOTE(review): return lines are missing from this view.
 */
514 guess_loop_limit(loop_info_state
*state
, nir_const_value
*limit_val
,
515 nir_ssa_scalar basic_ind
)
517 unsigned min_array_size
= 0;
519 nir_foreach_block_in_cf_node(block
, &state
->loop
->cf_node
) {
520 nir_foreach_instr(instr
, block
) {
521 if (instr
->type
!= nir_instr_type_intrinsic
)
524 nir_intrinsic_instr
*intrin
= nir_instr_as_intrinsic(instr
);
526 /* Check for arrays variably-indexed by a loop induction variable. */
527 if (intrin
->intrinsic
== nir_intrinsic_load_deref
||
528 intrin
->intrinsic
== nir_intrinsic_store_deref
||
529 intrin
->intrinsic
== nir_intrinsic_copy_deref
) {
531 nir_loop_variable
*array_idx
= NULL
;
532 unsigned array_size
=
533 find_array_access_via_induction(state
,
534 nir_src_as_deref(intrin
->src
[0]),
/* Keep the smallest array indexed by OUR induction variable. */
536 if (array_idx
&& basic_ind
.def
== array_idx
->def
&&
537 (min_array_size
== 0 || min_array_size
> array_size
)) {
538 /* Array indices are scalars */
539 assert(basic_ind
.def
->num_components
== 1);
540 min_array_size
= array_size
;
/* copy_deref also has a source deref (src[1]) to check. */
543 if (intrin
->intrinsic
!= nir_intrinsic_copy_deref
)
547 find_array_access_via_induction(state
,
548 nir_src_as_deref(intrin
->src
[1]),
550 if (array_idx
&& basic_ind
.def
== array_idx
->def
&&
551 (min_array_size
== 0 || min_array_size
> array_size
)) {
552 /* Array indices are scalars */
553 assert(basic_ind
.def
->num_components
== 1);
554 min_array_size
= array_size
;
560 if (min_array_size
) {
561 *limit_val
= nir_const_value_for_uint(min_array_size
,
562 basic_ind
.def
->bit_size
);
/* If the loop limit is itself an ALU min (imin/fmin) with a constant
 * operand, use that constant as a conservative limit and mark the
 * terminator's trip count as inexact. Returns whether a limit was found —
 * NOTE(review): return lines are missing from this view.
 */
570 try_find_limit_of_alu(nir_ssa_scalar limit
, nir_const_value
*limit_val
,
571 nir_loop_terminator
*terminator
, loop_info_state
*state
)
573 if (!nir_ssa_scalar_is_alu(limit
))
576 nir_op limit_op
= nir_ssa_scalar_alu_op(limit
);
577 if (limit_op
== nir_op_imin
|| limit_op
== nir_op_fmin
) {
578 for (unsigned i
= 0; i
< 2; i
++) {
579 nir_ssa_scalar src
= nir_ssa_scalar_chase_alu_src(limit
, i
);
580 if (nir_ssa_scalar_is_const(src
)) {
581 *limit_val
= nir_ssa_scalar_as_const_value(src
);
582 terminator
->exact_trip_count_unknown
= true;
/* Constant-fold a single-input ALU op on one scalar value at the given bit
 * size. NOTE(review): the `return dest;` line is missing from this view.
 */
591 static nir_const_value
592 eval_const_unop(nir_op op
, unsigned bit_size
, nir_const_value src0
)
594 assert(nir_op_infos
[op
].num_inputs
== 1);
595 nir_const_value dest
;
596 nir_const_value
*src
[1] = { &src0
};
597 nir_eval_const_opcode(op
, &dest
, 1, bit_size
, src
);
/* Constant-fold a two-input ALU op on scalar values at the given bit size.
 * NOTE(review): the `return dest;` line is missing from this view.
 */
601 static nir_const_value
602 eval_const_binop(nir_op op
, unsigned bit_size
,
603 nir_const_value src0
, nir_const_value src1
)
605 assert(nir_op_infos
[op
].num_inputs
== 2);
606 nir_const_value dest
;
607 nir_const_value
*src
[2] = { &src0
, &src1
};
608 nir_eval_const_opcode(op
, &dest
, 1, bit_size
, src
);
/* Compute (limit - initial) / step as the raw iteration count, choosing
 * signed, unsigned, or float arithmetic — the selecting switch on cond_op is
 * missing from this view; the three arithmetic variants below correspond to
 * its cases. Returns -1 when the count exceeds INT_MAX.
 */
613 get_iteration(nir_op cond_op
, nir_const_value initial
, nir_const_value step
,
614 nir_const_value limit
, unsigned bit_size
)
616 nir_const_value span
, iter
;
/* Signed integer variant. */
623 span
= eval_const_binop(nir_op_isub
, bit_size
, limit
, initial
);
624 iter
= eval_const_binop(nir_op_idiv
, bit_size
, span
, step
);
/* Unsigned integer variant. */
629 span
= eval_const_binop(nir_op_isub
, bit_size
, limit
, initial
);
630 iter
= eval_const_binop(nir_op_udiv
, bit_size
, span
, step
);
/* Float variant: divide then truncate to a 64-bit integer count. */
637 span
= eval_const_binop(nir_op_fsub
, bit_size
, limit
, initial
);
638 iter
= eval_const_binop(nir_op_fdiv
, bit_size
, span
, step
);
639 iter
= eval_const_unop(nir_op_f2i64
, bit_size
, iter
);
646 uint64_t iter_u64
= nir_const_value_as_uint(iter
, bit_size
);
647 return iter_u64
> INT_MAX
? -1 : (int)iter_u64
;
/* Check whether running the loop for iter_int iterations satisfies the exit
 * condition: evaluate initial + iter_int * step, place it and the limit on
 * the correct sides of cond_op, and constant-fold the comparison. The
 * boolean result is inverted when invert_cond is set.
 */
651 test_iterations(int32_t iter_int
, nir_const_value
*step
,
652 nir_const_value
*limit
, nir_op cond_op
, unsigned bit_size
,
653 nir_alu_type induction_base_type
,
654 nir_const_value
*initial
, bool limit_rhs
, bool invert_cond
)
656 assert(nir_op_infos
[cond_op
].num_inputs
== 2);
658 nir_const_value iter_src
;
/* Pick float or integer arithmetic to match the induction variable. */
661 switch (induction_base_type
) {
663 iter_src
= nir_const_value_for_float(iter_int
, bit_size
);
664 mul_op
= nir_op_fmul
;
665 add_op
= nir_op_fadd
;
669 iter_src
= nir_const_value_for_int(iter_int
, bit_size
);
670 mul_op
= nir_op_imul
;
671 add_op
= nir_op_iadd
;
674 unreachable("Unhandled induction variable base type!");
677 /* Multiply the iteration count we are testing by the number of times we
678 * step the induction variable each iteration.
680 nir_const_value mul_result
=
681 eval_const_binop(mul_op
, bit_size
, iter_src
, *step
);
683 /* Add the initial value to the accumulated induction variable total */
684 nir_const_value add_result
=
685 eval_const_binop(add_op
, bit_size
, mul_result
, *initial
);
687 nir_const_value
*src
[2];
688 src
[limit_rhs
? 0 : 1] = &add_result
;
689 src
[limit_rhs
? 1 : 0] = limit
;
691 /* Evaluate the loop exit condition */
692 nir_const_value result
;
693 nir_eval_const_opcode(cond_op
, &result
, 1, bit_size
, src
);
695 return invert_cond
? !result
.b
: result
.b
;
/* Given constant initial value, step, and limit, compute the loop's trip
 * count: derive a raw count with get_iteration(), then probe it with a
 * -1/0/+1 bias via test_iterations() to catch off-by-one and ill-formed
 * loops. Returns the validated count, or (per the comments) -1 when none
 * can be established — NOTE(review): the -1 return lines are missing from
 * this view.
 */
699 calculate_iterations(nir_const_value
*initial
, nir_const_value
*step
,
700 nir_const_value
*limit
, nir_alu_instr
*alu
,
701 nir_ssa_scalar cond
, nir_op alu_op
, bool limit_rhs
,
704 assert(initial
!= NULL
&& step
!= NULL
&& limit
!= NULL
);
706 /* nir_op_isub should have been lowered away by this point */
707 assert(alu
->op
!= nir_op_isub
);
709 /* Make sure the alu type for our induction variable is compatible with the
710 * conditional alus input type. If its not something has gone really wrong.
712 nir_alu_type induction_base_type
=
713 nir_alu_type_get_base_type(nir_op_infos
[alu
->op
].output_type
);
714 if (induction_base_type
== nir_type_int
|| induction_base_type
== nir_type_uint
) {
715 assert(nir_alu_type_get_base_type(nir_op_infos
[alu_op
].input_types
[1]) == nir_type_int
||
716 nir_alu_type_get_base_type(nir_op_infos
[alu_op
].input_types
[1]) == nir_type_uint
);
718 assert(nir_alu_type_get_base_type(nir_op_infos
[alu_op
].input_types
[0]) ==
719 induction_base_type
);
722 /* Check for unsupported alu operations */
723 if (alu
->op
!= nir_op_iadd
&& alu
->op
!= nir_op_fadd
)
726 /* do-while loops can increment the starting value before the condition is
733 * Here we check if the induction variable is used directly by the loop
734 * condition and if so we assume we need to step the initial value.
736 unsigned trip_offset
= 0;
737 nir_alu_instr
*cond_alu
= nir_instr_as_alu(cond
.def
->parent_instr
);
738 if (cond_alu
->src
[0].src
.ssa
== &alu
->dest
.dest
.ssa
||
739 cond_alu
->src
[1].src
.ssa
== &alu
->dest
.dest
.ssa
) {
743 assert(nir_src_bit_size(alu
->src
[0].src
) ==
744 nir_src_bit_size(alu
->src
[1].src
));
745 unsigned bit_size
= nir_src_bit_size(alu
->src
[0].src
);
746 int iter_int
= get_iteration(alu_op
, *initial
, *step
, *limit
, bit_size
);
748 /* If iter_int is negative the loop is ill-formed or is the conditional is
749 * unsigned with a huge iteration count so don't bother going any further.
754 /* An explanation from the GLSL unrolling pass:
756 * Make sure that the calculated number of iterations satisfies the exit
757 * condition. This is needed to catch off-by-one errors and some types of
758 * ill-formed loops. For example, we need to detect that the following
759 * loop does not have a maximum iteration count.
761 * for (float x = 0.0; x != 0.9; x += 0.2);
763 for (int bias
= -1; bias
<= 1; bias
++) {
764 const int iter_bias
= iter_int
+ bias
;
766 if (test_iterations(iter_bias
, step
, limit
, alu_op
, bit_size
,
767 induction_base_type
, initial
,
768 limit_rhs
, invert_cond
)) {
769 return iter_bias
> 0 ? iter_bias
- trip_offset
: iter_bias
;
/* Return the inverse of a comparison opcode. NOTE(review): the entire
 * switch over alu_op (original lines ~778-800) is missing from this view;
 * only the signature and the fall-through unreachable remain.
 */
777 inverse_comparison(nir_op alu_op
)
801 unreachable("Unsuported comparison!");
/* True if the terminator condition is a two-input ALU comparison — the only
 * condition shape the trip-count analysis understands.
 */
806 is_supported_terminator_condition(nir_ssa_scalar cond
)
808 if (!nir_ssa_scalar_is_alu(cond
))
811 nir_alu_instr
*alu
= nir_instr_as_alu(cond
.def
->parent_instr
);
812 return nir_alu_instr_is_comparison(alu
) &&
813 nir_op_infos
[alu
->op
].num_inputs
== 2;
/* Classify the two sources of a comparison condition: whichever side is a
 * basic induction variable becomes the induction scalar, the other the
 * limit. NOTE(review): the out-parameter assignments and return lines
 * inside each branch (and the induction/limit_rhs parameters) are missing
 * from this view.
 */
817 get_induction_and_limit_vars(nir_ssa_scalar cond
,
819 nir_ssa_scalar
*limit
,
821 loop_info_state
*state
)
823 nir_ssa_scalar rhs
, lhs
;
824 lhs
= nir_ssa_scalar_chase_alu_src(cond
, 0);
825 rhs
= nir_ssa_scalar_chase_alu_src(cond
, 1);
827 if (get_loop_var(lhs
.def
, state
)->type
== basic_induction
) {
832 } else if (get_loop_var(rhs
.def
, state
)->type
== basic_induction
) {
/* Handle loops exiting on `(x && y) == 0` (or its inot form): unwrap the
 * ieq/inot to the underlying iand, verify the compared constant is zero,
 * then check each iand source for a supported comparison containing a basic
 * induction variable, filling in *ind / *limit / *limit_rhs. Returns whether
 * an induction variable was found. NOTE(review): the swap of iand/zero and
 * several early-return lines are missing from this view.
 */
843 try_find_trip_count_vars_in_iand(nir_ssa_scalar
*cond
,
845 nir_ssa_scalar
*limit
,
847 loop_info_state
*state
)
849 const nir_op alu_op
= nir_ssa_scalar_alu_op(*cond
);
850 assert(alu_op
== nir_op_ieq
|| alu_op
== nir_op_inot
);
852 nir_ssa_scalar iand
= nir_ssa_scalar_chase_alu_src(*cond
, 0);
854 if (alu_op
== nir_op_ieq
) {
855 nir_ssa_scalar zero
= nir_ssa_scalar_chase_alu_src(*cond
, 1);
857 if (!nir_ssa_scalar_is_alu(iand
) || !nir_ssa_scalar_is_const(zero
)) {
858 /* Maybe we had it the wrong way, flip things around */
859 nir_ssa_scalar tmp
= zero
;
863 /* If we still didn't find what we need then return */
864 if (!nir_ssa_scalar_is_const(zero
))
868 /* If the loop is not breaking on (x && y) == 0 then return */
869 if (nir_ssa_scalar_as_uint(zero
) != 0)
873 if (!nir_ssa_scalar_is_alu(iand
))
876 if (nir_ssa_scalar_alu_op(iand
) != nir_op_iand
)
879 /* Check if iand src is a terminator condition and try get induction var
880 * and trip limit var.
882 bool found_induction_var
= false;
883 for (unsigned i
= 0; i
< 2; i
++) {
884 nir_ssa_scalar src
= nir_ssa_scalar_chase_alu_src(iand
, i
);
885 if (is_supported_terminator_condition(src
) &&
886 get_induction_and_limit_vars(src
, ind
, limit
, limit_rhs
, state
)) {
888 found_induction_var
= true;
890 /* If we've found one with a constant limit, stop. */
891 if (nir_ssa_scalar_is_const(*limit
))
896 return found_induction_var
;
899 /* Run through each of the terminators of the loop and try to infer a possible
900 * trip-count. We need to check them all, and set the lowest trip-count as the
901 * trip-count of our loop. If one of the terminators has an undecidable
902 * trip-count we can not safely assume anything about the duration of the
 * loop. NOTE(review): several continue/brace lines and the middle
 * calculate_iterations() arguments are missing from this view.
906 find_trip_count(loop_info_state
*state
)
908 bool trip_count_known
= true;
909 bool guessed_trip_count
= false;
910 nir_loop_terminator
*limiting_terminator
= NULL
;
911 int max_trip_count
= -1;
913 list_for_each_entry(nir_loop_terminator
, terminator
,
914 &state
->loop
->info
->loop_terminator_list
,
915 loop_terminator_link
) {
916 assert(terminator
->nif
->condition
.is_ssa
);
917 nir_ssa_scalar cond
= { terminator
->nif
->condition
.ssa
, 0 };
919 if (!nir_ssa_scalar_is_alu(cond
)) {
920 /* If we get here the loop is dead and will get cleaned up by the
921 * nir_opt_dead_cf pass.
923 trip_count_known
= false;
927 nir_op alu_op
= nir_ssa_scalar_alu_op(cond
);
930 nir_ssa_scalar basic_ind
= { NULL
, 0 };
931 nir_ssa_scalar limit
;
932 if ((alu_op
== nir_op_inot
|| alu_op
== nir_op_ieq
) &&
933 try_find_trip_count_vars_in_iand(&cond
, &basic_ind
, &limit
,
934 &limit_rhs
, state
)) {
936 /* The loop is exiting on (x && y) == 0 so we need to get the
937 * inverse of x or y (i.e. which ever contained the induction var) in
938 * order to compute the trip count.
940 alu_op
= inverse_comparison(nir_ssa_scalar_alu_op(cond
));
941 trip_count_known
= false;
942 terminator
->exact_trip_count_unknown
= true;
945 if (!basic_ind
.def
) {
946 if (is_supported_terminator_condition(cond
)) {
947 get_induction_and_limit_vars(cond
, &basic_ind
,
948 &limit
, &limit_rhs
, state
);
952 /* The comparison has to have a basic induction variable for us to be
953 * able to find trip counts.
955 if (!basic_ind
.def
) {
956 trip_count_known
= false;
960 terminator
->induction_rhs
= !limit_rhs
;
962 /* Attempt to find a constant limit for the loop */
963 nir_const_value limit_val
;
964 if (nir_ssa_scalar_is_const(limit
)) {
965 limit_val
= nir_ssa_scalar_as_const_value(limit
);
967 trip_count_known
= false;
969 if (!try_find_limit_of_alu(limit
, &limit_val
, terminator
, state
)) {
970 /* Guess loop limit based on array access */
971 if (!guess_loop_limit(state
, &limit_val
, basic_ind
)) {
975 guessed_trip_count
= true;
979 /* We have determined that we have the following constants:
980 * (With the typical int i = 0; i < x; i++; as an example)
983 * - Step / iteration size
984 * Thats all thats needed to calculate the trip-count
987 nir_basic_induction_var
*ind_var
=
988 get_loop_var(basic_ind
.def
, state
)->ind
;
990 /* The basic induction var might be a vector but, because we guarantee
991 * earlier that the phi source has a scalar swizzle, we can take the
992 * component from basic_ind.
994 nir_ssa_scalar initial_s
= { ind_var
->def_outside_loop
, basic_ind
.comp
};
995 nir_ssa_scalar alu_s
= { &ind_var
->alu
->dest
.dest
.ssa
, basic_ind
.comp
};
997 nir_const_value initial_val
= nir_ssa_scalar_as_const_value(initial_s
);
999 /* We are guaranteed by earlier code that at least one of these sources
1000 * is a constant but we don't know which.
1002 nir_const_value step_val
;
1003 memset(&step_val
, 0, sizeof(step_val
));
1004 UNUSED
bool found_step_value
= false;
1005 assert(nir_op_infos
[ind_var
->alu
->op
].num_inputs
== 2);
1006 for (unsigned i
= 0; i
< 2; i
++) {
1007 nir_ssa_scalar alu_src
= nir_ssa_scalar_chase_alu_src(alu_s
, i
);
1008 if (nir_ssa_scalar_is_const(alu_src
)) {
1009 found_step_value
= true;
1010 step_val
= nir_ssa_scalar_as_const_value(alu_src
);
1014 assert(found_step_value
);
1016 int iterations
= calculate_iterations(&initial_val
, &step_val
,
1020 terminator
->continue_from_then
);
1022 /* Where we were not able to calculate the iteration count */
1023 if (iterations
== -1) {
1024 trip_count_known
= false;
1025 guessed_trip_count
= false;
/* Guessed limits only update the (separately tracked) guessed count. */
1029 if (guessed_trip_count
) {
1030 guessed_trip_count
= false;
1031 if (state
->loop
->info
->guessed_trip_count
== 0 ||
1032 state
->loop
->info
->guessed_trip_count
> iterations
)
1033 state
->loop
->info
->guessed_trip_count
= iterations
;
1038 /* If this is the first run or we have found a smaller amount of
1039 * iterations than previously (we have identified a more limiting
1040 * terminator) set the trip count and limiting terminator.
1042 if (max_trip_count
== -1 || iterations
< max_trip_count
) {
1043 max_trip_count
= iterations
;
1044 limiting_terminator
= terminator
;
1048 state
->loop
->info
->exact_trip_count_known
= trip_count_known
;
1049 if (max_trip_count
> -1)
1050 state
->loop
->info
->max_trip_count
= max_trip_count
;
1051 state
->loop
->info
->limiting_terminator
= limiting_terminator
;
/* Decide whether an induction-indexed array access justifies forcing the
 * loop to unroll: either the array size matches the loop's max trip count,
 * or the deref's mode is in the caller's indirect_mask. Returns a bool —
 * NOTE(review): the actual return lines are missing from this view.
 */
1055 force_unroll_array_access(loop_info_state
*state
, nir_deref_instr
*deref
)
1057 unsigned array_size
= find_array_access_via_induction(state
, deref
, NULL
);
1059 if (array_size
== state
->loop
->info
->max_trip_count
)
1062 if (deref
->mode
& state
->indirect_mask
)
/* Scan a block for load/store/copy_deref intrinsics whose derefs are
 * variably indexed by an induction variable and would become constant after
 * unrolling; returns whether unrolling should be forced (return lines are
 * not visible in this view).
 */
1070 force_unroll_heuristics(loop_info_state
*state
, nir_block
*block
)
1072 nir_foreach_instr(instr
, block
) {
1073 if (instr
->type
!= nir_instr_type_intrinsic
)
1076 nir_intrinsic_instr
*intrin
= nir_instr_as_intrinsic(instr
);
1078 /* Check for arrays variably-indexed by a loop induction variable.
1079 * Unrolling the loop may convert that access into constant-indexing.
1081 if (intrin
->intrinsic
== nir_intrinsic_load_deref
||
1082 intrin
->intrinsic
== nir_intrinsic_store_deref
||
1083 intrin
->intrinsic
== nir_intrinsic_copy_deref
) {
1084 if (force_unroll_array_access(state
,
1085 nir_src_as_deref(intrin
->src
[0])))
/* copy_deref has a second deref source to check as well. */
1088 if (intrin
->intrinsic
== nir_intrinsic_copy_deref
&&
1089 force_unroll_array_access(state
,
1090 nir_src_as_deref(intrin
->src
[1])))
/* Top-level analysis driver for one loop: initialize per-def records, seed
 * the work list from the loop body (tagging defs in ifs / nested loops),
 * find terminators, compute invariance and induction information, derive a
 * trip count, and apply the force-unroll heuristics. NOTE(review): break
 * statements and early-returns between phases are missing from this view.
 */
1099 get_loop_info(loop_info_state
*state
, nir_function_impl
*impl
)
1101 nir_shader
*shader
= impl
->function
->shader
;
1102 const nir_shader_compiler_options
*options
= shader
->options
;
1104 /* Initialize all variables to "outside_loop". This also marks defs
1105 * invariant and constant if they are nir_instr_type_load_consts
1107 nir_foreach_block(block
, impl
) {
1108 nir_foreach_instr(instr
, block
)
1109 nir_foreach_ssa_def(instr
, initialize_ssa_def
, state
);
1112 /* Add all entries in the outermost part of the loop to the processing list
1113 * Mark the entries in conditionals or in nested loops accordingly
1115 foreach_list_typed_safe(nir_cf_node
, node
, node
, &state
->loop
->body
) {
1116 switch (node
->type
) {
1118 case nir_cf_node_block
:
1119 init_loop_block(nir_cf_node_as_block(node
), state
,
1120 false, false, options
);
1123 case nir_cf_node_if
:
1124 nir_foreach_block_in_cf_node(block
, node
)
1125 init_loop_block(block
, state
, true, false, options
);
1128 case nir_cf_node_loop
:
1129 nir_foreach_block_in_cf_node(block
, node
) {
1130 init_loop_block(block
, state
, false, true, options
);
1134 case nir_cf_node_function
:
1139 /* Try to find all simple terminators of the loop. If we can't find any,
1140 * or we find possible terminators that have side effects then bail.
1142 if (!find_loop_terminators(state
)) {
1143 list_for_each_entry_safe(nir_loop_terminator
, terminator
,
1144 &state
->loop
->info
->loop_terminator_list
,
1145 loop_terminator_link
) {
1146 list_del(&terminator
->loop_terminator_link
);
1147 ralloc_free(terminator
);
1152 /* Induction analysis needs invariance information so get that first */
1153 compute_invariance_information(state
);
1155 /* We have invariance information so try to find induction variables */
1156 if (!compute_induction_information(state
))
1159 /* Run through each of the terminators and try to compute a trip-count */
1160 find_trip_count(state
);
1162 nir_foreach_block_in_cf_node(block
, &state
->loop
->cf_node
) {
1163 if (force_unroll_heuristics(state
, block
)) {
1164 state
->loop
->info
->force_unroll
= true;
/* Allocate and set up a loop_info_state on mem_ctx: zeroed state, a
 * loop_vars array (one slot per SSA def — the count argument is not visible
 * in this view), an empty process list, and a fresh loop->info (any previous
 * info is freed first).
 */
1170 static loop_info_state
*
1171 initialize_loop_info_state(nir_loop
*loop
, void *mem_ctx
,
1172 nir_function_impl
*impl
)
1174 loop_info_state
*state
= rzalloc(mem_ctx
, loop_info_state
);
1175 state
->loop_vars
= rzalloc_array(mem_ctx
, nir_loop_variable
,
1179 list_inithead(&state
->process_list
);
/* Replace any stale info from a previous analysis run. */
1182 ralloc_free(loop
->info
);
1184 loop
->info
= rzalloc(loop
, nir_loop_info
);
1186 list_inithead(&loop
->info
->loop_terminator_list
);
/* Recursively walk the CF tree; after recursing into children, analyze any
 * loop node found: build a loop_info_state on a temporary ralloc context,
 * run get_loop_info(), and free the context (results persist on loop->info,
 * which is allocated off the loop itself).
 */
1192 process_loops(nir_cf_node
*cf_node
, nir_variable_mode indirect_mask
)
1194 switch (cf_node
->type
) {
1195 case nir_cf_node_block
:
1197 case nir_cf_node_if
: {
1198 nir_if
*if_stmt
= nir_cf_node_as_if(cf_node
);
1199 foreach_list_typed(nir_cf_node
, nested_node
, node
, &if_stmt
->then_list
)
1200 process_loops(nested_node
, indirect_mask
);
1201 foreach_list_typed(nir_cf_node
, nested_node
, node
, &if_stmt
->else_list
)
1202 process_loops(nested_node
, indirect_mask
);
1205 case nir_cf_node_loop
: {
1206 nir_loop
*loop
= nir_cf_node_as_loop(cf_node
);
1207 foreach_list_typed(nir_cf_node
, nested_node
, node
, &loop
->body
)
1208 process_loops(nested_node
, indirect_mask
);
1212 unreachable("unknown cf node type");
/* Only reached for loop nodes (inner loops already processed above). */
1215 nir_loop
*loop
= nir_cf_node_as_loop(cf_node
);
1216 nir_function_impl
*impl
= nir_cf_node_get_function(cf_node
);
1217 void *mem_ctx
= ralloc_context(NULL
);
1219 loop_info_state
*state
= initialize_loop_info_state(loop
, mem_ctx
, impl
);
1220 state
->indirect_mask
= indirect_mask
;
1222 get_loop_info(state
, impl
);
1224 ralloc_free(mem_ctx
);
/* Public entry point: re-index all SSA defs (loop_vars lookups rely on
 * ssa_def->index) and analyze every loop in the function body.
 */
1228 nir_loop_analyze_impl(nir_function_impl
*impl
,
1229 nir_variable_mode indirect_mask
)
1231 nir_index_ssa_defs(impl
);
1232 foreach_list_typed(nir_cf_node
, node
, node
, &impl
->body
)
1233 process_loops(node
, indirect_mask
);