/* Induction variable optimizations.
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This pass tries to find the optimal set of induction variables for the loop.
   It optimizes just the basic linear induction variables (although adding
   support for other types should not be too hard).  It includes the
   optimizations commonly known as strength reduction, induction variable
   coalescing and induction variable elimination.  It does it in the
   following steps:

   1) The interesting uses of induction variables are found.  This includes

      -- uses of induction variables in non-linear expressions
      -- addresses of arrays
      -- comparisons of induction variables

   2) Candidates for the induction variables are found.  This includes

      -- old induction variables
      -- the variables defined by expressions derived from the "interesting
	 uses" above

   3) The optimal (w.r.t. a cost function) set of variables is chosen.  The
      cost function assigns a cost to sets of induction variables and consists
      of three parts:

      -- The use costs.  Each of the interesting uses chooses the best induction
	 variable in the set and adds its cost to the sum.  The cost reflects
	 the time spent on modifying the induction variables value to be usable
	 for the given purpose (adding base and offset for arrays, etc.).
      -- The variable costs.  Each of the variables has a cost assigned that
	 reflects the costs associated with incrementing the value of the
	 variable.  The original variables are somewhat preferred.
      -- The set cost.  Depending on the size of the set, extra cost may be
	 added to reflect register pressure.

      All the costs are defined in a machine-specific way, using the target
      hooks and machine descriptions to determine them.

   4) The trees are transformed to use the new variables, the dead code is
      removed.

   All of this is done loop by loop.  Doing it globally is theoretically
   possible, it might give a better performance and it might enable us
   to decide costs more precisely, but getting all the interactions right
   would be complicated.  */
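
/* For illustration (an editorial sketch, not part of the original sources):
   strength reduction rewrites a loop such as

     for (i = 0; i < n; i++)
       a[i] = 0;

   so that the repeated address computation a + 4 * i is replaced by a
   pointer incremented by 4 on every iteration:

     for (p = a; p < a + n; p++)
       *p = 0;

   Here the address a[i] is the interesting use, i is the old biv, and the
   pointer p is the candidate the cost model might select; the actual
   choice depends on the target-specific costs described above.  */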
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tm_p.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "timevar.h"
#include "tree-pass.h"
#include "ggc.h"
#include "insn-config.h"
#include "pointer-set.h"
#include "hashtab.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "cfgloop.h"
#include "params.h"
#include "langhooks.h"
#include "tree-affine.h"
#include "target.h"
#include "tree-inline.h"
#include "tree-ssa-propagate.h"
/* FIXME: Expressions are expanded to RTL in this pass to determine the
   cost of different addressing modes.  This should be moved to a TBD
   interface between the GIMPLE and RTL worlds.  */
#include "expr.h"
/* The infinite cost.  */
#define INFTY 10000000

#define AVG_LOOP_NITER(LOOP) 5
/* Returns the expected number of loop iterations for LOOP.
   The average trip count is computed from profile data if it
   exists.  */

static inline HOST_WIDE_INT
avg_loop_niter (struct loop *loop)
{
  HOST_WIDE_INT niter = estimated_stmt_executions_int (loop);
  if (niter == -1)
    return AVG_LOOP_NITER (loop);

  return niter;
}
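
/* For example, if profile feedback estimates that the loop body executes
   100 times, avg_loop_niter returns 100; without an estimate it falls back
   to the default AVG_LOOP_NITER value of 5.  */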
/* Representation of the induction variable.  */
struct iv
{
  tree base;		/* Initial value of the iv.  */
  tree base_object;	/* A memory object to that the induction variable points.  */
  tree step;		/* Step of the iv (constant only).  */
  tree ssa_name;	/* The ssa name with the value.  */
  bool biv_p;		/* Is it a biv?  */
  bool have_use_for;	/* Do we already have a use for it?  */
  unsigned use_id;	/* The identifier in the use if it is the case.  */
};
/* Per-ssa version information (induction variable descriptions, etc.).  */
struct version_info
{
  tree name;		/* The ssa name.  */
  struct iv *iv;	/* Induction variable description.  */
  bool has_nonlin_use;	/* For a loop-level invariant, whether it is used in
			   an expression that is not an induction variable.  */
  bool preserve_biv;	/* For the original biv, whether to preserve it.  */
  unsigned inv_id;	/* Id of an invariant.  */
};
/* Types of uses.  */
enum use_type
{
  USE_NONLINEAR_EXPR,	/* Use in a nonlinear expression.  */
  USE_ADDRESS,		/* Use in an address.  */
  USE_COMPARE		/* Use is a compare.  */
};
/* Cost of a computation.  */
typedef struct
{
  int cost;		/* The runtime cost.  */
  unsigned complexity;	/* The estimate of the complexity of the code for
			   the computation (in no concrete units --
			   complexity field should be larger for more
			   complex expressions and addressing modes).  */
} comp_cost;

static const comp_cost no_cost = {0, 0};
static const comp_cost infinite_cost = {INFTY, INFTY};
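
/* For example (illustrative values only; the real numbers come from the
   target hooks), an address of the form base + 4 * index may have the same
   runtime cost as plain base + index but is given a larger complexity, so
   that when runtime costs tie, the simpler addressing mode wins.  */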
/* The candidate - cost pair.  */
struct cost_pair
{
  struct iv_cand *cand;	/* The candidate.  */
  comp_cost cost;	/* The cost.  */
  bitmap depends_on;	/* The list of invariants that have to be
			   preserved.  */
  tree value;		/* For final value elimination, the expression for
			   the final value of the iv.  For iv elimination,
			   the new bound to compare with.  */
  enum tree_code comp;	/* For iv elimination, the comparison.  */
  int inv_expr_id;      /* Loop invariant expression id.  */
};
/* Use.  */
struct iv_use
{
  unsigned id;		/* The id of the use.  */
  enum use_type type;	/* Type of the use.  */
  struct iv *iv;	/* The induction variable it is based on.  */
  gimple stmt;		/* Statement in that it occurs.  */
  tree *op_p;		/* The place where it occurs.  */
  bitmap related_cands;	/* The set of "related" iv candidates, plus the common
			   important ones.  */

  unsigned n_map_members; /* Number of candidates in the cost_map list.  */
  struct cost_pair *cost_map;
			/* The costs wrto the iv candidates.  */

  struct iv_cand *selected;
			/* The selected candidate.  */
};
/* The position where the iv is computed.  */
enum iv_position
{
  IP_NORMAL,		/* At the end, just before the exit condition.  */
  IP_END,		/* At the end of the latch block.  */
  IP_BEFORE_USE,	/* Immediately before a specific use.  */
  IP_AFTER_USE,		/* Immediately after a specific use.  */
  IP_ORIGINAL		/* The original biv.  */
};
/* The induction variable candidate.  */
struct iv_cand
{
  unsigned id;		/* The number of the candidate.  */
  bool important;	/* Whether this is an "important" candidate, i.e. such
			   that it should be considered by all uses.  */
  ENUM_BITFIELD(iv_position) pos : 8;	/* Where it is computed.  */
  gimple incremented_at;/* For original biv, the statement where it is
			   incremented.  */
  tree var_before;	/* The variable used for it before increment.  */
  tree var_after;	/* The variable used for it after increment.  */
  struct iv *iv;	/* The value of the candidate.  NULL for
			   "pseudocandidate" used to indicate the possibility
			   to replace the final value of an iv by direct
			   computation of the value.  */
  unsigned cost;	/* Cost of the candidate.  */
  unsigned cost_step;	/* Cost of the candidate's increment operation.  */
  struct iv_use *ainc_use; /* For IP_{BEFORE,AFTER}_USE candidates, the place
			      where it is incremented.  */
  bitmap depends_on;	/* The list of invariants that are used in step of the
			   biv.  */
};
/* Loop invariant expression hashtable entry.  */
struct iv_inv_expr_ent
{
  tree expr;
  int id;
  hashval_t hash;
};
/* The data used by the induction variable optimizations.  */

typedef struct iv_use *iv_use_p;
DEF_VEC_P(iv_use_p);
DEF_VEC_ALLOC_P(iv_use_p,heap);

typedef struct iv_cand *iv_cand_p;
DEF_VEC_P(iv_cand_p);
DEF_VEC_ALLOC_P(iv_cand_p,heap);
struct ivopts_data
{
  /* The currently optimized loop.  */
  struct loop *current_loop;

  /* Numbers of iterations for all exits of the current loop.  */
  struct pointer_map_t *niters;

  /* Number of registers used in it.  */
  unsigned regs_used;

  /* The size of version_info array allocated.  */
  unsigned version_info_size;

  /* The array of information for the ssa names.  */
  struct version_info *version_info;

  /* The hashtable of loop invariant expressions created
     by ivopt.  */
  htab_t inv_expr_tab;

  /* Loop invariant expression id.  */
  int inv_expr_id;

  /* The bitmap of indices in version_info whose value was changed.  */
  bitmap relevant;

  /* The uses of induction variables.  */
  VEC(iv_use_p,heap) *iv_uses;

  /* The candidates.  */
  VEC(iv_cand_p,heap) *iv_candidates;

  /* A bitmap of important candidates.  */
  bitmap important_candidates;

  /* The maximum invariant id.  */
  unsigned max_inv_id;

  /* Whether to consider just related and important candidates when replacing a
     use.  */
  bool consider_all_candidates;

  /* Are we optimizing for speed?  */
  bool speed;

  /* Whether the loop body includes any function calls.  */
  bool body_includes_call;

  /* Whether the loop body can only be exited via single exit.  */
  bool loop_single_exit_p;
};
/* An assignment of iv candidates to uses.  */

struct iv_ca
{
  /* The number of uses covered by the assignment.  */
  unsigned upto;

  /* Number of uses that cannot be expressed by the candidates in the set.  */
  unsigned bad_uses;

  /* Candidate assigned to a use, together with the related costs.  */
  struct cost_pair **cand_for_use;

  /* Number of times each candidate is used.  */
  unsigned *n_cand_uses;

  /* The candidates used.  */
  bitmap cands;

  /* The number of candidates in the set.  */
  unsigned n_cands;

  /* Total number of registers needed.  */
  unsigned n_regs;

  /* Total cost of expressing uses.  */
  comp_cost cand_use_cost;

  /* Total cost of candidates.  */
  unsigned cand_cost;

  /* Number of times each invariant is used.  */
  unsigned *n_invariant_uses;

  /* The array holding the number of uses of each loop
     invariant expressions created by ivopt.  */
  unsigned *used_inv_expr;

  /* The number of created loop invariants.  */
  unsigned num_used_inv_expr;

  /* Total cost of the assignment.  */
  comp_cost cost;
};

/* Difference of two iv candidate assignments.  */

struct iv_ca_delta
{
  /* Changed use.  */
  struct iv_use *use_p;

  /* An old assignment (for rollback purposes).  */
  struct cost_pair *old_cp;

  /* A new assignment.  */
  struct cost_pair *new_cp;

  /* Next change in the list.  */
  struct iv_ca_delta *next_change;
};
/* Bound on number of candidates below that all candidates are considered.  */

#define CONSIDER_ALL_CANDIDATES_BOUND \
  ((unsigned) PARAM_VALUE (PARAM_IV_CONSIDER_ALL_CANDIDATES_BOUND))

/* If there are more iv occurrences, we just give up (it is quite unlikely that
   optimizing such a loop would help, and it would take ages).  */

#define MAX_CONSIDERED_USES \
  ((unsigned) PARAM_VALUE (PARAM_IV_MAX_CONSIDERED_USES))

/* If there are at most this number of ivs in the set, try removing unnecessary
   ivs from the set always.  */

#define ALWAYS_PRUNE_CAND_SET_BOUND \
  ((unsigned) PARAM_VALUE (PARAM_IV_ALWAYS_PRUNE_CAND_SET_BOUND))
/* The list of trees for that the decl_rtl field must be reset is stored
   here.  */

static VEC(tree,heap) *decl_rtl_to_reset;

static comp_cost force_expr_to_var_cost (tree, bool);
/* Number of uses recorded in DATA.  */

static inline unsigned
n_iv_uses (struct ivopts_data *data)
{
  return VEC_length (iv_use_p, data->iv_uses);
}
/* Ith use recorded in DATA.  */

static inline struct iv_use *
iv_use (struct ivopts_data *data, unsigned i)
{
  return VEC_index (iv_use_p, data->iv_uses, i);
}
/* Number of candidates recorded in DATA.  */

static inline unsigned
n_iv_cands (struct ivopts_data *data)
{
  return VEC_length (iv_cand_p, data->iv_candidates);
}
/* Ith candidate recorded in DATA.  */

static inline struct iv_cand *
iv_cand (struct ivopts_data *data, unsigned i)
{
  return VEC_index (iv_cand_p, data->iv_candidates, i);
}
/* The single loop exit if it dominates the latch, NULL otherwise.  */

edge
single_dom_exit (struct loop *loop)
{
  edge exit = single_exit (loop);

  if (!exit)
    return NULL;

  if (!just_once_each_iteration_p (loop, exit->src))
    return NULL;

  return exit;
}
/* Dumps information about the induction variable IV to FILE.  */

extern void dump_iv (FILE *, struct iv *);
void
dump_iv (FILE *file, struct iv *iv)
{
  if (iv->ssa_name)
    {
      fprintf (file, "ssa name ");
      print_generic_expr (file, iv->ssa_name, TDF_SLIM);
      fprintf (file, "\n");
    }

  fprintf (file, "  type ");
  print_generic_expr (file, TREE_TYPE (iv->base), TDF_SLIM);
  fprintf (file, "\n");

  if (iv->step)
    {
      fprintf (file, "  base ");
      print_generic_expr (file, iv->base, TDF_SLIM);
      fprintf (file, "\n");

      fprintf (file, "  step ");
      print_generic_expr (file, iv->step, TDF_SLIM);
      fprintf (file, "\n");
    }
  else
    {
      fprintf (file, "  invariant ");
      print_generic_expr (file, iv->base, TDF_SLIM);
      fprintf (file, "\n");
    }

  if (iv->base_object)
    {
      fprintf (file, "  base object ");
      print_generic_expr (file, iv->base_object, TDF_SLIM);
      fprintf (file, "\n");
    }

  if (iv->biv_p)
    fprintf (file, "  is a biv\n");
}
/* Dumps information about the USE to FILE.  */

extern void dump_use (FILE *, struct iv_use *);
void
dump_use (FILE *file, struct iv_use *use)
{
  fprintf (file, "use %d\n", use->id);

  switch (use->type)
    {
    case USE_NONLINEAR_EXPR:
      fprintf (file, "  generic\n");
      break;

    case USE_ADDRESS:
      fprintf (file, "  address\n");
      break;

    case USE_COMPARE:
      fprintf (file, "  compare\n");
      break;

    default:
      gcc_unreachable ();
    }

  fprintf (file, "  in statement ");
  print_gimple_stmt (file, use->stmt, 0, 0);
  fprintf (file, "\n");

  fprintf (file, "  at position ");
  if (use->op_p)
    print_generic_expr (file, *use->op_p, TDF_SLIM);
  fprintf (file, "\n");

  dump_iv (file, use->iv);

  if (use->related_cands)
    {
      fprintf (file, "  related candidates ");
      dump_bitmap (file, use->related_cands);
    }
}
/* Dumps information about the uses to FILE.  */

extern void dump_uses (FILE *, struct ivopts_data *);
void
dump_uses (FILE *file, struct ivopts_data *data)
{
  unsigned i;
  struct iv_use *use;

  for (i = 0; i < n_iv_uses (data); i++)
    {
      use = iv_use (data, i);

      dump_use (file, use);
      fprintf (file, "\n");
    }
}
/* Dumps information about induction variable candidate CAND to FILE.  */

extern void dump_cand (FILE *, struct iv_cand *);
void
dump_cand (FILE *file, struct iv_cand *cand)
{
  struct iv *iv = cand->iv;

  fprintf (file, "candidate %d%s\n",
	   cand->id, cand->important ? " (important)" : "");

  if (cand->depends_on)
    {
      fprintf (file, "  depends on ");
      dump_bitmap (file, cand->depends_on);
    }

  if (!iv)
    {
      fprintf (file, "  final value replacement\n");
      return;
    }

  if (cand->var_before)
    {
      fprintf (file, "  var_before ");
      print_generic_expr (file, cand->var_before, TDF_SLIM);
      fprintf (file, "\n");
    }
  if (cand->var_after)
    {
      fprintf (file, "  var_after ");
      print_generic_expr (file, cand->var_after, TDF_SLIM);
      fprintf (file, "\n");
    }

  switch (cand->pos)
    {
    case IP_NORMAL:
      fprintf (file, "  incremented before exit test\n");
      break;

    case IP_BEFORE_USE:
      fprintf (file, "  incremented before use %d\n", cand->ainc_use->id);
      break;

    case IP_AFTER_USE:
      fprintf (file, "  incremented after use %d\n", cand->ainc_use->id);
      break;

    case IP_END:
      fprintf (file, "  incremented at end\n");
      break;

    case IP_ORIGINAL:
      fprintf (file, "  original biv\n");
      break;
    }

  dump_iv (file, iv);
}
/* Returns the info for ssa version VER.  */

static inline struct version_info *
ver_info (struct ivopts_data *data, unsigned ver)
{
  return data->version_info + ver;
}

/* Returns the info for ssa name NAME.  */

static inline struct version_info *
name_info (struct ivopts_data *data, tree name)
{
  return ver_info (data, SSA_NAME_VERSION (name));
}
/* Returns true if STMT is after the place where the IP_NORMAL ivs will be
   emitted in LOOP.  */

static bool
stmt_after_ip_normal_pos (struct loop *loop, gimple stmt)
{
  basic_block bb = ip_normal_pos (loop), sbb = gimple_bb (stmt);

  gcc_assert (bb);

  if (sbb == loop->latch)
    return true;

  if (sbb != bb)
    return false;

  return stmt == last_stmt (bb);
}
/* Returns true if STMT is after the place where the original induction
   variable CAND is incremented.  If TRUE_IF_EQUAL is set, we return true
   if the positions are identical.  */

static bool
stmt_after_inc_pos (struct iv_cand *cand, gimple stmt, bool true_if_equal)
{
  basic_block cand_bb = gimple_bb (cand->incremented_at);
  basic_block stmt_bb = gimple_bb (stmt);

  if (!dominated_by_p (CDI_DOMINATORS, stmt_bb, cand_bb))
    return false;

  if (stmt_bb != cand_bb)
    return true;

  if (true_if_equal
      && gimple_uid (stmt) == gimple_uid (cand->incremented_at))
    return true;
  return gimple_uid (stmt) > gimple_uid (cand->incremented_at);
}
/* Returns true if STMT is after the place where the induction variable
   CAND is incremented in LOOP.  */

static bool
stmt_after_increment (struct loop *loop, struct iv_cand *cand, gimple stmt)
{
  switch (cand->pos)
    {
    case IP_END:
      return false;

    case IP_NORMAL:
      return stmt_after_ip_normal_pos (loop, stmt);

    case IP_ORIGINAL:
    case IP_AFTER_USE:
      return stmt_after_inc_pos (cand, stmt, false);

    case IP_BEFORE_USE:
      return stmt_after_inc_pos (cand, stmt, true);

    default:
      gcc_unreachable ();
    }
}
/* Returns true if EXP is a ssa name that occurs in an abnormal phi node.  */

static bool
abnormal_ssa_name_p (tree exp)
{
  if (!exp)
    return false;

  if (TREE_CODE (exp) != SSA_NAME)
    return false;

  return SSA_NAME_OCCURS_IN_ABNORMAL_PHI (exp) != 0;
}
/* Returns false if BASE or INDEX contains a ssa name that occurs in an
   abnormal phi node.  Callback for for_each_index.  */

static bool
idx_contains_abnormal_ssa_name_p (tree base, tree *index,
				  void *data ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
    {
      if (abnormal_ssa_name_p (TREE_OPERAND (base, 2)))
	return false;
      if (abnormal_ssa_name_p (TREE_OPERAND (base, 3)))
	return false;
    }

  return !abnormal_ssa_name_p (*index);
}
/* Returns true if EXPR contains a ssa name that occurs in an
   abnormal phi node.  */

bool
contains_abnormal_ssa_name_p (tree expr)
{
  enum tree_code code;
  enum tree_code_class codeclass;

  if (!expr)
    return false;

  code = TREE_CODE (expr);
  codeclass = TREE_CODE_CLASS (code);

  if (code == SSA_NAME)
    return SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr) != 0;

  if (code == INTEGER_CST
      || is_gimple_min_invariant (expr))
    return false;

  if (code == ADDR_EXPR)
    return !for_each_index (&TREE_OPERAND (expr, 0),
			    idx_contains_abnormal_ssa_name_p,
			    NULL);

  if (code == COND_EXPR)
    return contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 0))
      || contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 1))
      || contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 2));

  switch (codeclass)
    {
    case tcc_binary:
    case tcc_comparison:
      if (contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 1)))
	return true;

      /* Fallthru.  */
    case tcc_unary:
      if (contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 0)))
	return true;

      break;

    default:
      gcc_unreachable ();
    }

  return false;
}
/* Returns the structure describing number of iterations determined from
   EXIT of DATA->current_loop, or NULL if something goes wrong.  */

static struct tree_niter_desc *
niter_for_exit (struct ivopts_data *data, edge exit)
{
  struct tree_niter_desc *desc;
  void **slot;

  if (!data->niters)
    {
      data->niters = pointer_map_create ();
      slot = NULL;
    }
  else
    slot = pointer_map_contains (data->niters, exit);

  if (!slot)
    {
      /* Try to determine number of iterations.  We cannot safely work with ssa
         names that appear in phi nodes on abnormal edges, so that we do not
         create overlapping life ranges for them (PR 27283).  */
      desc = XNEW (struct tree_niter_desc);
      if (!number_of_iterations_exit (data->current_loop,
				      exit, desc, true)
	  || contains_abnormal_ssa_name_p (desc->niter))
	{
	  XDELETE (desc);
	  desc = NULL;
	}
      slot = pointer_map_insert (data->niters, exit);
      *slot = desc;
    }
  else
    desc = (struct tree_niter_desc *) *slot;

  return desc;
}
/* Returns the structure describing number of iterations determined from
   single dominating exit of DATA->current_loop, or NULL if something
   goes wrong.  */

static struct tree_niter_desc *
niter_for_single_dom_exit (struct ivopts_data *data)
{
  edge exit = single_dom_exit (data->current_loop);

  if (!exit)
    return NULL;

  return niter_for_exit (data, exit);
}
/* Hash table equality function for expressions.  */

static int
htab_inv_expr_eq (const void *ent1, const void *ent2)
{
  const struct iv_inv_expr_ent *expr1 =
      (const struct iv_inv_expr_ent *)ent1;
  const struct iv_inv_expr_ent *expr2 =
      (const struct iv_inv_expr_ent *)ent2;

  return expr1->hash == expr2->hash
	 && operand_equal_p (expr1->expr, expr2->expr, 0);
}
/* Hash function for loop invariant expressions.  */

static hashval_t
htab_inv_expr_hash (const void *ent)
{
  const struct iv_inv_expr_ent *expr =
      (const struct iv_inv_expr_ent *)ent;
  return expr->hash;
}
/* Initializes data structures used by the iv optimization pass, stored
   in DATA.  */

static void
tree_ssa_iv_optimize_init (struct ivopts_data *data)
{
  data->version_info_size = 2 * num_ssa_names;
  data->version_info = XCNEWVEC (struct version_info, data->version_info_size);
  data->relevant = BITMAP_ALLOC (NULL);
  data->important_candidates = BITMAP_ALLOC (NULL);
  data->max_inv_id = 0;
  data->niters = NULL;
  data->iv_uses = VEC_alloc (iv_use_p, heap, 20);
  data->iv_candidates = VEC_alloc (iv_cand_p, heap, 20);
  data->inv_expr_tab = htab_create (10, htab_inv_expr_hash,
                                    htab_inv_expr_eq, free);
  data->inv_expr_id = 0;
  decl_rtl_to_reset = VEC_alloc (tree, heap, 20);
}
/* Returns a memory object to that EXPR points.  In case we are able to
   determine that it does not point to any such object, NULL is returned.  */

static tree
determine_base_object (tree expr)
{
  enum tree_code code = TREE_CODE (expr);
  tree base, obj;

  /* If this is a pointer casted to any type, we need to determine
     the base object for the pointer; so handle conversions before
     throwing away non-pointer expressions.  */
  if (CONVERT_EXPR_P (expr))
    return determine_base_object (TREE_OPERAND (expr, 0));

  if (!POINTER_TYPE_P (TREE_TYPE (expr)))
    return NULL_TREE;

  switch (code)
    {
    case INTEGER_CST:
      return NULL_TREE;

    case ADDR_EXPR:
      obj = TREE_OPERAND (expr, 0);
      base = get_base_address (obj);

      if (!base)
	return expr;

      if (TREE_CODE (base) == MEM_REF)
	return determine_base_object (TREE_OPERAND (base, 0));

      return fold_convert (ptr_type_node,
			   build_fold_addr_expr (base));

    case POINTER_PLUS_EXPR:
      return determine_base_object (TREE_OPERAND (expr, 0));

    case PLUS_EXPR:
    case MINUS_EXPR:
      /* Pointer addition is done solely using POINTER_PLUS_EXPR.  */
      gcc_unreachable ();

    default:
      return fold_convert (ptr_type_node, expr);
    }
}
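
/* For example, for &a[i] the base object is &a, and since the object is
   propagated through conversions and POINTER_PLUS_EXPR offsets,
   (char *) &a[i] + 4 has the base object &a as well; an integer constant
   cast to a pointer has no base object.  */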
/* Allocates an induction variable with given initial value BASE and step STEP
   for loop LOOP.  */

static struct iv *
alloc_iv (tree base, tree step)
{
  struct iv *iv = XCNEW (struct iv);
  gcc_assert (step != NULL_TREE);

  iv->base = base;
  iv->base_object = determine_base_object (base);
  iv->step = step;
  iv->biv_p = false;
  iv->have_use_for = false;
  iv->use_id = 0;
  iv->ssa_name = NULL_TREE;

  return iv;
}
/* Sets STEP and BASE for induction variable IV.  */

static void
set_iv (struct ivopts_data *data, tree iv, tree base, tree step)
{
  struct version_info *info = name_info (data, iv);

  gcc_assert (!info->iv);

  bitmap_set_bit (data->relevant, SSA_NAME_VERSION (iv));
  info->iv = alloc_iv (base, step);
  info->iv->ssa_name = iv;
}
/* Finds induction variable declaration for VAR.  */

static struct iv *
get_iv (struct ivopts_data *data, tree var)
{
  basic_block bb;
  tree type = TREE_TYPE (var);

  if (!POINTER_TYPE_P (type)
      && !INTEGRAL_TYPE_P (type))
    return NULL;

  if (!name_info (data, var)->iv)
    {
      bb = gimple_bb (SSA_NAME_DEF_STMT (var));

      if (!bb
	  || !flow_bb_inside_loop_p (data->current_loop, bb))
	set_iv (data, var, var, build_int_cst (type, 0));
    }

  return name_info (data, var)->iv;
}
/* Determines the step of a biv defined in PHI.  Returns NULL if PHI does
   not define a simple affine biv with nonzero step.  */

static tree
determine_biv_step (gimple phi)
{
  struct loop *loop = gimple_bb (phi)->loop_father;
  tree name = PHI_RESULT (phi);
  affine_iv iv;

  if (!is_gimple_reg (name))
    return NULL_TREE;

  if (!simple_iv (loop, loop, name, &iv, true))
    return NULL_TREE;

  return integer_zerop (iv.step) ? NULL_TREE : iv.step;
}
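
/* For example, for the loop-header phi

     i_1 = PHI <0 (preheader), i_2 (latch)>

   with i_2 = i_1 + 4 in the loop body, the returned step is 4; a phi whose
   value does not change within the loop yields NULL_TREE.  */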
/* Finds basic ivs.  */

static bool
find_bivs (struct ivopts_data *data)
{
  gimple phi;
  tree step, type, base;
  bool found = false;
  struct loop *loop = data->current_loop;
  gimple_stmt_iterator psi;

  for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = gsi_stmt (psi);

      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (PHI_RESULT (phi)))
	continue;

      step = determine_biv_step (phi);
      if (!step)
	continue;

      base = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
      base = expand_simple_operations (base);
      if (contains_abnormal_ssa_name_p (base)
	  || contains_abnormal_ssa_name_p (step))
	continue;

      type = TREE_TYPE (PHI_RESULT (phi));
      base = fold_convert (type, base);
      if (step)
	{
	  if (POINTER_TYPE_P (type))
	    step = convert_to_ptrofftype (step);
	  else
	    step = fold_convert (type, step);
	}

      set_iv (data, PHI_RESULT (phi), base, step);
      found = true;
    }

  return found;
}
/* Marks basic ivs.  */

static void
mark_bivs (struct ivopts_data *data)
{
  gimple phi;
  tree var;
  struct iv *iv, *incr_iv;
  struct loop *loop = data->current_loop;
  basic_block incr_bb;
  gimple_stmt_iterator psi;

  for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = gsi_stmt (psi);

      iv = get_iv (data, PHI_RESULT (phi));
      if (!iv)
	continue;

      var = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
      incr_iv = get_iv (data, var);
      if (!incr_iv)
	continue;

      /* If the increment is in the subloop, ignore it.  */
      incr_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
      if (incr_bb->loop_father != data->current_loop
	  || (incr_bb->flags & BB_IRREDUCIBLE_LOOP))
	continue;

      iv->biv_p = true;
      incr_iv->biv_p = true;
    }
}
/* Checks whether STMT defines a linear induction variable and stores its
   parameters to IV.  */

static bool
find_givs_in_stmt_scev (struct ivopts_data *data, gimple stmt, affine_iv *iv)
{
  tree lhs;
  struct loop *loop = data->current_loop;

  iv->base = NULL_TREE;
  iv->step = NULL_TREE;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  if (!simple_iv (loop, loop_containing_stmt (stmt), lhs, iv, true))
    return false;
  iv->base = expand_simple_operations (iv->base);

  if (contains_abnormal_ssa_name_p (iv->base)
      || contains_abnormal_ssa_name_p (iv->step))
    return false;

  /* If STMT could throw, then do not consider STMT as defining a GIV.
     While this will suppress optimizations, we can not safely delete this
     GIV and associated statements, even if it appears it is not used.  */
  if (stmt_could_throw_p (stmt))
    return false;

  return true;
}
/* Finds general ivs in statement STMT.  */

static void
find_givs_in_stmt (struct ivopts_data *data, gimple stmt)
{
  affine_iv iv;

  if (!find_givs_in_stmt_scev (data, stmt, &iv))
    return;

  set_iv (data, gimple_assign_lhs (stmt), iv.base, iv.step);
}
/* Finds general ivs in basic block BB.  */

static void
find_givs_in_bb (struct ivopts_data *data, basic_block bb)
{
  gimple_stmt_iterator bsi;

  for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
    find_givs_in_stmt (data, gsi_stmt (bsi));
}
/* Finds general ivs.  */

static void
find_givs (struct ivopts_data *data)
{
  struct loop *loop = data->current_loop;
  basic_block *body = get_loop_body_in_dom_order (loop);
  unsigned i;

  for (i = 0; i < loop->num_nodes; i++)
    find_givs_in_bb (data, body[i]);
  free (body);
}
/* For each ssa name defined in LOOP determines whether it is an induction
   variable and if so, its initial value and step.  */

static bool
find_induction_variables (struct ivopts_data *data)
{
  unsigned i;
  bitmap_iterator bi;

  if (!find_bivs (data))
    return false;

  find_givs (data);
  mark_bivs (data);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      struct tree_niter_desc *niter = niter_for_single_dom_exit (data);

      if (niter)
	{
	  fprintf (dump_file, "  number of iterations ");
	  print_generic_expr (dump_file, niter->niter, TDF_SLIM);
	  if (!integer_zerop (niter->may_be_zero))
	    {
	      fprintf (dump_file, "; zero if ");
	      print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
	    }
	  fprintf (dump_file, "\n\n");
	}

      fprintf (dump_file, "Induction variables:\n\n");

      EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
	{
	  if (ver_info (data, i)->iv)
	    dump_iv (dump_file, ver_info (data, i)->iv);
	}
    }

  return true;
}
/* Records a use of type USE_TYPE at *USE_P in STMT whose value is IV.  */

static struct iv_use *
record_use (struct ivopts_data *data, tree *use_p, struct iv *iv,
	    gimple stmt, enum use_type use_type)
{
  struct iv_use *use = XCNEW (struct iv_use);

  use->id = n_iv_uses (data);
  use->type = use_type;
  use->iv = iv;
  use->stmt = stmt;
  use->op_p = use_p;
  use->related_cands = BITMAP_ALLOC (NULL);

  /* To avoid showing ssa name in the dumps, if it was not reset by the
     caller.  */
  iv->ssa_name = NULL_TREE;

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_use (dump_file, use);

  VEC_safe_push (iv_use_p, heap, data->iv_uses, use);

  return use;
}
/* Checks whether OP is a loop-level invariant and if so, records it.
   NONLINEAR_USE is true if the invariant is used in a way we do not
   handle specially.  */

static void
record_invariant (struct ivopts_data *data, tree op, bool nonlinear_use)
{
  basic_block bb;
  struct version_info *info;

  if (TREE_CODE (op) != SSA_NAME
      || !is_gimple_reg (op))
    return;

  bb = gimple_bb (SSA_NAME_DEF_STMT (op));
  if (bb
      && flow_bb_inside_loop_p (data->current_loop, bb))
    return;

  info = name_info (data, op);
  info->name = op;
  info->has_nonlin_use |= nonlinear_use;
  if (!info->inv_id)
    info->inv_id = ++data->max_inv_id;
  bitmap_set_bit (data->relevant, SSA_NAME_VERSION (op));
}
/* Checks whether the use OP is interesting and if so, records it.  */

static struct iv_use *
find_interesting_uses_op (struct ivopts_data *data, tree op)
{
  struct iv *iv;
  struct iv *civ;
  gimple stmt;
  struct iv_use *use;

  if (TREE_CODE (op) != SSA_NAME)
    return NULL;

  iv = get_iv (data, op);
  if (!iv)
    return NULL;

  if (iv->have_use_for)
    {
      use = iv_use (data, iv->use_id);

      gcc_assert (use->type == USE_NONLINEAR_EXPR);
      return use;
    }

  if (integer_zerop (iv->step))
    {
      record_invariant (data, op, true);
      return NULL;
    }
  iv->have_use_for = true;

  civ = XNEW (struct iv);
  *civ = *iv;

  stmt = SSA_NAME_DEF_STMT (op);
  gcc_assert (gimple_code (stmt) == GIMPLE_PHI
	      || is_gimple_assign (stmt));

  use = record_use (data, NULL, civ, stmt, USE_NONLINEAR_EXPR);
  iv->use_id = use->id;

  return use;
}
/* Given a condition in statement STMT, checks whether it is a compare
   of an induction variable and an invariant.  If this is the case,
   CONTROL_VAR is set to location of the iv, BOUND to the location of
   the invariant, IV_VAR and IV_BOUND are set to the corresponding
   induction variable descriptions, and true is returned.  If this is not
   the case, CONTROL_VAR and BOUND are set to the arguments of the
   condition and false is returned.  */

static bool
extract_cond_operands (struct ivopts_data *data, gimple stmt,
		       tree **control_var, tree **bound,
		       struct iv **iv_var, struct iv **iv_bound)
{
  /* The objects returned when COND has constant operands.  */
  static struct iv const_iv;
  static tree zero;
  tree *op0 = &zero, *op1 = &zero, *tmp_op;
  struct iv *iv0 = &const_iv, *iv1 = &const_iv, *tmp_iv;
  bool ret = false;

  if (gimple_code (stmt) == GIMPLE_COND)
    {
      op0 = gimple_cond_lhs_ptr (stmt);
      op1 = gimple_cond_rhs_ptr (stmt);
    }
  else
    {
      op0 = gimple_assign_rhs1_ptr (stmt);
      op1 = gimple_assign_rhs2_ptr (stmt);
    }

  zero = integer_zero_node;
  const_iv.step = integer_zero_node;

  if (TREE_CODE (*op0) == SSA_NAME)
    iv0 = get_iv (data, *op0);
  if (TREE_CODE (*op1) == SSA_NAME)
    iv1 = get_iv (data, *op1);

  /* Exactly one of the compared values must be an iv, and the other one must
     be an invariant.  */
  if (!iv0 || !iv1)
    goto end;

  if (integer_zerop (iv0->step))
    {
      /* Control variable may be on the other side.  */
      tmp_op = op0; op0 = op1; op1 = tmp_op;
      tmp_iv = iv0; iv0 = iv1; iv1 = tmp_iv;
    }
  ret = !integer_zerop (iv0->step) && integer_zerop (iv1->step);

end:
  if (control_var)
    *control_var = op0;
  if (iv_var)
    *iv_var = iv0;
  if (bound)
    *bound = op1;
  if (iv_bound)
    *iv_bound = iv1;

  return ret;
}
/* Checks whether the condition in STMT is interesting and if so,
   records it.  */

static void
find_interesting_uses_cond (struct ivopts_data *data, gimple stmt)
{
  tree *var_p, *bound_p;
  struct iv *var_iv, *civ;

  if (!extract_cond_operands (data, stmt, &var_p, &bound_p, &var_iv, NULL))
    {
      find_interesting_uses_op (data, *var_p);
      find_interesting_uses_op (data, *bound_p);
      return;
    }

  civ = XNEW (struct iv);
  *civ = *var_iv;
  record_use (data, NULL, civ, stmt, USE_COMPARE);
}
/* Returns true if expression EXPR is obviously invariant in LOOP,
   i.e. if all its operands are defined outside of the LOOP.  LOOP
   should not be the function body.  */

bool
expr_invariant_in_loop_p (struct loop *loop, tree expr)
{
  basic_block def_bb;
  unsigned i, len;

  gcc_assert (loop_depth (loop) > 0);

  if (is_gimple_min_invariant (expr))
    return true;

  if (TREE_CODE (expr) == SSA_NAME)
    {
      def_bb = gimple_bb (SSA_NAME_DEF_STMT (expr));
      if (def_bb
	  && flow_bb_inside_loop_p (loop, def_bb))
	return false;

      return true;
    }

  if (!EXPR_P (expr))
    return false;

  len = TREE_OPERAND_LENGTH (expr);
  for (i = 0; i < len; i++)
    if (TREE_OPERAND (expr, i)
	&& !expr_invariant_in_loop_p (loop, TREE_OPERAND (expr, i)))
      return false;

  return true;
}
/* Returns true if statement STMT is obviously invariant in LOOP,
   i.e. if all its operands on the RHS are defined outside of the LOOP.
   LOOP should not be the function body.  */

bool
stmt_invariant_in_loop_p (struct loop *loop, gimple stmt)
{
  unsigned i;
  tree lhs = NULL_TREE;

  gcc_assert (loop_depth (loop) > 0);

  lhs = gimple_get_lhs (stmt);
  for (i = 0; i < gimple_num_ops (stmt); i++)
    {
      tree op = gimple_op (stmt, i);
      if (op != lhs && !expr_invariant_in_loop_p (loop, op))
	return false;
    }

  return true;
}
/* Cumulates the steps of indices into DATA and replaces their values with the
   initial ones.  Returns false when the value of the index cannot be determined.
   Callback for for_each_index.  */

struct ifs_ivopts_data
{
  struct ivopts_data *ivopts_data;
  gimple stmt;
  tree step;
};

static bool
idx_find_step (tree base, tree *idx, void *data)
{
  struct ifs_ivopts_data *dta = (struct ifs_ivopts_data *) data;
  struct iv *iv;
  tree step, iv_base, iv_step, lbound, off;
  struct loop *loop = dta->ivopts_data->current_loop;

  /* If base is a component ref, require that the offset of the reference
     be invariant.  */
  if (TREE_CODE (base) == COMPONENT_REF)
    {
      off = component_ref_field_offset (base);
      return expr_invariant_in_loop_p (loop, off);
    }

  /* If base is array, first check whether we will be able to move the
     reference out of the loop (in order to take its address in strength
     reduction).  In order for this to work we need both lower bound
     and step to be loop invariants.  */
  if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
    {
      /* Moreover, for a range, the size needs to be invariant as well.  */
      if (TREE_CODE (base) == ARRAY_RANGE_REF
	  && !expr_invariant_in_loop_p (loop, TYPE_SIZE (TREE_TYPE (base))))
	return false;

      step = array_ref_element_size (base);
      lbound = array_ref_low_bound (base);

      if (!expr_invariant_in_loop_p (loop, step)
	  || !expr_invariant_in_loop_p (loop, lbound))
	return false;
    }

  if (TREE_CODE (*idx) != SSA_NAME)
    return true;

  iv = get_iv (dta->ivopts_data, *idx);
  if (!iv)
    return false;

  /* XXX  We produce for a base of *D42 with iv->base being &x[0]
	  *&x[0], which is not folded and does not trigger the
	  ARRAY_REF path below.  */
  *idx = iv->base;

  if (integer_zerop (iv->step))
    return true;

  if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
    {
      step = array_ref_element_size (base);

      /* We only handle addresses whose step is an integer constant.  */
      if (TREE_CODE (step) != INTEGER_CST)
	return false;
    }
  else
    /* The step for pointer arithmetics already is 1 byte.  */
    step = size_one_node;

  iv_base = iv->base;
  iv_step = iv->step;
  if (!convert_affine_scev (dta->ivopts_data->current_loop,
			    sizetype, &iv_base, &iv_step, dta->stmt,
			    true))
    {
      /* The index might wrap.  */
      return false;
    }

  step = fold_build2 (MULT_EXPR, sizetype, step, iv_step);
  dta->step = fold_build2 (PLUS_EXPR, sizetype, dta->step, step);

  return true;
}
/* Records use in index IDX.  Callback for for_each_index.  Ivopts data
   object is passed to it in DATA.  */

static bool
idx_record_use (tree base, tree *idx,
		void *vdata)
{
  struct ivopts_data *data = (struct ivopts_data *) vdata;
  find_interesting_uses_op (data, *idx);
  if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
    {
      find_interesting_uses_op (data, array_ref_element_size (base));
      find_interesting_uses_op (data, array_ref_low_bound (base));
    }
  return true;
}
/* If we can prove that TOP = cst * BOT for some constant cst,
   store cst to MUL and return true.  Otherwise return false.
   The returned value is always sign-extended, regardless of the
   signedness of TOP and BOT.  */

static bool
constant_multiple_of (tree top, tree bot, double_int *mul)
{
  tree mby;
  enum tree_code code;
  double_int res, p0, p1;
  unsigned precision = TYPE_PRECISION (TREE_TYPE (top));

  STRIP_NOPS (top);
  STRIP_NOPS (bot);

  if (operand_equal_p (top, bot, 0))
    {
      *mul = double_int_one;
      return true;
    }

  code = TREE_CODE (top);
  switch (code)
    {
    case MULT_EXPR:
      mby = TREE_OPERAND (top, 1);
      if (TREE_CODE (mby) != INTEGER_CST)
	return false;

      if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &res))
	return false;

      *mul = double_int_sext (double_int_mul (res, tree_to_double_int (mby)),
			      precision);
      return true;

    case PLUS_EXPR:
    case MINUS_EXPR:
      if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &p0)
	  || !constant_multiple_of (TREE_OPERAND (top, 1), bot, &p1))
	return false;

      if (code == MINUS_EXPR)
	p1 = double_int_neg (p1);
      *mul = double_int_sext (double_int_add (p0, p1), precision);
      return true;

    case INTEGER_CST:
      if (TREE_CODE (bot) != INTEGER_CST)
	return false;

      p0 = double_int_sext (tree_to_double_int (top), precision);
      p1 = double_int_sext (tree_to_double_int (bot), precision);
      if (double_int_zero_p (p1))
	return false;
      *mul = double_int_sext (double_int_sdivmod (p0, p1, FLOOR_DIV_EXPR, &res),
			      precision);
      return double_int_zero_p (res);

    default:
      return false;
    }
}
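
/* For example, with TOP = i_1 * 12 and BOT = i_1 the function stores 12 to
   *MUL; with TOP = 20 and BOT = 5 it stores 4; TOP = 21 and BOT = 5 fails
   because the division leaves a nonzero remainder.  */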
/* Returns true if memory reference REF with step STEP may be unaligned.  */

static bool
may_be_unaligned_p (tree ref, tree step)
{
  tree base;
  tree base_type;
  HOST_WIDE_INT bitsize;
  HOST_WIDE_INT bitpos;
  tree toffset;
  enum machine_mode mode;
  int unsignedp, volatilep;
  unsigned base_align;

  /* TARGET_MEM_REFs are translated directly to valid MEMs on the target,
     thus they are not misaligned.  */
  if (TREE_CODE (ref) == TARGET_MEM_REF)
    return false;

  /* The test below is basically copy of what expr.c:normal_inner_ref
     does to check whether the object must be loaded by parts when
     STRICT_ALIGNMENT is true.  */
  base = get_inner_reference (ref, &bitsize, &bitpos, &toffset, &mode,
			      &unsignedp, &volatilep, true);
  base_type = TREE_TYPE (base);
  base_align = get_object_alignment (base);
  base_align = MAX (base_align, TYPE_ALIGN (base_type));

  if (mode != BLKmode)
    {
      unsigned mode_align = GET_MODE_ALIGNMENT (mode);

      if (base_align < mode_align
	  || (bitpos % mode_align) != 0
	  || (bitpos % BITS_PER_UNIT) != 0)
	return true;

      if (toffset
	  && (highest_pow2_factor (toffset) * BITS_PER_UNIT) < mode_align)
	return true;

      if ((highest_pow2_factor (step) * BITS_PER_UNIT) < mode_align)
	return true;
    }

  return false;
}
/* Return true if EXPR may be non-addressable.  */

bool
may_be_nonaddressable_p (tree expr)
{
  switch (TREE_CODE (expr))
    {
    case TARGET_MEM_REF:
      /* TARGET_MEM_REFs are translated directly to valid MEMs on the
	 target, thus they are always addressable.  */
      return false;

    case COMPONENT_REF:
      return DECL_NONADDRESSABLE_P (TREE_OPERAND (expr, 1))
	     || may_be_nonaddressable_p (TREE_OPERAND (expr, 0));

    case VIEW_CONVERT_EXPR:
      /* This kind of view-conversions may wrap non-addressable objects
	 and make them look addressable.  After some processing the
	 non-addressability may be uncovered again, causing ADDR_EXPRs
	 of inappropriate objects to be built.  */
      if (is_gimple_reg (TREE_OPERAND (expr, 0))
	  || !is_gimple_addressable (TREE_OPERAND (expr, 0)))
	return true;

      /* ... fall through ... */

    case ARRAY_REF:
    case ARRAY_RANGE_REF:
      return may_be_nonaddressable_p (TREE_OPERAND (expr, 0));

    CASE_CONVERT:
      return true;

    default:
      break;
    }

  return false;
}
/* Finds addresses in *OP_P inside STMT.  */

static void
find_interesting_uses_address (struct ivopts_data *data, gimple stmt, tree *op_p)
{
  tree base = *op_p, step = size_zero_node;
  struct iv *civ;
  struct ifs_ivopts_data ifs_ivopts_data;

  /* Do not play with volatile memory references.  A bit too conservative,
     perhaps, but safe.  */
  if (gimple_has_volatile_ops (stmt))
    goto fail;

  /* Ignore bitfields for now.  Not really something terribly complicated
     to handle.  TODO.  */
  if (TREE_CODE (base) == BIT_FIELD_REF)
    goto fail;

  base = unshare_expr (base);

  if (TREE_CODE (base) == TARGET_MEM_REF)
    {
      tree type = build_pointer_type (TREE_TYPE (base));
      tree astep;

      if (TMR_BASE (base)
	  && TREE_CODE (TMR_BASE (base)) == SSA_NAME)
	{
	  civ = get_iv (data, TMR_BASE (base));
	  if (!civ)
	    goto fail;

	  TMR_BASE (base) = civ->base;
	  step = civ->step;
	}
      if (TMR_INDEX2 (base)
	  && TREE_CODE (TMR_INDEX2 (base)) == SSA_NAME)
	{
	  civ = get_iv (data, TMR_INDEX2 (base));
	  if (!civ)
	    goto fail;

	  TMR_INDEX2 (base) = civ->base;
	  step = civ->step;
	}
      if (TMR_INDEX (base)
	  && TREE_CODE (TMR_INDEX (base)) == SSA_NAME)
	{
	  civ = get_iv (data, TMR_INDEX (base));
	  if (!civ)
	    goto fail;

	  TMR_INDEX (base) = civ->base;
	  astep = civ->step;

	  if (astep)
	    {
	      if (TMR_STEP (base))
		astep = fold_build2 (MULT_EXPR, type, TMR_STEP (base), astep);

	      step = fold_build2 (PLUS_EXPR, type, step, astep);
	    }
	}

      if (integer_zerop (step))
	goto fail;
      base = tree_mem_ref_addr (type, base);
    }
  else
    {
      ifs_ivopts_data.ivopts_data = data;
      ifs_ivopts_data.stmt = stmt;
      ifs_ivopts_data.step = size_zero_node;
      if (!for_each_index (&base, idx_find_step, &ifs_ivopts_data)
	  || integer_zerop (ifs_ivopts_data.step))
	goto fail;
      step = ifs_ivopts_data.step;

      /* Check that the base expression is addressable.  This needs
	 to be done after substituting bases of IVs into it.  */
      if (may_be_nonaddressable_p (base))
	goto fail;

      /* Moreover, on strict alignment platforms, check that it is
	 sufficiently aligned.  */
      if (STRICT_ALIGNMENT && may_be_unaligned_p (base, step))
	goto fail;

      base = build_fold_addr_expr (base);

      /* Substituting bases of IVs into the base expression might
	 have caused folding opportunities.  */
      if (TREE_CODE (base) == ADDR_EXPR)
	{
	  tree *ref = &TREE_OPERAND (base, 0);
	  while (handled_component_p (*ref))
	    ref = &TREE_OPERAND (*ref, 0);
	  if (TREE_CODE (*ref) == MEM_REF)
	    {
	      tree tem = fold_binary (MEM_REF, TREE_TYPE (*ref),
				      TREE_OPERAND (*ref, 0),
				      TREE_OPERAND (*ref, 1));
	      if (tem)
		*ref = tem;
	    }
	}
    }

  civ = alloc_iv (base, step);
  record_use (data, op_p, civ, stmt, USE_ADDRESS);
  return;

fail:
  for_each_index (op_p, idx_record_use, data);
}
/* Finds and records invariants used in STMT.  */

static void
find_invariants_stmt (struct ivopts_data *data, gimple stmt)
{
  ssa_op_iter iter;
  use_operand_p use_p;
  tree op;

  FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
    {
      op = USE_FROM_PTR (use_p);
      record_invariant (data, op, false);
    }
}
/* Finds interesting uses of induction variables in the statement STMT.  */

static void
find_interesting_uses_stmt (struct ivopts_data *data, gimple stmt)
{
  struct iv *iv;
  tree op, *lhs, *rhs;
  ssa_op_iter iter;
  use_operand_p use_p;
  enum tree_code code;

  find_invariants_stmt (data, stmt);

  if (gimple_code (stmt) == GIMPLE_COND)
    {
      find_interesting_uses_cond (data, stmt);
      return;
    }

  if (is_gimple_assign (stmt))
    {
      lhs = gimple_assign_lhs_ptr (stmt);
      rhs = gimple_assign_rhs1_ptr (stmt);

      if (TREE_CODE (*lhs) == SSA_NAME)
	{
	  /* If the statement defines an induction variable, the uses are not
	     interesting by themselves.  */

	  iv = get_iv (data, *lhs);

	  if (iv && !integer_zerop (iv->step))
	    return;
	}

      code = gimple_assign_rhs_code (stmt);
      if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
	  && (REFERENCE_CLASS_P (*rhs)
	      || is_gimple_val (*rhs)))
	{
	  if (REFERENCE_CLASS_P (*rhs))
	    find_interesting_uses_address (data, stmt, rhs);
	  else
	    find_interesting_uses_op (data, *rhs);

	  if (REFERENCE_CLASS_P (*lhs))
	    find_interesting_uses_address (data, stmt, lhs);
	  return;
	}
      else if (TREE_CODE_CLASS (code) == tcc_comparison)
	{
	  find_interesting_uses_cond (data, stmt);
	  return;
	}

      /* TODO -- we should also handle address uses of type

	 memory = call (whatever);

	 and

	 call (memory).  */
    }

  if (gimple_code (stmt) == GIMPLE_PHI
      && gimple_bb (stmt) == data->current_loop->header)
    {
      iv = get_iv (data, PHI_RESULT (stmt));

      if (iv && !integer_zerop (iv->step))
	return;
    }

  FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
    {
      op = USE_FROM_PTR (use_p);

      if (TREE_CODE (op) != SSA_NAME)
	continue;

      iv = get_iv (data, op);
      if (!iv)
	continue;

      find_interesting_uses_op (data, op);
    }
}
/* Finds interesting uses of induction variables outside of loops
   on loop exit edge EXIT.  */

static void
find_interesting_uses_outside (struct ivopts_data *data, edge exit)
{
  gimple phi;
  gimple_stmt_iterator psi;
  tree def;

  for (psi = gsi_start_phis (exit->dest); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = gsi_stmt (psi);
      def = PHI_ARG_DEF_FROM_EDGE (phi, exit);
      if (is_gimple_reg (def))
	find_interesting_uses_op (data, def);
    }
}
/* Finds uses of the induction variables that are interesting.  */

static void
find_interesting_uses (struct ivopts_data *data)
{
  basic_block bb;
  gimple_stmt_iterator bsi;
  basic_block *body = get_loop_body (data->current_loop);
  unsigned i;
  struct version_info *info;
  edge e;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Uses:\n\n");

  for (i = 0; i < data->current_loop->num_nodes; i++)
    {
      edge_iterator ei;
      bb = body[i];

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->dest != EXIT_BLOCK_PTR
	    && !flow_bb_inside_loop_p (data->current_loop, e->dest))
	  find_interesting_uses_outside (data, e);

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	find_interesting_uses_stmt (data, gsi_stmt (bsi));
      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	if (!is_gimple_debug (gsi_stmt (bsi)))
	  find_interesting_uses_stmt (data, gsi_stmt (bsi));
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      bitmap_iterator bi;

      fprintf (dump_file, "\n");

      EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
	{
	  info = ver_info (data, i);
	  if (info->inv_id)
	    {
	      fprintf (dump_file, "  ");
	      print_generic_expr (dump_file, info->name, TDF_SLIM);
	      fprintf (dump_file, " is invariant (%d)%s\n",
		       info->inv_id, info->has_nonlin_use ? "" : ", eliminable");
	    }
	}

      fprintf (dump_file, "\n");
    }

  free (body);
}
/* Strips constant offsets from EXPR and stores them to OFFSET.  If INSIDE_ADDR
   is true, assume we are inside an address.  If TOP_COMPREF is true, assume
   we are at the top-level of the processed address.  */

static tree
strip_offset_1 (tree expr, bool inside_addr, bool top_compref,
		unsigned HOST_WIDE_INT *offset)
{
  tree op0 = NULL_TREE, op1 = NULL_TREE, tmp, step;
  enum tree_code code;
  tree type, orig_type = TREE_TYPE (expr);
  unsigned HOST_WIDE_INT off0, off1, st;
  tree orig_expr = expr;

  STRIP_NOPS (expr);

  type = TREE_TYPE (expr);
  code = TREE_CODE (expr);
  *offset = 0;

  switch (code)
    {
    case INTEGER_CST:
      if (!cst_and_fits_in_hwi (expr)
	  || integer_zerop (expr))
	return orig_expr;

      *offset = int_cst_value (expr);
      return build_int_cst (orig_type, 0);

    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);

      op0 = strip_offset_1 (op0, false, false, &off0);
      op1 = strip_offset_1 (op1, false, false, &off1);

      *offset = (code == MINUS_EXPR ? off0 - off1 : off0 + off1);
      if (op0 == TREE_OPERAND (expr, 0)
	  && op1 == TREE_OPERAND (expr, 1))
	return orig_expr;

      if (integer_zerop (op1))
	expr = op0;
      else if (integer_zerop (op0))
	{
	  if (code == MINUS_EXPR)
	    expr = fold_build1 (NEGATE_EXPR, type, op1);
	  else
	    expr = op1;
	}
      else
	expr = fold_build2 (code, type, op0, op1);

      return fold_convert (orig_type, expr);

    case MULT_EXPR:
      op1 = TREE_OPERAND (expr, 1);
      if (!cst_and_fits_in_hwi (op1))
	return orig_expr;

      op0 = TREE_OPERAND (expr, 0);
      op0 = strip_offset_1 (op0, false, false, &off0);
      if (op0 == TREE_OPERAND (expr, 0))
	return orig_expr;

      *offset = off0 * int_cst_value (op1);
      if (integer_zerop (op0))
	expr = op0;
      else
	expr = fold_build2 (MULT_EXPR, type, op0, op1);

      return fold_convert (orig_type, expr);

    case ARRAY_REF:
    case ARRAY_RANGE_REF:
      if (!inside_addr)
	return orig_expr;

      step = array_ref_element_size (expr);
      if (!cst_and_fits_in_hwi (step))
	break;

      st = int_cst_value (step);
      op1 = TREE_OPERAND (expr, 1);
      op1 = strip_offset_1 (op1, false, false, &off1);
      *offset = off1 * st;

      if (top_compref
	  && integer_zerop (op1))
	{
	  /* Strip the component reference completely.  */
	  op0 = TREE_OPERAND (expr, 0);
	  op0 = strip_offset_1 (op0, inside_addr, top_compref, &off0);
	  *offset += off0;
	  return op0;
	}
      break;

    case COMPONENT_REF:
      if (!inside_addr)
	return orig_expr;

      tmp = component_ref_field_offset (expr);
      if (top_compref
	  && cst_and_fits_in_hwi (tmp))
	{
	  /* Strip the component reference completely.  */
	  op0 = TREE_OPERAND (expr, 0);
	  op0 = strip_offset_1 (op0, inside_addr, top_compref, &off0);
	  *offset = off0 + int_cst_value (tmp);
	  return op0;
	}
      break;

    case ADDR_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op0 = strip_offset_1 (op0, true, true, &off0);
      *offset += off0;

      if (op0 == TREE_OPERAND (expr, 0))
	return orig_expr;

      expr = build_fold_addr_expr (op0);
      return fold_convert (orig_type, expr);

    case MEM_REF:
      /* ??? Offset operand?  */
      inside_addr = false;
      break;

    default:
      return orig_expr;
    }

  /* Default handling of expressions for that we want to recurse into
     the first operand.  */
  op0 = TREE_OPERAND (expr, 0);
  op0 = strip_offset_1 (op0, inside_addr, false, &off0);
  *offset += off0;

  if (op0 == TREE_OPERAND (expr, 0)
      && (!op1 || op1 == TREE_OPERAND (expr, 1)))
    return orig_expr;

  expr = copy_node (expr);
  TREE_OPERAND (expr, 0) = op0;
  if (op1)
    TREE_OPERAND (expr, 1) = op1;

  /* Inside address, we might strip the top level component references,
     thus changing type of the expression.  Handling of ADDR_EXPR
     will fix that.  */
  expr = fold_convert (orig_type, expr);

  return expr;
}
/* Strips constant offsets from EXPR and stores them to OFFSET.  */

static tree
strip_offset (tree expr, unsigned HOST_WIDE_INT *offset)
{
  return strip_offset_1 (expr, false, false, offset);
}
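
/* For example, stripping p_1 + 24 yields p_1 with *OFFSET set to 24, and
   stripping &a[5] yields the address of a with *OFFSET set to
   5 * sizeof (a[0]).  */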
/* Returns variant of TYPE that can be used as base for different uses.
   We return unsigned type with the same precision, which avoids problems
   with overflows.  */

static tree
generic_type_for (tree type)
{
  if (POINTER_TYPE_P (type))
    return unsigned_type_for (type);

  if (TYPE_UNSIGNED (type))
    return type;

  return unsigned_type_for (type);
}
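
/* For example, int maps to unsigned int, unsigned int maps to itself, and
   a pointer type maps to the unsigned integer type of the same width, so
   arithmetic on candidates never invokes signed-overflow undefined
   behavior.  */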
/* Records invariants in *EXPR_P.  Callback for walk_tree.  DATA contains
   the bitmap to that we should store it.  */

static struct ivopts_data *fd_ivopts_data;
static tree
find_depends (tree *expr_p, int *ws ATTRIBUTE_UNUSED, void *data)
{
  bitmap *depends_on = (bitmap *) data;
  struct version_info *info;

  if (TREE_CODE (*expr_p) != SSA_NAME)
    return NULL_TREE;
  info = name_info (fd_ivopts_data, *expr_p);

  if (!info->inv_id || info->has_nonlin_use)
    return NULL_TREE;

  if (!*depends_on)
    *depends_on = BITMAP_ALLOC (NULL);
  bitmap_set_bit (*depends_on, info->inv_id);

  return NULL_TREE;
}
/* Adds a candidate BASE + STEP * i.  Important field is set to IMPORTANT and
   position to POS.  If USE is not NULL, the candidate is set as related to
   it.  If both BASE and STEP are NULL, we add a pseudocandidate for the
   replacement of the final value of the iv by a direct computation.  */

static struct iv_cand *
add_candidate_1 (struct ivopts_data *data,
		 tree base, tree step, bool important, enum iv_position pos,
		 struct iv_use *use, gimple incremented_at)
{
  unsigned i;
  struct iv_cand *cand = NULL;
  tree type, orig_type;

  /* For non-original variables, make sure their values are computed in a type
     that does not invoke undefined behavior on overflows (since in general,
     we cannot prove that these induction variables are non-wrapping).  */
  if (pos != IP_ORIGINAL)
    {
      orig_type = TREE_TYPE (base);
      type = generic_type_for (orig_type);
      if (type != orig_type)
	{
	  base = fold_convert (type, base);
	  step = fold_convert (type, step);
	}
    }

  for (i = 0; i < n_iv_cands (data); i++)
    {
      cand = iv_cand (data, i);

      if (cand->pos != pos)
	continue;

      if (cand->incremented_at != incremented_at
	  || ((pos == IP_AFTER_USE || pos == IP_BEFORE_USE)
	      && cand->ainc_use != use))
	continue;

      if (!cand->iv)
	{
	  if (!base && !step)
	    break;

	  continue;
	}

      if (!base && !step)
	continue;

      if (operand_equal_p (base, cand->iv->base, 0)
	  && operand_equal_p (step, cand->iv->step, 0)
	  && (TYPE_PRECISION (TREE_TYPE (base))
	      == TYPE_PRECISION (TREE_TYPE (cand->iv->base))))
	break;
    }

  if (i == n_iv_cands (data))
    {
      cand = XCNEW (struct iv_cand);
      cand->id = i;

      if (!base && !step)
	cand->iv = NULL;
      else
	cand->iv = alloc_iv (base, step);

      cand->pos = pos;
      if (pos != IP_ORIGINAL && cand->iv)
	{
	  cand->var_before = create_tmp_var_raw (TREE_TYPE (base), "ivtmp");
	  cand->var_after = cand->var_before;
	}
      cand->important = important;
      cand->incremented_at = incremented_at;
      VEC_safe_push (iv_cand_p, heap, data->iv_candidates, cand);

      if (step
	  && TREE_CODE (step) != INTEGER_CST)
	{
	  fd_ivopts_data = data;
	  walk_tree (&step, find_depends, &cand->depends_on, NULL);
	}

      if (pos == IP_AFTER_USE || pos == IP_BEFORE_USE)
	cand->ainc_use = use;
      else
	cand->ainc_use = NULL;

      if (dump_file && (dump_flags & TDF_DETAILS))
	dump_cand (dump_file, cand);
    }

  if (important && !cand->important)
    {
      cand->important = true;
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Candidate %d is important\n", cand->id);
    }

  if (use)
    {
      bitmap_set_bit (use->related_cands, i);
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Candidate %d is related to use %d\n",
		 cand->id, use->id);
    }

  return cand;
}
/* Returns true if incrementing the induction variable at the end of the LOOP
   is allowed.

   The purpose is to avoid splitting latch edge with a biv increment, thus
   creating a jump, possibly confusing other optimization passes and leaving
   less freedom to scheduler.  So we allow IP_END_POS only if IP_NORMAL_POS
   is not available (so we do not have a better alternative), or if the latch
   edge is already nonempty.  */

static bool
allow_ip_end_pos_p (struct loop *loop)
{
  if (!ip_normal_pos (loop))
    return true;

  if (!empty_block_p (ip_end_pos (loop)))
    return true;

  return false;
}
/* If possible, adds autoincrement candidates BASE + STEP * i based on use USE.
   Important field is set to IMPORTANT.  */

static void
add_autoinc_candidates (struct ivopts_data *data, tree base, tree step,
			bool important, struct iv_use *use)
{
  basic_block use_bb = gimple_bb (use->stmt);
  enum machine_mode mem_mode;
  unsigned HOST_WIDE_INT cstepi;

  /* If we insert the increment in any position other than the standard
     ones, we must ensure that it is incremented once per iteration.
     It must not be in an inner nested loop, or one side of an if
     statement.  */
  if (use_bb->loop_father != data->current_loop
      || !dominated_by_p (CDI_DOMINATORS, data->current_loop->latch, use_bb)
      || stmt_could_throw_p (use->stmt)
      || !cst_and_fits_in_hwi (step))
    return;

  cstepi = int_cst_value (step);

  mem_mode = TYPE_MODE (TREE_TYPE (*use->op_p));
  if (((USE_LOAD_PRE_INCREMENT (mem_mode)
	|| USE_STORE_PRE_INCREMENT (mem_mode))
       && GET_MODE_SIZE (mem_mode) == cstepi)
      || ((USE_LOAD_PRE_DECREMENT (mem_mode)
	   || USE_STORE_PRE_DECREMENT (mem_mode))
	  && GET_MODE_SIZE (mem_mode) == -cstepi))
    {
      enum tree_code code = MINUS_EXPR;
      tree new_base;
      tree new_step = step;

      if (POINTER_TYPE_P (TREE_TYPE (base)))
	{
	  new_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
	  code = POINTER_PLUS_EXPR;
	}
      else
	new_step = fold_convert (TREE_TYPE (base), new_step);
      new_base = fold_build2 (code, TREE_TYPE (base), base, new_step);
      add_candidate_1 (data, new_base, step, important, IP_BEFORE_USE, use,
		       use->stmt);
    }
  if (((USE_LOAD_POST_INCREMENT (mem_mode)
	|| USE_STORE_POST_INCREMENT (mem_mode))
       && GET_MODE_SIZE (mem_mode) == cstepi)
      || ((USE_LOAD_POST_DECREMENT (mem_mode)
	   || USE_STORE_POST_DECREMENT (mem_mode))
	  && GET_MODE_SIZE (mem_mode) == -cstepi))
    {
      add_candidate_1 (data, base, step, important, IP_AFTER_USE, use,
		       use->stmt);
    }
}
/* Adds a candidate BASE + STEP * i.  Important field is set to IMPORTANT and
   position to POS.  If USE is not NULL, the candidate is set as related to
   it.  The candidate computation is scheduled on all available positions.  */

static void
add_candidate (struct ivopts_data *data,
	       tree base, tree step, bool important, struct iv_use *use)
{
  if (ip_normal_pos (data->current_loop))
    add_candidate_1 (data, base, step, important, IP_NORMAL, use, NULL);
  if (ip_end_pos (data->current_loop)
      && allow_ip_end_pos_p (data->current_loop))
    add_candidate_1 (data, base, step, important, IP_END, use, NULL);

  if (use != NULL && use->type == USE_ADDRESS)
    add_autoinc_candidates (data, base, step, important, use);
}
/* Adds standard iv candidates.  */

static void
add_standard_iv_candidates (struct ivopts_data *data)
{
  add_candidate (data, integer_zero_node, integer_one_node, true, NULL);

  /* The same for a double-integer type if it is still fast enough.  */
  if (TYPE_PRECISION
	(long_integer_type_node) > TYPE_PRECISION (integer_type_node)
      && TYPE_PRECISION (long_integer_type_node) <= BITS_PER_WORD)
    add_candidate (data, build_int_cst (long_integer_type_node, 0),
		   build_int_cst (long_integer_type_node, 1), true, NULL);

  /* The same for a double-integer type if it is still fast enough.  */
  if (TYPE_PRECISION
	(long_long_integer_type_node) > TYPE_PRECISION (long_integer_type_node)
      && TYPE_PRECISION (long_long_integer_type_node) <= BITS_PER_WORD)
    add_candidate (data, build_int_cst (long_long_integer_type_node, 0),
		   build_int_cst (long_long_integer_type_node, 1), true, NULL);
}
/* Adds candidates based on the old induction variable IV.  */

static void
add_old_iv_candidates (struct ivopts_data *data, struct iv *iv)
{
  gimple phi;
  tree def;
  struct iv_cand *cand;

  add_candidate (data, iv->base, iv->step, true, NULL);

  /* The same, but with initial value zero.  */
  if (POINTER_TYPE_P (TREE_TYPE (iv->base)))
    add_candidate (data, size_int (0), iv->step, true, NULL);
  else
    add_candidate (data, build_int_cst (TREE_TYPE (iv->base), 0),
		   iv->step, true, NULL);

  phi = SSA_NAME_DEF_STMT (iv->ssa_name);
  if (gimple_code (phi) == GIMPLE_PHI)
    {
      /* Additionally record the possibility of leaving the original iv
	 untouched.  */
      def = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (data->current_loop));
      cand = add_candidate_1 (data,
			      iv->base, iv->step, true, IP_ORIGINAL, NULL,
			      SSA_NAME_DEF_STMT (def));
      cand->var_before = iv->ssa_name;
      cand->var_after = def;
    }
}
/* Adds candidates based on the old induction variables.  */

static void
add_old_ivs_candidates (struct ivopts_data *data)
{
  unsigned i;
  struct iv *iv;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
    {
      iv = ver_info (data, i)->iv;
      if (iv && iv->biv_p && !integer_zerop (iv->step))
        add_old_iv_candidates (data, iv);
    }
}
/* Adds candidates based on the value of the induction variable IV and USE.  */

static void
add_iv_value_candidates (struct ivopts_data *data,
                         struct iv *iv, struct iv_use *use)
{
  unsigned HOST_WIDE_INT offset;
  tree base;
  tree basetype;

  add_candidate (data, iv->base, iv->step, false, use);

  /* The same, but with initial value zero.  Make such variable important,
     since it is generic enough so that possibly many uses may be based
     on it.  */
  basetype = TREE_TYPE (iv->base);
  if (POINTER_TYPE_P (basetype))
    basetype = sizetype;
  add_candidate (data, build_int_cst (basetype, 0),
                 iv->step, true, use);

  /* Third, try removing the constant offset.  Make sure to even
     add a candidate for &a[0] vs. (T *)&a.  */
  base = strip_offset (iv->base, &offset);
  if (offset
      || base != iv->base)
    add_candidate (data, base, iv->step, false, use);
}
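/* Worked example (illustrative, not from the original sources): for a
   use whose iv is {&a[4], +, 4} with int a[], the calls above add

     base &a[4], step 4   -- related to this use only
     base 0,     step 4   -- important, shareable by many uses
     base &a[0], step 4   -- the 16-byte constant offset stripped,
                             so it can also serve (T *)&a style uses.  */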
/* Adds candidates based on the uses.  */

static void
add_derived_ivs_candidates (struct ivopts_data *data)
{
  unsigned i;

  for (i = 0; i < n_iv_uses (data); i++)
    {
      struct iv_use *use = iv_use (data, i);

      if (!use)
        continue;

      switch (use->type)
        {
        case USE_NONLINEAR_EXPR:
        case USE_COMPARE:
        case USE_ADDRESS:
          /* Just add the ivs based on the value of the iv used here.  */
          add_iv_value_candidates (data, use->iv, use);
          break;

        default:
          gcc_unreachable ();
        }
    }
}
/* Record important candidates and add them to related_cands bitmaps
   if needed.  */

static void
record_important_candidates (struct ivopts_data *data)
{
  unsigned i;
  struct iv_use *use;

  for (i = 0; i < n_iv_cands (data); i++)
    {
      struct iv_cand *cand = iv_cand (data, i);

      if (cand->important)
        bitmap_set_bit (data->important_candidates, i);
    }

  data->consider_all_candidates = (n_iv_cands (data)
                                   <= CONSIDER_ALL_CANDIDATES_BOUND);

  if (data->consider_all_candidates)
    {
      /* We will not need "related_cands" bitmaps in this case,
         so release them to decrease peak memory consumption.  */
      for (i = 0; i < n_iv_uses (data); i++)
        {
          use = iv_use (data, i);
          BITMAP_FREE (use->related_cands);
        }
    }
  else
    {
      /* Add important candidates to the related_cands bitmaps.  */
      for (i = 0; i < n_iv_uses (data); i++)
        bitmap_ior_into (iv_use (data, i)->related_cands,
                         data->important_candidates);
    }
}
/* Allocates the data structure mapping the (use, candidate) pairs to costs.
   If consider_all_candidates is true, we use a two-dimensional array, otherwise
   we allocate a simple list to every use.  */

static void
alloc_use_cost_map (struct ivopts_data *data)
{
  unsigned i, size, s, j;

  for (i = 0; i < n_iv_uses (data); i++)
    {
      struct iv_use *use = iv_use (data, i);
      bitmap_iterator bi;

      if (data->consider_all_candidates)
        size = n_iv_cands (data);
      else
        {
          s = 0;
          EXECUTE_IF_SET_IN_BITMAP (use->related_cands, 0, j, bi)
            {
              s++;
            }

          /* Round up to the power of two, so that computing the modulo
             by it is fast.  */
          for (size = 1; size < s; size <<= 1)
            continue;
        }

      use->n_map_members = size;
      use->cost_map = XCNEWVEC (struct cost_pair, size);
    }
}
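/* For instance (illustrative): a use related to s == 5 candidates gets
   n_map_members == 8, the next power of two, so set_use_iv_cost and
   get_use_iv_cost below can reduce a candidate id with a cheap mask
   (id & 7) instead of a division.  */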
/* Returns description of computation cost of expression whose runtime
   cost is RUNTIME and complexity corresponds to COMPLEXITY.  */

static comp_cost
new_cost (unsigned runtime, unsigned complexity)
{
  comp_cost cost;

  cost.cost = runtime;
  cost.complexity = complexity;

  return cost;
}
/* Adds costs COST1 and COST2.  */

static comp_cost
add_costs (comp_cost cost1, comp_cost cost2)
{
  cost1.cost += cost2.cost;
  cost1.complexity += cost2.complexity;

  return cost1;
}
/* Subtracts costs COST1 and COST2.  */

static comp_cost
sub_costs (comp_cost cost1, comp_cost cost2)
{
  cost1.cost -= cost2.cost;
  cost1.complexity -= cost2.complexity;

  return cost1;
}
/* Returns a negative number if COST1 < COST2, a positive number if
   COST1 > COST2, and 0 if COST1 = COST2.  */

static int
compare_costs (comp_cost cost1, comp_cost cost2)
{
  if (cost1.cost == cost2.cost)
    return cost1.complexity - cost2.complexity;

  return cost1.cost - cost2.cost;
}

/* Returns true if COST is infinite.  */

static bool
infinite_cost_p (comp_cost cost)
{
  return cost.cost == INFTY;
}
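/* Illustrative usage of the comp_cost helpers above (not from the
   original sources):

     comp_cost a = new_cost (4, 1), b = new_cost (4, 2);
     compare_costs (a, b);    -- negative: same runtime, a is simpler
     add_costs (a, b);        -- yields {8, 3}
     infinite_cost_p (new_cost (INFTY, 0));   -- true  */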
/* Sets cost of (USE, CANDIDATE) pair to COST and record that it depends
   on invariants DEPENDS_ON and that the value used in expressing it
   is VALUE, and in case of iv elimination the comparison operator is COMP.  */

static void
set_use_iv_cost (struct ivopts_data *data,
                 struct iv_use *use, struct iv_cand *cand,
                 comp_cost cost, bitmap depends_on, tree value,
                 enum tree_code comp, int inv_expr_id)
{
  unsigned i, s;

  if (infinite_cost_p (cost))
    {
      BITMAP_FREE (depends_on);
      return;
    }

  if (data->consider_all_candidates)
    {
      use->cost_map[cand->id].cand = cand;
      use->cost_map[cand->id].cost = cost;
      use->cost_map[cand->id].depends_on = depends_on;
      use->cost_map[cand->id].value = value;
      use->cost_map[cand->id].comp = comp;
      use->cost_map[cand->id].inv_expr_id = inv_expr_id;
      return;
    }

  /* n_map_members is a power of two, so this computes modulo.  */
  s = cand->id & (use->n_map_members - 1);
  for (i = s; i < use->n_map_members; i++)
    if (!use->cost_map[i].cand)
      goto found;
  for (i = 0; i < s; i++)
    if (!use->cost_map[i].cand)
      goto found;

  gcc_unreachable ();

found:
  use->cost_map[i].cand = cand;
  use->cost_map[i].cost = cost;
  use->cost_map[i].depends_on = depends_on;
  use->cost_map[i].value = value;
  use->cost_map[i].comp = comp;
  use->cost_map[i].inv_expr_id = inv_expr_id;
}
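/* Example of the open addressing used above (illustrative): with
   n_map_members == 8, a candidate with id 13 starts probing at slot
   13 & 7 == 5, scans slots 5..7, then wraps around to 0..4 until a free
   slot is found.  get_use_iv_cost below replays the same probe order.  */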
/* Gets cost of (USE, CANDIDATE) pair.  */

static struct cost_pair *
get_use_iv_cost (struct ivopts_data *data, struct iv_use *use,
                 struct iv_cand *cand)
{
  unsigned i, s;
  struct cost_pair *ret;

  if (!cand)
    return NULL;

  if (data->consider_all_candidates)
    {
      ret = use->cost_map + cand->id;
      if (!ret->cand)
        return NULL;

      return ret;
    }

  /* n_map_members is a power of two, so this computes modulo.  */
  s = cand->id & (use->n_map_members - 1);
  for (i = s; i < use->n_map_members; i++)
    if (use->cost_map[i].cand == cand)
      return use->cost_map + i;

  for (i = 0; i < s; i++)
    if (use->cost_map[i].cand == cand)
      return use->cost_map + i;

  return NULL;
}
/* Returns estimate on cost of computing SEQ.  */

static unsigned
seq_cost (rtx seq, bool speed)
{
  unsigned cost = 0;
  rtx set;

  for (; seq; seq = NEXT_INSN (seq))
    {
      set = single_set (seq);
      if (set)
        cost += set_src_cost (SET_SRC (set), speed);
      else
        cost++;
    }

  return cost;
}
/* Produce DECL_RTL for object OBJ so it looks like it is stored in memory.  */

static rtx
produce_memory_decl_rtl (tree obj, int *regno)
{
  addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (obj));
  enum machine_mode address_mode = targetm.addr_space.address_mode (as);
  rtx x;

  gcc_assert (obj);
  if (TREE_STATIC (obj) || DECL_EXTERNAL (obj))
    {
      const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (obj));
      x = gen_rtx_SYMBOL_REF (address_mode, name);
      SET_SYMBOL_REF_DECL (x, obj);
      x = gen_rtx_MEM (DECL_MODE (obj), x);
      set_mem_addr_space (x, as);
      targetm.encode_section_info (obj, x, true);
    }
  else
    {
      x = gen_raw_REG (address_mode, (*regno)++);
      x = gen_rtx_MEM (DECL_MODE (obj), x);
      set_mem_addr_space (x, as);
    }

  return x;
}
/* Prepares decl_rtl for variables referred in *EXPR_P.  Callback for
   walk_tree.  DATA contains the actual fake register number.  */

static tree
prepare_decl_rtl (tree *expr_p, int *ws, void *data)
{
  tree obj = NULL_TREE;
  rtx x = NULL_RTX;
  int *regno = (int *) data;

  switch (TREE_CODE (*expr_p))
    {
    case ADDR_EXPR:
      for (expr_p = &TREE_OPERAND (*expr_p, 0);
           handled_component_p (*expr_p);
           expr_p = &TREE_OPERAND (*expr_p, 0))
        continue;
      obj = *expr_p;
      if (DECL_P (obj) && !DECL_RTL_SET_P (obj))
        x = produce_memory_decl_rtl (obj, regno);
      break;

    case SSA_NAME:
      *ws = 0;
      obj = SSA_NAME_VAR (*expr_p);
      if (!DECL_RTL_SET_P (obj))
        x = gen_raw_REG (DECL_MODE (obj), (*regno)++);
      break;

    case VAR_DECL:
    case PARM_DECL:
    case RESULT_DECL:
      *ws = 0;
      obj = *expr_p;

      if (DECL_RTL_SET_P (obj))
        break;

      if (DECL_MODE (obj) == BLKmode)
        x = produce_memory_decl_rtl (obj, regno);
      else
        x = gen_raw_REG (DECL_MODE (obj), (*regno)++);

      break;

    default:
      break;
    }

  if (x)
    {
      VEC_safe_push (tree, heap, decl_rtl_to_reset, obj);
      SET_DECL_RTL (obj, x);
    }

  return NULL_TREE;
}
/* Determines cost of the computation of EXPR.  */

static unsigned
computation_cost (tree expr, bool speed)
{
  rtx seq, rslt;
  tree type = TREE_TYPE (expr);
  unsigned cost;
  /* Avoid using hard regs in ways which may be unsupported.  */
  int regno = LAST_VIRTUAL_REGISTER + 1;
  struct cgraph_node *node = cgraph_get_node (current_function_decl);
  enum node_frequency real_frequency = node->frequency;

  node->frequency = NODE_FREQUENCY_NORMAL;
  crtl->maybe_hot_insn_p = speed;
  walk_tree (&expr, prepare_decl_rtl, &regno, NULL);
  start_sequence ();
  rslt = expand_expr (expr, NULL_RTX, TYPE_MODE (type), EXPAND_NORMAL);
  seq = get_insns ();
  end_sequence ();
  default_rtl_profile ();
  node->frequency = real_frequency;

  cost = seq_cost (seq, speed);
  if (MEM_P (rslt))
    cost += address_cost (XEXP (rslt, 0), TYPE_MODE (type),
                          TYPE_ADDR_SPACE (type), speed);
  else if (!REG_P (rslt))
    cost += set_src_cost (rslt, speed);

  return cost;
}
/* Returns variable containing the value of candidate CAND at statement AT.  */

static tree
var_at_stmt (struct loop *loop, struct iv_cand *cand, gimple stmt)
{
  if (stmt_after_increment (loop, cand, stmt))
    return cand->var_after;
  else
    return cand->var_before;
}
/* If A is (TYPE) BA and B is (TYPE) BB, and the types of BA and BB have the
   same precision that is at least as wide as the precision of TYPE, stores
   BA to A and BB to B, and returns the type of BA.  Otherwise, returns the
   type of A and B.  */

static tree
determine_common_wider_type (tree *a, tree *b)
{
  tree wider_type = NULL;
  tree suba, subb;
  tree atype = TREE_TYPE (*a);

  if (CONVERT_EXPR_P (*a))
    {
      suba = TREE_OPERAND (*a, 0);
      wider_type = TREE_TYPE (suba);
      if (TYPE_PRECISION (wider_type) < TYPE_PRECISION (atype))
        return atype;
    }
  else
    return atype;

  if (CONVERT_EXPR_P (*b))
    {
      subb = TREE_OPERAND (*b, 0);
      if (TYPE_PRECISION (wider_type) != TYPE_PRECISION (TREE_TYPE (subb)))
        return atype;
    }
  else
    return atype;

  *a = suba;
  *b = subb;
  return wider_type;
}
/* Determines the expression by that USE is expressed from induction variable
   CAND at statement AT in LOOP.  The expression is stored in a decomposed
   form into AFF.  Returns false if USE cannot be expressed using CAND.  */

static bool
get_computation_aff (struct loop *loop,
                     struct iv_use *use, struct iv_cand *cand, gimple at,
                     struct affine_tree_combination *aff)
{
  tree ubase = use->iv->base;
  tree ustep = use->iv->step;
  tree cbase = cand->iv->base;
  tree cstep = cand->iv->step, cstep_common;
  tree utype = TREE_TYPE (ubase), ctype = TREE_TYPE (cbase);
  tree common_type, var;
  tree uutype;
  aff_tree cbase_aff, var_aff;
  double_int rat;

  if (TYPE_PRECISION (utype) > TYPE_PRECISION (ctype))
    {
      /* We do not have a precision to express the values of use.  */
      return false;
    }

  var = var_at_stmt (loop, cand, at);
  uutype = unsigned_type_for (utype);

  /* If the conversion is not noop, perform it.  */
  if (TYPE_PRECISION (utype) < TYPE_PRECISION (ctype))
    {
      cstep = fold_convert (uutype, cstep);
      cbase = fold_convert (uutype, cbase);
      var = fold_convert (uutype, var);
    }

  if (!constant_multiple_of (ustep, cstep, &rat))
    return false;

  /* In case both UBASE and CBASE are shortened to UUTYPE from some common
     type, we achieve better folding by computing their difference in this
     wider type, and cast the result to UUTYPE.  We do not need to worry about
     overflows, as all the arithmetics will in the end be performed in UUTYPE
     anyway.  */
  common_type = determine_common_wider_type (&ubase, &cbase);

  /* use = ubase - ratio * cbase + ratio * var.  */
  tree_to_aff_combination (ubase, common_type, aff);
  tree_to_aff_combination (cbase, common_type, &cbase_aff);
  tree_to_aff_combination (var, uutype, &var_aff);

  /* We need to shift the value if we are after the increment.  */
  if (stmt_after_increment (loop, cand, at))
    {
      aff_tree cstep_aff;

      if (common_type != uutype)
        cstep_common = fold_convert (common_type, cstep);
      else
        cstep_common = cstep;

      tree_to_aff_combination (cstep_common, common_type, &cstep_aff);
      aff_combination_add (&cbase_aff, &cstep_aff);
    }

  aff_combination_scale (&cbase_aff, double_int_neg (rat));
  aff_combination_add (aff, &cbase_aff);
  if (common_type != uutype)
    aff_combination_convert (aff, uutype);

  aff_combination_scale (&var_aff, rat);
  aff_combination_add (aff, &var_aff);

  return true;
}
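/* Worked example (illustrative): for a use {8, +, 4} and a candidate
   {0, +, 2}, constant_multiple_of gives rat == 2, so the use is rebuilt
   as 8 - 2 * 0 + 2 * var == 8 + 2 * var, where var is the candidate's
   SSA value at AT; at iteration i both sides equal 8 + 4 * i.  */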
/* Determines the expression by that USE is expressed from induction variable
   CAND at statement AT in LOOP.  The computation is unshared.  */

static tree
get_computation_at (struct loop *loop,
                    struct iv_use *use, struct iv_cand *cand, gimple at)
{
  aff_tree aff;
  tree type = TREE_TYPE (use->iv->base);

  if (!get_computation_aff (loop, use, cand, at, &aff))
    return NULL_TREE;
  unshare_aff_combination (&aff);
  return fold_convert (type, aff_combination_to_tree (&aff));
}
/* Determines the expression by that USE is expressed from induction variable
   CAND in LOOP.  The computation is unshared.  */

static tree
get_computation (struct loop *loop, struct iv_use *use, struct iv_cand *cand)
{
  return get_computation_at (loop, use, cand, use->stmt);
}
/* Adjust the cost COST for being in loop setup rather than loop body.
   If we're optimizing for space, the loop setup overhead is constant;
   if we're optimizing for speed, amortize it over the per-iteration cost.  */

static unsigned
adjust_setup_cost (struct ivopts_data *data, unsigned cost)
{
  if (cost == INFTY)
    return cost;
  else if (optimize_loop_for_speed_p (data->current_loop))
    return cost / avg_loop_niter (data->current_loop);
  else
    return cost;
}
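/* E.g. (illustrative): with an expected trip count of 5, a setup cost of
   10 is charged as 10 / 5 == 2 when optimizing the loop for speed, while
   for size the full constant 10 is kept, matching the comment above.  */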
/* Returns true if multiplying by RATIO is allowed in an address.  Test the
   validity for a memory reference accessing memory of mode MODE in
   address space AS.  */

DEF_VEC_P (sbitmap);
DEF_VEC_ALLOC_P (sbitmap, heap);

static bool
multiplier_allowed_in_address_p (HOST_WIDE_INT ratio, enum machine_mode mode,
                                 addr_space_t as)
{
#define MAX_RATIO 128
  unsigned int data_index = (int) as * MAX_MACHINE_MODE + (int) mode;
  static VEC (sbitmap, heap) *valid_mult_list;
  sbitmap valid_mult;

  if (data_index >= VEC_length (sbitmap, valid_mult_list))
    VEC_safe_grow_cleared (sbitmap, heap, valid_mult_list, data_index + 1);

  valid_mult = VEC_index (sbitmap, valid_mult_list, data_index);
  if (!valid_mult)
    {
      enum machine_mode address_mode = targetm.addr_space.address_mode (as);
      rtx reg1 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 1);
      rtx addr;
      HOST_WIDE_INT i;

      valid_mult = sbitmap_alloc (2 * MAX_RATIO + 1);
      sbitmap_zero (valid_mult);
      addr = gen_rtx_fmt_ee (MULT, address_mode, reg1, NULL_RTX);
      for (i = -MAX_RATIO; i <= MAX_RATIO; i++)
        {
          XEXP (addr, 1) = gen_int_mode (i, address_mode);
          if (memory_address_addr_space_p (mode, addr, as))
            SET_BIT (valid_mult, i + MAX_RATIO);
        }

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  allowed multipliers:");
          for (i = -MAX_RATIO; i <= MAX_RATIO; i++)
            if (TEST_BIT (valid_mult, i + MAX_RATIO))
              fprintf (dump_file, " %d", (int) i);
          fprintf (dump_file, "\n");
          fprintf (dump_file, "\n");
        }

      VEC_replace (sbitmap, valid_mult_list, data_index, valid_mult);
    }

  if (ratio > MAX_RATIO || ratio < -MAX_RATIO)
    return false;

  return TEST_BIT (valid_mult, ratio + MAX_RATIO);
}
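/* For example (illustrative): on a target whose addresses allow scaled
   indexes reg1 * {1, 2, 4, 8} + reg2, the first query for a given mode
   populates the cached sbitmap with exactly those ratio bits set, and
   every later call is a single TEST_BIT lookup.  */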
/* Returns cost of address in shape symbol + var + OFFSET + RATIO * index.
   If SYMBOL_PRESENT is false, symbol is omitted.  If VAR_PRESENT is false,
   variable is omitted.  Compute the cost for a memory reference that accesses
   a memory location of mode MEM_MODE in address space AS.

   MAY_AUTOINC is set to true if the autoincrement (increasing index by
   size of MEM_MODE / RATIO) is available.  To make this determination, we
   look at the size of the increment to be made, which is given in CSTEP.
   CSTEP may be zero if the step is unknown.
   STMT_AFTER_INC is true iff the statement we're looking at is after the
   increment of the original biv.

   TODO -- there must be some better way.  This all is quite crude.  */

typedef struct
{
  HOST_WIDE_INT min_offset, max_offset;
  unsigned costs[2][2][2][2];
} *address_cost_data;

DEF_VEC_P (address_cost_data);
DEF_VEC_ALLOC_P (address_cost_data, heap);
static comp_cost
get_address_cost (bool symbol_present, bool var_present,
                  unsigned HOST_WIDE_INT offset, HOST_WIDE_INT ratio,
                  HOST_WIDE_INT cstep, enum machine_mode mem_mode,
                  addr_space_t as, bool speed,
                  bool stmt_after_inc, bool *may_autoinc)
{
  enum machine_mode address_mode = targetm.addr_space.address_mode (as);
  static VEC(address_cost_data, heap) *address_cost_data_list;
  unsigned int data_index = (int) as * MAX_MACHINE_MODE + (int) mem_mode;
  address_cost_data data;
  static bool has_preinc[MAX_MACHINE_MODE], has_postinc[MAX_MACHINE_MODE];
  static bool has_predec[MAX_MACHINE_MODE], has_postdec[MAX_MACHINE_MODE];
  unsigned cost, acost, complexity;
  bool offset_p, ratio_p, autoinc;
  HOST_WIDE_INT s_offset, autoinc_offset, msize;
  unsigned HOST_WIDE_INT mask;
  unsigned bits;

  if (data_index >= VEC_length (address_cost_data, address_cost_data_list))
    VEC_safe_grow_cleared (address_cost_data, heap, address_cost_data_list,
                           data_index + 1);

  data = VEC_index (address_cost_data, address_cost_data_list, data_index);
  if (!data)
    {
      HOST_WIDE_INT i;
      HOST_WIDE_INT rat, off = 0;
      int old_cse_not_expected, width;
      unsigned sym_p, var_p, off_p, rat_p, add_c;
      rtx seq, addr, base;
      rtx reg0, reg1;

      data = (address_cost_data) xcalloc (1, sizeof (*data));

      reg1 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 1);

      width = GET_MODE_BITSIZE (address_mode) - 1;
      if (width > (HOST_BITS_PER_WIDE_INT - 1))
        width = HOST_BITS_PER_WIDE_INT - 1;
      addr = gen_rtx_fmt_ee (PLUS, address_mode, reg1, NULL_RTX);

      for (i = width; i >= 0; i--)
        {
          off = -((HOST_WIDE_INT) 1 << i);
          XEXP (addr, 1) = gen_int_mode (off, address_mode);
          if (memory_address_addr_space_p (mem_mode, addr, as))
            break;
        }
      data->min_offset = (i == -1? 0 : off);

      for (i = width; i >= 0; i--)
        {
          off = ((HOST_WIDE_INT) 1 << i) - 1;
          XEXP (addr, 1) = gen_int_mode (off, address_mode);
          if (memory_address_addr_space_p (mem_mode, addr, as))
            break;
        }
      if (i == -1)
        off = 0;
      data->max_offset = off;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "get_address_cost:\n");
          fprintf (dump_file, "  min offset %s " HOST_WIDE_INT_PRINT_DEC "\n",
                   GET_MODE_NAME (mem_mode),
                   data->min_offset);
          fprintf (dump_file, "  max offset %s " HOST_WIDE_INT_PRINT_DEC "\n",
                   GET_MODE_NAME (mem_mode),
                   data->max_offset);
        }

      rat = 1;
      for (i = 2; i <= MAX_RATIO; i++)
        if (multiplier_allowed_in_address_p (i, mem_mode, as))
          {
            rat = i;
            break;
          }

      /* Compute the cost of various addressing modes.  */
      acost = 0;
      reg0 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 1);
      reg1 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 2);

      if (USE_LOAD_PRE_DECREMENT (mem_mode)
          || USE_STORE_PRE_DECREMENT (mem_mode))
        {
          addr = gen_rtx_PRE_DEC (address_mode, reg0);
          has_predec[mem_mode]
            = memory_address_addr_space_p (mem_mode, addr, as);
        }
      if (USE_LOAD_POST_DECREMENT (mem_mode)
          || USE_STORE_POST_DECREMENT (mem_mode))
        {
          addr = gen_rtx_POST_DEC (address_mode, reg0);
          has_postdec[mem_mode]
            = memory_address_addr_space_p (mem_mode, addr, as);
        }
      if (USE_LOAD_PRE_INCREMENT (mem_mode)
          || USE_STORE_PRE_INCREMENT (mem_mode))
        {
          addr = gen_rtx_PRE_INC (address_mode, reg0);
          has_preinc[mem_mode]
            = memory_address_addr_space_p (mem_mode, addr, as);
        }
      if (USE_LOAD_POST_INCREMENT (mem_mode)
          || USE_STORE_POST_INCREMENT (mem_mode))
        {
          addr = gen_rtx_POST_INC (address_mode, reg0);
          has_postinc[mem_mode]
            = memory_address_addr_space_p (mem_mode, addr, as);
        }
      for (i = 0; i < 16; i++)
        {
          sym_p = i & 1;
          var_p = (i >> 1) & 1;
          off_p = (i >> 2) & 1;
          rat_p = (i >> 3) & 1;

          addr = reg0;
          if (rat_p)
            addr = gen_rtx_fmt_ee (MULT, address_mode, addr,
                                   gen_int_mode (rat, address_mode));

          if (var_p)
            addr = gen_rtx_fmt_ee (PLUS, address_mode, addr, reg1);

          if (sym_p)
            {
              base = gen_rtx_SYMBOL_REF (address_mode, ggc_strdup (""));
              /* ??? We can run into trouble with some backends by presenting
                 it with symbols which haven't been properly passed through
                 targetm.encode_section_info.  By setting the local bit, we
                 enhance the probability of things working.  */
              SYMBOL_REF_FLAGS (base) = SYMBOL_FLAG_LOCAL;

              if (off_p)
                base = gen_rtx_fmt_e (CONST, address_mode,
                                      gen_rtx_fmt_ee
                                        (PLUS, address_mode, base,
                                         gen_int_mode (off, address_mode)));
            }
          else if (off_p)
            base = gen_int_mode (off, address_mode);
          else
            base = NULL_RTX;

          if (base)
            addr = gen_rtx_fmt_ee (PLUS, address_mode, addr, base);

          start_sequence ();
          /* To avoid splitting addressing modes, pretend that no cse will
             follow.  */
          old_cse_not_expected = cse_not_expected;
          cse_not_expected = true;
          addr = memory_address_addr_space (mem_mode, addr, as);
          cse_not_expected = old_cse_not_expected;
          seq = get_insns ();
          end_sequence ();

          acost = seq_cost (seq, speed);
          acost += address_cost (addr, mem_mode, as, speed);

          if (!acost)
            acost = 1;
          data->costs[sym_p][var_p][off_p][rat_p] = acost;
        }

      /* On some targets, it is quite expensive to load symbol to a register,
         which makes addresses that contain symbols look much more expensive.
         However, the symbol will have to be loaded in any case before the
         loop (and quite likely we have it in register already), so it does not
         make much sense to penalize them too heavily.  So make some final
         tweaks for the SYMBOL_PRESENT modes:

         If VAR_PRESENT is false, and the mode obtained by changing symbol to
         var is cheaper, use this mode with small penalty.
         If VAR_PRESENT is true, try whether the mode with
         SYMBOL_PRESENT = false is cheaper even with cost of addition, and
         if this is the case, use it.  */
      add_c = add_cost (speed, address_mode);
      for (i = 0; i < 8; i++)
        {
          var_p = i & 1;
          off_p = (i >> 1) & 1;
          rat_p = (i >> 2) & 1;

          acost = data->costs[0][1][off_p][rat_p] + 1;
          if (var_p)
            acost += add_c;

          if (acost < data->costs[1][var_p][off_p][rat_p])
            data->costs[1][var_p][off_p][rat_p] = acost;
        }

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Address costs:\n");

          for (i = 0; i < 16; i++)
            {
              sym_p = i & 1;
              var_p = (i >> 1) & 1;
              off_p = (i >> 2) & 1;
              rat_p = (i >> 3) & 1;

              fprintf (dump_file, "  ");
              if (sym_p)
                fprintf (dump_file, "sym + ");
              if (var_p)
                fprintf (dump_file, "var + ");
              if (off_p)
                fprintf (dump_file, "cst + ");
              if (rat_p)
                fprintf (dump_file, "rat * ");

              acost = data->costs[sym_p][var_p][off_p][rat_p];
              fprintf (dump_file, "index costs %d\n", acost);
            }
          if (has_predec[mem_mode] || has_postdec[mem_mode]
              || has_preinc[mem_mode] || has_postinc[mem_mode])
            fprintf (dump_file, "  May include autoinc/dec\n");
          fprintf (dump_file, "\n");
        }

      VEC_replace (address_cost_data, address_cost_data_list,
                   data_index, data);
    }

  bits = GET_MODE_BITSIZE (address_mode);
  mask = ~(~(unsigned HOST_WIDE_INT) 0 << (bits - 1) << 1);
  offset &= mask;
  if ((offset >> (bits - 1) & 1))
    offset |= ~mask;
  s_offset = offset;

  autoinc = false;
  msize = GET_MODE_SIZE (mem_mode);
  autoinc_offset = offset;
  if (stmt_after_inc)
    autoinc_offset += ratio * cstep;
  if (symbol_present || var_present || ratio != 1)
    autoinc = false;
  else if ((has_postinc[mem_mode] && autoinc_offset == 0
            && msize == cstep)
           || (has_postdec[mem_mode] && autoinc_offset == 0
               && msize == -cstep)
           || (has_preinc[mem_mode] && autoinc_offset == msize
               && msize == cstep)
           || (has_predec[mem_mode] && autoinc_offset == -msize
               && msize == -cstep))
    autoinc = true;

  cost = 0;
  offset_p = (s_offset != 0
              && data->min_offset <= s_offset
              && s_offset <= data->max_offset);
  ratio_p = (ratio != 1
             && multiplier_allowed_in_address_p (ratio, mem_mode, as));

  if (ratio != 1 && !ratio_p)
    cost += mult_by_coeff_cost (ratio, address_mode, speed);

  if (s_offset && !offset_p && !symbol_present)
    cost += add_cost (speed, address_mode);

  if (may_autoinc)
    *may_autoinc = autoinc;
  acost = data->costs[symbol_present][var_present][offset_p][ratio_p];
  complexity = (symbol_present != 0) + (var_present != 0) + offset_p + ratio_p;
  return new_cost (cost + acost, complexity);
}
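/* Note on the encoding used above (illustrative): the 16 iterations
   encode (sym_p, var_p, off_p, rat_p) in the low bits of I.  E.g. i == 6
   means sym_p == 0, var_p == 1, off_p == 1, rat_p == 0, i.e. an address
   of shape var + cst, and its cost is stored in data->costs[0][1][1][0].  */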
/* Calculate the SPEED or size cost of shiftadd EXPR in MODE.  MULT is the
   EXPR operand holding the shift.  COST0 and COST1 are the costs for
   calculating the operands of EXPR.  Returns true if successful, and returns
   the cost in COST.  */

static bool
get_shiftadd_cost (tree expr, enum machine_mode mode, comp_cost cost0,
                   comp_cost cost1, tree mult, bool speed, comp_cost *cost)
{
  comp_cost res;
  tree op1 = TREE_OPERAND (expr, 1);
  tree cst = TREE_OPERAND (mult, 1);
  tree multop = TREE_OPERAND (mult, 0);
  int m = exact_log2 (int_cst_value (cst));
  int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
  int sa_cost;

  if (!(m >= 0 && m < maxm))
    return false;

  sa_cost = (TREE_CODE (expr) != MINUS_EXPR
             ? shiftadd_cost (speed, mode, m)
             : (mult == op1
                ? shiftsub1_cost (speed, mode, m)
                : shiftsub0_cost (speed, mode, m)));
  res = new_cost (sa_cost, 0);
  res = add_costs (res, mult == op1 ? cost0 : cost1);

  STRIP_NOPS (multop);
  if (!is_gimple_val (multop))
    res = add_costs (res, force_expr_to_var_cost (multop, speed));

  *cost = res;
  return true;
}
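/* Example (illustrative): for EXPR == a + b * 4 in SImode, MULT is the
   b * 4 operand and m == exact_log2 (4) == 2, so the sum is priced as a
   single shift-add (shiftadd_cost) instead of a multiplication followed
   by an addition.  */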
/* Estimates cost of forcing expression EXPR into a variable.  */

static comp_cost
force_expr_to_var_cost (tree expr, bool speed)
{
  static bool costs_initialized = false;
  static unsigned integer_cost [2];
  static unsigned symbol_cost [2];
  static unsigned address_cost [2];
  tree op0, op1;
  comp_cost cost0, cost1, cost;
  enum machine_mode mode;

  if (!costs_initialized)
    {
      tree type = build_pointer_type (integer_type_node);
      tree var, addr;
      rtx x;
      int i;

      var = create_tmp_var_raw (integer_type_node, "test_var");
      TREE_STATIC (var) = 1;
      x = produce_memory_decl_rtl (var, NULL);
      SET_DECL_RTL (var, x);

      addr = build1 (ADDR_EXPR, type, var);

      for (i = 0; i < 2; i++)
        {
          integer_cost[i] = computation_cost (build_int_cst (integer_type_node,
                                                             2000), i);

          symbol_cost[i] = computation_cost (addr, i) + 1;

          address_cost[i]
            = computation_cost (fold_build_pointer_plus_hwi (addr, 2000), i) + 1;
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "force_expr_to_var_cost %s costs:\n", i ? "speed" : "size");
              fprintf (dump_file, "  integer %d\n", (int) integer_cost[i]);
              fprintf (dump_file, "  symbol %d\n", (int) symbol_cost[i]);
              fprintf (dump_file, "  address %d\n", (int) address_cost[i]);
              fprintf (dump_file, "  other %d\n", (int) target_spill_cost[i]);
              fprintf (dump_file, "\n");
            }
        }

      costs_initialized = true;
    }

  STRIP_NOPS (expr);

  if (SSA_VAR_P (expr))
    return no_cost;

  if (is_gimple_min_invariant (expr))
    {
      if (TREE_CODE (expr) == INTEGER_CST)
        return new_cost (integer_cost [speed], 0);

      if (TREE_CODE (expr) == ADDR_EXPR)
        {
          tree obj = TREE_OPERAND (expr, 0);

          if (TREE_CODE (obj) == VAR_DECL
              || TREE_CODE (obj) == PARM_DECL
              || TREE_CODE (obj) == RESULT_DECL)
            return new_cost (symbol_cost [speed], 0);
        }

      return new_cost (address_cost [speed], 0);
    }

  switch (TREE_CODE (expr))
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);
      STRIP_NOPS (op0);
      STRIP_NOPS (op1);

      if (is_gimple_val (op0))
        cost0 = no_cost;
      else
        cost0 = force_expr_to_var_cost (op0, speed);

      if (is_gimple_val (op1))
        cost1 = no_cost;
      else
        cost1 = force_expr_to_var_cost (op1, speed);

      break;

    case NEGATE_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      STRIP_NOPS (op0);
      op1 = NULL_TREE;

      if (is_gimple_val (op0))
        cost0 = no_cost;
      else
        cost0 = force_expr_to_var_cost (op0, speed);

      cost1 = no_cost;
      break;

    default:
      /* Just an arbitrary value, FIXME.  */
      return new_cost (target_spill_cost[speed], 0);
    }

  mode = TYPE_MODE (TREE_TYPE (expr));
  switch (TREE_CODE (expr))
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case NEGATE_EXPR:
      cost = new_cost (add_cost (speed, mode), 0);
      if (TREE_CODE (expr) != NEGATE_EXPR)
        {
          tree mult = NULL_TREE;
          comp_cost sa_cost;
          if (TREE_CODE (op1) == MULT_EXPR)
            mult = op1;
          else if (TREE_CODE (op0) == MULT_EXPR)
            mult = op0;

          if (mult != NULL_TREE
              && cst_and_fits_in_hwi (TREE_OPERAND (mult, 1))
              && get_shiftadd_cost (expr, mode, cost0, cost1, mult,
                                    speed, &sa_cost))
            return sa_cost;
        }
      break;

    case MULT_EXPR:
      if (cst_and_fits_in_hwi (op0))
        cost = new_cost (mult_by_coeff_cost (int_cst_value (op0),
                                             mode, speed), 0);
      else if (cst_and_fits_in_hwi (op1))
        cost = new_cost (mult_by_coeff_cost (int_cst_value (op1),
                                             mode, speed), 0);
      else
        return new_cost (target_spill_cost [speed], 0);
      break;

    default:
      gcc_unreachable ();
    }

  cost = add_costs (cost, cost0);
  cost = add_costs (cost, cost1);

  /* Bound the cost by target_spill_cost.  The parts of complicated
     computations often are either loop invariant or at least can
     be shared between several iv uses, so letting this grow without
     limits would not give reasonable results.  */
  if (cost.cost > (int) target_spill_cost [speed])
    cost.cost = target_spill_cost [speed];

  return cost;
}
/* Estimates cost of forcing EXPR into a variable.  DEPENDS_ON is a set of the
   invariants the computation depends on.  */

static comp_cost
force_var_cost (struct ivopts_data *data,
                tree expr, bitmap *depends_on)
{
  if (depends_on)
    {
      fd_ivopts_data = data;
      walk_tree (&expr, find_depends, depends_on, NULL);
    }

  return force_expr_to_var_cost (expr, data->speed);
}
/* Estimates cost of expressing address ADDR as var + symbol + offset.  The
   value of offset is added to OFFSET, SYMBOL_PRESENT and VAR_PRESENT are set
   to false if the corresponding part is missing.  DEPENDS_ON is a set of the
   invariants the computation depends on.  */

static comp_cost
split_address_cost (struct ivopts_data *data,
                    tree addr, bool *symbol_present, bool *var_present,
                    unsigned HOST_WIDE_INT *offset, bitmap *depends_on)
{
  tree core;
  HOST_WIDE_INT bitsize;
  HOST_WIDE_INT bitpos;
  tree toffset;
  enum machine_mode mode;
  int unsignedp, volatilep;

  core = get_inner_reference (addr, &bitsize, &bitpos, &toffset, &mode,
                              &unsignedp, &volatilep, false);

  if (toffset != 0
      || bitpos % BITS_PER_UNIT != 0
      || TREE_CODE (core) != VAR_DECL)
    {
      *symbol_present = false;
      *var_present = true;
      fd_ivopts_data = data;
      walk_tree (&addr, find_depends, depends_on, NULL);
      return new_cost (target_spill_cost[data->speed], 0);
    }

  *offset += bitpos / BITS_PER_UNIT;
  if (TREE_STATIC (core)
      || DECL_EXTERNAL (core))
    {
      *symbol_present = true;
      *var_present = false;
      return no_cost;
    }

  *symbol_present = false;
  *var_present = true;
  return no_cost;
}
/* Estimates cost of expressing difference of addresses E1 - E2 as
   var + symbol + offset.  The value of offset is added to OFFSET,
   SYMBOL_PRESENT and VAR_PRESENT are set to false if the corresponding
   part is missing.  DEPENDS_ON is a set of the invariants the computation
   depends on.  */

static comp_cost
ptr_difference_cost (struct ivopts_data *data,
                     tree e1, tree e2, bool *symbol_present, bool *var_present,
                     unsigned HOST_WIDE_INT *offset, bitmap *depends_on)
{
  HOST_WIDE_INT diff = 0;
  aff_tree aff_e1, aff_e2;
  tree type;

  gcc_assert (TREE_CODE (e1) == ADDR_EXPR);

  if (ptr_difference_const (e1, e2, &diff))
    {
      *offset += diff;
      *symbol_present = false;
      *var_present = false;
      return no_cost;
    }

  if (integer_zerop (e2))
    return split_address_cost (data, TREE_OPERAND (e1, 0),
                               symbol_present, var_present, offset, depends_on);

  *symbol_present = false;
  *var_present = true;

  type = signed_type_for (TREE_TYPE (e1));
  tree_to_aff_combination (e1, type, &aff_e1);
  tree_to_aff_combination (e2, type, &aff_e2);
  aff_combination_scale (&aff_e2, double_int_minus_one);
  aff_combination_add (&aff_e1, &aff_e2);

  return force_var_cost (data, aff_combination_to_tree (&aff_e1), depends_on);
}
/* Estimates cost of expressing difference E1 - E2 as
   var + symbol + offset.  The value of offset is added to OFFSET,
   SYMBOL_PRESENT and VAR_PRESENT are set to false if the corresponding
   part is missing.  DEPENDS_ON is a set of the invariants the computation
   depends on.  */

static comp_cost
difference_cost (struct ivopts_data *data,
                 tree e1, tree e2, bool *symbol_present, bool *var_present,
                 unsigned HOST_WIDE_INT *offset, bitmap *depends_on)
{
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (e1));
  unsigned HOST_WIDE_INT off1, off2;
  aff_tree aff_e1, aff_e2;
  tree type;

  e1 = strip_offset (e1, &off1);
  e2 = strip_offset (e2, &off2);
  *offset += off1 - off2;

  STRIP_NOPS (e1);
  STRIP_NOPS (e2);

  if (TREE_CODE (e1) == ADDR_EXPR)
    return ptr_difference_cost (data, e1, e2, symbol_present, var_present,
                                offset, depends_on);
  *symbol_present = false;

  if (operand_equal_p (e1, e2, 0))
    {
      *var_present = false;
      return no_cost;
    }

  *var_present = true;

  if (integer_zerop (e2))
    return force_var_cost (data, e1, depends_on);

  if (integer_zerop (e1))
    {
      comp_cost cost = force_var_cost (data, e2, depends_on);
      cost.cost += mult_by_coeff_cost (-1, mode, data->speed);
      return cost;
    }

  type = signed_type_for (TREE_TYPE (e1));
  tree_to_aff_combination (e1, type, &aff_e1);
  tree_to_aff_combination (e2, type, &aff_e2);
  aff_combination_scale (&aff_e2, double_int_minus_one);
  aff_combination_add (&aff_e1, &aff_e2);

  return force_var_cost (data, aff_combination_to_tree (&aff_e1), depends_on);
}
/* Returns true if AFF1 and AFF2 are identical.  */

static bool
compare_aff_trees (aff_tree *aff1, aff_tree *aff2)
{
  unsigned i;

  if (aff1->n != aff2->n)
    return false;

  for (i = 0; i < aff1->n; i++)
    {
      if (double_int_cmp (aff1->elts[i].coef, aff2->elts[i].coef, 0) != 0)
        return false;

      if (!operand_equal_p (aff1->elts[i].val, aff2->elts[i].val, 0))
        return false;
    }
  return true;
}
/* Stores EXPR in DATA->inv_expr_tab, and assigns it an inv_expr_id.  */

static int
get_expr_id (struct ivopts_data *data, tree expr)
{
  struct iv_inv_expr_ent ent;
  struct iv_inv_expr_ent **slot;

  ent.expr = expr;
  ent.hash = iterative_hash_expr (expr, 0);
  slot = (struct iv_inv_expr_ent **) htab_find_slot (data->inv_expr_tab,
                                                     &ent, INSERT);
  if (*slot)
    return (*slot)->id;

  *slot = XNEW (struct iv_inv_expr_ent);
  (*slot)->expr = expr;
  (*slot)->hash = ent.hash;
  (*slot)->id = data->inv_expr_id++;
  return (*slot)->id;
}
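/* For instance (illustrative): the first time an expression such as
   n_1 + 4 is recorded it receives a fresh id from data->inv_expr_id++;
   a later structurally identical expression hashes to the same slot and
   gets the same id, so both uses are costed as sharing one
   loop-invariant temporary.  */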
/* Returns the pseudo expr id if expression UBASE - RATIO * CBASE
   requires a new compiler generated temporary.  Returns -1 otherwise.
   ADDRESS_P is a flag indicating if the expression is for address
   computation.  */

static int
get_loop_invariant_expr_id (struct ivopts_data *data, tree ubase,
                            tree cbase, HOST_WIDE_INT ratio,
                            bool address_p)
{
  aff_tree ubase_aff, cbase_aff;
  tree expr, ub, cb;

  STRIP_NOPS (ubase);
  STRIP_NOPS (cbase);
  ub = ubase;
  cb = cbase;

  if ((TREE_CODE (ubase) == INTEGER_CST)
      && (TREE_CODE (cbase) == INTEGER_CST))
    return -1;

  /* Strips the constant part.  */
  if (TREE_CODE (ubase) == PLUS_EXPR
      || TREE_CODE (ubase) == MINUS_EXPR
      || TREE_CODE (ubase) == POINTER_PLUS_EXPR)
    {
      if (TREE_CODE (TREE_OPERAND (ubase, 1)) == INTEGER_CST)
        ubase = TREE_OPERAND (ubase, 0);
    }

  /* Strips the constant part.  */
  if (TREE_CODE (cbase) == PLUS_EXPR
      || TREE_CODE (cbase) == MINUS_EXPR
      || TREE_CODE (cbase) == POINTER_PLUS_EXPR)
    {
      if (TREE_CODE (TREE_OPERAND (cbase, 1)) == INTEGER_CST)
        cbase = TREE_OPERAND (cbase, 0);
    }

  if (address_p)
    {
      if (((TREE_CODE (ubase) == SSA_NAME)
           || (TREE_CODE (ubase) == ADDR_EXPR
               && is_gimple_min_invariant (ubase)))
          && (TREE_CODE (cbase) == INTEGER_CST))
        return -1;

      if (((TREE_CODE (cbase) == SSA_NAME)
           || (TREE_CODE (cbase) == ADDR_EXPR
               && is_gimple_min_invariant (cbase)))
          && (TREE_CODE (ubase) == INTEGER_CST))
        return -1;
    }

  if (ratio == 1)
    {
      if (operand_equal_p (ubase, cbase, 0))
        return -1;

      if (TREE_CODE (ubase) == ADDR_EXPR
          && TREE_CODE (cbase) == ADDR_EXPR)
        {
          tree usym, csym;

          usym = TREE_OPERAND (ubase, 0);
          csym = TREE_OPERAND (cbase, 0);
          if (TREE_CODE (usym) == ARRAY_REF)
            {
              tree ind = TREE_OPERAND (usym, 1);
              if (TREE_CODE (ind) == INTEGER_CST
                  && host_integerp (ind, 0)
                  && TREE_INT_CST_LOW (ind) == 0)
                usym = TREE_OPERAND (usym, 0);
            }
          if (TREE_CODE (csym) == ARRAY_REF)
            {
              tree ind = TREE_OPERAND (csym, 1);
              if (TREE_CODE (ind) == INTEGER_CST
                  && host_integerp (ind, 0)
                  && TREE_INT_CST_LOW (ind) == 0)
                csym = TREE_OPERAND (csym, 0);
            }
          if (operand_equal_p (usym, csym, 0))
            return -1;
        }
      /* Now do more complex comparison.  */
      tree_to_aff_combination (ubase, TREE_TYPE (ubase), &ubase_aff);
      tree_to_aff_combination (cbase, TREE_TYPE (cbase), &cbase_aff);
      if (compare_aff_trees (&ubase_aff, &cbase_aff))
        return -1;
    }

  tree_to_aff_combination (ub, TREE_TYPE (ub), &ubase_aff);
  tree_to_aff_combination (cb, TREE_TYPE (cb), &cbase_aff);

  aff_combination_scale (&cbase_aff, shwi_to_double_int (-1 * ratio));
  aff_combination_add (&ubase_aff, &cbase_aff);
  expr = aff_combination_to_tree (&ubase_aff);
  return get_expr_id (data, expr);
}
/* Determines the cost of the computation by that USE is expressed
   from induction variable CAND.  If ADDRESS_P is true, we just need
   to create an address from it, otherwise we want to get it into
   register.  A set of invariants we depend on is stored in
   DEPENDS_ON.  AT is the statement at that the value is computed.
   If CAN_AUTOINC is nonnull, use it to record whether autoinc
   addressing is likely.  */

static comp_cost
get_computation_cost_at (struct ivopts_data *data,
                         struct iv_use *use, struct iv_cand *cand,
                         bool address_p, bitmap *depends_on, gimple at,
                         bool *can_autoinc, int *inv_expr_id)
{
  tree ubase = use->iv->base, ustep = use->iv->step;
  tree cbase, cstep;
  tree utype = TREE_TYPE (ubase), ctype;
  unsigned HOST_WIDE_INT cstepi, offset = 0;
  HOST_WIDE_INT ratio, aratio;
  bool var_present, symbol_present, stmt_is_after_inc;
  comp_cost cost;
  double_int rat;
  bool speed = optimize_bb_for_speed_p (gimple_bb (at));

  *depends_on = NULL;

  /* Only consider real candidates.  */
  if (!cand->iv)
    return infinite_cost;

  cbase = cand->iv->base;
  cstep = cand->iv->step;
  ctype = TREE_TYPE (cbase);

  if (TYPE_PRECISION (utype) > TYPE_PRECISION (ctype))
    {
      /* We do not have a precision to express the values of use.  */
      return infinite_cost;
    }

  if (address_p
      || (use->iv->base_object
          && cand->iv->base_object
          && POINTER_TYPE_P (TREE_TYPE (use->iv->base_object))
          && POINTER_TYPE_P (TREE_TYPE (cand->iv->base_object))))
    {
      /* Do not try to express address of an object with computation based
         on address of a different object.  This may cause problems in rtl
         level alias analysis (that does not expect this to be happening,
         as this is illegal in C), and would be unlikely to be useful
         anyway.  */
      if (use->iv->base_object
          && cand->iv->base_object
          && !operand_equal_p (use->iv->base_object, cand->iv->base_object, 0))
        return infinite_cost;
    }

  if (TYPE_PRECISION (utype) < TYPE_PRECISION (ctype))
    {
      /* TODO -- add direct handling of this case.  */
      goto fallback;
    }

  /* CSTEPI is removed from the offset in case statement is after the
     increment.  If the step is not constant, we use zero instead.
     This is a bit imprecise (there is the extra addition), but
     redundancy elimination is likely to transform the code so that
     it uses value of the variable before increment anyway,
     so this is not too unrealistic.  */
  if (cst_and_fits_in_hwi (cstep))
    cstepi = int_cst_value (cstep);
  else
    cstepi = 0;

  if (!constant_multiple_of (ustep, cstep, &rat))
    return infinite_cost;

  if (double_int_fits_in_shwi_p (rat))
    ratio = double_int_to_shwi (rat);
  else
    return infinite_cost;

  STRIP_NOPS (cbase);
  ctype = TREE_TYPE (cbase);

  stmt_is_after_inc = stmt_after_increment (data->current_loop, cand, at);

  /* use = ubase + ratio * (var - cbase).  If either cbase is a constant
     or ratio == 1, it is better to handle this like

     ubase - ratio * cbase + ratio * var

     (also holds in the case ratio == -1, TODO.  */

  if (cst_and_fits_in_hwi (cbase))
    {
      offset = - ratio * int_cst_value (cbase);
      cost = difference_cost (data,
                              ubase, build_int_cst (utype, 0),
                              &symbol_present, &var_present, &offset,
                              depends_on);
      cost.cost /= avg_loop_niter (data->current_loop);
    }
  else if (ratio == 1)
    {
      tree real_cbase = cbase;

      /* Check to see if any adjustment is needed.  */
      if (cstepi == 0 && stmt_is_after_inc)
        {
          aff_tree real_cbase_aff;
          aff_tree cstep_aff;

          tree_to_aff_combination (cbase, TREE_TYPE (real_cbase),
                                   &real_cbase_aff);
          tree_to_aff_combination (cstep, TREE_TYPE (cstep), &cstep_aff);

          aff_combination_add (&real_cbase_aff, &cstep_aff);
          real_cbase = aff_combination_to_tree (&real_cbase_aff);
        }

      cost = difference_cost (data,
                              ubase, real_cbase,
                              &symbol_present, &var_present, &offset,
                              depends_on);
      cost.cost /= avg_loop_niter (data->current_loop);
    }
  else if (address_p
           && !POINTER_TYPE_P (ctype)
           && multiplier_allowed_in_address_p
                (ratio, TYPE_MODE (TREE_TYPE (utype)),
                 TYPE_ADDR_SPACE (TREE_TYPE (utype))))
    {
      cbase
        = fold_build2 (MULT_EXPR, ctype, cbase, build_int_cst (ctype, ratio));
      cost = difference_cost (data,
                              ubase, cbase,
                              &symbol_present, &var_present, &offset,
                              depends_on);
      cost.cost /= avg_loop_niter (data->current_loop);
    }
  else
    {
      cost = force_var_cost (data, cbase, depends_on);
      cost = add_costs (cost,
                        difference_cost (data,
                                         ubase, build_int_cst (utype, 0),
                                         &symbol_present, &var_present,
                                         &offset, depends_on));
      cost.cost /= avg_loop_niter (data->current_loop);
      cost.cost += add_cost (data->speed, TYPE_MODE (ctype));
    }

  if (inv_expr_id)
    {
      *inv_expr_id =
          get_loop_invariant_expr_id (data, ubase, cbase, ratio, address_p);
      /* Clear depends on.  */
      if (*inv_expr_id != -1 && depends_on && *depends_on)
        bitmap_clear (*depends_on);
    }

  /* If we are after the increment, the value of the candidate is higher by
     one iteration.  */
  if (stmt_is_after_inc)
    offset -= ratio * cstepi;

  /* Now the computation is in shape symbol + var1 + const + ratio * var2.
     (symbol/var1/const parts may be omitted).  If we are looking for an
     address, find the cost of addressing this.  */
  if (address_p)
    return add_costs (cost,
                      get_address_cost (symbol_present, var_present,
                                        offset, ratio, cstepi,
                                        TYPE_MODE (TREE_TYPE (utype)),
                                        TYPE_ADDR_SPACE (TREE_TYPE (utype)),
                                        speed, stmt_is_after_inc,
                                        can_autoinc));

  /* Otherwise estimate the costs for computing the expression.  */
  if (!symbol_present && !var_present && !offset)
    {
      if (ratio != 1)
        cost.cost += mult_by_coeff_cost (ratio, TYPE_MODE (ctype), speed);
      return cost;
    }

  /* Symbol + offset should be compile-time computable so consider that they
     are added once to the variable, if present.  */
  if (var_present && (symbol_present || offset))
    cost.cost += adjust_setup_cost (data,
                                    add_cost (speed, TYPE_MODE (ctype)));

  /* Having offset does not affect runtime cost in case it is added to
     symbol, but it increases complexity.  */
  if (offset)
    cost.complexity++;

  cost.cost += add_cost (speed, TYPE_MODE (ctype));

  aratio = ratio > 0 ? ratio : -ratio;
  if (aratio != 1)
    cost.cost += mult_by_coeff_cost (aratio, TYPE_MODE (ctype), speed);
  return cost;

fallback:
  if (can_autoinc)
    *can_autoinc = false;

  {
    /* Just get the expression, expand it and measure the cost.  */
    tree comp = get_computation_at (data->current_loop, use, cand, at);

    if (!comp)
      return infinite_cost;

    if (address_p)
      comp = build_simple_mem_ref (comp);

    return new_cost (computation_cost (comp, speed), 0);
  }
}
/* Determines the cost of the computation by that USE is expressed
   from induction variable CAND.  If ADDRESS_P is true, we just need
   to create an address from it, otherwise we want to get it into
   register.  A set of invariants we depend on is stored in
   DEPENDS_ON.  If CAN_AUTOINC is nonnull, use it to record whether
   autoinc addressing is likely.  */

static comp_cost
get_computation_cost (struct ivopts_data *data,
                      struct iv_use *use, struct iv_cand *cand,
                      bool address_p, bitmap *depends_on,
                      bool *can_autoinc, int *inv_expr_id)
{
  return get_computation_cost_at (data,
                                  use, cand, address_p, depends_on, use->stmt,
                                  can_autoinc, inv_expr_id);
}
/* Determines cost of basing replacement of USE on CAND in a generic
   expression.  */

static bool
determine_use_iv_cost_generic (struct ivopts_data *data,
                               struct iv_use *use, struct iv_cand *cand)
{
  bitmap depends_on;
  comp_cost cost;
  int inv_expr_id = -1;

  /* The simple case first -- if we need to express value of the preserved
     original biv, the cost is 0.  This also prevents us from counting the
     cost of increment twice -- once at this use and once in the cost of
     the candidate.  */
  if (cand->pos == IP_ORIGINAL
      && cand->incremented_at == use->stmt)
    {
      set_use_iv_cost (data, use, cand, no_cost, NULL, NULL_TREE,
                       ERROR_MARK, -1);
      return true;
    }

  cost = get_computation_cost (data, use, cand, false, &depends_on,
                               NULL, &inv_expr_id);

  set_use_iv_cost (data, use, cand, cost, depends_on, NULL_TREE, ERROR_MARK,
                   inv_expr_id);

  return !infinite_cost_p (cost);
}
/* Determines cost of basing replacement of USE on CAND in an address.  */

static bool
determine_use_iv_cost_address (struct ivopts_data *data,
                               struct iv_use *use, struct iv_cand *cand)
{
  bitmap depends_on;
  bool can_autoinc;
  int inv_expr_id = -1;
  comp_cost cost = get_computation_cost (data, use, cand, true, &depends_on,
                                         &can_autoinc, &inv_expr_id);

  if (cand->ainc_use == use)
    {
      if (can_autoinc)
        cost.cost -= cand->cost_step;
      /* If we generated the candidate solely for exploiting autoincrement
         opportunities, and it turns out it can't be used, set the cost to
         infinity to make sure we ignore it.  */
      else if (cand->pos == IP_AFTER_USE || cand->pos == IP_BEFORE_USE)
        cost = infinite_cost;
    }
  set_use_iv_cost (data, use, cand, cost, depends_on, NULL_TREE, ERROR_MARK,
                   inv_expr_id);

  return !infinite_cost_p (cost);
}
/* Computes value of candidate CAND at position AT in iteration NITER, and
   stores it to VAL.  */

static void
cand_value_at (struct loop *loop, struct iv_cand *cand, gimple at, tree niter,
               aff_tree *val)
{
  aff_tree step, delta, nit;
  struct iv *iv = cand->iv;
  tree type = TREE_TYPE (iv->base);
  tree steptype = type;
  if (POINTER_TYPE_P (type))
    steptype = sizetype;

  tree_to_aff_combination (iv->step, steptype, &step);
  tree_to_aff_combination (niter, TREE_TYPE (niter), &nit);
  aff_combination_convert (&nit, steptype);
  aff_combination_mult (&nit, &step, &delta);
  if (stmt_after_increment (loop, cand, at))
    aff_combination_add (&delta, &step);

  tree_to_aff_combination (iv->base, type, val);
  aff_combination_add (val, &delta);
}
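/* E.g. (illustrative): for a candidate {16, +, 4} and NITER == n, VAL
   becomes the affine form 16 + 4 * n, or 16 + 4 * (n + 1) when AT is
   already past the increment, because one more step has been applied.  */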
/* Returns period of induction variable iv.  */

static tree
iv_period (struct iv *iv)
{
  tree step = iv->step, period, type;
  tree pow2div;

  gcc_assert (step && TREE_CODE (step) == INTEGER_CST);

  type = unsigned_type_for (TREE_TYPE (step));
  /* Period of the iv is lcm (step, type_range)/step -1,
     i.e., N*type_range/step - 1.  Since type range is power
     of two, N == step >> num_of_ending_zeros_binary (step),
     so the final result is

       (type_range >> num_of_ending_zeros_binary (step)) - 1

  */
  pow2div = num_ending_zeros (step);

  period = build_low_bits_mask (type,
                                (TYPE_PRECISION (type)
                                 - tree_low_cst (pow2div, 1)));

  return period;
}
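/* Worked example (illustrative): for a 32-bit iv with step 4,
   num_ending_zeros (step) == 2 and the period is the low-bits mask of
   32 - 2 == 30 bits, i.e. 0x3fffffff: the iv can be incremented that
   many times before its value repeats modulo 2^32.  */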
/* Returns the comparison operator used when eliminating the iv USE.  */

static enum tree_code
iv_elimination_compare (struct ivopts_data *data, struct iv_use *use)
{
  struct loop *loop = data->current_loop;
  basic_block ex_bb;
  edge exit;

  ex_bb = gimple_bb (use->stmt);
  exit = EDGE_SUCC (ex_bb, 0);
  if (flow_bb_inside_loop_p (loop, exit->dest))
    exit = EDGE_SUCC (ex_bb, 1);

  return (exit->flags & EDGE_TRUE_VALUE ? EQ_EXPR : NE_EXPR);
}
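/* E.g. (illustrative): if the loop is left when the condition in USE is
   true, the replacement test must exit on equality with the bound, hence
   EQ_EXPR; if the loop is left on the false edge instead, the condition
   must stay true while the iv differs from the bound, hence NE_EXPR.  */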
/* Strips useless type conversions from EXP, as long as the stripped
   conversion does not change whether the type allows wrapping
   (see nowrap_type_p).  */

static tree
strip_wrap_conserving_type_conversions (tree exp)
{
  while (tree_ssa_useless_type_conversion (exp)
         && (nowrap_type_p (TREE_TYPE (exp))
             == nowrap_type_p (TREE_TYPE (TREE_OPERAND (exp, 0)))))
    exp = TREE_OPERAND (exp, 0);
  return exp;
}
/* Walk the SSA form and check whether E == WHAT.  Fairly simplistic, we
   check for an exact match.  */

static bool
expr_equal_p (tree e, tree what)
{
  gimple stmt;
  enum tree_code code;

  e = strip_wrap_conserving_type_conversions (e);
  what = strip_wrap_conserving_type_conversions (what);

  code = TREE_CODE (what);
  if (TREE_TYPE (e) != TREE_TYPE (what))
    return false;

  if (operand_equal_p (e, what, 0))
    return true;

  if (TREE_CODE (e) != SSA_NAME)
    return false;

  stmt = SSA_NAME_DEF_STMT (e);
  if (gimple_code (stmt) != GIMPLE_ASSIGN
      || gimple_assign_rhs_code (stmt) != code)
    return false;

  switch (get_gimple_rhs_class (code))
    {
    case GIMPLE_BINARY_RHS:
      if (!expr_equal_p (gimple_assign_rhs2 (stmt), TREE_OPERAND (what, 1)))
        return false;
      /* Fallthru.  */

    case GIMPLE_UNARY_RHS:
    case GIMPLE_SINGLE_RHS:
      return expr_equal_p (gimple_assign_rhs1 (stmt), TREE_OPERAND (what, 0));

    default:
      return false;
    }
}
/* Returns true if we can prove that BASE - OFFSET does not overflow.  For now,
   we only detect the situation that BASE = SOMETHING + OFFSET, where the
   calculation is performed in non-wrapping type.

   TODO: More generally, we could test for the situation that
         BASE = SOMETHING + OFFSET' and OFFSET is between OFFSET' and zero.
         This would require knowing the sign of OFFSET.

         Also, we only look for the first addition in the computation of BASE.
         More complex analysis would be better, but introducing it just for
         this optimization seems like an overkill.  */

static bool
difference_cannot_overflow_p (tree base, tree offset)
{
  enum tree_code code;
  tree e1, e2;

  if (!nowrap_type_p (TREE_TYPE (base)))
    return false;

  base = expand_simple_operations (base);

  if (TREE_CODE (base) == SSA_NAME)
    {
      gimple stmt = SSA_NAME_DEF_STMT (base);

      if (gimple_code (stmt) != GIMPLE_ASSIGN)
        return false;

      code = gimple_assign_rhs_code (stmt);
      if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
        return false;

      e1 = gimple_assign_rhs1 (stmt);
      e2 = gimple_assign_rhs2 (stmt);
    }
  else
    {
      code = TREE_CODE (base);
      if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
        return false;
      e1 = TREE_OPERAND (base, 0);
      e2 = TREE_OPERAND (base, 1);
    }

  /* TODO: deeper inspection may be necessary to prove the equality.  */
  switch (code)
    {
    case PLUS_EXPR:
      return expr_equal_p (e1, offset) || expr_equal_p (e2, offset);
    case POINTER_PLUS_EXPR:
      return expr_equal_p (e2, offset);

    default:
      return false;
    }
}
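/* Example (illustrative): if BASE is p_0 defined by p_0 = base + a in a
   non-wrapping pointer type and OFFSET is a, the POINTER_PLUS_EXPR case
   matches via expr_equal_p (e2, offset), proving that p_0 - a cannot
   overflow; iv_elimination_compare_lt below relies on exactly this.  */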
/* Tries to replace loop exit by one formulated in terms of a LT_EXPR
   comparison with CAND.  NITER describes the number of iterations of
   the loop.  If successful, the comparison in COMP_P is altered accordingly.

   We aim to handle the following situation:

     sometype *base, *p;
     int a, b, i;

     i = a;
     p = p_0 = base + a;

     do
       {
         bla (*p);
         p++;
         i++;
       }
     while (i < b);

   Here, the number of iterations of the loop is (a + 1 > b) ? 0 : b - a - 1.
   We aim to optimize this to

     p = p_0 = base + a;
     do
       {
         bla (*p);
         p++;
       }
     while (p < p_0 - a + b);

   This preserves the correctness, since the pointer arithmetic does not
   overflow.  More precisely:

   1) if a + 1 <= b, then p_0 - a + b is the final value of p, hence there
      is no overflow in computing it or the values of p.
   2) if a + 1 > b, then we need to verify that the expression p_0 - a does
      not overflow.  To prove this, we use the fact that p_0 = base + a.  */
static bool
iv_elimination_compare_lt (struct ivopts_data *data,
                           struct iv_cand *cand, enum tree_code *comp_p,
                           struct tree_niter_desc *niter)
{
  tree cand_type, a, b, mbz, nit_type = TREE_TYPE (niter->niter), offset;
  struct affine_tree_combination nit, tmpa, tmpb;
  enum tree_code comp;
  HOST_WIDE_INT step;

  /* We need to know that the candidate induction variable does not overflow.
     While more complex analysis may be used to prove this, for now just
     check that the variable appears in the original program and that it
     is computed in a type that guarantees no overflows.  */
  cand_type = TREE_TYPE (cand->iv->base);
  if (cand->pos != IP_ORIGINAL || !nowrap_type_p (cand_type))
    return false;

  /* Make sure that the loop iterates till the loop bound is hit, as otherwise
     the calculation of the BOUND could overflow, making the comparison
     invalid.  */
  if (!data->loop_single_exit_p)
    return false;

  /* We need to be able to decide whether candidate is increasing or decreasing
     in order to choose the right comparison operator.  */
  if (!cst_and_fits_in_hwi (cand->iv->step))
    return false;
  step = int_cst_value (cand->iv->step);

  /* Check that the number of iterations matches the expected pattern:
     a + 1 > b ? 0 : b - a - 1.  */
  mbz = niter->may_be_zero;
  if (TREE_CODE (mbz) == GT_EXPR)
    {
      /* Handle a + 1 > b.  */
      tree op0 = TREE_OPERAND (mbz, 0);
      if (TREE_CODE (op0) == PLUS_EXPR && integer_onep (TREE_OPERAND (op0, 1)))
        {
          a = TREE_OPERAND (op0, 0);
          b = TREE_OPERAND (mbz, 1);
        }
      else
        return false;
    }
  else if (TREE_CODE (mbz) == LT_EXPR)
    {
      tree op1 = TREE_OPERAND (mbz, 1);

      /* Handle b < a + 1.  */
      if (TREE_CODE (op1) == PLUS_EXPR && integer_onep (TREE_OPERAND (op1, 1)))
        {
          a = TREE_OPERAND (op1, 0);
          b = TREE_OPERAND (mbz, 0);
        }
      else
        return false;
    }
  else
    return false;

  /* Expected number of iterations is B - A - 1.  Check that it matches
     the actual number, i.e., that B - A - NITER = 1.  */
  tree_to_aff_combination (niter->niter, nit_type, &nit);
  tree_to_aff_combination (fold_convert (nit_type, a), nit_type, &tmpa);
  tree_to_aff_combination (fold_convert (nit_type, b), nit_type, &tmpb);
  aff_combination_scale (&nit, double_int_minus_one);
  aff_combination_scale (&tmpa, double_int_minus_one);
  aff_combination_add (&tmpb, &tmpa);
  aff_combination_add (&tmpb, &nit);
  if (tmpb.n != 0 || !double_int_equal_p (tmpb.offset, double_int_one))
    return false;

  /* Finally, check that CAND->IV->BASE - CAND->IV->STEP * A does not
     overflow.  */
  offset = fold_build2 (MULT_EXPR, TREE_TYPE (cand->iv->step),
                        cand->iv->step,
                        fold_convert (TREE_TYPE (cand->iv->step), a));
  if (!difference_cannot_overflow_p (cand->iv->base, offset))
    return false;

  /* Determine the new comparison operator.  */
  comp = step < 0 ? GT_EXPR : LT_EXPR;
  if (*comp_p == NE_EXPR)
    *comp_p = comp;
  else if (*comp_p == EQ_EXPR)
    *comp_p = invert_tree_comparison (comp, false);
  else
    gcc_unreachable ();

  return true;
}
/* Check whether it is possible to express the condition in USE by comparison
   of candidate CAND.  If so, store the value compared with to BOUND, and the
   comparison operator to COMP.  */

static bool
may_eliminate_iv (struct ivopts_data *data,
		  struct iv_use *use, struct iv_cand *cand, tree *bound,
		  enum tree_code *comp)
{
  basic_block ex_bb;
  edge exit;
  tree period;
  struct loop *loop = data->current_loop;
  aff_tree bnd;
  struct tree_niter_desc *desc = NULL;

  if (TREE_CODE (cand->iv->step) != INTEGER_CST)
    return false;

  /* For now works only for exits that dominate the loop latch.
     TODO: extend to other conditions inside loop body.  */
  ex_bb = gimple_bb (use->stmt);
  if (use->stmt != last_stmt (ex_bb)
      || gimple_code (use->stmt) != GIMPLE_COND
      || !dominated_by_p (CDI_DOMINATORS, loop->latch, ex_bb))
    return false;

  exit = EDGE_SUCC (ex_bb, 0);
  if (flow_bb_inside_loop_p (loop, exit->dest))
    exit = EDGE_SUCC (ex_bb, 1);
  if (flow_bb_inside_loop_p (loop, exit->dest))
    return false;

  desc = niter_for_exit (data, exit);
  if (!desc)
    return false;

  /* Determine whether we can use the variable to test the exit condition.
     This is the case iff the period of the induction variable is greater
     than the number of iterations for which the exit condition is true.  */
  period = iv_period (cand->iv);

  /* If the number of iterations is constant, compare against it directly.  */
  if (TREE_CODE (desc->niter) == INTEGER_CST)
    {
      /* See cand_value_at.  */
      if (stmt_after_increment (loop, cand, use->stmt))
	{
	  if (!tree_int_cst_lt (desc->niter, period))
	    return false;
	}
      else
	{
	  if (tree_int_cst_lt (period, desc->niter))
	    return false;
	}
    }

  /* If not, and if this is the only possible exit of the loop, see whether
     we can get a conservative estimate on the number of iterations of the
     entire loop and compare against that instead.  */
  else
    {
      double_int period_value, max_niter;

      max_niter = desc->max;
      if (stmt_after_increment (loop, cand, use->stmt))
	max_niter = double_int_add (max_niter, double_int_one);
      period_value = tree_to_double_int (period);
      if (double_int_ucmp (max_niter, period_value) > 0)
	{
	  /* See if we can take advantage of inferred loop bound
	     information.  */
	  if (data->loop_single_exit_p)
	    {
	      if (!max_loop_iterations (loop, &max_niter))
		return false;
	      /* The loop bound is already adjusted by adding 1.  */
	      if (double_int_ucmp (max_niter, period_value) > 0)
		return false;
	    }
	  else
	    return false;
	}
    }

  cand_value_at (loop, cand, use->stmt, desc->niter, &bnd);

  *bound = aff_combination_to_tree (&bnd);
  *comp = iv_elimination_compare (data, use);

  /* It is unlikely that computing the number of iterations using division
     would be more profitable than keeping the original induction
     variable.  */
  if (expression_expensive_p (*bound))
    return false;

  /* Sometimes it is possible to handle the situation that the number of
     iterations may be zero unless additional assumptions hold, by using <
     instead of != in the exit condition.

     TODO: we could also calculate the value MAY_BE_ZERO ? 0 : NITER and
	   base the exit condition on it.  However, that is often too
	   expensive.  */
  if (!integer_zerop (desc->may_be_zero))
    return iv_elimination_compare_lt (data, cand, comp, desc);

  return true;
}
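
/* An illustrative sketch (hypothetical GIMPLE names): for

     i_4 = i_1 + 1;
     p_6 = p_2 + 4;
     if (i_4 < n_5) goto body;

   may_eliminate_iv can express the exit test by the pointer candidate p,
   storing BOUND = base + 4 * n and COMP = LT_EXPR, provided the period of
   p is long enough that the candidate never wraps within the loop's
   iterations -- exactly the period/niter checks performed above.  */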
/* Calculates the cost of BOUND, if it is a PARM_DECL.  A PARM_DECL must
   be copied, if it is used in the loop body and DATA->body_includes_call.  */

static int
parm_decl_cost (struct ivopts_data *data, tree bound)
{
  tree sbound = bound;
  STRIP_NOPS (sbound);

  if (TREE_CODE (sbound) == SSA_NAME
      && TREE_CODE (SSA_NAME_VAR (sbound)) == PARM_DECL
      && gimple_nop_p (SSA_NAME_DEF_STMT (sbound))
      && data->body_includes_call)
    return COSTS_N_INSNS (1);

  return 0;
}
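
/* An illustrative sketch (hypothetical function): in

     void f (int *p, int n)
     {
       for (int i = 0; i < n; i++)
	 g (p[i]);
     }

   a bound that is just the parameter N is free to compute, but because the
   body calls g, N must survive the calls (a copy into a call-saved register
   or a reload), which the COSTS_N_INSNS (1) above approximates.  */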
/* Determines cost of basing replacement of USE on CAND in a condition.  */

static bool
determine_use_iv_cost_condition (struct ivopts_data *data,
				 struct iv_use *use, struct iv_cand *cand)
{
  tree bound = NULL_TREE;
  struct iv *cmp_iv;
  bitmap depends_on_elim = NULL, depends_on_express = NULL, depends_on;
  comp_cost elim_cost, express_cost, cost, bound_cost;
  bool ok;
  int elim_inv_expr_id = -1, express_inv_expr_id = -1, inv_expr_id;
  tree *control_var, *bound_cst;
  enum tree_code comp = ERROR_MARK;

  /* Only consider real candidates.  */
  if (!cand->iv)
    {
      set_use_iv_cost (data, use, cand, infinite_cost, NULL, NULL_TREE,
		       ERROR_MARK, -1);
      return false;
    }

  /* Try iv elimination.  */
  if (may_eliminate_iv (data, use, cand, &bound, &comp))
    {
      elim_cost = force_var_cost (data, bound, &depends_on_elim);
      if (elim_cost.cost == 0)
	elim_cost.cost = parm_decl_cost (data, bound);
      else if (TREE_CODE (bound) == INTEGER_CST)
	elim_cost.cost = 0;
      /* If we replace a loop condition 'i < n' with 'p < base + n',
	 depends_on_elim will have 'base' and 'n' set, which implies
	 that both 'base' and 'n' will be live during the loop.  More likely,
	 'base + n' will be loop invariant, resulting in only one live value
	 during the loop.  So in that case we clear depends_on_elim and set
	 elim_inv_expr_id instead.  */
      if (depends_on_elim && bitmap_count_bits (depends_on_elim) > 1)
	{
	  elim_inv_expr_id = get_expr_id (data, bound);
	  bitmap_clear (depends_on_elim);
	}
      /* The bound is a loop invariant, so it will be only computed
	 once.  */
      elim_cost.cost = adjust_setup_cost (data, elim_cost.cost);
    }
  else
    elim_cost = infinite_cost;

  /* Try expressing the original giv.  If it is compared with an invariant,
     note that we cannot get rid of it.  */
  ok = extract_cond_operands (data, use->stmt, &control_var, &bound_cst,
			      NULL, &cmp_iv);
  gcc_assert (ok);

  /* When the condition is a comparison of the candidate IV against
     zero, prefer this IV.

     TODO: The constant that we're subtracting from the cost should
     be target-dependent.  This information should be added to the
     target costs for each backend.  */
  if (!infinite_cost_p (elim_cost) /* Do not try to decrease infinite! */
      && integer_zerop (*bound_cst)
      && (operand_equal_p (*control_var, cand->var_after, 0)
	  || operand_equal_p (*control_var, cand->var_before, 0)))
    elim_cost.cost -= 1;

  express_cost = get_computation_cost (data, use, cand, false,
				       &depends_on_express, NULL,
				       &express_inv_expr_id);
  fd_ivopts_data = data;
  walk_tree (&cmp_iv->base, find_depends, &depends_on_express, NULL);

  /* Count the cost of the original bound as well.  */
  bound_cost = force_var_cost (data, *bound_cst, NULL);
  if (bound_cost.cost == 0)
    bound_cost.cost = parm_decl_cost (data, *bound_cst);
  else if (TREE_CODE (*bound_cst) == INTEGER_CST)
    bound_cost.cost = 0;
  express_cost.cost += bound_cost.cost;

  /* Choose the better approach, preferring the eliminated IV.  */
  if (compare_costs (elim_cost, express_cost) <= 0)
    {
      cost = elim_cost;
      depends_on = depends_on_elim;
      depends_on_elim = NULL;
      inv_expr_id = elim_inv_expr_id;
    }
  else
    {
      cost = express_cost;
      depends_on = depends_on_express;
      depends_on_express = NULL;
      bound = NULL_TREE;
      comp = ERROR_MARK;
      inv_expr_id = express_inv_expr_id;
    }

  set_use_iv_cost (data, use, cand, cost, depends_on, bound, comp, inv_expr_id);

  if (depends_on_elim)
    BITMAP_FREE (depends_on_elim);
  if (depends_on_express)
    BITMAP_FREE (depends_on_express);

  return !infinite_cost_p (cost);
}
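
/* An illustrative sketch (hypothetical numbers): for a use 'if (i < n)' and
   a pointer candidate p, elimination yields the test 'p < base + n', whose
   bound is computed once before the loop, so its cost is amortized by
   adjust_setup_cost; expressing i from p instead costs an operation on
   every iteration.  With, say, elim_cost = 4 against express_cost = 9, the
   first branch above wins and BOUND and COMP are recorded for the use.  */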
/* Determines cost of basing replacement of USE on CAND.  Returns false
   if USE cannot be based on CAND.  */

static bool
determine_use_iv_cost (struct ivopts_data *data,
		       struct iv_use *use, struct iv_cand *cand)
{
  switch (use->type)
    {
    case USE_NONLINEAR_EXPR:
      return determine_use_iv_cost_generic (data, use, cand);

    case USE_ADDRESS:
      return determine_use_iv_cost_address (data, use, cand);

    case USE_COMPARE:
      return determine_use_iv_cost_condition (data, use, cand);

    default:
      gcc_unreachable ();
    }
}
/* Return true if get_computation_cost indicates that autoincrement is
   a possibility for the pair of USE and CAND, false otherwise.  */

static bool
autoinc_possible_for_pair (struct ivopts_data *data, struct iv_use *use,
			   struct iv_cand *cand)
{
  bitmap depends_on;
  bool can_autoinc;
  comp_cost cost;

  if (use->type != USE_ADDRESS)
    return false;

  cost = get_computation_cost (data, use, cand, true, &depends_on,
			       &can_autoinc, NULL);

  BITMAP_FREE (depends_on);

  return !infinite_cost_p (cost) && can_autoinc;
}
/* Examine IP_ORIGINAL candidates to see if they are incremented next to a
   use that allows autoincrement, and set their AINC_USE if possible.  */

static void
set_autoinc_for_original_candidates (struct ivopts_data *data)
{
  unsigned i, j;

  for (i = 0; i < n_iv_cands (data); i++)
    {
      struct iv_cand *cand = iv_cand (data, i);
      struct iv_use *closest = NULL;
      if (cand->pos != IP_ORIGINAL)
	continue;
      for (j = 0; j < n_iv_uses (data); j++)
	{
	  struct iv_use *use = iv_use (data, j);
	  unsigned uid = gimple_uid (use->stmt);
	  if (gimple_bb (use->stmt) != gimple_bb (cand->incremented_at)
	      || uid > gimple_uid (cand->incremented_at))
	    continue;
	  if (closest == NULL || uid > gimple_uid (closest->stmt))
	    closest = use;
	}
      if (closest == NULL || !autoinc_possible_for_pair (data, closest, cand))
	continue;
      cand->ainc_use = closest;
    }
}
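
/* An illustrative sketch (target-dependent): on a machine with
   post-increment addressing, a copy loop such as

     while (n--)
       *dst++ = *src++;

   places each candidate increment right next to the memory access that
   uses it, so AINC_USE is set and the later cost computation can treat
   the increment as folded into the access (a post_inc address in RTL
   terms).  */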
/* Finds the candidates for the induction variables.  */

static void
find_iv_candidates (struct ivopts_data *data)
{
  /* Add commonly used ivs.  */
  add_standard_iv_candidates (data);

  /* Add old induction variables.  */
  add_old_ivs_candidates (data);

  /* Add induction variables derived from uses.  */
  add_derived_ivs_candidates (data);

  set_autoinc_for_original_candidates (data);

  /* Record the important candidates.  */
  record_important_candidates (data);
}
/* Determines costs of basing the use of the iv on an iv candidate.  */

static void
determine_use_iv_costs (struct ivopts_data *data)
{
  unsigned i, j;
  struct iv_use *use;
  struct iv_cand *cand;
  bitmap to_clear = BITMAP_ALLOC (NULL);
  bitmap_iterator bi;

  alloc_use_cost_map (data);

  for (i = 0; i < n_iv_uses (data); i++)
    {
      use = iv_use (data, i);

      if (data->consider_all_candidates)
	{
	  for (j = 0; j < n_iv_cands (data); j++)
	    {
	      cand = iv_cand (data, j);
	      determine_use_iv_cost (data, use, cand);
	    }
	}
      else
	{
	  EXECUTE_IF_SET_IN_BITMAP (use->related_cands, 0, j, bi)
	    {
	      cand = iv_cand (data, j);
	      if (!determine_use_iv_cost (data, use, cand))
		bitmap_set_bit (to_clear, j);
	    }

	  /* Remove the candidates for which the cost is infinite from
	     the list of related candidates.  */
	  bitmap_and_compl_into (use->related_cands, to_clear);
	  bitmap_clear (to_clear);
	}
    }

  BITMAP_FREE (to_clear);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Use-candidate costs:\n");

      for (i = 0; i < n_iv_uses (data); i++)
	{
	  use = iv_use (data, i);

	  fprintf (dump_file, "Use %d:\n", i);
	  fprintf (dump_file, "  cand\tcost\tcompl.\tdepends on\n");
	  for (j = 0; j < use->n_map_members; j++)
	    {
	      if (!use->cost_map[j].cand
		  || infinite_cost_p (use->cost_map[j].cost))
		continue;

	      fprintf (dump_file, "  %d\t%d\t%d\t",
		       use->cost_map[j].cand->id,
		       use->cost_map[j].cost.cost,
		       use->cost_map[j].cost.complexity);
	      if (use->cost_map[j].depends_on)
		bitmap_print (dump_file,
			      use->cost_map[j].depends_on, "","");
	      if (use->cost_map[j].inv_expr_id != -1)
		fprintf (dump_file, " inv_expr:%d",
			 use->cost_map[j].inv_expr_id);
	      fprintf (dump_file, "\n");
	    }

	  fprintf (dump_file, "\n");
	}
      fprintf (dump_file, "\n");
    }
}
/* Determines cost of the candidate CAND.  */

static void
determine_iv_cost (struct ivopts_data *data, struct iv_cand *cand)
{
  comp_cost cost_base;
  unsigned cost, cost_step;
  tree base;

  if (!cand->iv)
    {
      cand->cost = 0;
      return;
    }

  /* There are two costs associated with the candidate -- its increment
     and its initialization.  The second is almost negligible for any loop
     that rolls enough, so we take it just very little into account.  */

  base = cand->iv->base;
  cost_base = force_var_cost (data, base, NULL);
  /* It will be exceptional that the iv register happens to be initialized with
     the proper value at no cost.  In general, there will at least be a regcopy
     or a const set.  */
  if (cost_base.cost == 0)
    cost_base.cost = COSTS_N_INSNS (1);
  cost_step = add_cost (data->speed, TYPE_MODE (TREE_TYPE (base)));

  cost = cost_step + adjust_setup_cost (data, cost_base.cost);

  /* Prefer the original ivs unless we may gain something by replacing it.
     The reason is to make debugging simpler; so this is not relevant for
     artificial ivs created by other optimization passes.  */
  if (cand->pos != IP_ORIGINAL
      || DECL_ARTIFICIAL (SSA_NAME_VAR (cand->var_before)))
    cost++;

  /* Prefer not to insert statements into latch unless there are some
     already (so that we do not create unnecessary jumps).  */
  if (cand->pos == IP_END
      && empty_block_p (ip_end_pos (data->current_loop)))
    cost++;

  cand->cost = cost;
  cand->cost_step = cost_step;
}
/* Determines costs of computation of the candidates.  */

static void
determine_iv_costs (struct ivopts_data *data)
{
  unsigned i;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Candidate costs:\n");
      fprintf (dump_file, "  cand\tcost\n");
    }

  for (i = 0; i < n_iv_cands (data); i++)
    {
      struct iv_cand *cand = iv_cand (data, i);

      determine_iv_cost (data, cand);

      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "  %d\t%d\n", i, cand->cost);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n");
}
/* Calculates cost for having SIZE induction variables.  */

static unsigned
ivopts_global_cost_for_size (struct ivopts_data *data, unsigned size)
{
  /* We add size to the cost, so that we prefer eliminating ivs
     if possible.  */
  return size + estimate_reg_pressure_cost (size, data->regs_used, data->speed,
					    data->body_includes_call);
}
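
/* An illustrative sketch (hypothetical numbers): with data->regs_used = 6
   invariants live and roughly 8 allocatable registers, growing SIZE from
   2 to 4 pushes the total register demand towards the spill threshold, so
   estimate_reg_pressure_cost rises sharply; the additional '+ size' term
   biases the search towards smaller candidate sets even before any
   spilling is predicted.  */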
/* For each size of the induction variable set determine the penalty.  */

static void
determine_set_costs (struct ivopts_data *data)
{
  unsigned j, n;
  gimple phi;
  gimple_stmt_iterator psi;
  tree op;
  struct loop *loop = data->current_loop;
  bitmap_iterator bi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Global costs:\n");
      fprintf (dump_file, "  target_avail_regs %d\n", target_avail_regs);
      fprintf (dump_file, "  target_clobbered_regs %d\n",
	       target_clobbered_regs);
      fprintf (dump_file, "  target_reg_cost %d\n",
	       target_reg_cost[data->speed]);
      fprintf (dump_file, "  target_spill_cost %d\n",
	       target_spill_cost[data->speed]);
    }

  n = 0;
  for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = gsi_stmt (psi);
      op = PHI_RESULT (phi);

      if (!is_gimple_reg (op))
	continue;

      if (get_iv (data, op))
	continue;

      n++;
    }

  EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, j, bi)
    {
      struct version_info *info = ver_info (data, j);

      if (info->inv_id && info->has_nonlin_use)
	n++;
    }

  data->regs_used = n;
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "  regs_used %d\n", n);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "  cost for size:\n");
      fprintf (dump_file, "  ivs\tcost\n");
      for (j = 0; j <= 2 * target_avail_regs; j++)
	fprintf (dump_file, "  %d\t%d\n", j,
		 ivopts_global_cost_for_size (data, j));
      fprintf (dump_file, "\n");
    }
}
/* Returns true if A is a cheaper cost pair than B.  */

static bool
cheaper_cost_pair (struct cost_pair *a, struct cost_pair *b)
{
  int cmp;

  if (!a)
    return false;

  if (!b)
    return true;

  cmp = compare_costs (a->cost, b->cost);
  if (cmp < 0)
    return true;

  if (cmp > 0)
    return false;

  /* In case the costs are the same, prefer the cheaper candidate.  */
  if (a->cand->cost < b->cand->cost)
    return true;

  return false;
}
/* Returns the candidate by which USE is expressed in IVS.  */

static struct cost_pair *
iv_ca_cand_for_use (struct iv_ca *ivs, struct iv_use *use)
{
  return ivs->cand_for_use[use->id];
}
/* Computes the cost field of IVS structure.  */

static void
iv_ca_recount_cost (struct ivopts_data *data, struct iv_ca *ivs)
{
  comp_cost cost = ivs->cand_use_cost;

  cost.cost += ivs->cand_cost;

  cost.cost += ivopts_global_cost_for_size (data,
					    ivs->n_regs
					    + ivs->num_used_inv_expr);

  ivs->cost = cost;
}
/* Remove invariants in set INVS from set IVS.  */

static void
iv_ca_set_remove_invariants (struct iv_ca *ivs, bitmap invs)
{
  bitmap_iterator bi;
  unsigned iid;

  if (!invs)
    return;

  EXECUTE_IF_SET_IN_BITMAP (invs, 0, iid, bi)
    {
      ivs->n_invariant_uses[iid]--;
      if (ivs->n_invariant_uses[iid] == 0)
	ivs->n_regs--;
    }
}
/* Set USE not to be expressed by any candidate in IVS.  */

static void
iv_ca_set_no_cp (struct ivopts_data *data, struct iv_ca *ivs,
		 struct iv_use *use)
{
  unsigned uid = use->id, cid;
  struct cost_pair *cp;

  cp = ivs->cand_for_use[uid];
  if (!cp)
    return;
  cid = cp->cand->id;

  ivs->bad_uses++;
  ivs->cand_for_use[uid] = NULL;
  ivs->n_cand_uses[cid]--;

  if (ivs->n_cand_uses[cid] == 0)
    {
      bitmap_clear_bit (ivs->cands, cid);
      /* Do not count the pseudocandidates.  */
      if (cp->cand->iv)
	ivs->n_regs--;
      ivs->n_cands--;
      ivs->cand_cost -= cp->cand->cost;

      iv_ca_set_remove_invariants (ivs, cp->cand->depends_on);
    }

  ivs->cand_use_cost = sub_costs (ivs->cand_use_cost, cp->cost);

  iv_ca_set_remove_invariants (ivs, cp->depends_on);

  if (cp->inv_expr_id != -1)
    {
      ivs->used_inv_expr[cp->inv_expr_id]--;
      if (ivs->used_inv_expr[cp->inv_expr_id] == 0)
	ivs->num_used_inv_expr--;
    }
  iv_ca_recount_cost (data, ivs);
}
/* Add invariants in set INVS to set IVS.  */

static void
iv_ca_set_add_invariants (struct iv_ca *ivs, bitmap invs)
{
  bitmap_iterator bi;
  unsigned iid;

  if (!invs)
    return;

  EXECUTE_IF_SET_IN_BITMAP (invs, 0, iid, bi)
    {
      ivs->n_invariant_uses[iid]++;
      if (ivs->n_invariant_uses[iid] == 1)
	ivs->n_regs++;
    }
}
/* Set cost pair for USE in set IVS to CP.  */

static void
iv_ca_set_cp (struct ivopts_data *data, struct iv_ca *ivs,
	      struct iv_use *use, struct cost_pair *cp)
{
  unsigned uid = use->id, cid;

  if (ivs->cand_for_use[uid] == cp)
    return;

  if (ivs->cand_for_use[uid])
    iv_ca_set_no_cp (data, ivs, use);

  if (cp)
    {
      cid = cp->cand->id;

      ivs->bad_uses--;
      ivs->cand_for_use[uid] = cp;
      ivs->n_cand_uses[cid]++;
      if (ivs->n_cand_uses[cid] == 1)
	{
	  bitmap_set_bit (ivs->cands, cid);
	  /* Do not count the pseudocandidates.  */
	  if (cp->cand->iv)
	    ivs->n_regs++;
	  ivs->n_cands++;
	  ivs->cand_cost += cp->cand->cost;

	  iv_ca_set_add_invariants (ivs, cp->cand->depends_on);
	}

      ivs->cand_use_cost = add_costs (ivs->cand_use_cost, cp->cost);
      iv_ca_set_add_invariants (ivs, cp->depends_on);

      if (cp->inv_expr_id != -1)
	{
	  ivs->used_inv_expr[cp->inv_expr_id]++;
	  if (ivs->used_inv_expr[cp->inv_expr_id] == 1)
	    ivs->num_used_inv_expr++;
	}
      iv_ca_recount_cost (data, ivs);
    }
}
/* Extend set IVS by expressing USE by some of the candidates in it
   if possible.  All important candidates will be considered
   if IMPORTANT_CANDIDATES is true.  */

static void
iv_ca_add_use (struct ivopts_data *data, struct iv_ca *ivs,
	       struct iv_use *use, bool important_candidates)
{
  struct cost_pair *best_cp = NULL, *cp;
  bitmap_iterator bi;
  bitmap cands;
  unsigned i;

  gcc_assert (ivs->upto >= use->id);

  if (ivs->upto == use->id)
    {
      ivs->upto++;
      ivs->bad_uses++;
    }

  cands = (important_candidates ? data->important_candidates : ivs->cands);
  EXECUTE_IF_SET_IN_BITMAP (cands, 0, i, bi)
    {
      struct iv_cand *cand = iv_cand (data, i);

      cp = get_use_iv_cost (data, use, cand);

      if (cheaper_cost_pair (cp, best_cp))
	best_cp = cp;
    }

  iv_ca_set_cp (data, ivs, use, best_cp);
}
/* Get cost for assignment IVS.  */

static comp_cost
iv_ca_cost (struct iv_ca *ivs)
{
  /* This was a conditional expression but it triggered a bug in
     Sun C 5.5.  */
  if (ivs->bad_uses)
    return infinite_cost;
  else
    return ivs->cost;
}
/* Returns true if all dependences of CP are among invariants in IVS.  */

static bool
iv_ca_has_deps (struct iv_ca *ivs, struct cost_pair *cp)
{
  unsigned i;
  bitmap_iterator bi;

  if (!cp->depends_on)
    return true;

  EXECUTE_IF_SET_IN_BITMAP (cp->depends_on, 0, i, bi)
    {
      if (ivs->n_invariant_uses[i] == 0)
	return false;
    }

  return true;
}
/* Creates change of expressing USE by NEW_CP instead of OLD_CP and chains
   it before NEXT_CHANGE.  */

static struct iv_ca_delta *
iv_ca_delta_add (struct iv_use *use, struct cost_pair *old_cp,
		 struct cost_pair *new_cp, struct iv_ca_delta *next_change)
{
  struct iv_ca_delta *change = XNEW (struct iv_ca_delta);

  change->use = use;
  change->old_cp = old_cp;
  change->new_cp = new_cp;
  change->next_change = next_change;

  return change;
}
/* Joins two lists of changes L1 and L2.  Destructive -- old lists
   are rewritten.  */

static struct iv_ca_delta *
iv_ca_delta_join (struct iv_ca_delta *l1, struct iv_ca_delta *l2)
{
  struct iv_ca_delta *last;

  if (!l2)
    return l1;

  if (!l1)
    return l2;

  for (last = l1; last->next_change; last = last->next_change)
    continue;
  last->next_change = l2;

  return l1;
}
/* Reverse the list of changes DELTA, forming the inverse to it.  */

static struct iv_ca_delta *
iv_ca_delta_reverse (struct iv_ca_delta *delta)
{
  struct iv_ca_delta *act, *next, *prev = NULL;
  struct cost_pair *tmp;

  for (act = delta; act; act = next)
    {
      next = act->next_change;
      act->next_change = prev;
      prev = act;

      tmp = act->old_cp;
      act->old_cp = act->new_cp;
      act->new_cp = tmp;
    }

  return prev;
}
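
/* An illustrative standalone sketch (hypothetical types) of the idiom
   above: the classic in-place reversal of a singly linked list, with each
   node's old/new payload swapped so that committing the reversed list
   undoes the original changes:

     struct change { int old_v, new_v; struct change *next; };

     struct change *
     reverse (struct change *head)
     {
       struct change *prev = NULL, *next;
       for (; head; head = next)
	 {
	   next = head->next;
	   head->next = prev;		/+ relink backwards +/
	   int tmp = head->old_v;	/+ swap payload: forms the inverse +/
	   head->old_v = head->new_v;
	   head->new_v = tmp;
	   prev = head;
	 }
       return prev;
     }

   (nested comment markers written as /+ +/ to keep this comment valid).  */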
/* Commit changes in DELTA to IVS.  If FORWARD is false, the changes are
   reverted instead.  */

static void
iv_ca_delta_commit (struct ivopts_data *data, struct iv_ca *ivs,
		    struct iv_ca_delta *delta, bool forward)
{
  struct cost_pair *from, *to;
  struct iv_ca_delta *act;

  if (!forward)
    delta = iv_ca_delta_reverse (delta);

  for (act = delta; act; act = act->next_change)
    {
      from = act->old_cp;
      to = act->new_cp;
      gcc_assert (iv_ca_cand_for_use (ivs, act->use) == from);
      iv_ca_set_cp (data, ivs, act->use, to);
    }

  if (!forward)
    iv_ca_delta_reverse (delta);
}
/* Returns true if CAND is used in IVS.  */

static bool
iv_ca_cand_used_p (struct iv_ca *ivs, struct iv_cand *cand)
{
  return ivs->n_cand_uses[cand->id] > 0;
}
/* Returns number of induction variable candidates in the set IVS.  */

static unsigned
iv_ca_n_cands (struct iv_ca *ivs)
{
  return ivs->n_cands;
}
/* Free the list of changes DELTA.  */

static void
iv_ca_delta_free (struct iv_ca_delta **delta)
{
  struct iv_ca_delta *act, *next;

  for (act = *delta; act; act = next)
    {
      next = act->next_change;
      free (act);
    }

  *delta = NULL;
}
/* Allocates new iv candidates assignment.  */

static struct iv_ca *
iv_ca_new (struct ivopts_data *data)
{
  struct iv_ca *nw = XNEW (struct iv_ca);

  nw->upto = 0;
  nw->bad_uses = 0;
  nw->cand_for_use = XCNEWVEC (struct cost_pair *, n_iv_uses (data));
  nw->n_cand_uses = XCNEWVEC (unsigned, n_iv_cands (data));
  nw->cands = BITMAP_ALLOC (NULL);
  nw->n_cands = 0;
  nw->n_regs = 0;
  nw->cand_use_cost = no_cost;
  nw->cand_cost = 0;
  nw->n_invariant_uses = XCNEWVEC (unsigned, data->max_inv_id + 1);
  nw->cost = no_cost;
  nw->used_inv_expr = XCNEWVEC (unsigned, data->inv_expr_id + 1);
  nw->num_used_inv_expr = 0;

  return nw;
}
/* Free memory occupied by the set IVS.  */

static void
iv_ca_free (struct iv_ca **ivs)
{
  free ((*ivs)->cand_for_use);
  free ((*ivs)->n_cand_uses);
  BITMAP_FREE ((*ivs)->cands);
  free ((*ivs)->n_invariant_uses);
  free ((*ivs)->used_inv_expr);
  free (*ivs);
  *ivs = NULL;
}
/* Dumps IVS to FILE.  */

static void
iv_ca_dump (struct ivopts_data *data, FILE *file, struct iv_ca *ivs)
{
  const char *pref = "  invariants ";
  unsigned i;
  comp_cost cost = iv_ca_cost (ivs);

  fprintf (file, "  cost: %d (complexity %d)\n", cost.cost, cost.complexity);
  fprintf (file, "  cand_cost: %d\n  cand_use_cost: %d (complexity %d)\n",
	   ivs->cand_cost, ivs->cand_use_cost.cost,
	   ivs->cand_use_cost.complexity);
  bitmap_print (file, ivs->cands, "  candidates: ","\n");

  for (i = 0; i < ivs->upto; i++)
    {
      struct iv_use *use = iv_use (data, i);
      struct cost_pair *cp = iv_ca_cand_for_use (ivs, use);
      if (cp)
	fprintf (file, "   use:%d --> iv_cand:%d, cost=(%d,%d)\n",
		 use->id, cp->cand->id, cp->cost.cost, cp->cost.complexity);
      else
	fprintf (file, "   use:%d --> ??\n", use->id);
    }

  for (i = 1; i <= data->max_inv_id; i++)
    if (ivs->n_invariant_uses[i])
      {
	fprintf (file, "%s%d", pref, i);
	pref = ", ";
      }
  fprintf (file, "\n\n");
}
/* Try changing candidate in IVS to CAND for each use.  Return cost of the
   new set, and store differences in DELTA.  Number of induction variables
   in the new set is stored to N_IVS.  MIN_NCAND is a flag.  When it is true
   the function will try to find a solution with minimal iv candidates.  */

static comp_cost
iv_ca_extend (struct ivopts_data *data, struct iv_ca *ivs,
	      struct iv_cand *cand, struct iv_ca_delta **delta,
	      unsigned *n_ivs, bool min_ncand)
{
  unsigned i;
  comp_cost cost;
  struct iv_use *use;
  struct cost_pair *old_cp, *new_cp;

  *delta = NULL;
  for (i = 0; i < ivs->upto; i++)
    {
      use = iv_use (data, i);
      old_cp = iv_ca_cand_for_use (ivs, use);

      if (old_cp
	  && old_cp->cand == cand)
	continue;

      new_cp = get_use_iv_cost (data, use, cand);
      if (!new_cp)
	continue;

      if (!min_ncand && !iv_ca_has_deps (ivs, new_cp))
	continue;

      if (!min_ncand && !cheaper_cost_pair (new_cp, old_cp))
	continue;

      *delta = iv_ca_delta_add (use, old_cp, new_cp, *delta);
    }

  iv_ca_delta_commit (data, ivs, *delta, true);
  cost = iv_ca_cost (ivs);
  if (n_ivs)
    *n_ivs = iv_ca_n_cands (ivs);
  iv_ca_delta_commit (data, ivs, *delta, false);

  return cost;
}
/* Try narrowing set IVS by removing CAND.  Return the cost of
   the new set and store the differences in DELTA.  */

static comp_cost
iv_ca_narrow (struct ivopts_data *data, struct iv_ca *ivs,
	      struct iv_cand *cand, struct iv_ca_delta **delta)
{
  unsigned i, ci;
  struct iv_use *use;
  struct cost_pair *old_cp, *new_cp, *cp;
  bitmap_iterator bi;
  struct iv_cand *cnd;
  comp_cost cost;

  *delta = NULL;
  for (i = 0; i < n_iv_uses (data); i++)
    {
      use = iv_use (data, i);

      old_cp = iv_ca_cand_for_use (ivs, use);
      if (old_cp->cand != cand)
	continue;

      new_cp = NULL;

      if (data->consider_all_candidates)
	{
	  EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, ci, bi)
	    {
	      if (ci == cand->id)
		continue;

	      cnd = iv_cand (data, ci);

	      cp = get_use_iv_cost (data, use, cnd);
	      if (!cp)
		continue;

	      if (!iv_ca_has_deps (ivs, cp))
		continue;

	      if (!cheaper_cost_pair (cp, new_cp))
		continue;

	      new_cp = cp;
	    }
	}
      else
	{
	  EXECUTE_IF_AND_IN_BITMAP (use->related_cands, ivs->cands, 0, ci, bi)
	    {
	      if (ci == cand->id)
		continue;

	      cnd = iv_cand (data, ci);

	      cp = get_use_iv_cost (data, use, cnd);
	      if (!cp)
		continue;

	      if (!iv_ca_has_deps (ivs, cp))
		continue;

	      if (!cheaper_cost_pair (cp, new_cp))
		continue;

	      new_cp = cp;
	    }
	}

      if (!new_cp)
	{
	  iv_ca_delta_free (delta);
	  return infinite_cost;
	}

      *delta = iv_ca_delta_add (use, old_cp, new_cp, *delta);
    }

  iv_ca_delta_commit (data, ivs, *delta, true);
  cost = iv_ca_cost (ivs);
  iv_ca_delta_commit (data, ivs, *delta, false);

  return cost;
}
/* Try optimizing the set of candidates IVS by removing candidates other
   than EXCEPT_CAND from it.  Return cost of the new set, and store
   differences in DELTA.  */

static comp_cost
iv_ca_prune (struct ivopts_data *data, struct iv_ca *ivs,
	     struct iv_cand *except_cand, struct iv_ca_delta **delta)
{
  bitmap_iterator bi;
  struct iv_ca_delta *act_delta, *best_delta;
  unsigned i;
  comp_cost best_cost, acost;
  struct iv_cand *cand;

  best_delta = NULL;
  best_cost = iv_ca_cost (ivs);

  EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, i, bi)
    {
      cand = iv_cand (data, i);

      if (cand == except_cand)
	continue;

      acost = iv_ca_narrow (data, ivs, cand, &act_delta);

      if (compare_costs (acost, best_cost) < 0)
	{
	  best_cost = acost;
	  iv_ca_delta_free (&best_delta);
	  best_delta = act_delta;
	}
      else
	iv_ca_delta_free (&act_delta);
    }

  if (!best_delta)
    {
      *delta = NULL;
      return best_cost;
    }

  /* Recurse to possibly remove other unnecessary ivs.  */
  iv_ca_delta_commit (data, ivs, best_delta, true);
  best_cost = iv_ca_prune (data, ivs, except_cand, delta);
  iv_ca_delta_commit (data, ivs, best_delta, false);
  *delta = iv_ca_delta_join (best_delta, *delta);
  return best_cost;
}
/* Tries to extend the sets IVS in the best possible way in order
   to express the USE.  If ORIGINALP is true, prefer candidates from
   the original set of IVs, otherwise favor important candidates not
   based on any memory object.  */

static bool
try_add_cand_for (struct ivopts_data *data, struct iv_ca *ivs,
		  struct iv_use *use, bool originalp)
{
  comp_cost best_cost, act_cost;
  unsigned i;
  bitmap_iterator bi;
  struct iv_cand *cand;
  struct iv_ca_delta *best_delta = NULL, *act_delta;
  struct cost_pair *cp;

  iv_ca_add_use (data, ivs, use, false);
  best_cost = iv_ca_cost (ivs);

  cp = iv_ca_cand_for_use (ivs, use);
  if (!cp)
    {
      ivs->upto--;
      ivs->bad_uses--;
      iv_ca_add_use (data, ivs, use, true);
      best_cost = iv_ca_cost (ivs);
      cp = iv_ca_cand_for_use (ivs, use);
    }
  if (cp)
    {
      best_delta = iv_ca_delta_add (use, NULL, cp, NULL);
      iv_ca_set_no_cp (data, ivs, use);
    }

  /* If ORIGINALP is true, try to find the original IV for the use.  Otherwise
     first try important candidates not based on any memory object.  Only if
     this fails, try the specific ones.  Rationale -- in loops with many
     variables the best choice often is to use just one generic biv.  If we
     added here many ivs specific to the uses, the optimization algorithm later
     would be likely to get stuck in a local minimum, thus causing us to create
     too many ivs.  The approach from few ivs to more seems more likely to be
     successful -- starting from few ivs, replacing an expensive use by a
     specific iv should always be a win.  */
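  /* An illustrative sketch (hypothetical loop): in

       for (i = 0; i < n; i++)
	 {
	   a[i] = ...; b[i] = ...; c[i] = ...;
	 }

     a single generic biv i (scaled per use) usually beats three separate
     pointer ivs for a, b and c; starting from the generic candidates and
     only later introducing use-specific ivs where that provably pays off
     avoids the local minimum described above.  */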
  EXECUTE_IF_SET_IN_BITMAP (data->important_candidates, 0, i, bi)
    {
      cand = iv_cand (data, i);

      if (originalp && cand->pos != IP_ORIGINAL)
	continue;

      if (!originalp && cand->iv->base_object != NULL_TREE)
	continue;

      if (iv_ca_cand_used_p (ivs, cand))
	continue;

      cp = get_use_iv_cost (data, use, cand);
      if (!cp)
	continue;

      iv_ca_set_cp (data, ivs, use, cp);
      act_cost = iv_ca_extend (data, ivs, cand, &act_delta, NULL,
			       true);
      iv_ca_set_no_cp (data, ivs, use);
      act_delta = iv_ca_delta_add (use, NULL, cp, act_delta);

      if (compare_costs (act_cost, best_cost) < 0)
	{
	  best_cost = act_cost;

	  iv_ca_delta_free (&best_delta);
	  best_delta = act_delta;
	}
      else
	iv_ca_delta_free (&act_delta);
    }

  if (infinite_cost_p (best_cost))
    {
      for (i = 0; i < use->n_map_members; i++)
	{
	  cp = use->cost_map + i;
	  cand = cp->cand;
	  if (!cand)
	    continue;

	  /* Already tried this.  */
	  if (cand->important)
	    {
	      if (originalp && cand->pos == IP_ORIGINAL)
		continue;
	      if (!originalp && cand->iv->base_object == NULL_TREE)
		continue;
	    }

	  if (iv_ca_cand_used_p (ivs, cand))
	    continue;

	  act_delta = NULL;
	  iv_ca_set_cp (data, ivs, use, cp);
	  act_cost = iv_ca_extend (data, ivs, cand, &act_delta, NULL, true);
	  iv_ca_set_no_cp (data, ivs, use);
	  act_delta = iv_ca_delta_add (use, iv_ca_cand_for_use (ivs, use),
				       cp, act_delta);

	  if (compare_costs (act_cost, best_cost) < 0)
	    {
	      best_cost = act_cost;

	      iv_ca_delta_free (&best_delta);
	      best_delta = act_delta;
	    }
	  else
	    iv_ca_delta_free (&act_delta);
	}
    }

  iv_ca_delta_commit (data, ivs, best_delta, true);
  iv_ca_delta_free (&best_delta);

  return !infinite_cost_p (best_cost);
}
/* Finds an initial assignment of candidates to uses.  */

static struct iv_ca *
get_initial_solution (struct ivopts_data *data, bool originalp)
{
  struct iv_ca *ivs = iv_ca_new (data);
  unsigned i;

  for (i = 0; i < n_iv_uses (data); i++)
    if (!try_add_cand_for (data, ivs, iv_use (data, i), originalp))
      {
	iv_ca_free (&ivs);
	return NULL;
      }

  return ivs;
}
/* Tries to improve set of induction variables IVS.  */

static bool
try_improve_iv_set (struct ivopts_data *data, struct iv_ca *ivs)
{
  unsigned i, n_ivs;
  comp_cost acost, best_cost = iv_ca_cost (ivs);
  struct iv_ca_delta *best_delta = NULL, *act_delta, *tmp_delta;
  struct iv_cand *cand;

  /* Try extending the set of induction variables by one.  */
  for (i = 0; i < n_iv_cands (data); i++)
    {
      cand = iv_cand (data, i);

      if (iv_ca_cand_used_p (ivs, cand))
	continue;

      acost = iv_ca_extend (data, ivs, cand, &act_delta, &n_ivs, false);
      if (!act_delta)
	continue;

      /* If we successfully added the candidate and the set is small enough,
	 try optimizing it by removing other candidates.  */
      if (n_ivs <= ALWAYS_PRUNE_CAND_SET_BOUND)
	{
	  iv_ca_delta_commit (data, ivs, act_delta, true);
	  acost = iv_ca_prune (data, ivs, cand, &tmp_delta);
	  iv_ca_delta_commit (data, ivs, act_delta, false);
	  act_delta = iv_ca_delta_join (act_delta, tmp_delta);
	}

      if (compare_costs (acost, best_cost) < 0)
	{
	  best_cost = acost;
	  iv_ca_delta_free (&best_delta);
	  best_delta = act_delta;
	}
      else
	iv_ca_delta_free (&act_delta);
    }

  if (!best_delta)
    {
      /* Try removing the candidates from the set instead.  */
      best_cost = iv_ca_prune (data, ivs, NULL, &best_delta);

      /* Nothing more we can do.  */
      if (!best_delta)
	return false;
    }

  iv_ca_delta_commit (data, ivs, best_delta, true);
  gcc_assert (compare_costs (best_cost, iv_ca_cost (ivs)) == 0);
  iv_ca_delta_free (&best_delta);
  return true;
}
/* Attempts to find the optimal set of induction variables.  We use a simple
   greedy heuristic -- we try to replace at most one candidate in the selected
   solution and remove the unused ivs while this improves the cost.  */

static struct iv_ca *
find_optimal_iv_set_1 (struct ivopts_data *data, bool originalp)
{
  struct iv_ca *set;

  /* Get the initial solution.  */
  set = get_initial_solution (data, originalp);
  if (!set)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Unable to substitute for ivs, failed.\n");
      return NULL;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Initial set of candidates:\n");
      iv_ca_dump (data, dump_file, set);
    }

  while (try_improve_iv_set (data, set))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Improved to:\n");
	  iv_ca_dump (data, dump_file, set);
	}
    }

  return set;
}
static struct iv_ca *
find_optimal_iv_set (struct ivopts_data *data)
{
  unsigned i;
  struct iv_ca *set, *origset;
  struct iv_use *use;
  comp_cost cost, origcost;

  /* Determine the cost based on a strategy that starts with original IVs,
     and try again using a strategy that prefers candidates not based
     on any IVs.  */
  origset = find_optimal_iv_set_1 (data, true);
  set = find_optimal_iv_set_1 (data, false);

  if (!origset && !set)
    return NULL;

  origcost = origset ? iv_ca_cost (origset) : infinite_cost;
  cost = set ? iv_ca_cost (set) : infinite_cost;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Original cost %d (complexity %d)\n\n",
	       origcost.cost, origcost.complexity);
      fprintf (dump_file, "Final cost %d (complexity %d)\n\n",
	       cost.cost, cost.complexity);
    }

  /* Choose the one with the best cost.  */
  if (compare_costs (origcost, cost) <= 0)
    {
      if (set)
	iv_ca_free (&set);
      set = origset;
    }
  else if (origset)
    iv_ca_free (&origset);

  for (i = 0; i < n_iv_uses (data); i++)
    {
      use = iv_use (data, i);
      use->selected = iv_ca_cand_for_use (set, use)->cand;
    }

  return set;
}
/* Creates a new induction variable corresponding to CAND.  */

static void
create_new_iv (struct ivopts_data *data, struct iv_cand *cand)
{
  gimple_stmt_iterator incr_pos;
  tree base;
  bool after = false;

  if (!cand->iv)
    return;

  switch (cand->pos)
    {
    case IP_NORMAL:
      incr_pos = gsi_last_bb (ip_normal_pos (data->current_loop));
      break;

    case IP_END:
      incr_pos = gsi_last_bb (ip_end_pos (data->current_loop));
      after = true;
      break;

    case IP_AFTER_USE:
      after = true;
      /* fall through */
    case IP_BEFORE_USE:
      incr_pos = gsi_for_stmt (cand->incremented_at);
      break;

    case IP_ORIGINAL:
      /* Mark that the iv is preserved.  */
      name_info (data, cand->var_before)->preserve_biv = true;
      name_info (data, cand->var_after)->preserve_biv = true;

      /* Rewrite the increment so that it uses var_before directly.  */
      find_interesting_uses_op (data, cand->var_after)->selected = cand;
      return;
    }

  gimple_add_tmp_var (cand->var_before);
  add_referenced_var (cand->var_before);

  base = unshare_expr (cand->iv->base);

  create_iv (base, unshare_expr (cand->iv->step),
	     cand->var_before, data->current_loop,
	     &incr_pos, after, &cand->var_before, &cand->var_after);
}
/* Creates new induction variables described in SET.  */

static void
create_new_ivs (struct ivopts_data *data, struct iv_ca *set)
{
  unsigned i;
  struct iv_cand *cand;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (set->cands, 0, i, bi)
    {
      cand = iv_cand (data, i);
      create_new_iv (data, cand);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nSelected IV set: \n");
      EXECUTE_IF_SET_IN_BITMAP (set->cands, 0, i, bi)
	{
	  cand = iv_cand (data, i);
	  dump_cand (dump_file, cand);
	}
      fprintf (dump_file, "\n");
    }
}
/* Rewrites USE (definition of iv used in a nonlinear expression)
   using candidate CAND.  */

static void
rewrite_use_nonlinear_expr (struct ivopts_data *data,
			    struct iv_use *use, struct iv_cand *cand)
{
  tree comp;
  tree op, tgt;
  gimple ass;
  gimple_stmt_iterator bsi;

  /* An important special case -- if we are asked to express value of
     the original iv by itself, just exit; there is no need to
     introduce a new computation (that might also need casting the
     variable to unsigned and back).  */
  if (cand->pos == IP_ORIGINAL
      && cand->incremented_at == use->stmt)
    {
      tree step, ctype, utype;
      enum tree_code incr_code = PLUS_EXPR, old_code;

      gcc_assert (is_gimple_assign (use->stmt));
      gcc_assert (gimple_assign_lhs (use->stmt) == cand->var_after);

      step = cand->iv->step;
      ctype = TREE_TYPE (step);
      utype = TREE_TYPE (cand->var_after);
      if (TREE_CODE (step) == NEGATE_EXPR)
	{
	  incr_code = MINUS_EXPR;
	  step = TREE_OPERAND (step, 0);
	}

      /* Check whether we may leave the computation unchanged.
	 This is the case only if it does not rely on other
	 computations in the loop -- otherwise, the computation
	 we rely upon may be removed in remove_unused_ivs,
	 thus leading to ICE.  */
      old_code = gimple_assign_rhs_code (use->stmt);
      if (old_code == PLUS_EXPR
	  || old_code == MINUS_EXPR
	  || old_code == POINTER_PLUS_EXPR)
	{
	  if (gimple_assign_rhs1 (use->stmt) == cand->var_before)
	    op = gimple_assign_rhs2 (use->stmt);
	  else if (old_code != MINUS_EXPR
		   && gimple_assign_rhs2 (use->stmt) == cand->var_before)
	    op = gimple_assign_rhs1 (use->stmt);
	  else
	    op = NULL_TREE;
	}
      else
	op = NULL_TREE;

      if (op
	  && (TREE_CODE (op) == INTEGER_CST
	      || operand_equal_p (op, step, 0)))
	return;

      /* Otherwise, add the necessary computations to express
	 the iv.  */
      op = fold_convert (ctype, cand->var_before);
      comp = fold_convert (utype,
			   build2 (incr_code, ctype, op,
				   unshare_expr (step)));
    }
  else
    {
      comp = get_computation (data->current_loop, use, cand);
      gcc_assert (comp != NULL_TREE);
    }

  switch (gimple_code (use->stmt))
    {
    case GIMPLE_PHI:
      tgt = PHI_RESULT (use->stmt);

      /* If we should keep the biv, do not replace it.  */
      if (name_info (data, tgt)->preserve_biv)
	return;

      bsi = gsi_after_labels (gimple_bb (use->stmt));
      break;

    case GIMPLE_ASSIGN:
      tgt = gimple_assign_lhs (use->stmt);
      bsi = gsi_for_stmt (use->stmt);
      break;

    default:
      gcc_unreachable ();
    }

  if (!valid_gimple_rhs_p (comp)
      || (gimple_code (use->stmt) != GIMPLE_PHI
	  /* We can't allow re-allocating the stmt as it might be pointed
	     to still.  */
	  && (get_gimple_rhs_num_ops (TREE_CODE (comp))
	      >= gimple_num_ops (gsi_stmt (bsi)))))
    {
      comp = force_gimple_operand_gsi (&bsi, comp, true, NULL_TREE,
				       true, GSI_SAME_STMT);
      if (POINTER_TYPE_P (TREE_TYPE (tgt)))
	{
	  duplicate_ssa_name_ptr_info (comp, SSA_NAME_PTR_INFO (tgt));
	  /* As this isn't a plain copy we have to reset alignment
	     information.  */
	  if (SSA_NAME_PTR_INFO (comp))
	    mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (comp));
	}
    }

  if (gimple_code (use->stmt) == GIMPLE_PHI)
    {
      ass = gimple_build_assign (tgt, comp);
      gsi_insert_before (&bsi, ass, GSI_SAME_STMT);

      bsi = gsi_for_stmt (use->stmt);
      remove_phi_node (&bsi, false);
    }
  else
    {
      gimple_assign_set_rhs_from_tree (&bsi, comp);
      use->stmt = gsi_stmt (bsi);
    }
}
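
/* An illustrative sketch (hypothetical GIMPLE): in the IP_ORIGINAL special
   case above, an increment

     i_7 = i_3 + 1;

   with var_before = i_3 and var_after = i_7 is left untouched, whereas

     i_7 = i_3 + t_5;

   relies on t_5 computed elsewhere in the loop, so it is rewritten as
   var_before + step; otherwise remove_unused_ivs could delete t_5's
   definition and leave this statement referring to a removed value.  */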
/* Performs a peephole optimization to reorder the iv update statement with
   a mem ref to enable instruction combining in later phases.  The mem ref uses
   the iv value before the update, so the reordering transformation requires
   adjustment of the offset.  CAND is the selected IV_CAND.

   Example:

   t = MEM_REF (base, iv1, 8, 16);  // base, index, stride, offset
   iv2 = iv1 + 1;

   if (t < val)      (1)
     goto L;
   goto Head;

   Directly propagating t over to (1) will introduce an overlapping live
   range and thus increase register pressure.  This peephole transforms
   it into:

   iv2 = iv1 + 1;
   t = MEM_REF (base, iv2, 8, 8);
   if (t < val)
     goto L;
   goto Head;
*/

static void
adjust_iv_update_pos (struct iv_cand *cand, struct iv_use *use)
{
  tree var_after;
  gimple iv_update, stmt;
  basic_block bb;
  gimple_stmt_iterator gsi, gsi_iv;

  if (cand->pos != IP_NORMAL)
    return;

  var_after = cand->var_after;
  iv_update = SSA_NAME_DEF_STMT (var_after);

  bb = gimple_bb (iv_update);
  gsi = gsi_last_nondebug_bb (bb);
  stmt = gsi_stmt (gsi);

  /* Only handle conditional statement for now.  */
  if (gimple_code (stmt) != GIMPLE_COND)
    return;

  gsi_prev_nondebug (&gsi);
  stmt = gsi_stmt (gsi);
  if (stmt != iv_update)
    return;

  gsi_prev_nondebug (&gsi);
  if (gsi_end_p (gsi))
    return;

  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return;

  if (stmt != use->stmt)
    return;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Reordering \n");
      print_gimple_stmt (dump_file, iv_update, 0, 0);
      print_gimple_stmt (dump_file, use->stmt, 0, 0);
      fprintf (dump_file, "\n");
    }

  gsi = gsi_for_stmt (use->stmt);
  gsi_iv = gsi_for_stmt (iv_update);
  gsi_move_before (&gsi_iv, &gsi);

  cand->pos = IP_BEFORE_USE;
  cand->incremented_at = use->stmt;
}
/* Rewrites USE (address that is an iv) using candidate CAND.  */

static void
rewrite_use_address (struct ivopts_data *data,
		     struct iv_use *use, struct iv_cand *cand)
{
  aff_tree aff;
  gimple_stmt_iterator bsi = gsi_for_stmt (use->stmt);
  tree base_hint = NULL_TREE;
  tree ref, iv;
  bool ok;

  adjust_iv_update_pos (cand, use);
  ok = get_computation_aff (data->current_loop, use, cand, use->stmt, &aff);
  gcc_assert (ok);
  unshare_aff_combination (&aff);

  /* To avoid undefined overflow problems, all IV candidates use unsigned
     integer types.  The drawback is that this makes it impossible for
     create_mem_ref to distinguish an IV that is based on a memory object
     from one that represents simply an offset.

     To work around this problem, we pass a hint to create_mem_ref that
     indicates which variable (if any) in aff is an IV based on a memory
     object.  Note that we only consider the candidate.  If this is not
     based on an object, the base of the reference is in some subexpression
     of the use -- but these will use pointer types, so they are recognized
     by the create_mem_ref heuristics anyway.  */
  if (cand->iv->base_object)
    base_hint = var_at_stmt (data->current_loop, cand, use->stmt);

  iv = var_at_stmt (data->current_loop, cand, use->stmt);
  ref = create_mem_ref (&bsi, TREE_TYPE (*use->op_p), &aff,
			reference_alias_ptr_type (*use->op_p),
			iv, base_hint, data->speed);
  copy_ref_info (ref, *use->op_p);
  *use->op_p = ref;
}
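
/* An illustrative sketch (hypothetical GIMPLE): rewriting the use a[i_3]
   on a pointer candidate p that strides through the array turns

     t_5 = a[i_3];

   into something like

     t_5 = MEM[(int *) p_9];

   where create_mem_ref has folded the affine combination into an
   addressing mode the target actually supports.  */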
/* Rewrites USE (the condition such that one of the arguments is an iv) using
   candidate CAND.  */

static void
rewrite_use_compare (struct ivopts_data *data,
		     struct iv_use *use, struct iv_cand *cand)
{
  tree comp, *var_p, op, bound;
  gimple_stmt_iterator bsi = gsi_for_stmt (use->stmt);
  enum tree_code compare;
  struct cost_pair *cp = get_use_iv_cost (data, use, cand);
  bool ok;

  bound = cp->value;
  if (bound)
    {
      tree var = var_at_stmt (data->current_loop, cand, use->stmt);
      tree var_type = TREE_TYPE (var);
      gimple_seq stmts;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Replacing exit test: ");
	  print_gimple_stmt (dump_file, use->stmt, 0, TDF_SLIM);
	}
      compare = cp->comp;
      bound = unshare_expr (fold_convert (var_type, bound));
      op = force_gimple_operand (bound, &stmts, true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (
		loop_preheader_edge (data->current_loop),
		stmts);

      gimple_cond_set_lhs (use->stmt, var);
      gimple_cond_set_code (use->stmt, compare);
      gimple_cond_set_rhs (use->stmt, op);
      return;
    }

  /* The induction variable elimination failed; just express the original
     giv.  */
  comp = get_computation (data->current_loop, use, cand);
  gcc_assert (comp != NULL_TREE);

  ok = extract_cond_operands (data, use->stmt, &var_p, NULL, NULL, NULL);
  gcc_assert (ok);

  *var_p = force_gimple_operand_gsi (&bsi, comp, true, SSA_NAME_VAR (*var_p),
				     true, GSI_SAME_STMT);
}
/* Rewrites USE using candidate CAND.  */

static void
rewrite_use (struct ivopts_data *data, struct iv_use *use, struct iv_cand *cand)
{
  switch (use->type)
    {
    case USE_NONLINEAR_EXPR:
      rewrite_use_nonlinear_expr (data, use, cand);
      break;

    case USE_ADDRESS:
      rewrite_use_address (data, use, cand);
      break;

    case USE_COMPARE:
      rewrite_use_compare (data, use, cand);
      break;

    default:
      gcc_unreachable ();
    }

  update_stmt (use->stmt);
}
/* Rewrite the uses using the selected induction variables.  */

static void
rewrite_uses (struct ivopts_data *data)
{
  unsigned i;
  struct iv_cand *cand;
  struct iv_use *use;

  for (i = 0; i < n_iv_uses (data); i++)
    {
      use = iv_use (data, i);
      cand = use->selected;
      gcc_assert (cand);

      rewrite_use (data, use, cand);
    }
}
/* Removes the ivs that are not used after rewriting.  */

static void
remove_unused_ivs (struct ivopts_data *data)
{
  unsigned j;
  bitmap_iterator bi;
  bitmap toremove = BITMAP_ALLOC (NULL);

  /* Figure out an order in which to release SSA DEFs so that we don't
     release something that we'd have to propagate into a debug stmt
     afterwards.  */
  EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, j, bi)
    {
      struct version_info *info;

      info = ver_info (data, j);
      if (info->iv
	  && !integer_zerop (info->iv->step)
	  && !info->inv_id
	  && !info->iv->have_use_for
	  && !info->preserve_biv)
	bitmap_set_bit (toremove, SSA_NAME_VERSION (info->iv->ssa_name));
    }

  release_defs_bitset (toremove);

  BITMAP_FREE (toremove);
}
/* Frees memory occupied by struct tree_niter_desc in *VALUE.  Callback
   for pointer_map_traverse.  */

static bool
free_tree_niter_desc (const void *key ATTRIBUTE_UNUSED, void **value,
		      void *data ATTRIBUTE_UNUSED)
{
  struct tree_niter_desc *const niter = (struct tree_niter_desc *) *value;

  free (niter);
  return true;
}
/* Frees data allocated by the optimization of a single loop.  */

static void
free_loop_data (struct ivopts_data *data)
{
  unsigned i, j;
  bitmap_iterator bi;
  tree obj;

  if (data->niters)
    {
      pointer_map_traverse (data->niters, free_tree_niter_desc, NULL);
      pointer_map_destroy (data->niters);
      data->niters = NULL;
    }

  EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
    {
      struct version_info *info;

      info = ver_info (data, i);
      free (info->iv);
      info->iv = NULL;
      info->has_nonlin_use = false;
      info->preserve_biv = false;
      info->inv_id = 0;
    }
  bitmap_clear (data->relevant);
  bitmap_clear (data->important_candidates);

  for (i = 0; i < n_iv_uses (data); i++)
    {
      struct iv_use *use = iv_use (data, i);

      free (use->iv);
      BITMAP_FREE (use->related_cands);
      for (j = 0; j < use->n_map_members; j++)
	if (use->cost_map[j].depends_on)
	  BITMAP_FREE (use->cost_map[j].depends_on);
      free (use->cost_map);
      free (use);
    }
  VEC_truncate (iv_use_p, data->iv_uses, 0);

  for (i = 0; i < n_iv_cands (data); i++)
    {
      struct iv_cand *cand = iv_cand (data, i);

      free (cand->iv);
      if (cand->depends_on)
	BITMAP_FREE (cand->depends_on);
      free (cand);
    }
  VEC_truncate (iv_cand_p, data->iv_candidates, 0);

  if (data->version_info_size < num_ssa_names)
    {
      data->version_info_size = 2 * num_ssa_names;
      free (data->version_info);
      data->version_info = XCNEWVEC (struct version_info,
				     data->version_info_size);
    }

  data->max_inv_id = 0;

  FOR_EACH_VEC_ELT (tree, decl_rtl_to_reset, i, obj)
    SET_DECL_RTL (obj, NULL_RTX);

  VEC_truncate (tree, decl_rtl_to_reset, 0);

  htab_empty (data->inv_expr_tab);
  data->inv_expr_id = 0;
}
/* Finalizes data structures used by the iv optimization pass.  */

static void
tree_ssa_iv_optimize_finalize (struct ivopts_data *data)
{
  free_loop_data (data);
  free (data->version_info);
  BITMAP_FREE (data->relevant);
  BITMAP_FREE (data->important_candidates);

  VEC_free (tree, heap, decl_rtl_to_reset);
  VEC_free (iv_use_p, heap, data->iv_uses);
  VEC_free (iv_cand_p, heap, data->iv_candidates);
  htab_delete (data->inv_expr_tab);
}
/* Returns true if the loop body BODY includes any function calls.  */

static bool
loop_body_includes_call (basic_block *body, unsigned num_nodes)
{
  gimple_stmt_iterator gsi;
  unsigned i;

  for (i = 0; i < num_nodes; i++)
    for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple stmt = gsi_stmt (gsi);
	if (is_gimple_call (stmt)
	    && !is_inexpensive_builtin (gimple_call_fndecl (stmt)))
	  return true;
      }
  return false;
}
/* Optimizes the LOOP.  Returns true if anything changed.  */

static bool
tree_ssa_iv_optimize_loop (struct ivopts_data *data, struct loop *loop)
{
  bool changed = false;
  struct iv_ca *iv_ca;
  edge exit = single_dom_exit (loop);
  basic_block *body;

  gcc_assert (!data->niters);
  data->current_loop = loop;
  data->speed = optimize_loop_for_speed_p (loop);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Processing loop %d\n", loop->num);

      if (exit)
	{
	  fprintf (dump_file, "  single exit %d -> %d, exit condition ",
		   exit->src->index, exit->dest->index);
	  print_gimple_stmt (dump_file, last_stmt (exit->src), 0, TDF_SLIM);
	  fprintf (dump_file, "\n");
	}

      fprintf (dump_file, "\n");
    }

  body = get_loop_body (loop);
  data->body_includes_call = loop_body_includes_call (body, loop->num_nodes);
  renumber_gimple_stmt_uids_in_blocks (body, loop->num_nodes);
  free (body);

  data->loop_single_exit_p = exit != NULL && loop_only_exit_p (loop, exit);

  /* For each ssa name determines whether it behaves as an induction variable
     in some loop.  */
  if (!find_induction_variables (data))
    goto finish;

  /* Finds interesting uses (item 1).  */
  find_interesting_uses (data);
  if (n_iv_uses (data) > MAX_CONSIDERED_USES)
    goto finish;

  /* Finds candidates for the induction variables (item 2).  */
  find_iv_candidates (data);

  /* Calculates the costs (item 3, part 1).  */
  determine_iv_costs (data);
  determine_use_iv_costs (data);
  determine_set_costs (data);

  /* Find the optimal set of induction variables (item 3, part 2).  */
  iv_ca = find_optimal_iv_set (data);
  if (!iv_ca)
    goto finish;
  changed = true;

  /* Create the new induction variables (item 4, part 1).  */
  create_new_ivs (data, iv_ca);
  iv_ca_free (&iv_ca);

  /* Rewrite the uses (item 4, part 2).  */
  rewrite_uses (data);

  /* Remove the ivs that are unused after rewriting.  */
  remove_unused_ivs (data);

  /* We have changed the structure of induction variables; it might happen
     that definitions in the scev database refer to some of them that were
     eliminated.  */
  scev_reset ();

finish:
  free_loop_data (data);

  return changed;
}
/* Main entry point.  Optimizes induction variables in loops.  */

void
tree_ssa_iv_optimize (void)
{
  struct loop *loop;
  struct ivopts_data data;
  loop_iterator li;

  tree_ssa_iv_optimize_init (&data);

  /* Optimize the loops starting with the innermost ones.  */
  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	flow_loop_dump (loop, dump_file, NULL, 1);

      tree_ssa_iv_optimize_loop (&data, loop);
    }

  tree_ssa_iv_optimize_finalize (&data);
}