/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2013 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "hash-table.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"

/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op; tree opnd; } unary;
    struct { enum tree_code op; tree opnd0, opnd1; } binary;
    struct { enum tree_code op; tree opnd0, opnd1, opnd2; } ternary;
    struct { gimple fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};
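
/* As a hedged illustration (the SSA names are hypothetical, not taken from
   any particular dump): an assignment "x_1 = a_2 + b_3" would be recorded
   with kind == EXPR_BINARY, type == TREE_TYPE (x_1),
   ops.binary.op == PLUS_EXPR, ops.binary.opnd0 == a_2 and
   ops.binary.opnd1 == b_3; initialize_hash_element below performs exactly
   this mapping.  */
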
/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;


/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};

/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;

static vec<expr_hash_elt_t> avail_exprs_stack;
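
/* A sketch of the intended unwind discipline (hypothetical expressions E1
   and E2 made available while processing a block B):

     avail_exprs_stack.safe_push (NULL);          marker for B
     ... elements for E1 and E2 are pushed as they enter AVAIL_EXPRS ...

   When the walk leaves B, remove_local_expressions_from_table (below)
   pops E2 and E1 back out of AVAIL_EXPRS and stops at the NULL marker.  */
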
/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The stmt pointer if this element corresponds to a statement.  */
  gimple stmt;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};

/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
                                   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher
{
  typedef expr_hash_elt value_type;
  typedef expr_hash_elt compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
  static inline void remove (value_type *);
};

inline hashval_t
expr_elt_hasher::hash (const value_type *p)
{
  return p->hash;
}

inline bool
expr_elt_hasher::equal (const value_type *p1, const compare_type *p2)
{
  gimple stmt1 = p1->stmt;
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  gimple stmt2 = p2->stmt;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  /* FIXME tuples:
     We add stmts to a hash table and then modify them.  To detect the case
     that we modify a stmt and then search for it, we assume that the hash
     is always modified by that change.
     We have to fully check why this doesn't happen on trunk or rewrite
     this in a more reliable (and easier to understand) way.  */
  if (((const struct expr_hash_elt *)p1)->hash
      != ((const struct expr_hash_elt *)p2)->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    {
      /* Note that STMT1 and/or STMT2 may be NULL.  */
      return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
              == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
    }

  return false;
}

/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type *element)
{
  free_expr_hash_elt (element);
}

/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static hash_table <expr_elt_hasher> avail_exprs;
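
/* For example (hypothetical SSA names): after "x_1 = a_2 + b_3" is entered
   here, a later "y_4 = a_2 + b_3" finds the cached entry and is rewritten
   to "y_4 = x_1", eliminating the redundant computation.  */
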
/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static vec<tree> const_and_copies_stack;
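
/* For instance, recording the equivalence x_1 = 5 (hypothetical name)
   pushes the previous value of x_1 followed by x_1 itself, so that
   restore_vars_to_original_value below can pop dest/src pairs until it
   reaches the NULL marker for the block.  */
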
/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;

/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *, hash_table <expr_elt_hasher>);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);

/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (code == GIMPLE_CALL)
    {
      size_t nargs = gimple_call_num_args (stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = stmt;

      if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (stmt, i);
    }
  else if (code == GIMPLE_SWITCH)
    {
      expr->type = TREE_TYPE (gimple_switch_index (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->stmt = stmt;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}

/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->stmt = NULL;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Compare two hashable_expr structures for equivalence.
   They are considered equivalent when the expressions
   they denote must necessarily be equal.  The logic is intended
   to follow that of operand_equal_p in fold-const.c.  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If both types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
        return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
                           expr1->ops.binary.opnd0, 0)
          && operand_equal_p (expr0->ops.binary.opnd1,
                              expr1->ops.binary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
              && operand_equal_p (expr0->ops.binary.opnd0,
                                  expr1->ops.binary.opnd1, 0)
              && operand_equal_p (expr0->ops.binary.opnd1,
                                  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
          || !operand_equal_p (expr0->ops.ternary.opnd2,
                               expr1->ops.ternary.opnd2, 0))
        return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
                           expr1->ops.ternary.opnd0, 0)
          && operand_equal_p (expr0->ops.ternary.opnd1,
                              expr1->ops.ternary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
              && operand_equal_p (expr0->ops.ternary.opnd0,
                                  expr1->ops.ternary.opnd1, 0)
              && operand_equal_p (expr0->ops.ternary.opnd1,
                                  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}

/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow iterative_hash_expr in tree.c.  */

static hashval_t
iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      val = iterative_hash_expr (expr->ops.single.rhs, val);
      break;

    case EXPR_UNARY:
      val = iterative_hash_object (expr->ops.unary.op, val);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        val += TYPE_UNSIGNED (expr->type);

      val = iterative_hash_expr (expr->ops.unary.opnd, val);
      break;

    case EXPR_BINARY:
      val = iterative_hash_object (expr->ops.binary.op, val);
      if (commutative_tree_code (expr->ops.binary.op))
        val = iterative_hash_exprs_commutative (expr->ops.binary.opnd0,
                                                expr->ops.binary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.binary.opnd0, val);
          val = iterative_hash_expr (expr->ops.binary.opnd1, val);
        }
      break;

    case EXPR_TERNARY:
      val = iterative_hash_object (expr->ops.ternary.op, val);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
        val = iterative_hash_exprs_commutative (expr->ops.ternary.opnd0,
                                                expr->ops.ternary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.ternary.opnd0, val);
          val = iterative_hash_expr (expr->ops.ternary.opnd1, val);
        }
      val = iterative_hash_expr (expr->ops.ternary.opnd2, val);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gimple fn_from;

        val = iterative_hash_object (code, val);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          val = iterative_hash_hashval_t
            ((hashval_t) gimple_call_internal_fn (fn_from), val);
        else
          val = iterative_hash_expr (gimple_call_fn (fn_from), val);
        for (i = 0; i < expr->ops.call.nargs; i++)
          val = iterative_hash_expr (expr->ops.call.args[i], val);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          val = iterative_hash_expr (expr->ops.phi.args[i], val);
      }
      break;

    default:
      gcc_unreachable ();
    }

  return val;
}

/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  if (element->stmt)
    fprintf (stream, "STMT ");
  else
    fprintf (stream, "COND ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
        size_t i;
        size_t nargs = element->expr.ops.call.nargs;
        gimple fn_from;

        fn_from = element->expr.ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                 stream);
        else
          print_generic_expr (stream, gimple_call_fn (fn_from), 0);
        fprintf (stream, " (");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.call.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ")");
      }
      break;

    case EXPR_PHI:
      {
        size_t i;
        size_t nargs = element->expr.ops.phi.nargs;

        fprintf (stream, "PHI <");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ">");
      }
      break;
    }
  fprintf (stream, "\n");

  if (element->stmt)
    {
      fprintf (stream, "          ");
      print_gimple_stmt (stream, element->stmt, 0, 0);
    }
}

/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);

  free_expr_hash_elt_contents (element);
  free (element);
}

/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}

/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB (bb)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              edge_info->cond_equivalences.release ();
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}

class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction)
    : dom_walker (direction), m_dummy_cond (NULL) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  gimple m_dummy_cond;
};

/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

static unsigned int
tree_ssa_dominator_optimize (void)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs.create (1024);
  avail_exprs_stack.create (20);
  const_and_copies_stack.create (20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.  */
  loop_optimizer_init (LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker (CDI_DOMINATORS).walk (cfun->cfg->x_entry_block_ptr);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB (bb)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.
         Don't clear bits in the bitmap, as that can break the bitmap
         iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK (i);
          if (bb == NULL)
            continue;
          while (single_succ_p (bb)
                 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            bb = single_succ (bb);
          if (bb == EXIT_BLOCK_PTR)
            continue;
          if ((unsigned) bb->index != i)
            bitmap_set_bit (need_eh_cleanup, bb->index);
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  statistics_counter_event (cfun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (cfun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (cfun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  avail_exprs.dispose ();

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);

  avail_exprs_stack.release ();
  const_and_copies_stack.release ();

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}

static bool
gate_dominator (void)
{
  return flag_tree_dom != 0;
}

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa
    | TODO_verify_ssa
    | TODO_verify_flow ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  bool gate () { return gate_dominator (); }
  unsigned int execute () { return tree_ssa_dominator_optimize (); }

}; // class pass_dominator

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}

/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gimple condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}

/* Initialize local stacks for this optimizer and record equivalences
   upon entry to BB.  Equivalences can come from the edge traversed to
   reach BB or they may come from PHI nodes at the start of BB.  */

/* Remove all the expressions in LOCALS from TABLE, stopping when there are
   LIMIT entries left in LOCALs.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      expr_hash_elt_t victim = avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim == NULL)
        break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim);
        }

      slot = avail_exprs.find_slot_with_hash (victim, victim->hash, NO_INSERT);
      gcc_assert (slot && *slot == victim);
      avail_exprs.clear_slot (slot);
    }
}

/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (const_and_copies_stack.length () > 0)
    {
      tree prev_value, dest;

      dest = const_and_copies_stack.pop ();

      if (dest == NULL)
        break;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< COPY ");
          print_generic_expr (dump_file, dest, 0);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
          fprintf (dump_file, "\n");
        }

      prev_value = const_and_copies_stack.pop ();
      set_ssa_name_value (dest, prev_value);
    }
}

/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}

/* Record into the equivalence tables any equivalences implied by
   traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
        record_const_or_copy (lhs, rhs);

      /* If we have 0 = COND or 1 = COND equivalences, record them
         into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
        record_cond (eq);
    }
}

/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */
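
/* A hypothetical example: if traversing E records the equivalence
   x_1 == 0 and E->dest ends in "if (x_1 == 0)", the generic code can
   resolve that condition and register a jump thread through E->dest.  */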

void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
      gimple_build_cond (NE_EXPR,
                         integer_zero_node, integer_zero_node,
                         NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  avail_exprs_stack.safe_push (NULL);
  const_and_copies_stack.safe_push (NULL_TREE);

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
                        &const_and_copies_stack,
                        simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  remove_local_expressions_from_table ();
}

/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */
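
/* For example (hypothetical GIMPLE), "x_3 = PHI <y_1, y_1, y_1>" makes x_3
   equivalent to y_1, and so does "x_3 = PHI <y_1, x_3, y_1>" once the
   alternative identical to the result is ignored.  */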

static void
record_equivalences_from_phis (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi) && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}

/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}

/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;
          cond_equivalence *eq;

          if (lhs)
            record_equality (lhs, rhs);

          /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
             set via a widening type conversion, then we may be able to record
             additional equivalences.  */
          if (lhs
              && TREE_CODE (lhs) == SSA_NAME
              && is_gimple_constant (rhs)
              && TREE_CODE (rhs) == INTEGER_CST)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (lhs);

              if (defstmt
                  && is_gimple_assign (defstmt)
                  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
                {
                  tree old_rhs = gimple_assign_rhs1 (defstmt);

                  /* If the conversion widens the original value and
                     the constant is in the range of the type of OLD_RHS,
                     then convert the constant and record the equivalence.

                     Note that int_fits_type_p does not check the precision
                     if the upper and lower bounds are OK.  */
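                  /* Hypothetical example: if LHS was set by "x_1 = (int) c_2"
                     with c_2 of type char and this edge asserts x_1 == 5,
                     the constant 5 fits in char, so we can also record
                     c_2 == 5.  */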
                  if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
                      && (TYPE_PRECISION (TREE_TYPE (lhs))
                          > TYPE_PRECISION (TREE_TYPE (old_rhs)))
                      && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
                    {
                      tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
                      record_equality (old_rhs, newval);
                    }
                }
            }

          for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
            record_cond (eq);
        }
    }
}

/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, avail_exprs);
}

/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}

/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, hash_table <expr_elt_hasher> htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab.size (),
           (long) htab.elements (),
           htab.collisions ());
}

/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs.find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      avail_exprs_stack.safe_push (element);
    }
  else
    free_expr_hash_elt (element);
}

/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it to **P.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           vec<cond_equivalence> *p)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = boolean_true_node;
  p->safe_push (c);
}

/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */
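
/* Concretely (a sketch grounded in the switch below): for integral
   operands, a true "a < b" also records "a <= b" and "a != b" as true;
   for floating-point operands it additionally records ORDERED and LTGT.  */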

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now store the original true and false conditions into the first
     two slots.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}

/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  const_and_copies_stack.reserve (2);
  const_and_copies_stack.quick_push (prev_x);
  const_and_copies_stack.quick_push (x);
}

/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}

/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
        y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}

/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
           || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}

/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

static bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}

/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gimple_stmt_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      /* We may have an equivalence associated with this edge.  While
         we can not propagate it into non-dominated blocks, we can
         propagate them into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
         table back to its original state after processing this edge.  */
      const_and_copies_stack.safe_push (NULL_TREE);

      /* Extract and record any simple NAME = VALUE equivalences.

         Don't bother with [01] = COND equivalences, they're not useful
         here.  */
      {
        struct edge_info *edge_info = (struct edge_info *) e->aux;

        if (edge_info)
          {
            tree lhs = edge_info->lhs;
            tree rhs = edge_info->rhs;

            if (lhs && TREE_CODE (lhs) == SSA_NAME)
              record_const_or_copy (lhs, rhs);
          }
      }

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gimple phi = gsi_stmt (gsi);

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }

      restore_vars_to_original_value ();
    }
}

/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */
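
/* For instance (hypothetical GIMPLE): if BB ends in "switch (i_1)" and an
   outgoing edge leads only to the label "case 3:", traversing that edge
   implies i_1 == 3.  Likewise "if (x_1 == 42)" implies x_1 == 42 on its
   true edge.  */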

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          tree index = gimple_switch_index (stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (stmt);
              tree *info = XCNEWVEC (tree, last_basic_block);
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (stmt, i);
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = allocate_edge_info (e);
                      edge_info->lhs = index;
                      edge_info->rhs = x;
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant as we
             know the value of OP0 on both arms of the branch.  i.e., we
             can record an equivalence for OP0 rather than COND.  */
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
              && is_gimple_min_invariant (op1))
            {
              if (code == EQ_EXPR)
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);
                }
              else
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);
                }
            }
          else if (is_gimple_min_invariant (op0)
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op0)))
                    && real_zerop (op0));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }
            }
          else if (TREE_CODE (op0) == SSA_NAME
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op1)))
                    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }
            }

          /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
        }
    }
}

void
dom_opt_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);

  /* Push a marker on the stacks of local information so that we know how
     far to unwind when we finalize this block.  */
  avail_exprs_stack.safe_push (NULL);
  const_and_copies_stack.safe_push (NULL_TREE);

  record_equivalences_from_incoming_edge (bb);

  /* PHI nodes can create equivalences too.  */
  record_equivalences_from_phis (bb);

  /* Create equivalences from redundant PHIs.  PHIs are only truly
     redundant when they exist in the same block, so push another
     marker and unwind right afterwards.  */
  avail_exprs_stack.safe_push (NULL);
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    eliminate_redundant_computations (&gsi);
  remove_local_expressions_from_table ();

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    optimize_stmt (bb, gsi);

  /* Now prepare to process dominated blocks.  */
  record_edge_info (bb);
  cprop_into_successor_phis (bb);
}

/* We have finished processing the dominator children of BB, perform
   any finalization actions in preparation for leaving this node in
   the dominator tree.  */

void
dom_opt_dom_walker::after_dom_children (basic_block bb)
{
  gimple last;

  /* If we have an outgoing edge to a block with multiple incoming and
     outgoing edges, then we may be able to thread the edge, i.e., we
     may be able to statically determine which of the outgoing edges
     will be traversed when the incoming edge from BB is traversed.  */
  if (single_succ_p (bb)
      && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
      && potentially_threadable_block (single_succ (bb)))
    {
      thread_across_edge (single_succ_edge (bb));
    }
  else if ((last = last_stmt (bb))
           && gimple_code (last) == GIMPLE_COND
           && EDGE_COUNT (bb->succs) == 2
           && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
           && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
    {
      edge true_edge, false_edge;

      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

      /* Only try to thread the edge if it reaches a target block with
         more than one predecessor and more than one successor.  */
      if (potentially_threadable_block (true_edge->dest))
        thread_across_edge (true_edge);

      /* Similarly for the ELSE arm.  */
      if (potentially_threadable_block (false_edge->dest))
        thread_across_edge (false_edge);
    }

  /* These remove expressions local to BB from the tables.  */
  remove_local_expressions_from_table ();
  restore_vars_to_original_value ();
}

/* Search for redundant computations in STMT.  If any are found, then
   replace them with the variable holding the result of the computation.

   If safe, record this expression into the available expression hash
   table.  */

static void
eliminate_redundant_computations (gimple_stmt_iterator* gsi)
{
  tree expr_type;
  tree cached_lhs;
  tree def;
  bool insert = true;
  bool assigns_var_p = false;

  gimple stmt = gsi_stmt (*gsi);

  if (gimple_code (stmt) == GIMPLE_PHI)
    def = gimple_phi_result (stmt);
  else
    def = gimple_get_lhs (stmt);

  /* Certain expressions on the RHS can be optimized away, but can not
     themselves be entered into the hash tables.  */
  if (! def
      || TREE_CODE (def) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
      || gimple_vdef (stmt)
      /* Do not record equivalences for increments of ivs.  This would create
         overlapping live ranges for a very questionable gain.  */
      || simple_iv_increment_p (stmt))
    insert = false;

  /* Check if the expression has been computed before.  */
  cached_lhs = lookup_avail_expr (stmt, insert);

  opt_stats.num_exprs_considered++;

  /* Get the type of the expression we are trying to optimize.  */
  if (is_gimple_assign (stmt))
    {
      expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    expr_type = boolean_type_node;
  else if (is_gimple_call (stmt))
    {
      gcc_assert (gimple_call_lhs (stmt));
      expr_type = TREE_TYPE (gimple_call_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_SWITCH)
    expr_type = TREE_TYPE (gimple_switch_index (stmt));
  else if (gimple_code (stmt) == GIMPLE_PHI)
    /* We can't propagate into a phi, so the logic below doesn't apply.
       Instead record an equivalence between the cached LHS and the
       PHI result of this statement, provided they are in the same block.
       This should be sufficient to kill the redundant phi.  */
    {
      if (def && cached_lhs)
        record_const_or_copy (def, cached_lhs);
      return;
    }
  else
    gcc_unreachable ();

  if (! cached_lhs)
    return;

  /* It is safe to ignore types here since we have already done
     type checking in the hashing and equality routines.  In fact
     type checking here merely gets in the way of constant
     propagation.  Also, make sure that it is safe to propagate
     CACHED_LHS into the expression in STMT.  */
  if ((TREE_CODE (cached_lhs) != SSA_NAME
       && (assigns_var_p
           || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
      || may_propagate_copy_into_stmt (stmt, cached_lhs))
    {
      gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
                           || is_gimple_min_invariant (cached_lhs));

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replaced redundant expr '");
          print_gimple_expr (dump_file, stmt, 0, dump_flags);
          fprintf (dump_file, "' with '");
          print_generic_expr (dump_file, cached_lhs, dump_flags);
          fprintf (dump_file, "'\n");
        }

      opt_stats.num_re++;

      if (assigns_var_p
          && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
        cached_lhs = fold_convert (expr_type, cached_lhs);

      propagate_tree_value_into_stmt (gsi, cached_lhs);

      /* Since it is always necessary to mark the result as modified,
         perhaps we should move this into propagate_tree_value_into_stmt
         itself.  */
      gimple_set_modified (gsi_stmt (*gsi), true);
    }
}

/* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
   the available expressions table or the const_and_copies table.
   Detect and record those equivalences.  */
/* We handle only very simple copy equivalences here.  The heavy
   lifting is done by eliminate_redundant_computations.  */

static void
record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
{
  tree lhs;
  enum tree_code lhs_code;

  gcc_assert (is_gimple_assign (stmt));

  lhs = gimple_assign_lhs (stmt);
  lhs_code = TREE_CODE (lhs);

  if (lhs_code == SSA_NAME
      && gimple_assign_single_p (stmt))
    {
      tree rhs = gimple_assign_rhs1 (stmt);

      /* If the RHS of the assignment is a constant or another variable that
         may be propagated, register it in the CONST_AND_COPIES table.  We
         do not need to record unwind data for this, since this is a true
         assignment and not an equivalence inferred from a comparison.  All
         uses of this ssa name are dominated by this assignment, so unwinding
         just costs time and space.  */
      if (may_optimize_p
          && (TREE_CODE (rhs) == SSA_NAME
              || is_gimple_min_invariant (rhs)))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "==== ASGN ");
              print_generic_expr (dump_file, lhs, 0);
              fprintf (dump_file, " = ");
              print_generic_expr (dump_file, rhs, 0);
              fprintf (dump_file, "\n");
            }

          set_ssa_name_value (lhs, rhs);
        }
    }
2135 /* A memory store, even an aliased store, creates a useful
2136 equivalence. By exchanging the LHS and RHS, creating suitable
2137 vops and recording the result in the available expression table,
2138 we may be able to expose more redundant loads. */
2139 if (!gimple_has_volatile_ops (stmt
)
2140 && gimple_references_memory_p (stmt
)
2141 && gimple_assign_single_p (stmt
)
2142 && (TREE_CODE (gimple_assign_rhs1 (stmt
)) == SSA_NAME
2143 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt
)))
2144 && !is_gimple_reg (lhs
))
2146 tree rhs
= gimple_assign_rhs1 (stmt
);
2149 /* Build a new statement with the RHS and LHS exchanged. */
2150 if (TREE_CODE (rhs
) == SSA_NAME
)
2152 /* NOTE tuples. The call to gimple_build_assign below replaced
2153 a call to build_gimple_modify_stmt, which did not set the
2154 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
2155 may cause an SSA validation failure, as the LHS may be a
2156 default-initialized name and should have no definition. I'm
2157 a bit dubious of this, as the artificial statement that we
2158 generate here may in fact be ill-formed, but it is simply
2159 used as an internal device in this pass, and never becomes
2161 gimple defstmt
= SSA_NAME_DEF_STMT (rhs
);
2162 new_stmt
= gimple_build_assign (rhs
, lhs
);
2163 SSA_NAME_DEF_STMT (rhs
) = defstmt
;
2166 new_stmt
= gimple_build_assign (rhs
, lhs
);
2168 gimple_set_vuse (new_stmt
, gimple_vdef (stmt
));
2170 /* Finally enter the statement into the available expression
2172 lookup_avail_expr (new_stmt
, true);
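
/* To illustrate the store equivalence recorded above with hypothetical
   IL: after

       *p_1 = x_2;

   the artificial statement x_2 = *p_1 is entered into the available
   expression table, so a dominated load

       y_3 = *p_1;

   can later be discovered to be redundant and replaced by

       y_3 = x_2;  */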
/* Replace *OP_P in STMT with any known equivalent value for *OP_P from
   CONST_AND_COPIES.  */

static void
cprop_operand (gimple stmt, use_operand_p op_p)
{
  tree val;
  tree op = USE_FROM_PTR (op_p);

  /* If the operand has a known constant value or it is known to be a
     copy of some other variable, use the value or copy stored in
     CONST_AND_COPIES.  */
  val = SSA_NAME_VALUE (op);
  if (val && val != op)
    {
      /* Do not replace hard register operands in asm statements.  */
      if (gimple_code (stmt) == GIMPLE_ASM
          && !may_propagate_copy_into_asm (op))
        return;

      /* Certain operands are not allowed to be copy propagated due
         to their interaction with exception handling and some GCC
         extensions.  */
      if (!may_propagate_copy (op, val))
        return;

      /* Do not propagate addresses that point to volatiles into memory
         stmts without volatile operands.  */
      if (POINTER_TYPE_P (TREE_TYPE (val))
          && TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (val)))
          && gimple_has_mem_ops (stmt)
          && !gimple_has_volatile_ops (stmt))
        return;

      /* Do not propagate copies if the propagated value is at a deeper loop
         depth than the propagatee.  Otherwise, this may move loop variant
         variables outside of their loops and prevent coalescing
         opportunities.  If the value was loop invariant, it will be hoisted
         by LICM and exposed for copy propagation.  */
      if (loop_depth_of_name (val) > loop_depth_of_name (op))
        return;

      /* Do not propagate copies into simple IV increment statements.
         See PR23821 for how this can disturb IV analysis.  */
      if (TREE_CODE (val) != INTEGER_CST
          && simple_iv_increment_p (stmt))
        return;

      /* Dump details.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replaced '");
          print_generic_expr (dump_file, op, dump_flags);
          fprintf (dump_file, "' with %s '",
                   (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
          print_generic_expr (dump_file, val, dump_flags);
          fprintf (dump_file, "'\n");
        }

      if (TREE_CODE (val) != SSA_NAME)
        opt_stats.num_const_prop++;
      else
        opt_stats.num_copy_prop++;

      propagate_value (op_p, val);

      /* And note that we modified this statement.  This is now
         safe, even if we changed virtual operands since we will
         rescan the statement and rewrite its operands again.  */
      gimple_set_modified (stmt, true);
    }
}
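
/* For example (hypothetical IL): if CONST_AND_COPIES records that x_1
   has the value 5, then

       y_2 = x_1 + 3;

   is rewritten by cprop_operand into

       y_2 = 5 + 3;

   and marked modified so that the fold in optimize_stmt can reduce it
   to y_2 = 8.  */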
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the uses, vuses and
   vdef_ops of STMT.  */

static void
cprop_into_stmt (gimple stmt)
{
  use_operand_p op_p;
  ssa_op_iter iter;

  FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
    cprop_operand (stmt, op_p);
}
/* Optimize the statement pointed to by iterator SI.

   We try to perform some simplistic global redundancy elimination and
   constant propagation:

   1- To detect global redundancy, we keep track of expressions that have
      been computed in this block and its dominators.  If we find that the
      same expression is computed more than once, we eliminate repeated
      computations by using the target of the first one.

   2- Constant values and copy assignments.  This is used to do very
      simplistic constant and copy propagation.  When a constant or copy
      assignment is found, we map the value on the RHS of the assignment to
      the variable in the LHS in the CONST_AND_COPIES table.  */

static void
optimize_stmt (basic_block bb, gimple_stmt_iterator si)
{
  gimple stmt, old_stmt;
  bool may_optimize_p;
  bool modified_p = false;

  old_stmt = stmt = gsi_stmt (si);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Optimizing statement ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
    }

  if (gimple_code (stmt) == GIMPLE_COND)
    canonicalize_comparison (stmt);

  update_stmt_if_modified (stmt);
  opt_stats.num_stmts++;

  /* Const/copy propagate into USES, VUSES and the RHS of VDEFs.  */
  cprop_into_stmt (stmt);

  /* If the statement has been modified with constant replacements,
     fold its RHS before checking for redundant computations.  */
  if (gimple_modified_p (stmt))
    {
      tree rhs = NULL;

      /* Try to fold the statement making sure that STMT is kept
         up to date.  */
      if (fold_stmt (&si))
        {
          stmt = gsi_stmt (si);
          gimple_set_modified (stmt, true);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "  Folded to: ");
              print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
            }
        }

      /* We only need to consider cases that can yield a gimple operand.  */
      if (gimple_assign_single_p (stmt))
        rhs = gimple_assign_rhs1 (stmt);
      else if (gimple_code (stmt) == GIMPLE_GOTO)
        rhs = gimple_goto_dest (stmt);
      else if (gimple_code (stmt) == GIMPLE_SWITCH)
        /* This should never be an ADDR_EXPR.  */
        rhs = gimple_switch_index (stmt);

      if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
        recompute_tree_invariant_for_addr_expr (rhs);

      /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
         even if fold_stmt updated the stmt already and thus cleared
         gimple_modified_p flag on it.  */
      modified_p = true;
    }

  /* Check for redundant computations.  Do this optimization only
     for assignments that have no volatile ops and conditionals.  */
  may_optimize_p = (!gimple_has_side_effects (stmt)
                    && (is_gimple_assign (stmt)
                        || (is_gimple_call (stmt)
                            && gimple_call_lhs (stmt) != NULL_TREE)
                        || gimple_code (stmt) == GIMPLE_COND
                        || gimple_code (stmt) == GIMPLE_SWITCH));

  if (may_optimize_p)
    {
      if (gimple_code (stmt) == GIMPLE_CALL)
        {
          /* Resolve __builtin_constant_p.  If it hasn't been
             folded to integer_one_node by now, it's fairly
             certain that the value simply isn't constant.  */
          tree callee = gimple_call_fndecl (stmt);
          if (callee
              && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
              && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
            {
              propagate_tree_value_into_stmt (&si, integer_zero_node);
              stmt = gsi_stmt (si);
            }
        }

      update_stmt_if_modified (stmt);
      eliminate_redundant_computations (&si);
      stmt = gsi_stmt (si);
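
      /* The store elimination below catches stores of a value that is
         already known to be at that location.  An illustrative
         (hypothetical) fragment:

             x_1 = *p_2;
             ...
             *p_2 = x_1;

         Looking up the exchanged statement x_1 = *p_2 with the store's
         VUSE finds the earlier load, so the store is redundant and can
         be removed.  */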
      /* Perform simple redundant store elimination.  */
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
        {
          tree lhs = gimple_assign_lhs (stmt);
          tree rhs = gimple_assign_rhs1 (stmt);
          tree cached_lhs;
          gimple new_stmt;
          if (TREE_CODE (rhs) == SSA_NAME)
            {
              tree tem = SSA_NAME_VALUE (rhs);
              if (tem)
                rhs = tem;
            }
          /* Build a new statement with the RHS and LHS exchanged.  */
          if (TREE_CODE (rhs) == SSA_NAME)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (rhs);
              new_stmt = gimple_build_assign (rhs, lhs);
              SSA_NAME_DEF_STMT (rhs) = defstmt;
            }
          else
            new_stmt = gimple_build_assign (rhs, lhs);
          gimple_set_vuse (new_stmt, gimple_vuse (stmt));
          cached_lhs = lookup_avail_expr (new_stmt, false);
          if (cached_lhs
              && rhs == cached_lhs)
            {
              basic_block bb = gimple_bb (stmt);
              unlink_stmt_vdef (stmt);
              if (gsi_remove (&si, true))
                {
                  bitmap_set_bit (need_eh_cleanup, bb->index);
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file, "  Flagged to clear EH edges.\n");
                }
              release_defs (stmt);
              return;
            }
        }
    }

  /* Record any additional equivalences created by this statement.  */
  if (is_gimple_assign (stmt))
    record_equivalences_from_stmt (stmt, may_optimize_p);

  /* If STMT is a COND_EXPR and it was modified, then we may know
     where it goes.  If that is the case, then mark the CFG as altered.

     This will cause us to later call remove_unreachable_blocks and
     cleanup_tree_cfg when it is safe to do so.  It is not safe to
     clean things up here since removal of edges and such can trigger
     the removal of PHI nodes, which in turn can release SSA_NAMEs to
     the manager.

     That's all fine and good, except that once SSA_NAMEs are released
     to the manager, we must not call create_ssa_name until all references
     to released SSA_NAMEs have been eliminated.

     All references to the deleted SSA_NAMEs cannot be eliminated until
     we remove unreachable blocks.

     We cannot remove unreachable blocks until after we have completed
     any queued jump threading.

     We cannot complete any queued jump threads until we have taken
     appropriate variables out of SSA form.  Taking variables out of
     SSA form can call create_ssa_name and thus we lose.

     Ultimately I suspect we're going to need to change the interface
     into the SSA_NAME manager.  */
  if (gimple_modified_p (stmt) || modified_p)
    {
      tree val = NULL;

      update_stmt_if_modified (stmt);

      if (gimple_code (stmt) == GIMPLE_COND)
        val = fold_binary_loc (gimple_location (stmt),
                               gimple_cond_code (stmt), boolean_type_node,
                               gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
      else if (gimple_code (stmt) == GIMPLE_SWITCH)
        val = gimple_switch_index (stmt);

      if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
        cfg_altered = true;

      /* If we simplified a statement in such a way that it can be shown
         not to trap, update the eh information and the cfg to match.  */
      if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
        {
          bitmap_set_bit (need_eh_cleanup, bb->index);

          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  Flagged to clear EH edges.\n");
        }
    }
}
/* Search for an existing instance of STMT in the AVAIL_EXPRS table.
   If found, return its LHS.  Otherwise insert STMT in the table and
   return NULL_TREE.

   Also, when an expression is first inserted in the table, it is
   added to AVAIL_EXPRS_STACK, so that it can be removed when
   we finish processing this block and its children.  */

static tree
lookup_avail_expr (gimple stmt, bool insert)
{
  expr_hash_elt **slot;
  tree lhs;
  tree temp;
  struct expr_hash_elt element;

  /* Get LHS of phi, assignment, or call; else NULL_TREE.  */
  if (gimple_code (stmt) == GIMPLE_PHI)
    lhs = gimple_phi_result (stmt);
  else
    lhs = gimple_get_lhs (stmt);

  initialize_hash_element (stmt, lhs, &element);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "LKUP ");
      print_expr_hash_elt (dump_file, &element);
    }

  /* Don't bother remembering constant assignments and copy operations.
     Constants and copy operations are handled by the constant/copy propagator
     in optimize_stmt.  */
  if (element.expr.kind == EXPR_SINGLE
      && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
          || is_gimple_min_invariant (element.expr.ops.single.rhs)))
    return NULL_TREE;

  /* Finally try to find the expression in the main expression hash table.  */
  slot = avail_exprs.find_slot_with_hash (&element, element.hash,
                                          (insert ? INSERT : NO_INSERT));
  if (slot == NULL)
    {
      free_expr_hash_elt_contents (&element);
      return NULL_TREE;
    }
  else if (*slot == NULL)
    {
      struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
      *element2 = element;
      element2->stamp = element2;
      *slot = element2;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "2>>> ");
          print_expr_hash_elt (dump_file, element2);
        }

      avail_exprs_stack.safe_push (element2);
      return NULL_TREE;
    }
  else
    free_expr_hash_elt_contents (&element);

  /* Extract the LHS of the assignment so that it can be used as the current
     definition of another variable.  */
  lhs = ((struct expr_hash_elt *) *slot)->lhs;

  /* See if the LHS appears in the CONST_AND_COPIES table.  If it does, then
     use the value from the const_and_copies table.  */
  if (TREE_CODE (lhs) == SSA_NAME)
    {
      temp = SSA_NAME_VALUE (lhs);
      if (temp)
        lhs = temp;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "FIND: ");
      print_generic_expr (dump_file, lhs, 0);
      fprintf (dump_file, "\n");
    }

  return lhs;
}
/* Hashing and equality functions for AVAIL_EXPRS.  We compute a value number
   for expressions using the code of the expression and the SSA numbers of
   its operands.  */

static hashval_t
avail_expr_hash (const void *p)
{
  gimple stmt = ((const struct expr_hash_elt *) p)->stmt;
  const struct hashable_expr *expr = &((const struct expr_hash_elt *) p)->expr;
  tree vuse;
  hashval_t val = 0;

  val = iterative_hash_hashable_expr (expr, val);

  /* If the hash table entry is not associated with a statement, then we
     can just hash the expression and not worry about virtual operands
     and such.  */
  if (!stmt)
    return val;

  /* Add the SSA version numbers of the vuse operand.  This is important
     because compound variables like arrays are not renamed in the
     operands.  Rather, the rename is done on the virtual variable
     representing all the elements of the array.  */
  if ((vuse = gimple_vuse (stmt)))
    val = iterative_hash_expr (vuse, val);

  return val;
}
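
/* Mixing in the VUSE keeps loads that are separated by a store from
   matching each other.  An illustrative (hypothetical) fragment:

       # VUSE <.MEM_5>
       x_1 = a[i_2];
       # .MEM_6 = VDEF <.MEM_5>
       a[i_2] = 0;
       # VUSE <.MEM_6>
       y_3 = a[i_2];

   The two loads hash differently because their VUSEs differ, so the
   second load is not treated as redundant with the first.  */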
/* PHI-ONLY copy and constant propagation.  This pass is meant to clean
   up degenerate PHIs created by or exposed by jump threading.  */

/* Given PHI, return its RHS if the PHI is a degenerate, otherwise return
   NULL.  */

tree
degenerate_phi_result (gimple phi)
{
  tree lhs = gimple_phi_result (phi);
  tree val = NULL;
  size_t i;

  /* Ignoring arguments which are the same as LHS, if all the remaining
     arguments are the same, then the PHI is a degenerate and has the
     value of that common argument.  */
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      tree arg = gimple_phi_arg_def (phi, i);

      if (arg == lhs)
        continue;
      else if (!arg)
        break;
      else if (!val)
        val = arg;
      else if (arg == val)
        continue;
      /* We bring in some of operand_equal_p not only to speed things
         up, but also to avoid crashing when dereferencing the type of
         a released SSA name.  */
      else if (TREE_CODE (val) != TREE_CODE (arg)
               || TREE_CODE (val) == SSA_NAME
               || !operand_equal_p (arg, val, 0))
        break;
    }
  return (i == gimple_phi_num_args (phi) ? val : NULL);
}
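
/* For example (hypothetical IL), the PHI

       x_1 = PHI <y_2(3), y_2(4), x_1(5)>

   is degenerate: ignoring the self-referencing argument, all arguments
   are y_2, so degenerate_phi_result returns y_2.  */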
/* Given a statement STMT, which is either a PHI node or an assignment,
   remove it from the IL.  */

static void
remove_stmt_or_phi (gimple stmt)
{
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);

  if (gimple_code (stmt) == GIMPLE_PHI)
    remove_phi_node (&gsi, true);
  else
    {
      gsi_remove (&gsi, true);
      release_defs (stmt);
    }
}

/* Given a statement STMT, which is either a PHI node or an assignment,
   return the "rhs" of the node; in the case of a non-degenerate
   PHI, NULL is returned.  */

static tree
get_rhs_or_phi_arg (gimple stmt)
{
  if (gimple_code (stmt) == GIMPLE_PHI)
    return degenerate_phi_result (stmt);
  else if (gimple_assign_single_p (stmt))
    return gimple_assign_rhs1 (stmt);
  gcc_unreachable ();
}


/* Given a statement STMT, which is either a PHI node or an assignment,
   return the "lhs" of the node.  */

static tree
get_lhs_or_phi_result (gimple stmt)
{
  if (gimple_code (stmt) == GIMPLE_PHI)
    return gimple_phi_result (stmt);
  else if (is_gimple_assign (stmt))
    return gimple_assign_lhs (stmt);
  gcc_unreachable ();
}
/* Propagate RHS into all uses of LHS (when possible).

   RHS and LHS are derived from STMT, which is passed in solely so
   that we can remove it if propagation is successful.

   When propagating into a PHI node or into a statement which turns
   into a trivial copy or constant initialization, set the
   appropriate bit in INTERESTING_NAMEs so that we will visit those
   nodes as well in an effort to pick up secondary optimization
   opportunities.  */

static void
propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs,
                        bitmap interesting_names)
{
  /* First verify that propagation is valid and isn't going to move a
     loop variant variable outside its loop.  */
  if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
      && (TREE_CODE (rhs) != SSA_NAME
          || ! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs))
      && may_propagate_copy (lhs, rhs)
      && loop_depth_of_name (lhs) >= loop_depth_of_name (rhs))
    {
      use_operand_p use_p;
      imm_use_iterator iter;
      gimple use_stmt;
      bool all = true;

      /* Dump details.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replacing '");
          print_generic_expr (dump_file, lhs, dump_flags);
          fprintf (dump_file, "' with %s '",
                   (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
          print_generic_expr (dump_file, rhs, dump_flags);
          fprintf (dump_file, "'\n");
        }

      /* Walk over every use of LHS and try to replace the use with RHS.
         At this point the only reason why such a propagation would not
         be successful would be if the use occurs in an ASM_EXPR.  */
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
        {
          /* Leave debug stmts alone.  If we succeed in propagating
             all non-debug uses, we'll drop the DEF, and propagation
             into debug stmts will occur then.  */
          if (gimple_debug_bind_p (use_stmt))
            continue;

          /* It's not always safe to propagate into an ASM_EXPR.  */
          if (gimple_code (use_stmt) == GIMPLE_ASM
              && ! may_propagate_copy_into_asm (lhs))
            {
              all = false;
              continue;
            }

          /* It's not ok to propagate into the definition stmt of RHS.
                <bb 9>:
                  # prephitmp.12_36 = PHI <g_67.1_6(9)>
                  g_67.1_6 = prephitmp.12_36;
             While this is strictly all dead code we do not want to
             deal with this here.  */
          if (TREE_CODE (rhs) == SSA_NAME
              && SSA_NAME_DEF_STMT (rhs) == use_stmt)
            {
              all = false;
              continue;
            }

          /* Dump details.  */
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "    Original statement:");
              print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
            }

          /* Propagate the RHS into this use of the LHS.  */
          FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
            propagate_value (use_p, rhs);

          /* Special cases to avoid useless calls into the folding
             routines, operand scanning, etc.

             Propagation into a PHI may cause the PHI to become
             a degenerate, so mark the PHI as interesting.  No other
             actions are necessary.  */
          if (gimple_code (use_stmt) == GIMPLE_PHI)
            {
              tree result;

              /* Dump details.  */
              if (dump_file && (dump_flags & TDF_DETAILS))
                {
                  fprintf (dump_file, "    Updated statement:");
                  print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
                }

              result = get_lhs_or_phi_result (use_stmt);
              bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
              continue;
            }

          /* From this point onward we are propagating into a
             real statement.  Folding may (or may not) be possible,
             we may expose new operands, expose dead EH edges,
             etc.  */
          /* NOTE tuples.  In the tuples world, fold_stmt_inplace
             cannot fold a call that simplifies to a constant,
             because the GIMPLE_CALL must be replaced by a
             GIMPLE_ASSIGN, and there is no way to effect such a
             transformation in-place.  We might want to consider
             using the more general fold_stmt here.  */
            {
              gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
              fold_stmt_inplace (&gsi);
            }

          /* Sometimes propagation can expose new operands to the
             renamer.  */
          update_stmt (use_stmt);

          /* Dump details.  */
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "    Updated statement:");
              print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
            }

          /* If we replaced a variable index with a constant, then
             we would need to update the invariant flag for ADDR_EXPRs.  */
          if (gimple_assign_single_p (use_stmt)
              && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
            recompute_tree_invariant_for_addr_expr
                (gimple_assign_rhs1 (use_stmt));

          /* If we cleaned up EH information from the statement,
             mark its containing block as needing EH cleanups.  */
          if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
            {
              bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "  Flagged to clear EH edges.\n");
            }

          /* Propagation may expose new trivial copy/constant propagation
             opportunities.  */
          if (gimple_assign_single_p (use_stmt)
              && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
              && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
                  || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
            {
              tree result = get_lhs_or_phi_result (use_stmt);
              bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
            }

          /* Propagation into these nodes may make certain edges in
             the CFG unexecutable.  We want to identify them, as PHI
             nodes at the destination of those unexecutable edges may
             become degenerate.  */
          else if (gimple_code (use_stmt) == GIMPLE_COND
                   || gimple_code (use_stmt) == GIMPLE_SWITCH
                   || gimple_code (use_stmt) == GIMPLE_GOTO)
            {
              tree val;

              if (gimple_code (use_stmt) == GIMPLE_COND)
                val = fold_binary_loc (gimple_location (use_stmt),
                                       gimple_cond_code (use_stmt),
                                       boolean_type_node,
                                       gimple_cond_lhs (use_stmt),
                                       gimple_cond_rhs (use_stmt));
              else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
                val = gimple_switch_index (use_stmt);
              else
                val = gimple_goto_dest (use_stmt);

              if (val && is_gimple_min_invariant (val))
                {
                  basic_block bb = gimple_bb (use_stmt);
                  edge te = find_taken_edge (bb, val);
                  edge_iterator ei;
                  edge e;
                  gimple_stmt_iterator gsi, psi;

                  /* Remove all outgoing edges except TE.  */
                  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
                    {
                      if (e != te)
                        {
                          /* Mark all the PHI nodes at the destination of
                             the unexecutable edge as interesting.  */
                          for (psi = gsi_start_phis (e->dest);
                               !gsi_end_p (psi);
                               gsi_next (&psi))
                            {
                              gimple phi = gsi_stmt (psi);

                              tree result = gimple_phi_result (phi);
                              int version = SSA_NAME_VERSION (result);

                              bitmap_set_bit (interesting_names, version);
                            }

                          te->probability += e->probability;

                          te->count += e->count;
                          remove_edge (e);
                          cfg_altered = true;
                        }
                      else
                        ei_next (&ei);
                    }

                  gsi = gsi_last_bb (gimple_bb (use_stmt));
                  gsi_remove (&gsi, true);

                  /* And fixup the flags on the single remaining edge.  */
                  te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
                  te->flags &= ~EDGE_ABNORMAL;
                  te->flags |= EDGE_FALLTHRU;
                  if (te->probability > REG_BR_PROB_BASE)
                    te->probability = REG_BR_PROB_BASE;
                }
            }
        }

      /* Ensure there is nothing else to do.  */
      gcc_assert (!all || has_zero_uses (lhs));

      /* If we were able to propagate away all uses of LHS, then
         we can remove STMT.  */
      if (all)
        remove_stmt_or_phi (stmt);
    }
}
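
/* End-to-end sketch (hypothetical IL): replacing every use of x_1 with
   the constant 0 may turn

       if (x_1 == 0) goto <bb 3>; else goto <bb 4>;

   into a condition that folds to a constant; the code above then
   removes all outgoing edges except the taken one, deletes the
   GIMPLE_COND, and marks the PHIs at the destinations of the removed
   edges as interesting, since they may now be degenerate.  */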
/* STMT is either a PHI node (potentially a degenerate PHI node) or
   a statement that is a trivial copy or constant initialization.

   Attempt to eliminate STMT by propagating its RHS into all uses of
   its LHS.  This may in turn set new bits in INTERESTING_NAMES
   for nodes we want to revisit later.

   All exit paths should clear INTERESTING_NAMES for the result
   of STMT.  */

static void
eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
{
  tree lhs = get_lhs_or_phi_result (stmt);
  tree rhs;
  int version = SSA_NAME_VERSION (lhs);

  /* If the LHS of this statement or PHI has no uses, then we can
     just eliminate it.  This can occur if, for example, the PHI
     was created by block duplication due to threading and its only
     use was in the conditional at the end of the block which was
     deleted.  */
  if (has_zero_uses (lhs))
    {
      bitmap_clear_bit (interesting_names, version);
      remove_stmt_or_phi (stmt);
      return;
    }

  /* Get the RHS of the assignment or PHI node if the PHI is a
     degenerate.  */
  rhs = get_rhs_or_phi_arg (stmt);
  if (!rhs)
    {
      bitmap_clear_bit (interesting_names, version);
      return;
    }

  if (!virtual_operand_p (lhs))
    propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
  else
    {
      gimple use_stmt;
      imm_use_iterator iter;
      use_operand_p use_p;
      /* For virtual operands we have to propagate into all uses as
         otherwise we will create overlapping life-ranges.  */
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
        FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
          SET_USE (use_p, rhs);
      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
        SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
      remove_stmt_or_phi (stmt);
    }

  /* Note that STMT may well have been deleted by now, so do
     not access it, instead use the saved version # to clear
     STMT's entry in the worklist.  */
  bitmap_clear_bit (interesting_names, version);
}
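
/* Note the asymmetry above: for a virtual operand such as a degenerate

       .MEM_3 = PHI <.MEM_2(4)>

   (hypothetical IL shown for illustration) we bypass
   propagate_rhs_into_lhs and rewrite every use of .MEM_3 to .MEM_2
   directly, since leaving a partially-propagated virtual definition
   behind would create overlapping live ranges.  */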
/* The first phase in degenerate PHI elimination.

   Eliminate the degenerate PHIs in BB, then recurse on the
   dominator children of BB.  */

static void
eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
{
  gimple_stmt_iterator gsi;
  basic_block son;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      eliminate_const_or_copy (phi, interesting_names);
    }

  /* Recurse into the dominator children of BB.  */
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    eliminate_degenerate_phis_1 (son, interesting_names);
}
/* A very simple pass to eliminate degenerate PHI nodes from the
   IL.  This is meant to be fast enough to be able to be run several
   times in the optimization pipeline.

   Certain optimizations, particularly those which duplicate blocks
   or remove edges from the CFG, can create or expose PHIs which are
   trivial copies or constant initializations.

   While we could pick up these optimizations in DOM or with the
   combination of copy-prop and CCP, those solutions are far too
   heavy-weight for our needs.

   This implementation has two phases so that we can efficiently
   eliminate the first order degenerate PHIs and second order
   degenerate PHIs.

   The first phase performs a dominator walk to identify and eliminate
   the vast majority of the degenerate PHIs.  When a degenerate PHI
   is identified and eliminated, any affected statements or PHIs
   are put on a worklist.

   The second phase eliminates degenerate PHIs and trivial copies
   or constant initializations using the worklist.  This is how we
   pick up the secondary optimization opportunities with minimal
   compile-time cost.  */

static unsigned int
eliminate_degenerate_phis (void)
{
  bitmap interesting_names;
  bitmap interesting_names1;

  /* Bitmap of blocks which need EH information updated.  We cannot
     update it on-the-fly as doing so invalidates the dominator tree.  */
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  /* INTERESTING_NAMES is effectively our worklist, indexed by
     SSA_NAME_VERSION.

     A set bit indicates that the statement or PHI node which
     defines the SSA_NAME should be (re)examined to determine if
     it has become a degenerate PHI or trivial const/copy propagation
     opportunity.

     Experiments have shown we generally get better compilation
     time behavior with bitmaps rather than sbitmaps.  */
  interesting_names = BITMAP_ALLOC (NULL);
  interesting_names1 = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* First phase.  Eliminate degenerate PHIs via a dominator
     walk of the CFG.

     Experiments have indicated that we generally get better
     compile-time behavior by visiting blocks in the first
     phase in dominator order.  Presumably this is because walking
     in dominator order leaves fewer PHIs for later examination
     by the worklist phase.  */
  eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR, interesting_names);

  /* Second phase.  Eliminate second order degenerate PHIs as well
     as trivial copies or constant initializations identified by
     the first phase or this phase.  Basically we keep iterating
     until our set of INTERESTING_NAMEs is empty.  */
  while (!bitmap_empty_p (interesting_names))
    {
      unsigned int i;
      bitmap_iterator bi;

      /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
         changed during the loop.  Copy it to another bitmap and
         use that.  */
      bitmap_copy (interesting_names1, interesting_names);

      EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
        {
          tree name = ssa_name (i);

          /* Ignore SSA_NAMEs that have been released because
             their defining statement was deleted (unreachable).  */
          if (name)
            eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
                                     interesting_names);
        }
    }

  if (cfg_altered)
    {
      free_dominance_info (CDI_DOMINATORS);
      /* If we changed the CFG schedule loops for fixup by cfgcleanup.  */
      if (current_loops)
        loops_state_set (LOOPS_NEED_FIXUP);
    }

  /* Propagation of const and copies may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      BITMAP_FREE (need_eh_cleanup);
    }

  BITMAP_FREE (interesting_names);
  BITMAP_FREE (interesting_names1);
  return 0;
}
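
/* Sketch of a second order degenerate PHI picked up by the worklist
   phase (hypothetical IL): eliminating

       x_1 = PHI <y_2(3), y_2(4)>

   rewrites a use in z_5 = PHI <x_1(6), y_2(7)> into
   z_5 = PHI <y_2(6), y_2(7)>, which is itself degenerate; z_5 was
   marked in INTERESTING_NAMES and is eliminated on a later
   iteration.  */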
namespace {

const pass_data pass_data_phi_only_cprop =
{
  GIMPLE_PASS, /* type */
  "phicprop", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TREE_PHI_CPROP, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_verify_ssa
    | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_phi_only_cprop : public gimple_opt_pass
{
public:
  pass_phi_only_cprop (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
  bool gate () { return gate_dominator (); }
  unsigned int execute () { return eliminate_degenerate_phis (); }

}; // class pass_phi_only_cprop

} // anon namespace

gimple_opt_pass *
make_pass_phi_only_cprop (gcc::context *ctxt)
{
  return new pass_phi_only_cprop (ctxt);
}