/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2014 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "tree.h"
#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "function.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"

/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op; tree opnd; } unary;
    struct { enum tree_code op; tree opnd0, opnd1; } binary;
    struct { enum tree_code op; tree opnd0, opnd1, opnd2; } ternary;
    struct { gimple fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};

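/* For example (using hypothetical SSA names), an assignment
   a_1 = b_2 + c_3 is recorded with kind EXPR_BINARY, the type of a_1,
   ops.binary.op PLUS_EXPR and operands b_2 and c_3, while a call
   a_1 = foo (b_2) uses EXPR_CALL and stores its arguments in
   ops.call.args.  */
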
/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;

/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};

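/* As an illustrative sketch (hypothetical SSA names): the true edge out
   of if (x_1 < y_2) can record the conditions x_1 < y_2, x_1 <= y_2 and
   x_1 != y_2 as true in COND_EQUIVALENCES, and the true edge out of
   if (x_1 == 42) can additionally record the simple equivalence
   LHS = x_1, RHS = 42.  */
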
/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;

static vec<expr_hash_elt_t> avail_exprs_stack;

/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The stmt pointer if this element corresponds to a statement.  */
  gimple stmt;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};

/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
                                   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher
{
  typedef expr_hash_elt *value_type;
  typedef expr_hash_elt *compare_type;
  typedef int store_values_directly;
  static inline hashval_t hash (const value_type &);
  static inline bool equal (const value_type &, const compare_type &);
  static inline void remove (value_type &);
};

inline hashval_t
expr_elt_hasher::hash (const value_type &p)
{
  return p->hash;
}

inline bool
expr_elt_hasher::equal (const value_type &p1, const compare_type &p2)
{
  gimple stmt1 = p1->stmt;
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  gimple stmt2 = p2->stmt;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  /* FIXME tuples:
     We add stmts to a hash table and then modify them.  To detect the case
     that we modify a stmt and then search for it, we assume that the hash
     is always modified by that change.
     We have to fully check why this doesn't happen on trunk or rewrite
     this in a more reliable (and easier to understand) way.  */
  if (((const struct expr_hash_elt *)p1)->hash
      != ((const struct expr_hash_elt *)p2)->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    {
      /* Note that STMT1 and/or STMT2 may be NULL.  */
      return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
              == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
    }

  return false;
}

/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type &element)
{
  free_expr_hash_elt (element);
}

/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static hash_table<expr_elt_hasher> *avail_exprs;

/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static vec<tree> const_and_copies_stack;

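/* To sketch the unwinding discipline: after a block pushes its NULL
   marker and then records x_1 = 5 (hypothetical name), the stack holds,
   from bottom to top, ... NULL_TREE, <previous value of x_1>, x_1.
   Unwinding pops a name and its previous value in pairs until the
   marker is reached.  */
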
/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;

/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *,
                             const hash_table<expr_elt_hasher> &);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);


/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (code == GIMPLE_CALL)
    {
      size_t nargs = gimple_call_num_args (stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = stmt;

      if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (stmt, i);
    }
  else if (code == GIMPLE_SWITCH)
    {
      expr->type = TREE_TYPE (gimple_switch_index (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->stmt = stmt;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}

/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->stmt = NULL;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Compare two hashable_expr structures for equivalence.
   They are considered equivalent when the expressions
   they denote must necessarily be equal.  The logic is intended
   to follow that of operand_equal_p in fold-const.c  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If the two types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
        return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
                           expr1->ops.binary.opnd0, 0)
          && operand_equal_p (expr0->ops.binary.opnd1,
                              expr1->ops.binary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
              && operand_equal_p (expr0->ops.binary.opnd0,
                                  expr1->ops.binary.opnd1, 0)
              && operand_equal_p (expr0->ops.binary.opnd1,
                                  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
          || !operand_equal_p (expr0->ops.ternary.opnd2,
                               expr1->ops.ternary.opnd2, 0))
        return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
                           expr1->ops.ternary.opnd0, 0)
          && operand_equal_p (expr0->ops.ternary.opnd1,
                              expr1->ops.ternary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
              && operand_equal_p (expr0->ops.ternary.opnd0,
                                  expr1->ops.ternary.opnd1, 0)
              && operand_equal_p (expr0->ops.ternary.opnd1,
                                  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        if (stmt_could_throw_p (expr0->ops.call.fn_from))
          {
            int lp0 = lookup_stmt_eh_lp (expr0->ops.call.fn_from);
            int lp1 = lookup_stmt_eh_lp (expr1->ops.call.fn_from);
            if ((lp0 > 0 || lp1 > 0) && lp0 != lp1)
              return false;
          }

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}

/* Generate a hash value for a pair of expressions.  This can be used
   iteratively by passing a previous result as the VAL argument.

   The same hash value is always returned for a given pair of expressions,
   regardless of the order in which they are presented.  This is useful in
   hashing the operands of commutative functions.  */

static hashval_t
iterative_hash_exprs_commutative (const_tree t1,
                                  const_tree t2, hashval_t val)
{
  hashval_t one = iterative_hash_expr (t1, 0);
  hashval_t two = iterative_hash_expr (t2, 0);
  hashval_t t;

  if (one > two)
    t = one, one = two, two = t;
  val = iterative_hash_hashval_t (one, val);
  val = iterative_hash_hashval_t (two, val);

  return val;
}

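/* For instance, for a commutative operation such as b_2 + c_3
   (hypothetical names), the two operand hashes are sorted before being
   mixed in, so the operand orders (b_2, c_3) and (c_3, b_2) hash
   identically, matching the operand-order tolerance in
   hashable_expr_equal_p.  */
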
/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow iterative_hash_expr in tree.c.  */

static hashval_t
iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      val = iterative_hash_expr (expr->ops.single.rhs, val);
      break;

    case EXPR_UNARY:
      val = iterative_hash_object (expr->ops.unary.op, val);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        val += TYPE_UNSIGNED (expr->type);

      val = iterative_hash_expr (expr->ops.unary.opnd, val);
      break;

    case EXPR_BINARY:
      val = iterative_hash_object (expr->ops.binary.op, val);
      if (commutative_tree_code (expr->ops.binary.op))
        val = iterative_hash_exprs_commutative (expr->ops.binary.opnd0,
                                                expr->ops.binary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.binary.opnd0, val);
          val = iterative_hash_expr (expr->ops.binary.opnd1, val);
        }
      break;

    case EXPR_TERNARY:
      val = iterative_hash_object (expr->ops.ternary.op, val);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
        val = iterative_hash_exprs_commutative (expr->ops.ternary.opnd0,
                                                expr->ops.ternary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.ternary.opnd0, val);
          val = iterative_hash_expr (expr->ops.ternary.opnd1, val);
        }
      val = iterative_hash_expr (expr->ops.ternary.opnd2, val);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gimple fn_from;

        val = iterative_hash_object (code, val);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          val = iterative_hash_hashval_t
            ((hashval_t) gimple_call_internal_fn (fn_from), val);
        else
          val = iterative_hash_expr (gimple_call_fn (fn_from), val);
        for (i = 0; i < expr->ops.call.nargs; i++)
          val = iterative_hash_expr (expr->ops.call.args[i], val);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          val = iterative_hash_expr (expr->ops.phi.args[i], val);
      }
      break;

    default:
      gcc_unreachable ();
    }

  return val;
}

/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  if (element->stmt)
    fprintf (stream, "STMT ");
  else
    fprintf (stream, "COND ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
        size_t i;
        size_t nargs = element->expr.ops.call.nargs;
        gimple fn_from;

        fn_from = element->expr.ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                 stream);
        else
          print_generic_expr (stream, gimple_call_fn (fn_from), 0);
        fprintf (stream, " (");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.call.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ")");
      }
      break;

    case EXPR_PHI:
      {
        size_t i;
        size_t nargs = element->expr.ops.phi.nargs;

        fprintf (stream, "PHI <");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ">");
      }
      break;
    }
  fprintf (stream, "\n");

  if (element->stmt)
    {
      fprintf (stream, "          ");
      print_gimple_stmt (stream, element->stmt, 0, 0);
    }
}

/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
  free_expr_hash_elt_contents (element);
  free (element);
}

/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}

/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              edge_info->cond_equivalences.release ();
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}

class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction)
    : dom_walker (direction), m_dummy_cond (NULL) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  gimple m_dummy_cond;
};

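/* A note on the traversal discipline: before_dom_children pushes
   unwinding markers and records a block's equivalences, the block's
   children in the dominator tree are then walked, and after_dom_children
   unwinds the tables.  A recorded fact is therefore visible exactly in
   the blocks dominated by the block that recorded it.  */
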
/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

}; // class pass_dominator

unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs = new hash_table<expr_elt_hasher> (1024);
  avail_exprs_stack.create (20);
  const_and_copies_stack.create (20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
     missing.  We should improve jump threading in the future; then
     LOOPS_HAVE_PREHEADERS won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.
         Don't clear bits in the bitmap, as that can break the bitmap
         iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
          if (bb == NULL)
            continue;
          while (single_succ_p (bb)
                 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            bb = single_succ (bb);
          if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
            continue;
          if ((unsigned) bb->index != i)
            bitmap_set_bit (need_eh_cleanup, bb->index);
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  delete avail_exprs;
  avail_exprs = NULL;

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);

  avail_exprs_stack.release ();
  const_and_copies_stack.release ();

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}


/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gimple condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}

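/* For example (hypothetical name), if (5 > x_2) would be rewritten as
   if (x_2 < 5); the constant moves to the RHS and the comparison code
   is swapped to compensate.  */
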
/* Remove from AVAIL_EXPRS all the expressions made available in the
   current block, unwinding AVAIL_EXPRS_STACK until its NULL marker
   entry is reached.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      expr_hash_elt_t victim = avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim == NULL)
        break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim);
        }

      slot = avail_exprs->find_slot (victim, NO_INSERT);
      gcc_assert (slot && *slot == victim);
      avail_exprs->clear_slot (slot);
    }
}

/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (const_and_copies_stack.length () > 0)
    {
      tree prev_value, dest;

      dest = const_and_copies_stack.pop ();

      if (dest == NULL)
        break;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< COPY ");
          print_generic_expr (dump_file, dest, 0);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
          fprintf (dump_file, "\n");
        }

      prev_value = const_and_copies_stack.pop ();
      set_ssa_name_value (dest, prev_value);
    }
}

/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}

/* Record into the equivalence tables any equivalences implied by
   traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
        record_const_or_copy (lhs, rhs);

      /* If we have 0 = COND or 1 = COND equivalences, record them
         into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
        record_cond (eq);
    }
}

/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  avail_exprs_stack.safe_push (NULL);
  const_and_copies_stack.safe_push (NULL_TREE);

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
                        &const_and_copies_stack,
                        simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  remove_local_expressions_from_table ();
}

/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */

static void
record_equivalences_from_phis (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be an SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi)
          && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}

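/* As an illustrative case (hypothetical names), x_3 = PHI <y_2, x_3, y_2>
   has every alternative other than x_3 itself equal to y_2, so x_3 can
   safely be given the value y_2 for the rest of the walk.  */
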
/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}

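/* For example, for a loop header reached from a preheader and from the
   loop latch via a back edge, the back edge is skipped and the preheader
   edge is returned as the single incoming edge.  */
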
/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;
          cond_equivalence *eq;

          if (lhs)
            record_equality (lhs, rhs);

          /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
             set via a widening type conversion, then we may be able to record
             additional equivalences.  */
          if (lhs
              && TREE_CODE (lhs) == SSA_NAME
              && is_gimple_constant (rhs)
              && TREE_CODE (rhs) == INTEGER_CST)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (lhs);

              if (defstmt
                  && is_gimple_assign (defstmt)
                  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
                {
                  tree old_rhs = gimple_assign_rhs1 (defstmt);

                  /* If the conversion widens the original value and
                     the constant is in the range of the type of OLD_RHS,
                     then convert the constant and record the equivalence.

                     Note that int_fits_type_p does not check the precision
                     if the upper and lower bounds are OK.  */
                  if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
                      && (TYPE_PRECISION (TREE_TYPE (lhs))
                          > TYPE_PRECISION (TREE_TYPE (old_rhs)))
                      && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
                    {
                      tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
                      record_equality (old_rhs, newval);
                    }
                }
            }

          for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
            record_cond (eq);
        }
    }
}

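/* To sketch the widening case with hypothetical names: given
   short s_1 and l_2 = (long) s_1, learning l_2 == 5 on the incoming
   edge also lets us record s_1 == 5, because 5 fits in the narrower
   type.  */
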
/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements: %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, *avail_exprs);
}


/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}


/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab.size (),
           (long) htab.elements (),
           htab.collisions ());
}


/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs->find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      avail_exprs_stack.safe_push (element);
    }
  else
    free_expr_hash_elt (element);
}

/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it to **P.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           vec<cond_equivalence> *p)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = boolean_true_node;
  p->safe_push (c);
}

/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now store the original true and false conditions into the first
     two slots.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}

/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  const_and_copies_stack.reserve (2);
  const_and_copies_stack.quick_push (prev_x);
  const_and_copies_stack.quick_push (x);
}

/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
        y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}

/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}

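/* Illustrative case for the signed-zero guard (hypothetical name): from
   x_1 == 0.0 we may not record x_1 = 0.0 under IEEE semantics, because
   x_1 could be -0.0, which compares equal to 0.0 but has a different
   sign.  */
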
/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}

/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gimple_stmt_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      /* We may have an equivalence associated with this edge.  While
         we cannot propagate it into non-dominated blocks, we can
         propagate it into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
         table back to its original state after processing this edge.  */
      const_and_copies_stack.safe_push (NULL_TREE);

      /* Extract and record any simple NAME = VALUE equivalences.

         Don't bother with [01] = COND equivalences, they're not useful
         here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;

          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            record_const_or_copy (lhs, rhs);
        }

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gimple phi = gsi_stmt (gsi);

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }

      restore_vars_to_original_value ();
    }
}

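/* A sketch of why this is safe (hypothetical names): if the edge
   bb2->bb5 carries x_1 == 7, then a PHI in bb5 such as
   y_4 = PHI <x_1(bb2), ...> may have its bb2 argument replaced by 7.
   That argument is only used when bb5 is entered via that edge, even
   though bb5 itself is not dominated by the equivalence.  */
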
/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          tree index = gimple_switch_index (stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (stmt);
              tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (stmt, i);
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = allocate_edge_info (e);
                      edge_info->lhs = index;
                      edge_info->rhs = x;
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant as we
             know the value of OP0 on both arms of the branch.  i.e., we
             can record an equivalence for OP0 rather than COND.  */
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
              && is_gimple_min_invariant (op1))
            {
              if (code == EQ_EXPR)
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);
                }
              else
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);
                }
            }
          else if (is_gimple_min_invariant (op0)
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op0)))
                    && real_zerop (op0));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }
            }

          else if (TREE_CODE (op0) == SSA_NAME
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op1)))
                    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }
            }
        }

      /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
    }
}

1924 void
1925 dom_opt_dom_walker::before_dom_children (basic_block bb)
1926 {
1927 gimple_stmt_iterator gsi;
1928
1929 if (dump_file && (dump_flags & TDF_DETAILS))
1930 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1931
1932 /* Push a marker on the stacks of local information so that we know how
1933 far to unwind when we finalize this block. */
1934 avail_exprs_stack.safe_push (NULL);
1935 const_and_copies_stack.safe_push (NULL_TREE);
1936
1937 record_equivalences_from_incoming_edge (bb);
1938
1939 /* PHI nodes can create equivalences too. */
1940 record_equivalences_from_phis (bb);
1941
1942 /* Create equivalences from redundant PHIs. PHIs are only truly
1943 redundant when they exist in the same block, so push another
1944 marker and unwind right afterwards. */
1945 avail_exprs_stack.safe_push (NULL);
1946 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1947 eliminate_redundant_computations (&gsi);
1948 remove_local_expressions_from_table ();
1949
1950 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1951 optimize_stmt (bb, gsi);
1952
1953 /* Now prepare to process dominated blocks. */
1954 record_edge_info (bb);
1955 cprop_into_successor_phis (bb);
1956 }
1957
1958 /* We have finished processing the dominator children of BB, perform
1959 any finalization actions in preparation for leaving this node in
1960 the dominator tree. */
1961
1962 void
1963 dom_opt_dom_walker::after_dom_children (basic_block bb)
1964 {
1965 gimple last;
1966
1967 /* If we have an outgoing edge to a block with multiple incoming and
1968 outgoing edges, then we may be able to thread the edge, i.e., we
1969 may be able to statically determine which of the outgoing edges
1970 will be traversed when the incoming edge from BB is traversed. */
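/* E.g. (illustrative): if x_1 == 0 was recorded along the path
   through BB and the successor block merely tests "if (x_1 == 0)",
   the successor's outgoing edge is statically known and the jump can
   be threaded around it.  */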
1971 if (single_succ_p (bb)
1972 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
1973 && potentially_threadable_block (single_succ (bb)))
1974 {
1975 thread_across_edge (single_succ_edge (bb));
1976 }
1977 else if ((last = last_stmt (bb))
1978 && gimple_code (last) == GIMPLE_COND
1979 && EDGE_COUNT (bb->succs) == 2
1980 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
1981 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
1982 {
1983 edge true_edge, false_edge;
1984
1985 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1986
1987 /* Only try to thread the edge if it reaches a target block with
1988 more than one predecessor and more than one successor. */
1989 if (potentially_threadable_block (true_edge->dest))
1990 thread_across_edge (true_edge);
1991
1992 /* Similarly for the ELSE arm. */
1993 if (potentially_threadable_block (false_edge->dest))
1994 thread_across_edge (false_edge);
1995
1996 }
1997
1998 /* These remove expressions local to BB from the tables. */
1999 remove_local_expressions_from_table ();
2000 restore_vars_to_original_value ();
2001 }
2002
2003 /* Search for redundant computations in STMT. If any are found, then
2004 replace them with the variable holding the result of the computation.
2005
2006 If safe, record this expression into the available expression hash
2007 table. */
2008
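/* For example (SSA names hypothetical): if "a_1 = b_2 + c_3" has
   already been entered into the hash table, a later "d_4 = b_2 + c_3"
   is rewritten to "d_4 = a_1".  */
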
2009 static void
2010 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
2011 {
2012 tree expr_type;
2013 tree cached_lhs;
2014 tree def;
2015 bool insert = true;
2016 bool assigns_var_p = false;
2017
2018 gimple stmt = gsi_stmt (*gsi);
2019
2020 if (gimple_code (stmt) == GIMPLE_PHI)
2021 def = gimple_phi_result (stmt);
2022 else
2023 def = gimple_get_lhs (stmt);
2024
2025 /* Certain expressions on the RHS can be optimized away, but cannot
2026 themselves be entered into the hash tables. */
2027 if (! def
2028 || TREE_CODE (def) != SSA_NAME
2029 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
2030 || gimple_vdef (stmt)
2031 /* Do not record equivalences for increments of ivs. This would create
2032 overlapping live ranges for a very questionable gain. */
2033 || simple_iv_increment_p (stmt))
2034 insert = false;
2035
2036 /* Check if the expression has been computed before. */
2037 cached_lhs = lookup_avail_expr (stmt, insert);
2038
2039 opt_stats.num_exprs_considered++;
2040
2041 /* Get the type of the expression we are trying to optimize. */
2042 if (is_gimple_assign (stmt))
2043 {
2044 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
2045 assigns_var_p = true;
2046 }
2047 else if (gimple_code (stmt) == GIMPLE_COND)
2048 expr_type = boolean_type_node;
2049 else if (is_gimple_call (stmt))
2050 {
2051 gcc_assert (gimple_call_lhs (stmt));
2052 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
2053 assigns_var_p = true;
2054 }
2055 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2056 expr_type = TREE_TYPE (gimple_switch_index (stmt));
2057 else if (gimple_code (stmt) == GIMPLE_PHI)
2058 /* We can't propagate into a phi, so the logic below doesn't apply.
2059 Instead record an equivalence between the cached LHS and the
2060 PHI result of this statement, provided they are in the same block.
2061 This should be sufficient to kill the redundant phi. */
2062 {
2063 if (def && cached_lhs)
2064 record_const_or_copy (def, cached_lhs);
2065 return;
2066 }
2067 else
2068 gcc_unreachable ();
2069
2070 if (!cached_lhs)
2071 return;
2072
2073 /* It is safe to ignore types here since we have already done
2074 type checking in the hashing and equality routines. In fact
2075 type checking here merely gets in the way of constant
2076 propagation. Also, make sure that it is safe to propagate
2077 CACHED_LHS into the expression in STMT. */
2078 if ((TREE_CODE (cached_lhs) != SSA_NAME
2079 && (assigns_var_p
2080 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
2081 || may_propagate_copy_into_stmt (stmt, cached_lhs))
2082 {
2083 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
2084 || is_gimple_min_invariant (cached_lhs));
2085
2086 if (dump_file && (dump_flags & TDF_DETAILS))
2087 {
2088 fprintf (dump_file, " Replaced redundant expr '");
2089 print_gimple_expr (dump_file, stmt, 0, dump_flags);
2090 fprintf (dump_file, "' with '");
2091 print_generic_expr (dump_file, cached_lhs, dump_flags);
2092 fprintf (dump_file, "'\n");
2093 }
2094
2095 opt_stats.num_re++;
2096
2097 if (assigns_var_p
2098 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
2099 cached_lhs = fold_convert (expr_type, cached_lhs);
2100
2101 propagate_tree_value_into_stmt (gsi, cached_lhs);
2102
2103 /* Since it is always necessary to mark the result as modified,
2104 perhaps we should move this into propagate_tree_value_into_stmt
2105 itself. */
2106 gimple_set_modified (gsi_stmt (*gsi), true);
2107 }
2108 }
2109
2110 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2111 the available expressions table or the const_and_copies table.
2112 Detect and record those equivalences. */
2113 /* We handle only very simple copy equivalences here. The heavy
2114 lifting is done by eliminate_redundant_computations. */
2115
2116 static void
2117 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
2118 {
2119 tree lhs;
2120 enum tree_code lhs_code;
2121
2122 gcc_assert (is_gimple_assign (stmt));
2123
2124 lhs = gimple_assign_lhs (stmt);
2125 lhs_code = TREE_CODE (lhs);
2126
2127 if (lhs_code == SSA_NAME
2128 && gimple_assign_single_p (stmt))
2129 {
2130 tree rhs = gimple_assign_rhs1 (stmt);
2131
2132 /* If the RHS of the assignment is a constant or another variable that
2133 may be propagated, register it in the CONST_AND_COPIES table. We
2134 do not need to record unwind data for this, since this is a true
2135 assignment and not an equivalence inferred from a comparison. All
2136 uses of this ssa name are dominated by this assignment, so unwinding
2137 just costs time and space. */
2138 if (may_optimize_p
2139 && (TREE_CODE (rhs) == SSA_NAME
2140 || is_gimple_min_invariant (rhs)))
2141 {
2142 if (dump_file && (dump_flags & TDF_DETAILS))
2143 {
2144 fprintf (dump_file, "==== ASGN ");
2145 print_generic_expr (dump_file, lhs, 0);
2146 fprintf (dump_file, " = ");
2147 print_generic_expr (dump_file, rhs, 0);
2148 fprintf (dump_file, "\n");
2149 }
2150
2151 set_ssa_name_value (lhs, rhs);
2152 }
2153 }
2154
2155 /* A memory store, even an aliased store, creates a useful
2156 equivalence. By exchanging the LHS and RHS, creating suitable
2157 vops and recording the result in the available expression table,
2158 we may be able to expose more redundant loads. */
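/* E.g. (illustrative): after the store "*p_1 = x_2" we enter
   "x_2 = *p_1" into the table, so a dominated load "y_3 = *p_1"
   can be replaced by "y_3 = x_2".  */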
2159 if (!gimple_has_volatile_ops (stmt)
2160 && gimple_references_memory_p (stmt)
2161 && gimple_assign_single_p (stmt)
2162 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2163 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2164 && !is_gimple_reg (lhs))
2165 {
2166 tree rhs = gimple_assign_rhs1 (stmt);
2167 gimple new_stmt;
2168
2169 /* Build a new statement with the RHS and LHS exchanged. */
2170 if (TREE_CODE (rhs) == SSA_NAME)
2171 {
2172 /* NOTE tuples. The call to gimple_build_assign below replaced
2173 a call to build_gimple_modify_stmt, which did not set the
2174 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
2175 may cause an SSA validation failure, as the LHS may be a
2176 default-initialized name and should have no definition. I'm
2177 a bit dubious of this, as the artificial statement that we
2178 generate here may in fact be ill-formed, but it is simply
2179 used as an internal device in this pass, and never becomes
2180 part of the CFG. */
2181 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2182 new_stmt = gimple_build_assign (rhs, lhs);
2183 SSA_NAME_DEF_STMT (rhs) = defstmt;
2184 }
2185 else
2186 new_stmt = gimple_build_assign (rhs, lhs);
2187
2188 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2189
2190 /* Finally enter the statement into the available expression
2191 table. */
2192 lookup_avail_expr (new_stmt, true);
2193 }
2194 }
2195
2196 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2197 CONST_AND_COPIES. */
2198
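/* For example (hypothetical names): if CONST_AND_COPIES maps x_1 to
   the constant 5, a use of x_1 in "y_2 = x_1 + 1" becomes
   "y_2 = 5 + 1", which later folding reduces to "y_2 = 6".  */
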
2199 static void
2200 cprop_operand (gimple stmt, use_operand_p op_p)
2201 {
2202 tree val;
2203 tree op = USE_FROM_PTR (op_p);
2204
2205 /* If the operand has a known constant value or it is known to be a
2206 copy of some other variable, use the value or copy stored in
2207 CONST_AND_COPIES. */
2208 val = SSA_NAME_VALUE (op);
2209 if (val && val != op)
2210 {
2211 /* Do not replace hard register operands in asm statements. */
2212 if (gimple_code (stmt) == GIMPLE_ASM
2213 && !may_propagate_copy_into_asm (op))
2214 return;
2215
2216 /* Certain operands are not allowed to be copy propagated due
2217 to their interaction with exception handling and some GCC
2218 extensions. */
2219 if (!may_propagate_copy (op, val))
2220 return;
2221
2222 /* Do not propagate copies into simple IV increment statements.
2223 See PR23821 for how this can disturb IV analysis. */
2224 if (TREE_CODE (val) != INTEGER_CST
2225 && simple_iv_increment_p (stmt))
2226 return;
2227
2228 /* Dump details. */
2229 if (dump_file && (dump_flags & TDF_DETAILS))
2230 {
2231 fprintf (dump_file, " Replaced '");
2232 print_generic_expr (dump_file, op, dump_flags);
2233 fprintf (dump_file, "' with %s '",
2234 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2235 print_generic_expr (dump_file, val, dump_flags);
2236 fprintf (dump_file, "'\n");
2237 }
2238
2239 if (TREE_CODE (val) != SSA_NAME)
2240 opt_stats.num_const_prop++;
2241 else
2242 opt_stats.num_copy_prop++;
2243
2244 propagate_value (op_p, val);
2245
2246 /* And note that we modified this statement. This is now
2247 safe, even if we changed virtual operands since we will
2248 rescan the statement and rewrite its operands again. */
2249 gimple_set_modified (stmt, true);
2250 }
2251 }
2252
2253 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2254 known value for that SSA_NAME (or NULL if no value is known).
2255
2256 Propagate values from CONST_AND_COPIES into the uses of
2257 STMT. */
2258
2259 static void
2260 cprop_into_stmt (gimple stmt)
2261 {
2262 use_operand_p op_p;
2263 ssa_op_iter iter;
2264
2265 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2266 cprop_operand (stmt, op_p);
2267 }
2268
2269 /* Optimize the statement pointed to by iterator SI.
2270
2271 We try to perform some simplistic global redundancy elimination and
2272 constant propagation:
2273
2274 1- To detect global redundancy, we keep track of expressions that have
2275 been computed in this block and its dominators. If we find that the
2276 same expression is computed more than once, we eliminate repeated
2277 computations by using the target of the first one.
2278
2279 2- Constant values and copy assignments. This is used to do very
2280 simplistic constant and copy propagation. When a constant or copy
2281 assignment is found, we map the value on the RHS of the assignment to
2282 the variable in the LHS in the CONST_AND_COPIES table. */
2283
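/* A minimal example of both effects (SSA names hypothetical):

     a_1 = b_2 + c_3;
     x_4 = 7;
     d_5 = b_2 + c_3;   <-- redundant, becomes d_5 = a_1
     e_6 = x_4;         <-- propagated, becomes e_6 = 7  */
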
2284 static void
2285 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2286 {
2287 gimple stmt, old_stmt;
2288 bool may_optimize_p;
2289 bool modified_p = false;
2290
2291 old_stmt = stmt = gsi_stmt (si);
2292
2293 if (dump_file && (dump_flags & TDF_DETAILS))
2294 {
2295 fprintf (dump_file, "Optimizing statement ");
2296 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2297 }
2298
2299 if (gimple_code (stmt) == GIMPLE_COND)
2300 canonicalize_comparison (stmt);
2301
2302 update_stmt_if_modified (stmt);
2303 opt_stats.num_stmts++;
2304
2305 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2306 cprop_into_stmt (stmt);
2307
2308 /* If the statement has been modified with constant replacements,
2309 fold its RHS before checking for redundant computations. */
2310 if (gimple_modified_p (stmt))
2311 {
2312 tree rhs = NULL;
2313
2314 /* Try to fold the statement making sure that STMT is kept
2315 up to date. */
2316 if (fold_stmt (&si))
2317 {
2318 stmt = gsi_stmt (si);
2319 gimple_set_modified (stmt, true);
2320
2321 if (dump_file && (dump_flags & TDF_DETAILS))
2322 {
2323 fprintf (dump_file, " Folded to: ");
2324 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2325 }
2326 }
2327
2328 /* We only need to consider cases that can yield a gimple operand. */
2329 if (gimple_assign_single_p (stmt))
2330 rhs = gimple_assign_rhs1 (stmt);
2331 else if (gimple_code (stmt) == GIMPLE_GOTO)
2332 rhs = gimple_goto_dest (stmt);
2333 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2334 /* This should never be an ADDR_EXPR. */
2335 rhs = gimple_switch_index (stmt);
2336
2337 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2338 recompute_tree_invariant_for_addr_expr (rhs);
2339
2340 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2341 even if fold_stmt updated the stmt already and thus cleared
2342 gimple_modified_p flag on it. */
2343 modified_p = true;
2344 }
2345
2346 /* Check for redundant computations. Do this optimization only for
2347 side-effect-free assignments, calls with an LHS, conditionals and switches. */
2348 may_optimize_p = (!gimple_has_side_effects (stmt)
2349 && (is_gimple_assign (stmt)
2350 || (is_gimple_call (stmt)
2351 && gimple_call_lhs (stmt) != NULL_TREE)
2352 || gimple_code (stmt) == GIMPLE_COND
2353 || gimple_code (stmt) == GIMPLE_SWITCH));
2354
2355 if (may_optimize_p)
2356 {
2357 if (gimple_code (stmt) == GIMPLE_CALL)
2358 {
2359 /* Resolve __builtin_constant_p. If it hasn't been
2360 folded to integer_one_node by now, it's fairly
2361 certain that the value simply isn't constant. */
2362 tree callee = gimple_call_fndecl (stmt);
2363 if (callee
2364 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2365 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2366 {
2367 propagate_tree_value_into_stmt (&si, integer_zero_node);
2368 stmt = gsi_stmt (si);
2369 }
2370 }
2371
2372 update_stmt_if_modified (stmt);
2373 eliminate_redundant_computations (&si);
2374 stmt = gsi_stmt (si);
2375
2376 /* Perform simple redundant store elimination. */
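/* E.g. (illustrative): a store "*p_1 = x_2" can be removed when
   looking up the exchanged form "x_2 = *p_1" shows that *p_1 is
   already known to equal x_2.  */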
2377 if (gimple_assign_single_p (stmt)
2378 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2379 {
2380 tree lhs = gimple_assign_lhs (stmt);
2381 tree rhs = gimple_assign_rhs1 (stmt);
2382 tree cached_lhs;
2383 gimple new_stmt;
2384 if (TREE_CODE (rhs) == SSA_NAME)
2385 {
2386 tree tem = SSA_NAME_VALUE (rhs);
2387 if (tem)
2388 rhs = tem;
2389 }
2390 /* Build a new statement with the RHS and LHS exchanged. */
2391 if (TREE_CODE (rhs) == SSA_NAME)
2392 {
2393 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2394 new_stmt = gimple_build_assign (rhs, lhs);
2395 SSA_NAME_DEF_STMT (rhs) = defstmt;
2396 }
2397 else
2398 new_stmt = gimple_build_assign (rhs, lhs);
2399 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2400 cached_lhs = lookup_avail_expr (new_stmt, false);
2401 if (cached_lhs
2402 && rhs == cached_lhs)
2403 {
2404 basic_block bb = gimple_bb (stmt);
2405 unlink_stmt_vdef (stmt);
2406 if (gsi_remove (&si, true))
2407 {
2408 bitmap_set_bit (need_eh_cleanup, bb->index);
2409 if (dump_file && (dump_flags & TDF_DETAILS))
2410 fprintf (dump_file, " Flagged to clear EH edges.\n");
2411 }
2412 release_defs (stmt);
2413 return;
2414 }
2415 }
2416 }
2417
2418 /* Record any additional equivalences created by this statement. */
2419 if (is_gimple_assign (stmt))
2420 record_equivalences_from_stmt (stmt, may_optimize_p);
2421
2422 /* If STMT is a COND_EXPR and it was modified, then we may know
2423 where it goes. If that is the case, then mark the CFG as altered.
2424
2425 This will cause us to later call remove_unreachable_blocks and
2426 cleanup_tree_cfg when it is safe to do so. It is not safe to
2427 clean things up here since removal of edges and such can trigger
2428 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2429 the manager.
2430
2431 That's all fine and good, except that once SSA_NAMEs are released
2432 to the manager, we must not call create_ssa_name until all references
2433 to released SSA_NAMEs have been eliminated.
2434
2435 All references to the deleted SSA_NAMEs cannot be eliminated until
2436 we remove unreachable blocks.
2437
2438 We cannot remove unreachable blocks until after we have completed
2439 any queued jump threading.
2440
2441 We cannot complete any queued jump threads until we have taken
2442 appropriate variables out of SSA form. Taking variables out of
2443 SSA form can call create_ssa_name and thus we lose.
2444
2445 Ultimately I suspect we're going to need to change the interface
2446 into the SSA_NAME manager. */
2447 if (gimple_modified_p (stmt) || modified_p)
2448 {
2449 tree val = NULL;
2450
2451 update_stmt_if_modified (stmt);
2452
2453 if (gimple_code (stmt) == GIMPLE_COND)
2454 val = fold_binary_loc (gimple_location (stmt),
2455 gimple_cond_code (stmt), boolean_type_node,
2456 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2457 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2458 val = gimple_switch_index (stmt);
2459
2460 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2461 cfg_altered = true;
2462
2463 /* If we simplified a statement in such a way that it can be shown
2464 not to trap, update the EH information and the CFG to match. */
2465 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2466 {
2467 bitmap_set_bit (need_eh_cleanup, bb->index);
2468 if (dump_file && (dump_flags & TDF_DETAILS))
2469 fprintf (dump_file, " Flagged to clear EH edges.\n");
2470 }
2471 }
2472 }
2473
2474 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2475 If found, return its LHS. Otherwise insert STMT in the table and
2476 return NULL_TREE.
2477
2478 Also, when an expression is first inserted in the table, it
2479 is also added to AVAIL_EXPRS_STACK, so that it can be removed when
2480 we finish processing this block and its children. */
2481
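/* E.g. (hypothetical): looking up "d_4 = b_2 + c_3" with INSERT true
   either returns the LHS of an earlier "a_1 = b_2 + c_3" found on the
   dominator path, or records the new statement and returns NULL_TREE.  */
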
2482 static tree
2483 lookup_avail_expr (gimple stmt, bool insert)
2484 {
2485 expr_hash_elt **slot;
2486 tree lhs;
2487 tree temp;
2488 struct expr_hash_elt element;
2489
2490 /* Get LHS of phi, assignment, or call; else NULL_TREE. */
2491 if (gimple_code (stmt) == GIMPLE_PHI)
2492 lhs = gimple_phi_result (stmt);
2493 else
2494 lhs = gimple_get_lhs (stmt);
2495
2496 initialize_hash_element (stmt, lhs, &element);
2497
2498 if (dump_file && (dump_flags & TDF_DETAILS))
2499 {
2500 fprintf (dump_file, "LKUP ");
2501 print_expr_hash_elt (dump_file, &element);
2502 }
2503
2504 /* Don't bother remembering constant assignments and copy operations.
2505 Constants and copy operations are handled by the constant/copy propagator
2506 in optimize_stmt. */
2507 if (element.expr.kind == EXPR_SINGLE
2508 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2509 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2510 return NULL_TREE;
2511
2512 /* Finally try to find the expression in the main expression hash table. */
2513 slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
2514 if (slot == NULL)
2515 {
2516 free_expr_hash_elt_contents (&element);
2517 return NULL_TREE;
2518 }
2519 else if (*slot == NULL)
2520 {
2521 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2522 *element2 = element;
2523 element2->stamp = element2;
2524 *slot = element2;
2525
2526 if (dump_file && (dump_flags & TDF_DETAILS))
2527 {
2528 fprintf (dump_file, "2>>> ");
2529 print_expr_hash_elt (dump_file, element2);
2530 }
2531
2532 avail_exprs_stack.safe_push (element2);
2533 return NULL_TREE;
2534 }
2535 else
2536 free_expr_hash_elt_contents (&element);
2537
2538 /* Extract the LHS of the assignment so that it can be used as the current
2539 definition of another variable. */
2540 lhs = ((struct expr_hash_elt *)*slot)->lhs;
2541
2542 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2543 use the value from the const_and_copies table. */
2544 if (TREE_CODE (lhs) == SSA_NAME)
2545 {
2546 temp = SSA_NAME_VALUE (lhs);
2547 if (temp)
2548 lhs = temp;
2549 }
2550
2551 if (dump_file && (dump_flags & TDF_DETAILS))
2552 {
2553 fprintf (dump_file, "FIND: ");
2554 print_generic_expr (dump_file, lhs, 0);
2555 fprintf (dump_file, "\n");
2556 }
2557
2558 return lhs;
2559 }
2560
2561 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2562 for expressions using the code of the expression and the SSA numbers of
2563 its operands. */
2564
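/* E.g. (illustrative): "a_1 = b_2 + c_3" is hashed from PLUS_EXPR and
   the SSA versions of b_2 and c_3; a load such as "y_3 = *p_1" also
   mixes in the SSA version of its VUSE, so intervening stores keep
   otherwise-identical loads from matching.  */
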
2565 static hashval_t
2566 avail_expr_hash (const void *p)
2567 {
2568 gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
2569 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2570 tree vuse;
2571 hashval_t val = 0;
2572
2573 val = iterative_hash_hashable_expr (expr, val);
2574
2575 /* If the hash table entry is not associated with a statement, then we
2576 can just hash the expression and not worry about virtual operands
2577 and such. */
2578 if (!stmt)
2579 return val;
2580
2581 /* Add the SSA version numbers of the vuse operand. This is important
2582 because compound variables like arrays are not renamed in the
2583 operands. Rather, the rename is done on the virtual variable
2584 representing all the elements of the array. */
2585 if ((vuse = gimple_vuse (stmt)))
2586 val = iterative_hash_expr (vuse, val);
2587
2588 return val;
2589 }
2590
2591 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2592 up degenerate PHIs created by or exposed by jump threading. */
2593
2594 /* Given a statement STMT, which is either a PHI node or an assignment,
2595 remove it from the IL. */
2596
2597 static void
2598 remove_stmt_or_phi (gimple stmt)
2599 {
2600 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2601
2602 if (gimple_code (stmt) == GIMPLE_PHI)
2603 remove_phi_node (&gsi, true);
2604 else
2605 {
2606 gsi_remove (&gsi, true);
2607 release_defs (stmt);
2608 }
2609 }
2610
2611 /* Given a statement STMT, which is either a PHI node or an assignment,
2612 return the "rhs" of the node. In the case of a non-degenerate
2613 PHI, NULL is returned. */
2614
2615 static tree
2616 get_rhs_or_phi_arg (gimple stmt)
2617 {
2618 if (gimple_code (stmt) == GIMPLE_PHI)
2619 return degenerate_phi_result (stmt);
2620 else if (gimple_assign_single_p (stmt))
2621 return gimple_assign_rhs1 (stmt);
2622 else
2623 gcc_unreachable ();
2624 }
2625
2626
2627 /* Given a statement STMT, which is either a PHI node or an assignment,
2628 return the "lhs" of the node. */
2629
2630 static tree
2631 get_lhs_or_phi_result (gimple stmt)
2632 {
2633 if (gimple_code (stmt) == GIMPLE_PHI)
2634 return gimple_phi_result (stmt);
2635 else if (is_gimple_assign (stmt))
2636 return gimple_assign_lhs (stmt);
2637 else
2638 gcc_unreachable ();
2639 }
2640
2641 /* Return the loop depth of the basic block of the defining statement of X.
2642 This number should not be treated as absolutely correct because the loop
2643 information may not be completely up-to-date when dom runs. However, it
2644 will be relatively correct, and as more passes are taught to keep loop info
2645 up to date, the result will become more and more accurate. */
2646
2647 static int
2648 loop_depth_of_name (tree x)
2649 {
2650 gimple defstmt;
2651 basic_block defbb;
2652
2653 /* If it's not an SSA_NAME, we have no clue where the definition is. */
2654 if (TREE_CODE (x) != SSA_NAME)
2655 return 0;
2656
2657 /* Otherwise return the loop depth of the defining statement's bb.
2658 Note that there may not actually be a bb for this statement, if the
2659 ssa_name is live on entry. */
2660 defstmt = SSA_NAME_DEF_STMT (x);
2661 defbb = gimple_bb (defstmt);
2662 if (!defbb)
2663 return 0;
2664
2665 return bb_loop_depth (defbb);
2666 }
2667
2668 /* Propagate RHS into all uses of LHS (when possible).
2669
2670 RHS and LHS are derived from STMT, which is passed in solely so
2671 that we can remove it if propagation is successful.
2672
2673 When propagating into a PHI node or into a statement which turns
2674 into a trivial copy or constant initialization, set the
2675 appropriate bit in INTERESTING_NAMEs so that we will visit those
2676 nodes as well in an effort to pick up secondary optimization
2677 opportunities. */
2678
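/* E.g. (names hypothetical): given "x_1 = 5", every use of x_1 is
   rewritten to 5; if all uses (other than debug binds) are replaced,
   the defining statement itself is removed.  */
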
2679 static void
2680 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2681 {
2682 /* First verify that propagation is valid. */
2683 if (may_propagate_copy (lhs, rhs)
2684 && loop_depth_of_name (lhs) >= loop_depth_of_name (rhs))
2685 {
2686 use_operand_p use_p;
2687 imm_use_iterator iter;
2688 gimple use_stmt;
2689 bool all = true;
2690
2691 /* Dump details. */
2692 if (dump_file && (dump_flags & TDF_DETAILS))
2693 {
2694 fprintf (dump_file, " Replacing '");
2695 print_generic_expr (dump_file, lhs, dump_flags);
2696 fprintf (dump_file, "' with %s '",
2697 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2698 print_generic_expr (dump_file, rhs, dump_flags);
2699 fprintf (dump_file, "'\n");
2700 }
2701
2702 /* Walk over every use of LHS and try to replace the use with RHS.
2703 At this point the only reason why such a propagation would not
2704 be successful would be if the use occurs in an ASM_EXPR. */
2705 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2706 {
2707 /* Leave debug stmts alone. If we succeed in propagating
2708 all non-debug uses, we'll drop the DEF, and propagation
2709 into debug stmts will occur then. */
2710 if (gimple_debug_bind_p (use_stmt))
2711 continue;
2712
2713 /* It's not always safe to propagate into an ASM_EXPR. */
2714 if (gimple_code (use_stmt) == GIMPLE_ASM
2715 && ! may_propagate_copy_into_asm (lhs))
2716 {
2717 all = false;
2718 continue;
2719 }
2720
2721 /* It's not ok to propagate into the definition stmt of RHS.
2722 <bb 9>:
2723 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2724 g_67.1_6 = prephitmp.12_36;
2725 goto <bb 9>;
2726 While this is strictly all dead code we do not want to
2727 deal with this here. */
2728 if (TREE_CODE (rhs) == SSA_NAME
2729 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2730 {
2731 all = false;
2732 continue;
2733 }
2734
2735 /* Dump details. */
2736 if (dump_file && (dump_flags & TDF_DETAILS))
2737 {
2738 fprintf (dump_file, " Original statement:");
2739 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2740 }
2741
2742 /* Propagate the RHS into this use of the LHS. */
2743 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2744 propagate_value (use_p, rhs);
2745
2746 /* Special cases to avoid useless calls into the folding
2747 routines, operand scanning, etc.
2748
2749 Propagation into a PHI may cause the PHI to become
2750 a degenerate, so mark the PHI as interesting. No other
2751 actions are necessary. */
2752 if (gimple_code (use_stmt) == GIMPLE_PHI)
2753 {
2754 tree result;
2755
2756 /* Dump details. */
2757 if (dump_file && (dump_flags & TDF_DETAILS))
2758 {
2759 fprintf (dump_file, " Updated statement:");
2760 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2761 }
2762
2763 result = get_lhs_or_phi_result (use_stmt);
2764 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2765 continue;
2766 }
2767
2768 /* From this point onward we are propagating into a
2769 real statement. Folding may (or may not) be possible,
2770 we may expose new operands, expose dead EH edges,
2771 etc. */
2772 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2773 cannot fold a call that simplifies to a constant,
2774 because the GIMPLE_CALL must be replaced by a
2775 GIMPLE_ASSIGN, and there is no way to effect such a
2776 transformation in-place. We might want to consider
2777 using the more general fold_stmt here. */
2778 {
2779 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2780 fold_stmt_inplace (&gsi);
2781 }
2782
2783 /* Sometimes propagation can expose new operands to the
2784 renamer. */
2785 update_stmt (use_stmt);
2786
2787 /* Dump details. */
2788 if (dump_file && (dump_flags & TDF_DETAILS))
2789 {
2790 fprintf (dump_file, " Updated statement:");
2791 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2792 }
2793
2794 /* If we replaced a variable index with a constant, then
2795 we would need to update the invariant flag for ADDR_EXPRs. */
2796 if (gimple_assign_single_p (use_stmt)
2797 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2798 recompute_tree_invariant_for_addr_expr
2799 (gimple_assign_rhs1 (use_stmt));
2800
2801 /* If we cleaned up EH information from the statement,
2802 mark its containing block as needing EH cleanups. */
2803 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2804 {
2805 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2806 if (dump_file && (dump_flags & TDF_DETAILS))
2807 fprintf (dump_file, " Flagged to clear EH edges.\n");
2808 }
2809
2810 /* Propagation may expose new trivial copy/constant propagation
2811 opportunities. */
2812 if (gimple_assign_single_p (use_stmt)
2813 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2814 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2815 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2816 {
2817 tree result = get_lhs_or_phi_result (use_stmt);
2818 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2819 }
2820
2821 /* Propagation into these nodes may make certain edges in
2822 the CFG unexecutable. We want to identify them, as PHI nodes
2823 at the destination of those unexecutable edges may become
2824 degenerates. */
2825 else if (gimple_code (use_stmt) == GIMPLE_COND
2826 || gimple_code (use_stmt) == GIMPLE_SWITCH
2827 || gimple_code (use_stmt) == GIMPLE_GOTO)
2828 {
2829 tree val;
2830
2831 if (gimple_code (use_stmt) == GIMPLE_COND)
2832 val = fold_binary_loc (gimple_location (use_stmt),
2833 gimple_cond_code (use_stmt),
2834 boolean_type_node,
2835 gimple_cond_lhs (use_stmt),
2836 gimple_cond_rhs (use_stmt));
2837 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2838 val = gimple_switch_index (use_stmt);
2839 else
2840 val = gimple_goto_dest (use_stmt);
2841
2842 if (val && is_gimple_min_invariant (val))
2843 {
2844 basic_block bb = gimple_bb (use_stmt);
2845 edge te = find_taken_edge (bb, val);
2846 edge_iterator ei;
2847 edge e;
2848 gimple_stmt_iterator gsi, psi;
2849
2850 /* Remove all outgoing edges except TE. */
2851 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2852 {
2853 if (e != te)
2854 {
2855 /* Mark all the PHI nodes at the destination of
2856 the unexecutable edge as interesting. */
2857 for (psi = gsi_start_phis (e->dest);
2858 !gsi_end_p (psi);
2859 gsi_next (&psi))
2860 {
2861 gimple phi = gsi_stmt (psi);
2862
2863 tree result = gimple_phi_result (phi);
2864 int version = SSA_NAME_VERSION (result);
2865
2866 bitmap_set_bit (interesting_names, version);
2867 }
2868
2869 te->probability += e->probability;
2870
2871 te->count += e->count;
2872 remove_edge (e);
2873 cfg_altered = true;
2874 }
2875 else
2876 ei_next (&ei);
2877 }
2878
2879 gsi = gsi_last_bb (gimple_bb (use_stmt));
2880 gsi_remove (&gsi, true);
2881
2882 /* And fixup the flags on the single remaining edge. */
2883 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2884 te->flags &= ~EDGE_ABNORMAL;
2885 te->flags |= EDGE_FALLTHRU;
2886 if (te->probability > REG_BR_PROB_BASE)
2887 te->probability = REG_BR_PROB_BASE;
2888 }
2889 }
2890 }
2891
2892 /* Ensure there is nothing else to do. */
2893 gcc_assert (!all || has_zero_uses (lhs));
2894
2895 /* If we were able to propagate away all uses of LHS, then
2896 we can remove STMT. */
2897 if (all)
2898 remove_stmt_or_phi (stmt);
2899 }
2900 }
2901
2902 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2903 a statement that is a trivial copy or constant initialization.
2904
2905 Attempt to eliminate STMT by propagating its RHS into all uses of
2906 its LHS. This may in turn set new bits in INTERESTING_NAMES
2907 for nodes we want to revisit later.
2908
2909 All exit paths should clear INTERESTING_NAMES for the result
2910 of STMT. */
2911
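/* A degenerate PHI (illustrative) is one whose arguments all have the
   same value, e.g.

     x_5 = PHI <y_2(3), y_2(4)>

   which is equivalent to the copy "x_5 = y_2".  */
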
2912 static void
2913 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2914 {
2915 tree lhs = get_lhs_or_phi_result (stmt);
2916 tree rhs;
2917 int version = SSA_NAME_VERSION (lhs);
2918
2919 /* If the LHS of this statement or PHI has no uses, then we can
2920 just eliminate it. This can occur if, for example, the PHI
2921 was created by block duplication due to threading and its only
2922 use was in the conditional at the end of the block which was
2923 deleted. */
2924 if (has_zero_uses (lhs))
2925 {
2926 bitmap_clear_bit (interesting_names, version);
2927 remove_stmt_or_phi (stmt);
2928 return;
2929 }
2930
2931 /* Get the RHS of the assignment or PHI node if the PHI is a
2932 degenerate. */
2933 rhs = get_rhs_or_phi_arg (stmt);
2934 if (!rhs)
2935 {
2936 bitmap_clear_bit (interesting_names, version);
2937 return;
2938 }
2939
2940 if (!virtual_operand_p (lhs))
2941 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
2942 else
2943 {
2944 gimple use_stmt;
2945 imm_use_iterator iter;
2946 use_operand_p use_p;
2947 /* For virtual operands we have to propagate into all uses as
2948 otherwise we will create overlapping live ranges. */
2949 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2950 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2951 SET_USE (use_p, rhs);
2952 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
2953 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
2954 remove_stmt_or_phi (stmt);
2955 }
2956
2957 /* Note that STMT may well have been deleted by now, so do
2958 not access it; instead use the saved version number to clear
2959 STMT's entry in the worklist. */
2960 bitmap_clear_bit (interesting_names, version);
2961 }
2962
2963 /* The first phase in degenerate PHI elimination.
2964
2965 Eliminate the degenerate PHIs in BB, then recurse on the
2966 dominator children of BB. */
2967
2968 static void
2969 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
2970 {
2971 gimple_stmt_iterator gsi;
2972 basic_block son;
2973
2974 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2975 {
2976 gimple phi = gsi_stmt (gsi);
2977
2978 eliminate_const_or_copy (phi, interesting_names);
2979 }
2980
2981 /* Recurse into the dominator children of BB. */
2982 for (son = first_dom_son (CDI_DOMINATORS, bb);
2983 son;
2984 son = next_dom_son (CDI_DOMINATORS, son))
2985 eliminate_degenerate_phis_1 (son, interesting_names);
2986 }
2987
2988
2989 /* A very simple pass to eliminate degenerate PHI nodes from the
2990 IL. This is meant to be fast enough to be able to be run several
2991 times in the optimization pipeline.
2992
2993 Certain optimizations, particularly those which duplicate blocks
2994 or remove edges from the CFG can create or expose PHIs which are
2995 trivial copies or constant initializations.
2996
2997 While we could pick up these optimizations in DOM or with the
2998 combination of copy-prop and CCP, those solutions are far too
2999 heavy-weight for our needs.
3000
3001 This implementation has two phases so that we can efficiently
3002 eliminate the first order degenerate PHIs and second order
3003 degenerate PHIs.
3004
3005 The first phase performs a dominator walk to identify and eliminate
3006 the vast majority of the degenerate PHIs. When a degenerate PHI
3007 is identified and eliminated any affected statements or PHIs
3008 are put on a worklist.
3009
3010 The second phase eliminates degenerate PHIs and trivial copies
3011 or constant initializations using the worklist. This is how we
3012 pick up the secondary optimization opportunities with minimal
3013 cost. */
3014
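/* Second-order example (illustrative): eliminating
   x_5 = PHI <y_2(3), y_2(4)> rewrites uses of x_5 to y_2, which can
   turn a later z_7 = PHI <x_5(5), y_2(6)> into the degenerate
   z_7 = PHI <y_2(5), y_2(6)>; the worklist picks it up.  */
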
3015 namespace {
3016
3017 const pass_data pass_data_phi_only_cprop =
3018 {
3019 GIMPLE_PASS, /* type */
3020 "phicprop", /* name */
3021 OPTGROUP_NONE, /* optinfo_flags */
3022 TV_TREE_PHI_CPROP, /* tv_id */
3023 ( PROP_cfg | PROP_ssa ), /* properties_required */
3024 0, /* properties_provided */
3025 0, /* properties_destroyed */
3026 0, /* todo_flags_start */
3027 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
3028 };
3029
3030 class pass_phi_only_cprop : public gimple_opt_pass
3031 {
3032 public:
3033 pass_phi_only_cprop (gcc::context *ctxt)
3034 : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
3035 {}
3036
3037 /* opt_pass methods: */
3038 opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
3039 virtual bool gate (function *) { return flag_tree_dom != 0; }
3040 virtual unsigned int execute (function *);
3041
3042 }; // class pass_phi_only_cprop
3043
3044 unsigned int
3045 pass_phi_only_cprop::execute (function *fun)
3046 {
3047 bitmap interesting_names;
3048 bitmap interesting_names1;
3049
3050 /* Bitmap of blocks which need EH information updated. We cannot
3051 update it on-the-fly as doing so invalidates the dominator tree. */
3052 need_eh_cleanup = BITMAP_ALLOC (NULL);
3053
3054 /* INTERESTING_NAMES is effectively our worklist, indexed by
3055 SSA_NAME_VERSION.
3056
3057 A set bit indicates that the statement or PHI node which
3058 defines the SSA_NAME should be (re)examined to determine if
3059 it has become a degenerate PHI or trivial const/copy propagation
3060 opportunity.
3061
3062 Experiments have shown we generally get better compilation
3063 time behavior with bitmaps rather than sbitmaps. */
3064 interesting_names = BITMAP_ALLOC (NULL);
3065 interesting_names1 = BITMAP_ALLOC (NULL);
3066
3067 calculate_dominance_info (CDI_DOMINATORS);
3068 cfg_altered = false;
3069
3070 /* First phase. Eliminate degenerate PHIs via a dominator
3071 walk of the CFG.
3072
3073 Experiments have indicated that we generally get better
3074 compile-time behavior by visiting blocks in the first
3075 phase in dominator order. Presumably this is because walking
3076 in dominator order leaves fewer PHIs for later examination
3077 by the worklist phase. */
3078 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (fun),
3079 interesting_names);
3080
3081 /* Second phase. Eliminate second order degenerate PHIs as well
3082 as trivial copies or constant initializations identified by
3083 the first phase or this phase. Basically we keep iterating
3084 until our set of INTERESTING_NAMEs is empty. */
3085 while (!bitmap_empty_p (interesting_names))
3086 {
3087 unsigned int i;
3088 bitmap_iterator bi;
3089
3090 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
3091 changed during the loop. Copy it to another bitmap and
3092 use that. */
3093 bitmap_copy (interesting_names1, interesting_names);
3094
3095 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
3096 {
3097 tree name = ssa_name (i);
3098
3099 /* Ignore SSA_NAMEs that have been released because
3100 their defining statement was deleted (unreachable). */
3101 if (name)
3102 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
3103 interesting_names);
3104 }
3105 }
3106
3107 if (cfg_altered)
3108 {
3109 free_dominance_info (CDI_DOMINATORS);
3110 /* If we changed the CFG schedule loops for fixup by cfgcleanup. */
3111 loops_state_set (LOOPS_NEED_FIXUP);
3112 }
3113
3114 /* Propagation of const and copies may make some EH edges dead. Purge
3115 such edges from the CFG as needed. */
3116 if (!bitmap_empty_p (need_eh_cleanup))
3117 {
3118 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
3119 BITMAP_FREE (need_eh_cleanup);
3120 }
3121
3122 BITMAP_FREE (interesting_names);
3123 BITMAP_FREE (interesting_names1);
3124 return 0;
3125 }
3126
3127 } // anon namespace
3128
3129 gimple_opt_pass *
3130 make_pass_phi_only_cprop (gcc::context *ctxt)
3131 {
3132 return new pass_phi_only_cprop (ctxt);
3133 }