Eliminate FOR_EACH_BB macro.
[gcc.git] / gcc / tree-ssa-dom.c
/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2013 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "tree.h"
#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "function.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"

/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op; tree opnd; } unary;
    struct { enum tree_code op; tree opnd0, opnd1; } binary;
    struct { enum tree_code op; tree opnd0, opnd1, opnd2; } ternary;
    struct { gimple fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};
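
/* For example, an assignment such as a_1 = b_2 + c_3 is represented
   with kind EXPR_BINARY, ops.binary.op == PLUS_EXPR, and the two SSA
   names as opnd0 and opnd1.  */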

/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;


/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};

/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;

static vec<expr_hash_elt_t> avail_exprs_stack;
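
/* For instance, when a block records a_1 = b_2 + c_3 as available, the
   new hash table element is also pushed onto this stack; popping until
   the NULL marker undoes exactly that block's additions.  */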

/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The stmt pointer if this element corresponds to a statement.  */
  gimple stmt;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};

/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
                                   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher
{
  typedef expr_hash_elt value_type;
  typedef expr_hash_elt compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
  static inline void remove (value_type *);
};

inline hashval_t
expr_elt_hasher::hash (const value_type *p)
{
  return p->hash;
}

inline bool
expr_elt_hasher::equal (const value_type *p1, const compare_type *p2)
{
  gimple stmt1 = p1->stmt;
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  gimple stmt2 = p2->stmt;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  /* FIXME tuples:
     We add stmts to a hash table and then modify them.  To detect the case
     that we modify a stmt and then search for it, we assume that the hash
     is always modified by that change.
     We have to fully check why this doesn't happen on trunk or rewrite
     this in a more reliable (and easier to understand) way.  */
  if (((const struct expr_hash_elt *)p1)->hash
      != ((const struct expr_hash_elt *)p2)->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    {
      /* Note that STMT1 and/or STMT2 may be NULL.  */
      return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
              == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
    }

  return false;
}

/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type *element)
{
  free_expr_hash_elt (element);
}

/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static hash_table <expr_elt_hasher> avail_exprs;
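
/* For example, given a_1 = b_2 + c_3 followed later by d_4 = b_2 + c_3,
   the second computation is redundant and uses of d_4 can be replaced
   by a_1.  */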

/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static vec<tree> const_and_copies_stack;
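
/* For instance, recording x_1 = 5 pushes the previous value of x_1
   followed by x_1 itself, so the old value can be reinstated when the
   block is finalized.  */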

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;

/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *, hash_table <expr_elt_hasher>);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);


/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (code == GIMPLE_CALL)
    {
      size_t nargs = gimple_call_num_args (stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = stmt;

      if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (stmt, i);
    }
  else if (code == GIMPLE_SWITCH)
    {
      expr->type = TREE_TYPE (gimple_switch_index (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->stmt = stmt;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}

/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->stmt = NULL;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Compare two hashable_expr structures for equivalence.  They are
   considered equivalent when the expressions they denote must
   necessarily be equal.  The logic is intended to follow that of
   operand_equal_p in fold-const.c.  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If the two types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
        return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
                           expr1->ops.binary.opnd0, 0)
          && operand_equal_p (expr0->ops.binary.opnd1,
                              expr1->ops.binary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
              && operand_equal_p (expr0->ops.binary.opnd0,
                                  expr1->ops.binary.opnd1, 0)
              && operand_equal_p (expr0->ops.binary.opnd1,
                                  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
          || !operand_equal_p (expr0->ops.ternary.opnd2,
                               expr1->ops.ternary.opnd2, 0))
        return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
                           expr1->ops.ternary.opnd0, 0)
          && operand_equal_p (expr0->ops.ternary.opnd1,
                              expr1->ops.ternary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
              && operand_equal_p (expr0->ops.ternary.opnd0,
                                  expr1->ops.ternary.opnd1, 0)
              && operand_equal_p (expr0->ops.ternary.opnd1,
                                  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}

/* Generate a hash value for a pair of expressions.  This can be used
   iteratively by passing a previous result as the VAL argument.

   The same hash value is always returned for a given pair of expressions,
   regardless of the order in which they are presented.  This is useful in
   hashing the operands of commutative functions.  */

static hashval_t
iterative_hash_exprs_commutative (const_tree t1,
                                  const_tree t2, hashval_t val)
{
  hashval_t one = iterative_hash_expr (t1, 0);
  hashval_t two = iterative_hash_expr (t2, 0);
  hashval_t t;

  if (one > two)
    t = one, one = two, two = t;
  val = iterative_hash_hashval_t (one, val);
  val = iterative_hash_hashval_t (two, val);

  return val;
}
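
/* For example, the operand pairs (a_1, b_2) and (b_2, a_1) hash to the
   same value here, so a_1 + b_2 and b_2 + a_1 receive equal hashes and
   can then be unified by hashable_expr_equal_p.  */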

/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow iterative_hash_expr in tree.c.  */

static hashval_t
iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      val = iterative_hash_expr (expr->ops.single.rhs, val);
      break;

    case EXPR_UNARY:
      val = iterative_hash_object (expr->ops.unary.op, val);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        val += TYPE_UNSIGNED (expr->type);

      val = iterative_hash_expr (expr->ops.unary.opnd, val);
      break;

    case EXPR_BINARY:
      val = iterative_hash_object (expr->ops.binary.op, val);
      if (commutative_tree_code (expr->ops.binary.op))
        val = iterative_hash_exprs_commutative (expr->ops.binary.opnd0,
                                                expr->ops.binary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.binary.opnd0, val);
          val = iterative_hash_expr (expr->ops.binary.opnd1, val);
        }
      break;

    case EXPR_TERNARY:
      val = iterative_hash_object (expr->ops.ternary.op, val);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
        val = iterative_hash_exprs_commutative (expr->ops.ternary.opnd0,
                                                expr->ops.ternary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.ternary.opnd0, val);
          val = iterative_hash_expr (expr->ops.ternary.opnd1, val);
        }
      val = iterative_hash_expr (expr->ops.ternary.opnd2, val);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gimple fn_from;

        val = iterative_hash_object (code, val);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          val = iterative_hash_hashval_t
            ((hashval_t) gimple_call_internal_fn (fn_from), val);
        else
          val = iterative_hash_expr (gimple_call_fn (fn_from), val);
        for (i = 0; i < expr->ops.call.nargs; i++)
          val = iterative_hash_expr (expr->ops.call.args[i], val);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          val = iterative_hash_expr (expr->ops.phi.args[i], val);
      }
      break;

    default:
      gcc_unreachable ();
    }

  return val;
}

/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  if (element->stmt)
    fprintf (stream, "STMT ");
  else
    fprintf (stream, "COND ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
        size_t i;
        size_t nargs = element->expr.ops.call.nargs;
        gimple fn_from;

        fn_from = element->expr.ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                 stream);
        else
          print_generic_expr (stream, gimple_call_fn (fn_from), 0);
        fprintf (stream, " (");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.call.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ")");
      }
      break;

    case EXPR_PHI:
      {
        size_t i;
        size_t nargs = element->expr.ops.phi.nargs;

        fprintf (stream, "PHI <");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ">");
      }
      break;
    }
  fprintf (stream, "\n");

  if (element->stmt)
    {
      fprintf (stream, " ");
      print_gimple_stmt (stream, element->stmt, 0, 0);
    }
}

/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
  free_expr_hash_elt_contents (element);
  free (element);
}

/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}

/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              edge_info->cond_equivalences.release ();
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}

class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction)
    : dom_walker (direction), m_dummy_cond (NULL) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  gimple m_dummy_cond;
};

/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

static unsigned int
tree_ssa_dominator_optimize (void)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs.create (1024);
  avail_exprs_stack.create (20);
  const_and_copies_stack.create (20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through the loop header to the loop body,
     assuming that we update the loop info.  */
  loop_optimizer_init (LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker (CDI_DOMINATORS).walk (cfun->cfg->x_entry_block_ptr);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, cfun)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.
         Don't clear bits in the bitmap, as that can break the bitmap
         iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
          if (bb == NULL)
            continue;
          while (single_succ_p (bb)
                 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            bb = single_succ (bb);
          if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
            continue;
          if ((unsigned) bb->index != i)
            bitmap_set_bit (need_eh_cleanup, bb->index);
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  statistics_counter_event (cfun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (cfun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (cfun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  avail_exprs.dispose ();

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);

  avail_exprs_stack.release ();
  const_and_copies_stack.release ();

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}

static bool
gate_dominator (void)
{
  return flag_tree_dom != 0;
}

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa
    | TODO_verify_ssa
    | TODO_verify_flow ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  bool gate () { return gate_dominator (); }
  unsigned int execute () { return tree_ssa_dominator_optimize (); }

}; // class pass_dominator

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}


/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */
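
/* For example, a test such as if (5 > a_1) would be rewritten as
   if (a_1 < 5): the operands are swapped and the comparison code is
   swapped to match, assuming tree_swap_operands_p considers the swap
   profitable.  */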

static void
canonicalize_comparison (gimple condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}

/* Pop entries off AVAIL_EXPRS_STACK and remove each one from the
   AVAIL_EXPRS hash table, stopping when we hit the NULL marker that
   was pushed when we started processing the current block.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      expr_hash_elt_t victim = avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim == NULL)
        break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim);
        }

      slot = avail_exprs.find_slot_with_hash (victim, victim->hash, NO_INSERT);
      gcc_assert (slot && *slot == victim);
      avail_exprs.clear_slot (slot);
    }
}

/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (const_and_copies_stack.length () > 0)
    {
      tree prev_value, dest;

      dest = const_and_copies_stack.pop ();

      if (dest == NULL)
        break;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< COPY ");
          print_generic_expr (dump_file, dest, 0);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
          fprintf (dump_file, "\n");
        }

      prev_value = const_and_copies_stack.pop ();
      set_ssa_name_value (dest, prev_value);
    }
}

/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}

/* Record into the equivalence tables any equivalences implied by
   traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
        record_const_or_copy (lhs, rhs);

      /* If we have 0 = COND or 1 = COND equivalences, record them
         into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
        record_cond (eq);
    }
}

/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  avail_exprs_stack.safe_push (NULL);
  const_and_copies_stack.safe_push (NULL_TREE);

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
                        &const_and_copies_stack,
                        simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  remove_local_expressions_from_table ();
}

/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */
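
/* For example, x_4 = PHI <y_2, y_2, x_4> implies x_4 == y_2 once the
   self-referencing alternative is ignored, so x_4 can be treated as a
   copy of y_2.  */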

static void
record_equivalences_from_phis (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be an SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi) && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}

/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}

/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;
          cond_equivalence *eq;

          if (lhs)
            record_equality (lhs, rhs);

          /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
             set via a widening type conversion, then we may be able to record
             additional equivalences.  */
          if (lhs
              && TREE_CODE (lhs) == SSA_NAME
              && is_gimple_constant (rhs)
              && TREE_CODE (rhs) == INTEGER_CST)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (lhs);

              if (defstmt
                  && is_gimple_assign (defstmt)
                  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
                {
                  tree old_rhs = gimple_assign_rhs1 (defstmt);

                  /* If the conversion widens the original value and
                     the constant is in the range of the type of OLD_RHS,
                     then convert the constant and record the equivalence.

                     Note that int_fits_type_p does not check the precision
                     if the upper and lower bounds are OK.  */
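                  /* For instance, assuming char c_2 and int x_1 defined as
                     x_1 = (int) c_2, learning x_1 == 5 on this edge lets us
                     record the narrower equivalence c_2 == 5 as well.  */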
                  if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
                      && (TYPE_PRECISION (TREE_TYPE (lhs))
                          > TYPE_PRECISION (TREE_TYPE (old_rhs)))
                      && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
                    {
                      tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
                      record_equality (old_rhs, newval);
                    }
                }
            }

          for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
            record_cond (eq);
        }
    }
}

/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements: %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, " avail_exprs: ");
  htab_statistics (file, avail_exprs);
}


/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}


/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, hash_table <expr_elt_hasher> htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab.size (),
           (long) htab.elements (),
           htab.collisions ());
}


/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs.find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      avail_exprs_stack.safe_push (element);
    }
  else
    free_expr_hash_elt (element);
}

/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it onto *P.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           vec<cond_equivalence> *p)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = boolean_true_node;
  p->safe_push (c);
}

/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now store the original true and false conditions into the first
     two slots.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}

/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  const_and_copies_stack.reserve (2);
  const_and_copies_stack.quick_push (prev_x);
  const_and_copies_stack.quick_push (x);
}

/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}

/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
        y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}

/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */
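
/* For example, from if (x_1 == 4) we may record on the true edge that
   x_1 has the value 4.  Unlike a PHI equivalence this only holds in
   the dominated region, so undo information is recorded as well.  */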

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
           || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}

/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}

/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */
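
/* For instance, if the table records x_1 -> 7 and a successor block
   contains y_2 = PHI <x_1 (E), ...>, the argument on edge E can be
   replaced with 7, provided may_propagate_copy allows it.  */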

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gimple_stmt_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      /* We may have an equivalence associated with this edge.  While
         we cannot propagate it into non-dominated blocks, we can
         propagate it into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
         table back to its original state after processing this edge.  */
      const_and_copies_stack.safe_push (NULL_TREE);

      /* Extract and record any simple NAME = VALUE equivalences.

         Don't bother with [01] = COND equivalences, they're not useful
         here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;

          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            record_const_or_copy (lhs, rhs);
        }

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gimple phi = gsi_stmt (gsi);

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }

      restore_vars_to_original_value ();
    }
}

/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          tree index = gimple_switch_index (stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (stmt);
              tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (stmt, i);
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = allocate_edge_info (e);
                      edge_info->lhs = index;
                      edge_info->rhs = x;
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant as we
             know the value of OP0 on both arms of the branch, i.e., we
             can record an equivalence for OP0 rather than COND.  */
1845 if ((code == EQ_EXPR || code == NE_EXPR)
1846 && TREE_CODE (op0) == SSA_NAME
1847 && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
1848 && is_gimple_min_invariant (op1))
1849 {
1850 if (code == EQ_EXPR)
1851 {
1852 edge_info = allocate_edge_info (true_edge);
1853 edge_info->lhs = op0;
1854 edge_info->rhs = (integer_zerop (op1)
1855 ? boolean_false_node
1856 : boolean_true_node);
1857
1858 edge_info = allocate_edge_info (false_edge);
1859 edge_info->lhs = op0;
1860 edge_info->rhs = (integer_zerop (op1)
1861 ? boolean_true_node
1862 : boolean_false_node);
1863 }
1864 else
1865 {
1866 edge_info = allocate_edge_info (true_edge);
1867 edge_info->lhs = op0;
1868 edge_info->rhs = (integer_zerop (op1)
1869 ? boolean_true_node
1870 : boolean_false_node);
1871
1872 edge_info = allocate_edge_info (false_edge);
1873 edge_info->lhs = op0;
1874 edge_info->rhs = (integer_zerop (op1)
1875 ? boolean_false_node
1876 : boolean_true_node);
1877 }
1878 }
1879 else if (is_gimple_min_invariant (op0)
1880 && (TREE_CODE (op1) == SSA_NAME
1881 || is_gimple_min_invariant (op1)))
1882 {
1883 tree cond = build2 (code, boolean_type_node, op0, op1);
1884 tree inverted = invert_truthvalue_loc (loc, cond);
1885 bool can_infer_simple_equiv
1886 = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op0)))
1887 && real_zerop (op0));
1888 struct edge_info *edge_info;
1889
1890 edge_info = allocate_edge_info (true_edge);
1891 record_conditions (edge_info, cond, inverted);
1892
1893 if (can_infer_simple_equiv && code == EQ_EXPR)
1894 {
1895 edge_info->lhs = op1;
1896 edge_info->rhs = op0;
1897 }
1898
1899 edge_info = allocate_edge_info (false_edge);
1900 record_conditions (edge_info, inverted, cond);
1901
1902 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
1903 {
1904 edge_info->lhs = op1;
1905 edge_info->rhs = op0;
1906 }
1907 }
1908
1909 else if (TREE_CODE (op0) == SSA_NAME
1910 && (TREE_CODE (op1) == SSA_NAME
1911 || is_gimple_min_invariant (op1)))
1912 {
1913 tree cond = build2 (code, boolean_type_node, op0, op1);
1914 tree inverted = invert_truthvalue_loc (loc, cond);
1915 bool can_infer_simple_equiv
1916 = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op1)))
1917 && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
1918 struct edge_info *edge_info;
1919
1920 edge_info = allocate_edge_info (true_edge);
1921 record_conditions (edge_info, cond, inverted);
1922
1923 if (can_infer_simple_equiv && code == EQ_EXPR)
1924 {
1925 edge_info->lhs = op0;
1926 edge_info->rhs = op1;
1927 }
1928
1929 edge_info = allocate_edge_info (false_edge);
1930 record_conditions (edge_info, inverted, cond);
1931
1932 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
1933 {
1934 edge_info->lhs = op0;
1935 edge_info->rhs = op1;
1936 }
1937 }
1938 }
1939
1940 /* ??? TRUTH_NOT_EXPR can create an equivalence too. */
1941 }
1942 }
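
/* As an illustrative sketch of record_edge_info above (block and SSA
   version numbers here are hypothetical): given a block ending in

     if (x_1 == 5) goto <bb 3>; else goto <bb 4>;

   we attach the simple equivalence x_1 = 5 to the edge into bb 3 and
   the inverted condition x_1 != 5 to the edge into bb 4.  For a
   GIMPLE_SWITCH whose index is an SSA_NAME, each outgoing edge reached
   by exactly one singleton case label similarly records an equivalence
   between the index and that case's constant.  */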
1943
1944 void
1945 dom_opt_dom_walker::before_dom_children (basic_block bb)
1946 {
1947 gimple_stmt_iterator gsi;
1948
1949 if (dump_file && (dump_flags & TDF_DETAILS))
1950 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1951
1952 /* Push a marker on the stacks of local information so that we know how
1953 far to unwind when we finalize this block. */
1954 avail_exprs_stack.safe_push (NULL);
1955 const_and_copies_stack.safe_push (NULL_TREE);
1956
1957 record_equivalences_from_incoming_edge (bb);
1958
1959 /* PHI nodes can create equivalences too. */
1960 record_equivalences_from_phis (bb);
1961
1962 /* Create equivalences from redundant PHIs. PHIs are only truly
1963 redundant when they exist in the same block, so push another
1964 marker and unwind right afterwards. */
1965 avail_exprs_stack.safe_push (NULL);
1966 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1967 eliminate_redundant_computations (&gsi);
1968 remove_local_expressions_from_table ();
1969
1970 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1971 optimize_stmt (bb, gsi);
1972
1973 /* Now prepare to process dominated blocks. */
1974 record_edge_info (bb);
1975 cprop_into_successor_phis (bb);
1976 }
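
/* A small sketch of the marker-based unwinding used above (stack
   contents hypothetical): after before_dom_children pushes its NULL
   marker, everything recorded while optimizing BB sits above it,

     avail_exprs_stack:  ...  NULL  expr1  expr2

   and when the walk leaves BB, remove_local_expressions_from_table
   pops entries until it reaches the marker, restoring the table to its
   state at block entry.  */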
1977
1978 /* We have finished processing the dominator children of BB, perform
1979 any finalization actions in preparation for leaving this node in
1980 the dominator tree. */
1981
1982 void
1983 dom_opt_dom_walker::after_dom_children (basic_block bb)
1984 {
1985 gimple last;
1986
1987 /* If we have an outgoing edge to a block with multiple incoming and
1988 outgoing edges, then we may be able to thread the edge, i.e., we
1989 may be able to statically determine which of the outgoing edges
1990 will be traversed when the incoming edge from BB is traversed. */
1991 if (single_succ_p (bb)
1992 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
1993 && potentially_threadable_block (single_succ (bb)))
1994 {
1995 thread_across_edge (single_succ_edge (bb));
1996 }
1997 else if ((last = last_stmt (bb))
1998 && gimple_code (last) == GIMPLE_COND
1999 && EDGE_COUNT (bb->succs) == 2
2000 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
2001 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
2002 {
2003 edge true_edge, false_edge;
2004
2005 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
2006
2007 /* Only try to thread the edge if it reaches a target block with
2008 more than one predecessor and more than one successor. */
2009 if (potentially_threadable_block (true_edge->dest))
2010 thread_across_edge (true_edge);
2011
2012 /* Similarly for the ELSE arm. */
2013 if (potentially_threadable_block (false_edge->dest))
2014 thread_across_edge (false_edge);
2015
2016 }
2017
2018 /* These remove expressions local to BB from the tables. */
2019 remove_local_expressions_from_table ();
2020 restore_vars_to_original_value ();
2021 }
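
/* An illustrative threading situation (all names hypothetical):
   suppose the walk above established x_1 = 0 while processing BB, and
   BB's single successor contains only

     if (x_1 == 0) goto <bb 5>; else goto <bb 6>;

   thread_across_edge can then determine statically that control always
   continues to bb 5 along this path, and it queues a request to
   redirect BB's outgoing edge there when the jump threads are later
   updated.  */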
2022
2023 /* Search for redundant computations in STMT. If any are found, then
2024 replace them with the variable holding the result of the computation.
2025
2026 If safe, record this expression into the available expression hash
2027 table. */
2028
2029 static void
2030 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
2031 {
2032 tree expr_type;
2033 tree cached_lhs;
2034 tree def;
2035 bool insert = true;
2036 bool assigns_var_p = false;
2037
2038 gimple stmt = gsi_stmt (*gsi);
2039
2040 if (gimple_code (stmt) == GIMPLE_PHI)
2041 def = gimple_phi_result (stmt);
2042 else
2043 def = gimple_get_lhs (stmt);
2044
2045   /* Certain expressions on the RHS can be optimized away, but cannot
2046      themselves be entered into the hash tables.  */
2047 if (! def
2048 || TREE_CODE (def) != SSA_NAME
2049 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
2050 || gimple_vdef (stmt)
2051 /* Do not record equivalences for increments of ivs. This would create
2052 overlapping live ranges for a very questionable gain. */
2053 || simple_iv_increment_p (stmt))
2054 insert = false;
2055
2056 /* Check if the expression has been computed before. */
2057 cached_lhs = lookup_avail_expr (stmt, insert);
2058
2059 opt_stats.num_exprs_considered++;
2060
2061 /* Get the type of the expression we are trying to optimize. */
2062 if (is_gimple_assign (stmt))
2063 {
2064 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
2065 assigns_var_p = true;
2066 }
2067 else if (gimple_code (stmt) == GIMPLE_COND)
2068 expr_type = boolean_type_node;
2069 else if (is_gimple_call (stmt))
2070 {
2071 gcc_assert (gimple_call_lhs (stmt));
2072 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
2073 assigns_var_p = true;
2074 }
2075 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2076 expr_type = TREE_TYPE (gimple_switch_index (stmt));
2077 else if (gimple_code (stmt) == GIMPLE_PHI)
2078 /* We can't propagate into a phi, so the logic below doesn't apply.
2079 Instead record an equivalence between the cached LHS and the
2080 PHI result of this statement, provided they are in the same block.
2081 This should be sufficient to kill the redundant phi. */
2082 {
2083 if (def && cached_lhs)
2084 record_const_or_copy (def, cached_lhs);
2085 return;
2086 }
2087 else
2088 gcc_unreachable ();
2089
2090 if (!cached_lhs)
2091 return;
2092
2093 /* It is safe to ignore types here since we have already done
2094 type checking in the hashing and equality routines. In fact
2095 type checking here merely gets in the way of constant
2096 propagation. Also, make sure that it is safe to propagate
2097 CACHED_LHS into the expression in STMT. */
2098 if ((TREE_CODE (cached_lhs) != SSA_NAME
2099 && (assigns_var_p
2100 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
2101 || may_propagate_copy_into_stmt (stmt, cached_lhs))
2102 {
2103 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
2104 || is_gimple_min_invariant (cached_lhs));
2105
2106 if (dump_file && (dump_flags & TDF_DETAILS))
2107 {
2108 fprintf (dump_file, " Replaced redundant expr '");
2109 print_gimple_expr (dump_file, stmt, 0, dump_flags);
2110 fprintf (dump_file, "' with '");
2111 print_generic_expr (dump_file, cached_lhs, dump_flags);
2112 fprintf (dump_file, "'\n");
2113 }
2114
2115 opt_stats.num_re++;
2116
2117 if (assigns_var_p
2118 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
2119 cached_lhs = fold_convert (expr_type, cached_lhs);
2120
2121 propagate_tree_value_into_stmt (gsi, cached_lhs);
2122
2123 /* Since it is always necessary to mark the result as modified,
2124 perhaps we should move this into propagate_tree_value_into_stmt
2125 itself. */
2126 gimple_set_modified (gsi_stmt (*gsi), true);
2127 }
2128 }
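
/* For example (a hypothetical sketch): given

     a_1 = b_2 + c_3;
     ...
     d_4 = b_2 + c_3;

   the second computation hashes to the same AVAIL_EXPRS entry as the
   first, lookup_avail_expr returns a_1, and the second statement is
   rewritten to d_4 = a_1, a copy that later propagation can clean
   up.  */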
2129
2130 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2131 the available expressions table or the const_and_copies table.
2132 Detect and record those equivalences. */
2133 /* We handle only very simple copy equivalences here. The heavy
2134    lifting is done by eliminate_redundant_computations.  */
2135
2136 static void
2137 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
2138 {
2139 tree lhs;
2140 enum tree_code lhs_code;
2141
2142 gcc_assert (is_gimple_assign (stmt));
2143
2144 lhs = gimple_assign_lhs (stmt);
2145 lhs_code = TREE_CODE (lhs);
2146
2147 if (lhs_code == SSA_NAME
2148 && gimple_assign_single_p (stmt))
2149 {
2150 tree rhs = gimple_assign_rhs1 (stmt);
2151
2152 /* If the RHS of the assignment is a constant or another variable that
2153 may be propagated, register it in the CONST_AND_COPIES table. We
2154 do not need to record unwind data for this, since this is a true
2155 assignment and not an equivalence inferred from a comparison. All
2156 uses of this ssa name are dominated by this assignment, so unwinding
2157 just costs time and space. */
2158 if (may_optimize_p
2159 && (TREE_CODE (rhs) == SSA_NAME
2160 || is_gimple_min_invariant (rhs)))
2161 {
2162 if (dump_file && (dump_flags & TDF_DETAILS))
2163 {
2164 fprintf (dump_file, "==== ASGN ");
2165 print_generic_expr (dump_file, lhs, 0);
2166 fprintf (dump_file, " = ");
2167 print_generic_expr (dump_file, rhs, 0);
2168 fprintf (dump_file, "\n");
2169 }
2170
2171 set_ssa_name_value (lhs, rhs);
2172 }
2173 }
2174
2175 /* A memory store, even an aliased store, creates a useful
2176 equivalence. By exchanging the LHS and RHS, creating suitable
2177 vops and recording the result in the available expression table,
2178 we may be able to expose more redundant loads. */
2179 if (!gimple_has_volatile_ops (stmt)
2180 && gimple_references_memory_p (stmt)
2181 && gimple_assign_single_p (stmt)
2182 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2183 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2184 && !is_gimple_reg (lhs))
2185 {
2186 tree rhs = gimple_assign_rhs1 (stmt);
2187 gimple new_stmt;
2188
2189 /* Build a new statement with the RHS and LHS exchanged. */
2190 if (TREE_CODE (rhs) == SSA_NAME)
2191 {
2192 /* NOTE tuples. The call to gimple_build_assign below replaced
2193 a call to build_gimple_modify_stmt, which did not set the
2194 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
2195 may cause an SSA validation failure, as the LHS may be a
2196 default-initialized name and should have no definition. I'm
2197 a bit dubious of this, as the artificial statement that we
2198 generate here may in fact be ill-formed, but it is simply
2199 used as an internal device in this pass, and never becomes
2200 part of the CFG. */
2201 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2202 new_stmt = gimple_build_assign (rhs, lhs);
2203 SSA_NAME_DEF_STMT (rhs) = defstmt;
2204 }
2205 else
2206 new_stmt = gimple_build_assign (rhs, lhs);
2207
2208 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2209
2210 /* Finally enter the statement into the available expression
2211 table. */
2212 lookup_avail_expr (new_stmt, true);
2213 }
2214 }
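
/* An illustration of the store equivalence above (hypothetical
   names): for the store

     *p_1 = x_2;

   we enter the artificial statement x_2 = *p_1 into AVAIL_EXPRS with
   the store's VDEF as its VUSE.  A later load

     y_3 = *p_1;

   carrying the same VUSE then hits that entry and is rewritten to
   y_3 = x_2, turning a redundant load into a register copy.  */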
2215
2216 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2217 CONST_AND_COPIES. */
2218
2219 static void
2220 cprop_operand (gimple stmt, use_operand_p op_p)
2221 {
2222 tree val;
2223 tree op = USE_FROM_PTR (op_p);
2224
2225 /* If the operand has a known constant value or it is known to be a
2226 copy of some other variable, use the value or copy stored in
2227 CONST_AND_COPIES. */
2228 val = SSA_NAME_VALUE (op);
2229 if (val && val != op)
2230 {
2231 /* Do not replace hard register operands in asm statements. */
2232 if (gimple_code (stmt) == GIMPLE_ASM
2233 && !may_propagate_copy_into_asm (op))
2234 return;
2235
2236 /* Certain operands are not allowed to be copy propagated due
2237 to their interaction with exception handling and some GCC
2238 extensions. */
2239 if (!may_propagate_copy (op, val))
2240 return;
2241
2242 /* Do not propagate addresses that point to volatiles into memory
2243 stmts without volatile operands. */
2244 if (POINTER_TYPE_P (TREE_TYPE (val))
2245 && TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (val)))
2246 && gimple_has_mem_ops (stmt)
2247 && !gimple_has_volatile_ops (stmt))
2248 return;
2249
2250 /* Do not propagate copies if the propagated value is at a deeper loop
2251 depth than the propagatee. Otherwise, this may move loop variant
2252 variables outside of their loops and prevent coalescing
2253 opportunities. If the value was loop invariant, it will be hoisted
2254 by LICM and exposed for copy propagation. */
2255 if (loop_depth_of_name (val) > loop_depth_of_name (op))
2256 return;
2257
2258 /* Do not propagate copies into simple IV increment statements.
2259 See PR23821 for how this can disturb IV analysis. */
2260 if (TREE_CODE (val) != INTEGER_CST
2261 && simple_iv_increment_p (stmt))
2262 return;
2263
2264 /* Dump details. */
2265 if (dump_file && (dump_flags & TDF_DETAILS))
2266 {
2267 fprintf (dump_file, " Replaced '");
2268 print_generic_expr (dump_file, op, dump_flags);
2269 fprintf (dump_file, "' with %s '",
2270 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2271 print_generic_expr (dump_file, val, dump_flags);
2272 fprintf (dump_file, "'\n");
2273 }
2274
2275 if (TREE_CODE (val) != SSA_NAME)
2276 opt_stats.num_const_prop++;
2277 else
2278 opt_stats.num_copy_prop++;
2279
2280 propagate_value (op_p, val);
2281
2282 /* And note that we modified this statement. This is now
2283 safe, even if we changed virtual operands since we will
2284 rescan the statement and rewrite its operands again. */
2285 gimple_set_modified (stmt, true);
2286 }
2287 }
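
/* A minimal example of the replacement above (hypothetical names):
   if CONST_AND_COPIES records x_1 -> 5, then

     y_2 = x_1 + 1;    becomes    y_2 = 5 + 1;

   The loop-depth guard keeps a value defined in a deeper loop from
   replacing a use outside that loop, which would extend its live range
   and hurt coalescing.  */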
2288
2289 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2290 known value for that SSA_NAME (or NULL if no value is known).
2291
2292 Propagate values from CONST_AND_COPIES into the uses, vuses and
2293 vdef_ops of STMT. */
2294
2295 static void
2296 cprop_into_stmt (gimple stmt)
2297 {
2298 use_operand_p op_p;
2299 ssa_op_iter iter;
2300
2301 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2302 cprop_operand (stmt, op_p);
2303 }
2304
2305 /* Optimize the statement pointed to by iterator SI.
2306
2307 We try to perform some simplistic global redundancy elimination and
2308 constant propagation:
2309
2310 1- To detect global redundancy, we keep track of expressions that have
2311 been computed in this block and its dominators. If we find that the
2312 same expression is computed more than once, we eliminate repeated
2313 computations by using the target of the first one.
2314
2315 2- Constant values and copy assignments. This is used to do very
2316 simplistic constant and copy propagation. When a constant or copy
2317 assignment is found, we map the value on the RHS of the assignment to
2318 the variable in the LHS in the CONST_AND_COPIES table. */
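
   For instance (a hypothetical sketch), given

     x_1 = 4;
     y_2 = x_1 + 1;

   case 2 records x_1 -> 4 in CONST_AND_COPIES, constant propagation
   turns the second statement into y_2 = 4 + 1, and folding then
   reduces it to y_2 = 5.  */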
2319
2320 static void
2321 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2322 {
2323 gimple stmt, old_stmt;
2324 bool may_optimize_p;
2325 bool modified_p = false;
2326
2327 old_stmt = stmt = gsi_stmt (si);
2328
2329 if (dump_file && (dump_flags & TDF_DETAILS))
2330 {
2331 fprintf (dump_file, "Optimizing statement ");
2332 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2333 }
2334
2335 if (gimple_code (stmt) == GIMPLE_COND)
2336 canonicalize_comparison (stmt);
2337
2338 update_stmt_if_modified (stmt);
2339 opt_stats.num_stmts++;
2340
2341 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2342 cprop_into_stmt (stmt);
2343
2344 /* If the statement has been modified with constant replacements,
2345 fold its RHS before checking for redundant computations. */
2346 if (gimple_modified_p (stmt))
2347 {
2348 tree rhs = NULL;
2349
2350       /* Try to fold the statement, making sure that STMT is kept
2351 up to date. */
2352 if (fold_stmt (&si))
2353 {
2354 stmt = gsi_stmt (si);
2355 gimple_set_modified (stmt, true);
2356
2357 if (dump_file && (dump_flags & TDF_DETAILS))
2358 {
2359 fprintf (dump_file, " Folded to: ");
2360 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2361 }
2362 }
2363
2364 /* We only need to consider cases that can yield a gimple operand. */
2365 if (gimple_assign_single_p (stmt))
2366 rhs = gimple_assign_rhs1 (stmt);
2367 else if (gimple_code (stmt) == GIMPLE_GOTO)
2368 rhs = gimple_goto_dest (stmt);
2369 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2370 /* This should never be an ADDR_EXPR. */
2371 rhs = gimple_switch_index (stmt);
2372
2373 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2374 recompute_tree_invariant_for_addr_expr (rhs);
2375
2376 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2377 even if fold_stmt updated the stmt already and thus cleared
2378 gimple_modified_p flag on it. */
2379 modified_p = true;
2380 }
2381
2382   /* Check for redundant computations.  Do this optimization only
2383      for side-effect-free assignments, calls with an LHS, conditionals and switches.  */
2384 may_optimize_p = (!gimple_has_side_effects (stmt)
2385 && (is_gimple_assign (stmt)
2386 || (is_gimple_call (stmt)
2387 && gimple_call_lhs (stmt) != NULL_TREE)
2388 || gimple_code (stmt) == GIMPLE_COND
2389 || gimple_code (stmt) == GIMPLE_SWITCH));
2390
2391 if (may_optimize_p)
2392 {
2393 if (gimple_code (stmt) == GIMPLE_CALL)
2394 {
2395 /* Resolve __builtin_constant_p. If it hasn't been
2396 folded to integer_one_node by now, it's fairly
2397 certain that the value simply isn't constant. */
2398 tree callee = gimple_call_fndecl (stmt);
2399 if (callee
2400 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2401 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2402 {
2403 propagate_tree_value_into_stmt (&si, integer_zero_node);
2404 stmt = gsi_stmt (si);
2405 }
2406 }
2407
2408 update_stmt_if_modified (stmt);
2409 eliminate_redundant_computations (&si);
2410 stmt = gsi_stmt (si);
2411
2412 /* Perform simple redundant store elimination. */
2413 if (gimple_assign_single_p (stmt)
2414 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2415 {
2416 tree lhs = gimple_assign_lhs (stmt);
2417 tree rhs = gimple_assign_rhs1 (stmt);
2418 tree cached_lhs;
2419 gimple new_stmt;
2420 if (TREE_CODE (rhs) == SSA_NAME)
2421 {
2422 tree tem = SSA_NAME_VALUE (rhs);
2423 if (tem)
2424 rhs = tem;
2425 }
2426 /* Build a new statement with the RHS and LHS exchanged. */
2427 if (TREE_CODE (rhs) == SSA_NAME)
2428 {
2429 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2430 new_stmt = gimple_build_assign (rhs, lhs);
2431 SSA_NAME_DEF_STMT (rhs) = defstmt;
2432 }
2433 else
2434 new_stmt = gimple_build_assign (rhs, lhs);
2435 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2436 cached_lhs = lookup_avail_expr (new_stmt, false);
2437 if (cached_lhs
2438 && rhs == cached_lhs)
2439 {
2440 basic_block bb = gimple_bb (stmt);
2441 unlink_stmt_vdef (stmt);
2442 if (gsi_remove (&si, true))
2443 {
2444 bitmap_set_bit (need_eh_cleanup, bb->index);
2445 if (dump_file && (dump_flags & TDF_DETAILS))
2446 fprintf (dump_file, " Flagged to clear EH edges.\n");
2447 }
2448 release_defs (stmt);
2449 return;
2450 }
2451 }
2452 }
2453
2454 /* Record any additional equivalences created by this statement. */
2455 if (is_gimple_assign (stmt))
2456 record_equivalences_from_stmt (stmt, may_optimize_p);
2457
2458   /* If STMT is a GIMPLE_COND and it was modified, then we may know
2459 where it goes. If that is the case, then mark the CFG as altered.
2460
2461 This will cause us to later call remove_unreachable_blocks and
2462 cleanup_tree_cfg when it is safe to do so. It is not safe to
2463 clean things up here since removal of edges and such can trigger
2464 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2465 the manager.
2466
2467 That's all fine and good, except that once SSA_NAMEs are released
2468 to the manager, we must not call create_ssa_name until all references
2469 to released SSA_NAMEs have been eliminated.
2470
2471      All references to the deleted SSA_NAMEs cannot be eliminated until
2472      we remove unreachable blocks.
2473
2474      We cannot remove unreachable blocks until after we have completed
2475      any queued jump threading.
2476
2477      We cannot complete any queued jump threads until we have taken
2478 appropriate variables out of SSA form. Taking variables out of
2479 SSA form can call create_ssa_name and thus we lose.
2480
2481 Ultimately I suspect we're going to need to change the interface
2482 into the SSA_NAME manager. */
2483 if (gimple_modified_p (stmt) || modified_p)
2484 {
2485 tree val = NULL;
2486
2487 update_stmt_if_modified (stmt);
2488
2489 if (gimple_code (stmt) == GIMPLE_COND)
2490 val = fold_binary_loc (gimple_location (stmt),
2491 gimple_cond_code (stmt), boolean_type_node,
2492 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2493 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2494 val = gimple_switch_index (stmt);
2495
2496 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2497 cfg_altered = true;
2498
2499       /* If we simplified a statement in such a way as to show that it
2500 cannot trap, update the eh information and the cfg to match. */
2501 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2502 {
2503 bitmap_set_bit (need_eh_cleanup, bb->index);
2504 if (dump_file && (dump_flags & TDF_DETAILS))
2505 fprintf (dump_file, " Flagged to clear EH edges.\n");
2506 }
2507 }
2508 }
2509
2510 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2511 If found, return its LHS. Otherwise insert STMT in the table and
2512 return NULL_TREE.
2513
2514    Also, when an expression is first inserted in the table, it is
2515    also added to AVAIL_EXPRS_STACK, so that it can be removed when
2516 we finish processing this block and its children. */
2517
2518 static tree
2519 lookup_avail_expr (gimple stmt, bool insert)
2520 {
2521 expr_hash_elt **slot;
2522 tree lhs;
2523 tree temp;
2524 struct expr_hash_elt element;
2525
2526 /* Get LHS of phi, assignment, or call; else NULL_TREE. */
2527 if (gimple_code (stmt) == GIMPLE_PHI)
2528 lhs = gimple_phi_result (stmt);
2529 else
2530 lhs = gimple_get_lhs (stmt);
2531
2532 initialize_hash_element (stmt, lhs, &element);
2533
2534 if (dump_file && (dump_flags & TDF_DETAILS))
2535 {
2536 fprintf (dump_file, "LKUP ");
2537 print_expr_hash_elt (dump_file, &element);
2538 }
2539
2540 /* Don't bother remembering constant assignments and copy operations.
2541 Constants and copy operations are handled by the constant/copy propagator
2542 in optimize_stmt. */
2543 if (element.expr.kind == EXPR_SINGLE
2544 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2545 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2546 return NULL_TREE;
2547
2548 /* Finally try to find the expression in the main expression hash table. */
2549 slot = avail_exprs.find_slot_with_hash (&element, element.hash,
2550 (insert ? INSERT : NO_INSERT));
2551 if (slot == NULL)
2552 {
2553 free_expr_hash_elt_contents (&element);
2554 return NULL_TREE;
2555 }
2556 else if (*slot == NULL)
2557 {
2558 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2559 *element2 = element;
2560 element2->stamp = element2;
2561 *slot = element2;
2562
2563 if (dump_file && (dump_flags & TDF_DETAILS))
2564 {
2565 fprintf (dump_file, "2>>> ");
2566 print_expr_hash_elt (dump_file, element2);
2567 }
2568
2569 avail_exprs_stack.safe_push (element2);
2570 return NULL_TREE;
2571 }
2572 else
2573 free_expr_hash_elt_contents (&element);
2574
2575 /* Extract the LHS of the assignment so that it can be used as the current
2576 definition of another variable. */
2577 lhs = ((struct expr_hash_elt *)*slot)->lhs;
2578
2579 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2580 use the value from the const_and_copies table. */
2581 if (TREE_CODE (lhs) == SSA_NAME)
2582 {
2583 temp = SSA_NAME_VALUE (lhs);
2584 if (temp)
2585 lhs = temp;
2586 }
2587
2588 if (dump_file && (dump_flags & TDF_DETAILS))
2589 {
2590 fprintf (dump_file, "FIND: ");
2591 print_generic_expr (dump_file, lhs, 0);
2592 fprintf (dump_file, "\n");
2593 }
2594
2595 return lhs;
2596 }
2597
2598 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2599 for expressions using the code of the expression and the SSA numbers of
2600 its operands. */
2601
2602 static hashval_t
2603 avail_expr_hash (const void *p)
2604 {
2605 gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
2606 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2607 tree vuse;
2608 hashval_t val = 0;
2609
2610 val = iterative_hash_hashable_expr (expr, val);
2611
2612 /* If the hash table entry is not associated with a statement, then we
2613 can just hash the expression and not worry about virtual operands
2614 and such. */
2615 if (!stmt)
2616 return val;
2617
2618   /* Add the SSA version number of the vuse operand.  This is important
2619 because compound variables like arrays are not renamed in the
2620 operands. Rather, the rename is done on the virtual variable
2621 representing all the elements of the array. */
2622 if ((vuse = gimple_vuse (stmt)))
2623 val = iterative_hash_expr (vuse, val);
2624
2625 return val;
2626 }
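
/* Why the VUSE matters above (hypothetical sketch): in

     x_1 = a[i_2];
     a[i_2] = t_3;
     y_4 = a[i_2];

   the intervening store creates a new virtual definition, so the two
   loads of a[i_2] carry different VUSEs, hash to different entries,
   and the second load is correctly not treated as redundant with the
   first.  */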
2627
2628 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2629 up degenerate PHIs created by or exposed by jump threading. */
2630
2631 /* Given a statement STMT, which is either a PHI node or an assignment,
2632 remove it from the IL. */
2633
2634 static void
2635 remove_stmt_or_phi (gimple stmt)
2636 {
2637 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2638
2639 if (gimple_code (stmt) == GIMPLE_PHI)
2640 remove_phi_node (&gsi, true);
2641 else
2642 {
2643 gsi_remove (&gsi, true);
2644 release_defs (stmt);
2645 }
2646 }
2647
2648 /* Given a statement STMT, which is either a PHI node or an assignment,
2649    return the "rhs" of the node.  In the case of a non-degenerate
2650    PHI, NULL is returned.  */
2651
2652 static tree
2653 get_rhs_or_phi_arg (gimple stmt)
2654 {
2655 if (gimple_code (stmt) == GIMPLE_PHI)
2656 return degenerate_phi_result (stmt);
2657 else if (gimple_assign_single_p (stmt))
2658 return gimple_assign_rhs1 (stmt);
2659 else
2660 gcc_unreachable ();
2661 }
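
/* A degenerate PHI (hypothetical example) is one whose arguments all
   carry the same value, e.g.

     x_4 = PHI <x_1(2), x_1(3)>

   for which the "rhs" is x_1; something like PHI <x_1(2), x_2(3)> is
   non-degenerate and yields NULL.  */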
2662
2663
2664 /* Given a statement STMT, which is either a PHI node or an assignment,
2665 return the "lhs" of the node. */
2666
2667 static tree
2668 get_lhs_or_phi_result (gimple stmt)
2669 {
2670 if (gimple_code (stmt) == GIMPLE_PHI)
2671 return gimple_phi_result (stmt);
2672 else if (is_gimple_assign (stmt))
2673 return gimple_assign_lhs (stmt);
2674 else
2675 gcc_unreachable ();
2676 }
2677
2678 /* Propagate RHS into all uses of LHS (when possible).
2679
2680 RHS and LHS are derived from STMT, which is passed in solely so
2681 that we can remove it if propagation is successful.
2682
2683 When propagating into a PHI node or into a statement which turns
2684 into a trivial copy or constant initialization, set the
2685 appropriate bit in INTERESTING_NAMEs so that we will visit those
2686 nodes as well in an effort to pick up secondary optimization
2687 opportunities. */
2688
2689 static void
2690 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2691 {
2692 /* First verify that propagation is valid and isn't going to move a
2693 loop variant variable outside its loop. */
2694 if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
2695 && (TREE_CODE (rhs) != SSA_NAME
2696 || ! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs))
2697 && may_propagate_copy (lhs, rhs)
2698 && loop_depth_of_name (lhs) >= loop_depth_of_name (rhs))
2699 {
2700 use_operand_p use_p;
2701 imm_use_iterator iter;
2702 gimple use_stmt;
2703 bool all = true;
2704
2705 /* Dump details. */
2706 if (dump_file && (dump_flags & TDF_DETAILS))
2707 {
2708 fprintf (dump_file, " Replacing '");
2709 print_generic_expr (dump_file, lhs, dump_flags);
2710 fprintf (dump_file, "' with %s '",
2711 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2712 print_generic_expr (dump_file, rhs, dump_flags);
2713 fprintf (dump_file, "'\n");
2714 }
2715
2716 /* Walk over every use of LHS and try to replace the use with RHS.
2717 At this point the only reason why such a propagation would not
2718 be successful would be if the use occurs in an ASM_EXPR. */
2719 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2720 {
2721 /* Leave debug stmts alone. If we succeed in propagating
2722 all non-debug uses, we'll drop the DEF, and propagation
2723 into debug stmts will occur then. */
2724 if (gimple_debug_bind_p (use_stmt))
2725 continue;
2726
2727 /* It's not always safe to propagate into an ASM_EXPR. */
2728 if (gimple_code (use_stmt) == GIMPLE_ASM
2729 && ! may_propagate_copy_into_asm (lhs))
2730 {
2731 all = false;
2732 continue;
2733 }
2734
2735 /* It's not ok to propagate into the definition stmt of RHS.
2736 <bb 9>:
2737 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2738 g_67.1_6 = prephitmp.12_36;
2739 goto <bb 9>;
2740	     While this is strictly all dead code, we do not want to
2741	     deal with it here.  */
2742 if (TREE_CODE (rhs) == SSA_NAME
2743 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2744 {
2745 all = false;
2746 continue;
2747 }
2748
2749 /* Dump details. */
2750 if (dump_file && (dump_flags & TDF_DETAILS))
2751 {
2752 fprintf (dump_file, " Original statement:");
2753 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2754 }
2755
2756 /* Propagate the RHS into this use of the LHS. */
2757 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2758 propagate_value (use_p, rhs);
2759
2760 /* Special cases to avoid useless calls into the folding
2761 routines, operand scanning, etc.
2762
2763 Propagation into a PHI may cause the PHI to become
2764	     degenerate, so mark the PHI as interesting.  No other
2765 actions are necessary. */
2766 if (gimple_code (use_stmt) == GIMPLE_PHI)
2767 {
2768 tree result;
2769
2770 /* Dump details. */
2771 if (dump_file && (dump_flags & TDF_DETAILS))
2772 {
2773 fprintf (dump_file, " Updated statement:");
2774 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2775 }
2776
2777 result = get_lhs_or_phi_result (use_stmt);
2778 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2779 continue;
2780 }
2781
2782 /* From this point onward we are propagating into a
2783 real statement. Folding may (or may not) be possible,
2784 we may expose new operands, expose dead EH edges,
2785 etc. */
2786 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2787 cannot fold a call that simplifies to a constant,
2788 because the GIMPLE_CALL must be replaced by a
2789 GIMPLE_ASSIGN, and there is no way to effect such a
2790 transformation in-place. We might want to consider
2791 using the more general fold_stmt here. */
2792 {
2793 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2794 fold_stmt_inplace (&gsi);
2795 }
2796
2797 /* Sometimes propagation can expose new operands to the
2798 renamer. */
2799 update_stmt (use_stmt);
2800
2801 /* Dump details. */
2802 if (dump_file && (dump_flags & TDF_DETAILS))
2803 {
2804 fprintf (dump_file, " Updated statement:");
2805 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2806 }
2807
2808 /* If we replaced a variable index with a constant, then
2809 we would need to update the invariant flag for ADDR_EXPRs. */
2810 if (gimple_assign_single_p (use_stmt)
2811 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2812 recompute_tree_invariant_for_addr_expr
2813 (gimple_assign_rhs1 (use_stmt));
2814
2815 /* If we cleaned up EH information from the statement,
2816 mark its containing block as needing EH cleanups. */
2817 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2818 {
2819 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2820 if (dump_file && (dump_flags & TDF_DETAILS))
2821 fprintf (dump_file, " Flagged to clear EH edges.\n");
2822 }
2823
2824 /* Propagation may expose new trivial copy/constant propagation
2825 opportunities. */
2826 if (gimple_assign_single_p (use_stmt)
2827 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2828 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2829 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2830 {
2831 tree result = get_lhs_or_phi_result (use_stmt);
2832 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2833 }
2834
2835 /* Propagation into these nodes may make certain edges in
2836	 the CFG unexecutable.  We want to identify them, as PHI nodes
2837	 at the destinations of those unexecutable edges may become
2838	 degenerate.  */
2839 else if (gimple_code (use_stmt) == GIMPLE_COND
2840 || gimple_code (use_stmt) == GIMPLE_SWITCH
2841 || gimple_code (use_stmt) == GIMPLE_GOTO)
2842 {
2843 tree val;
2844
2845 if (gimple_code (use_stmt) == GIMPLE_COND)
2846 val = fold_binary_loc (gimple_location (use_stmt),
2847 gimple_cond_code (use_stmt),
2848 boolean_type_node,
2849 gimple_cond_lhs (use_stmt),
2850 gimple_cond_rhs (use_stmt));
2851 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2852 val = gimple_switch_index (use_stmt);
2853 else
2854 val = gimple_goto_dest (use_stmt);
2855
2856 if (val && is_gimple_min_invariant (val))
2857 {
2858 basic_block bb = gimple_bb (use_stmt);
2859 edge te = find_taken_edge (bb, val);
2860 edge_iterator ei;
2861 edge e;
2862 gimple_stmt_iterator gsi, psi;
2863
2864 /* Remove all outgoing edges except TE. */
2865 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2866 {
2867 if (e != te)
2868 {
2869 /* Mark all the PHI nodes at the destination of
2870 the unexecutable edge as interesting. */
2871 for (psi = gsi_start_phis (e->dest);
2872 !gsi_end_p (psi);
2873 gsi_next (&psi))
2874 {
2875 gimple phi = gsi_stmt (psi);
2876
2877 tree result = gimple_phi_result (phi);
2878 int version = SSA_NAME_VERSION (result);
2879
2880 bitmap_set_bit (interesting_names, version);
2881 }
2882
2883 te->probability += e->probability;
2884
2885 te->count += e->count;
2886 remove_edge (e);
2887 cfg_altered = true;
2888 }
2889 else
2890 ei_next (&ei);
2891 }
2892
2893 gsi = gsi_last_bb (gimple_bb (use_stmt));
2894 gsi_remove (&gsi, true);
2895
2896 /* And fixup the flags on the single remaining edge. */
2897 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2898 te->flags &= ~EDGE_ABNORMAL;
2899 te->flags |= EDGE_FALLTHRU;
2900 if (te->probability > REG_BR_PROB_BASE)
2901 te->probability = REG_BR_PROB_BASE;
2902 }
2903 }
2904 }
2905
2906 /* Ensure there is nothing else to do. */
2907 gcc_assert (!all || has_zero_uses (lhs));
2908
2909 /* If we were able to propagate away all uses of LHS, then
2910 we can remove STMT. */
2911 if (all)
2912 remove_stmt_or_phi (stmt);
2913 }
2914 }
2915
2916 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2917 a statement that is a trivial copy or constant initialization.
2918
2919    Attempt to eliminate STMT by propagating its RHS into all uses of
2920 its LHS. This may in turn set new bits in INTERESTING_NAMES
2921 for nodes we want to revisit later.
2922
2923 All exit paths should clear INTERESTING_NAMES for the result
2924 of STMT. */
2925
2926 static void
2927 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2928 {
2929 tree lhs = get_lhs_or_phi_result (stmt);
2930 tree rhs;
2931 int version = SSA_NAME_VERSION (lhs);
2932
2933 /* If the LHS of this statement or PHI has no uses, then we can
2934 just eliminate it. This can occur if, for example, the PHI
2935 was created by block duplication due to threading and its only
2936 use was in the conditional at the end of the block which was
2937 deleted. */
2938 if (has_zero_uses (lhs))
2939 {
2940 bitmap_clear_bit (interesting_names, version);
2941 remove_stmt_or_phi (stmt);
2942 return;
2943 }
2944
2945 /* Get the RHS of the assignment or PHI node if the PHI is a
2946 degenerate. */
2947 rhs = get_rhs_or_phi_arg (stmt);
2948 if (!rhs)
2949 {
2950 bitmap_clear_bit (interesting_names, version);
2951 return;
2952 }
2953
2954 if (!virtual_operand_p (lhs))
2955 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
2956 else
2957 {
2958 gimple use_stmt;
2959 imm_use_iterator iter;
2960 use_operand_p use_p;
2961 /* For virtual operands we have to propagate into all uses as
2962 	 otherwise we will create overlapping live ranges.  */
2963 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2964 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2965 SET_USE (use_p, rhs);
2966 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
2967 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
2968 remove_stmt_or_phi (stmt);
2969 }
2970
2971   /* Note that STMT may well have been deleted by now, so do
2972      not access it; instead use the saved version number to clear
2973      STMT's entry in the worklist.  */
2974 bitmap_clear_bit (interesting_names, version);
2975 }
2976
2977 /* The first phase in degenerate PHI elimination.
2978
2979 Eliminate the degenerate PHIs in BB, then recurse on the
2980 dominator children of BB. */
2981
2982 static void
2983 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
2984 {
2985 gimple_stmt_iterator gsi;
2986 basic_block son;
2987
2988 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2989 {
2990 gimple phi = gsi_stmt (gsi);
2991
2992 eliminate_const_or_copy (phi, interesting_names);
2993 }
2994
2995 /* Recurse into the dominator children of BB. */
2996 for (son = first_dom_son (CDI_DOMINATORS, bb);
2997 son;
2998 son = next_dom_son (CDI_DOMINATORS, son))
2999 eliminate_degenerate_phis_1 (son, interesting_names);
3000 }
3001
3002
3003 /* A very simple pass to eliminate degenerate PHI nodes from the
3004    IL.  This is meant to be fast enough to be run several
3005 times in the optimization pipeline.
3006
3007 Certain optimizations, particularly those which duplicate blocks
3008 or remove edges from the CFG can create or expose PHIs which are
3009 trivial copies or constant initializations.
3010
3011 While we could pick up these optimizations in DOM or with the
3012 combination of copy-prop and CCP, those solutions are far too
3013 heavy-weight for our needs.
3014
3015 This implementation has two phases so that we can efficiently
3016 eliminate the first order degenerate PHIs and second order
3017 degenerate PHIs.
3018
3019 The first phase performs a dominator walk to identify and eliminate
3020 the vast majority of the degenerate PHIs. When a degenerate PHI
3021    is identified and eliminated, any affected statements or PHIs
3022 are put on a worklist.
3023
3024 The second phase eliminates degenerate PHIs and trivial copies
3025 or constant initializations using the worklist. This is how we
3026 pick up the secondary optimization opportunities with minimal
3027 cost. */
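
/* A sketch of a second order opportunity (all names hypothetical):

     x_3 = PHI <x_1(2), x_1(3)>
     ...
     y_5 = PHI <x_3(4), x_1(6)>

   The first phase replaces uses of x_3 with x_1 and removes the first
   PHI, which turns the second PHI into PHI <x_1(4), x_1(6)>; its
   result y_5 lands in INTERESTING_NAMES and is eliminated by the
   worklist phase.  */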
3028
3029 static unsigned int
3030 eliminate_degenerate_phis (void)
3031 {
3032 bitmap interesting_names;
3033 bitmap interesting_names1;
3034
3035   /* Bitmap of blocks which need EH information updated.  We cannot
3036 update it on-the-fly as doing so invalidates the dominator tree. */
3037 need_eh_cleanup = BITMAP_ALLOC (NULL);
3038
3039 /* INTERESTING_NAMES is effectively our worklist, indexed by
3040 SSA_NAME_VERSION.
3041
3042 A set bit indicates that the statement or PHI node which
3043 defines the SSA_NAME should be (re)examined to determine if
3044 it has become a degenerate PHI or trivial const/copy propagation
3045 opportunity.
3046
3047      Experiments have shown that we generally get better compile-time
3048      behavior with bitmaps rather than sbitmaps.  */
3049 interesting_names = BITMAP_ALLOC (NULL);
3050 interesting_names1 = BITMAP_ALLOC (NULL);
3051
3052 calculate_dominance_info (CDI_DOMINATORS);
3053 cfg_altered = false;
3054
3055 /* First phase. Eliminate degenerate PHIs via a dominator
3056 walk of the CFG.
3057
3058 Experiments have indicated that we generally get better
3059 compile-time behavior by visiting blocks in the first
3060 phase in dominator order. Presumably this is because walking
3061 in dominator order leaves fewer PHIs for later examination
3062 by the worklist phase. */
3063 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun),
3064 interesting_names);
3065
3066 /* Second phase. Eliminate second order degenerate PHIs as well
3067 as trivial copies or constant initializations identified by
3068 the first phase or this phase. Basically we keep iterating
3069 until our set of INTERESTING_NAMEs is empty. */
3070 while (!bitmap_empty_p (interesting_names))
3071 {
3072 unsigned int i;
3073 bitmap_iterator bi;
3074
3075 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
3076 changed during the loop. Copy it to another bitmap and
3077 use that. */
3078 bitmap_copy (interesting_names1, interesting_names);
3079
3080 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
3081 {
3082 tree name = ssa_name (i);
3083
3084 /* Ignore SSA_NAMEs that have been released because
3085 their defining statement was deleted (unreachable). */
3086 if (name)
3087 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
3088 interesting_names);
3089 }
3090 }
3091
3092 if (cfg_altered)
3093 {
3094 free_dominance_info (CDI_DOMINATORS);
3095       /* If we changed the CFG, schedule loops for fixup by cfgcleanup.  */
3096 if (current_loops)
3097 loops_state_set (LOOPS_NEED_FIXUP);
3098 }
3099
3100   /* Propagation of constants and copies may make some EH edges dead.  Purge
3101 such edges from the CFG as needed. */
3102 if (!bitmap_empty_p (need_eh_cleanup))
3103 {
3104 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
3105 BITMAP_FREE (need_eh_cleanup);
3106 }
3107
3108 BITMAP_FREE (interesting_names);
3109 BITMAP_FREE (interesting_names1);
3110 return 0;
3111 }
3112
3113 namespace {
3114
3115 const pass_data pass_data_phi_only_cprop =
3116 {
3117 GIMPLE_PASS, /* type */
3118 "phicprop", /* name */
3119 OPTGROUP_NONE, /* optinfo_flags */
3120 true, /* has_gate */
3121 true, /* has_execute */
3122 TV_TREE_PHI_CPROP, /* tv_id */
3123 ( PROP_cfg | PROP_ssa ), /* properties_required */
3124 0, /* properties_provided */
3125 0, /* properties_destroyed */
3126 0, /* todo_flags_start */
3127 ( TODO_cleanup_cfg | TODO_verify_ssa
3128 | TODO_verify_stmts
3129 | TODO_update_ssa ), /* todo_flags_finish */
3130 };
3131
3132 class pass_phi_only_cprop : public gimple_opt_pass
3133 {
3134 public:
3135 pass_phi_only_cprop (gcc::context *ctxt)
3136 : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
3137 {}
3138
3139 /* opt_pass methods: */
3140 opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
3141 bool gate () { return gate_dominator (); }
3142 unsigned int execute () { return eliminate_degenerate_phis (); }
3143
3144 }; // class pass_phi_only_cprop
3145
3146 } // anon namespace
3147
3148 gimple_opt_pass *
3149 make_pass_phi_only_cprop (gcc::context *ctxt)
3150 {
3151 return new pass_phi_only_cprop (ctxt);
3152 }