/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2015 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "hash-table.h"
25 #include "tm.h"
26 #include "hash-set.h"
27 #include "machmode.h"
28 #include "vec.h"
29 #include "double-int.h"
30 #include "input.h"
31 #include "alias.h"
32 #include "symtab.h"
33 #include "wide-int.h"
34 #include "inchash.h"
35 #include "real.h"
36 #include "tree.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "flags.h"
40 #include "tm_p.h"
41 #include "predict.h"
42 #include "hard-reg-set.h"
43 #include "input.h"
44 #include "function.h"
45 #include "dominance.h"
46 #include "cfg.h"
47 #include "cfganal.h"
48 #include "basic-block.h"
49 #include "cfgloop.h"
50 #include "inchash.h"
51 #include "gimple-pretty-print.h"
52 #include "tree-ssa-alias.h"
53 #include "internal-fn.h"
54 #include "gimple-fold.h"
55 #include "tree-eh.h"
56 #include "gimple-expr.h"
57 #include "is-a.h"
58 #include "gimple.h"
59 #include "gimple-iterator.h"
60 #include "gimple-ssa.h"
61 #include "tree-cfg.h"
62 #include "tree-phinodes.h"
63 #include "ssa-iterators.h"
64 #include "stringpool.h"
65 #include "tree-ssanames.h"
66 #include "tree-into-ssa.h"
67 #include "domwalk.h"
68 #include "tree-pass.h"
69 #include "tree-ssa-propagate.h"
70 #include "tree-ssa-threadupdate.h"
71 #include "langhooks.h"
72 #include "params.h"
73 #include "tree-ssa-threadedge.h"
74 #include "tree-ssa-dom.h"
75 #include "inchash.h"
76 #include "gimplify.h"
77
78 /* This file implements optimizations on the dominator tree. */
79
80 /* Representation of a "naked" right-hand-side expression, to be used
81 in recording available expressions in the expression hash table. */
82
83 enum expr_kind
84 {
85 EXPR_SINGLE,
86 EXPR_UNARY,
87 EXPR_BINARY,
88 EXPR_TERNARY,
89 EXPR_CALL,
90 EXPR_PHI
91 };
92
93 struct hashable_expr
94 {
95 tree type;
96 enum expr_kind kind;
97 union {
98 struct { tree rhs; } single;
99 struct { enum tree_code op; tree opnd; } unary;
100 struct { enum tree_code op; tree opnd0, opnd1; } binary;
101 struct { enum tree_code op; tree opnd0, opnd1, opnd2; } ternary;
102 struct { gcall *fn_from; bool pure; size_t nargs; tree *args; } call;
103 struct { size_t nargs; tree *args; } phi;
104 } ops;
105 };
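
/* For example (a sketch; the SSA names are purely illustrative), a
   statement such as

     x_1 = a_2 + b_3;

   is recorded as an EXPR_BINARY whose ops.binary.op is PLUS_EXPR and
   whose opnd0/opnd1 are the two SSA names, while a plain copy
   x_1 = a_2 is an EXPR_SINGLE whose ops.single.rhs is a_2.  */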

/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;


/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};

/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;

static vec<std::pair<expr_hash_elt_t, expr_hash_elt_t> > avail_exprs_stack;
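
/* For instance (a sketch; SSA names are illustrative): when a block
   makes "a_1 + b_2" available it pushes the pair (elt, NULL); when it
   shadows an existing table entry for the same expression it pushes
   (new_elt, old_elt) so the old entry can be reinstalled.  Popping
   back to the (NULL, NULL) marker undoes everything the block
   added.  */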

/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The virtual operand associated with the nearest dominating stmt
     loading from or storing to expr.  */
  tree vop;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};

/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
                                   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher
{
  typedef expr_hash_elt *value_type;
  typedef expr_hash_elt *compare_type;
  typedef int store_values_directly;
  static inline hashval_t hash (const value_type &);
  static inline bool equal (const value_type &, const compare_type &);
  static inline void remove (value_type &);
};

inline hashval_t
expr_elt_hasher::hash (const value_type &p)
{
  return p->hash;
}

inline bool
expr_elt_hasher::equal (const value_type &p1, const compare_type &p2)
{
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  if (p1->hash != p2->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    return true;

  return false;
}

/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type &element)
{
  free_expr_hash_elt (element);
}

/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly, as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static hash_table<expr_elt_hasher> *avail_exprs;
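
/* For example (illustrative SSA names), given

     a_1 = b_2 + c_3;
     ...
     d_4 = b_2 + c_3;

   the first statement enters "b_2 + c_3 -> a_1" into the table, and
   the second is rewritten into d_4 = a_1.  Likewise, after taking the
   true arm of "if (x_5 > 0)" we record that x_5 > 0 has the value
   true.  */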

/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static vec<tree> const_and_copies_stack;

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;

/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *,
                             const hash_table<expr_elt_hasher> &);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);


/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          if (CONVERT_EXPR_CODE_P (subcode))
            subcode = NOP_EXPR;
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
    {
      size_t nargs = gimple_call_num_args (call_stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (call_stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (call_stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = call_stmt;

      if (gimple_call_flags (call_stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (call_stmt, i);
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    {
      expr->type = TREE_TYPE (gimple_switch_index (swtch_stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (swtch_stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->vop = gimple_vuse (stmt);
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}

/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->vop = NULL_TREE;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Compare two hashable_expr structures for equivalence.
   They are considered equivalent when the expressions
   they denote must necessarily be equal.  The logic is intended
   to follow that of operand_equal_p in fold-const.c.  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If the two types differ in signedness, precision, or mode, we
     can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
        return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
                           expr1->ops.binary.opnd0, 0)
          && operand_equal_p (expr0->ops.binary.opnd1,
                              expr1->ops.binary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
              && operand_equal_p (expr0->ops.binary.opnd0,
                                  expr1->ops.binary.opnd1, 0)
              && operand_equal_p (expr0->ops.binary.opnd1,
                                  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
          || !operand_equal_p (expr0->ops.ternary.opnd2,
                               expr1->ops.ternary.opnd2, 0))
        return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
                           expr1->ops.ternary.opnd0, 0)
          && operand_equal_p (expr0->ops.ternary.opnd1,
                              expr1->ops.ternary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
              && operand_equal_p (expr0->ops.ternary.opnd0,
                                  expr1->ops.ternary.opnd1, 0)
              && operand_equal_p (expr0->ops.ternary.opnd1,
                                  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        if (stmt_could_throw_p (expr0->ops.call.fn_from))
          {
            int lp0 = lookup_stmt_eh_lp (expr0->ops.call.fn_from);
            int lp1 = lookup_stmt_eh_lp (expr1->ops.call.fn_from);
            if ((lp0 > 0 || lp1 > 0) && lp0 != lp1)
              return false;
          }

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}

/* Generate a hash value for a pair of expressions.  This can be used
   iteratively by passing a previous result in HSTATE.

   The same hash value is always returned for a given pair of expressions,
   regardless of the order in which they are presented.  This is useful in
   hashing the operands of commutative functions.  */

namespace inchash
{

static void
add_expr_commutative (const_tree t1, const_tree t2, hash &hstate)
{
  hash one, two;

  inchash::add_expr (t1, one);
  inchash::add_expr (t2, two);
  hstate.add_commutative (one, two);
}
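
/* E.g. for a commutative operation, both operand orders accumulate
   the same state (a sketch; A and B are illustrative operands):

     inchash::add_expr_commutative (a, b, hstate1);
     inchash::add_expr_commutative (b, a, hstate2);

   leave hstate1 and hstate2 equal, which is what makes the
   commutative matching in hashable_expr_equal_p safe.  */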

/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow inchash::add_expr in tree.c.  */

static void
add_hashable_expr (const struct hashable_expr *expr, hash &hstate)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      inchash::add_expr (expr->ops.single.rhs, hstate);
      break;

    case EXPR_UNARY:
      hstate.add_object (expr->ops.unary.op);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        hstate.add_int (TYPE_UNSIGNED (expr->type));

      inchash::add_expr (expr->ops.unary.opnd, hstate);
      break;

    case EXPR_BINARY:
      hstate.add_object (expr->ops.binary.op);
      if (commutative_tree_code (expr->ops.binary.op))
        inchash::add_expr_commutative (expr->ops.binary.opnd0,
                                       expr->ops.binary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.binary.opnd0, hstate);
          inchash::add_expr (expr->ops.binary.opnd1, hstate);
        }
      break;

    case EXPR_TERNARY:
      hstate.add_object (expr->ops.ternary.op);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
        inchash::add_expr_commutative (expr->ops.ternary.opnd0,
                                       expr->ops.ternary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.ternary.opnd0, hstate);
          inchash::add_expr (expr->ops.ternary.opnd1, hstate);
        }
      inchash::add_expr (expr->ops.ternary.opnd2, hstate);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gcall *fn_from;

        hstate.add_object (code);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          hstate.merge_hash ((hashval_t) gimple_call_internal_fn (fn_from));
        else
          inchash::add_expr (gimple_call_fn (fn_from), hstate);
        for (i = 0; i < expr->ops.call.nargs; i++)
          inchash::add_expr (expr->ops.call.args[i], hstate);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          inchash::add_expr (expr->ops.phi.args[i], hstate);
      }
      break;

    default:
      gcc_unreachable ();
    }
}

}

/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  fprintf (stream, "STMT ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
        size_t i;
        size_t nargs = element->expr.ops.call.nargs;
        gcall *fn_from;

        fn_from = element->expr.ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                 stream);
        else
          print_generic_expr (stream, gimple_call_fn (fn_from), 0);
        fprintf (stream, " (");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.call.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ")");
      }
      break;

    case EXPR_PHI:
      {
        size_t i;
        size_t nargs = element->expr.ops.phi.nargs;

        fprintf (stream, "PHI <");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ">");
      }
      break;
    }

  if (element->vop)
    {
      fprintf (stream, " with ");
      print_generic_expr (stream, element->vop, 0);
    }

  fprintf (stream, "\n");
}

/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
  free_expr_hash_elt_contents (element);
  free (element);
}

/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}

/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              edge_info->cond_equivalences.release ();
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}

class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction)
    : dom_walker (direction), m_dummy_cond (NULL) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  gcond *m_dummy_cond;
};

/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

}; // class pass_dominator

unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs = new hash_table<expr_elt_hasher> (1024);
  avail_exprs_stack.create (20);
  const_and_copies_stack.create (20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through the loop header to the loop body,
     assuming that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if the loop preheader is
     missing.  We should improve jump threading in the future so that
     LOOPS_HAVE_PREHEADERS won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.
         Don't clear bits in the bitmap, as that can break the bitmap
         iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
          if (bb == NULL)
            continue;
          while (single_succ_p (bb)
                 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            bb = single_succ (bb);
          if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
            continue;
          if ((unsigned) bb->index != i)
            bitmap_set_bit (need_eh_cleanup, bb->index);
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  delete avail_exprs;
  avail_exprs = NULL;

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);

  avail_exprs_stack.release ();
  const_and_copies_stack.release ();

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}


/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */
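
/* For example (illustrative operands), "if (5 > x_1)" would be
   rewritten as "if (x_1 < 5)" when tree_swap_operands_p prefers the
   SSA name on the LHS, so later lookups see a single canonical
   form.  */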

static void
canonicalize_comparison (gcond *condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}

/* Remove expressions made available by the current block from the
   global expression hash table, unwinding AVAIL_EXPRS_STACK until we
   hit the marker (NULL) for the block and restoring any table entries
   that were shadowed.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      std::pair<expr_hash_elt_t, expr_hash_elt_t> victim
        = avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim.first == NULL)
        break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim.first);
        }

      slot = avail_exprs->find_slot (victim.first, NO_INSERT);
      gcc_assert (slot && *slot == victim.first);
      if (victim.second != NULL)
        {
          free_expr_hash_elt (*slot);
          *slot = victim.second;
        }
      else
        avail_exprs->clear_slot (slot);
    }
}

/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (const_and_copies_stack.length () > 0)
    {
      tree prev_value, dest;

      dest = const_and_copies_stack.pop ();

      if (dest == NULL)
        break;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< COPY ");
          print_generic_expr (dump_file, dest, 0);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
          fprintf (dump_file, "\n");
        }

      prev_value = const_and_copies_stack.pop ();
      set_ssa_name_value (dest, prev_value);
    }
}

/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}

/* Record into the equivalence tables any equivalences implied by
   traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
        record_const_or_copy (lhs, rhs);

      /* If we have 0 = COND or 1 = COND equivalences, record them
         into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
        record_cond (eq);
    }
}

/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  avail_exprs_stack.safe_push
    (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
  const_and_copies_stack.safe_push (NULL_TREE);

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
                        &const_and_copies_stack,
                        simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  remove_local_expressions_from_table ();
}

/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */
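
/* For example (illustrative SSA names):

     x_3 = PHI <a_1, a_1, x_3>

   Ignoring the self-referencing alternative, every argument is a_1,
   so x_3 can be treated as equivalent to a_1.  */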

static void
record_equivalences_from_phis (basic_block bb)
{
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi)
          && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}

/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}

/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;
          cond_equivalence *eq;

          if (lhs)
            record_equality (lhs, rhs);

          /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
             set via a widening type conversion, then we may be able to record
             additional equivalences.  */
          if (lhs
              && TREE_CODE (lhs) == SSA_NAME
              && is_gimple_constant (rhs)
              && TREE_CODE (rhs) == INTEGER_CST)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (lhs);

              if (defstmt
                  && is_gimple_assign (defstmt)
                  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
                {
                  tree old_rhs = gimple_assign_rhs1 (defstmt);

                  /* If the conversion widens the original value and
                     the constant is in the range of the type of OLD_RHS,
                     then convert the constant and record the equivalence.

                     Note that int_fits_type_p does not check the precision
                     if the upper and lower bounds are OK.  */
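
                  /* For example (illustrative names), if we know
                     x_1 == 5 on this edge and x_1 was set by

                       x_1 = (int) c_2;   where c_2 has type char

                     then 5 fits in char, so we may also record
                     c_2 == 5.  */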
                  if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
                      && (TYPE_PRECISION (TREE_TYPE (lhs))
                          > TYPE_PRECISION (TREE_TYPE (old_rhs)))
                      && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
                    {
                      tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
                      record_equality (old_rhs, newval);
                    }
                }
            }

          for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
            record_cond (eq);
        }
    }
}

/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements: %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, " avail_exprs: ");
  htab_statistics (file, *avail_exprs);
}


/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}


/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab.size (),
           (long) htab.elements (),
           htab.collisions ());
}


/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs->find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      avail_exprs_stack.safe_push
        (std::pair<expr_hash_elt_t, expr_hash_elt_t> (element, NULL));
    }
  else
    free_expr_hash_elt (element);
}

/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it to **P.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           vec<cond_equivalence> *p)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = boolean_true_node;
  p->safe_push (c);
}

/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now store the original true and false conditions into the first
     two slots.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}

/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  const_and_copies_stack.reserve (2);
  const_and_copies_stack.quick_push (prev_x);
  const_and_copies_stack.quick_push (x);
}

/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
        y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}

/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}

/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
           /* ???  When threading over backedges the following is important
              for correctness.  See PR61757.  */
           || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (x)
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}

/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}

/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gphi_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      /* We may have an equivalence associated with this edge.  While
         we cannot propagate it into non-dominated blocks, we can
         propagate it into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
         table back to its original state after processing this edge.  */
      const_and_copies_stack.safe_push (NULL_TREE);

      /* Extract and record any simple NAME = VALUE equivalences.

         Don't bother with [01] = COND equivalences, they're not useful
         here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;

          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            record_const_or_copy (lhs, rhs);
        }

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gphi *phi = gsi.phi ();

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }

      restore_vars_to_original_value ();
    }
}

/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          gswitch *switch_stmt = as_a <gswitch *> (stmt);
          tree index = gimple_switch_index (switch_stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (switch_stmt);
              tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (switch_stmt, i);
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = allocate_edge_info (e);
                      edge_info->lhs = index;
                      edge_info->rhs = x;
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1865
1866 /* Special case comparing booleans against a constant, as we
1867 know the value of OP0 on both arms of the branch; i.e., we
1868 can record an equivalence for OP0 itself rather than for COND. */
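/* For instance, given (illustrative SSA names)
     if (b_3 == 0)
   the THEN edge records b_3 = false and the ELSE edge records
   b_3 = true.  */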
1869 if ((code == EQ_EXPR || code == NE_EXPR)
1870 && TREE_CODE (op0) == SSA_NAME
1871 && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
1872 && is_gimple_min_invariant (op1))
1873 {
1874 if (code == EQ_EXPR)
1875 {
1876 edge_info = allocate_edge_info (true_edge);
1877 edge_info->lhs = op0;
1878 edge_info->rhs = (integer_zerop (op1)
1879 ? boolean_false_node
1880 : boolean_true_node);
1881
1882 edge_info = allocate_edge_info (false_edge);
1883 edge_info->lhs = op0;
1884 edge_info->rhs = (integer_zerop (op1)
1885 ? boolean_true_node
1886 : boolean_false_node);
1887 }
1888 else
1889 {
1890 edge_info = allocate_edge_info (true_edge);
1891 edge_info->lhs = op0;
1892 edge_info->rhs = (integer_zerop (op1)
1893 ? boolean_true_node
1894 : boolean_false_node);
1895
1896 edge_info = allocate_edge_info (false_edge);
1897 edge_info->lhs = op0;
1898 edge_info->rhs = (integer_zerop (op1)
1899 ? boolean_false_node
1900 : boolean_true_node);
1901 }
1902 }
1903 else if (is_gimple_min_invariant (op0)
1904 && (TREE_CODE (op1) == SSA_NAME
1905 || is_gimple_min_invariant (op1)))
1906 {
1907 tree cond = build2 (code, boolean_type_node, op0, op1);
1908 tree inverted = invert_truthvalue_loc (loc, cond);
1909 bool can_infer_simple_equiv
1910 = !(HONOR_SIGNED_ZEROS (op0)
1911 && real_zerop (op0));
1912 struct edge_info *edge_info;
1913
1914 edge_info = allocate_edge_info (true_edge);
1915 record_conditions (edge_info, cond, inverted);
1916
1917 if (can_infer_simple_equiv && code == EQ_EXPR)
1918 {
1919 edge_info->lhs = op1;
1920 edge_info->rhs = op0;
1921 }
1922
1923 edge_info = allocate_edge_info (false_edge);
1924 record_conditions (edge_info, inverted, cond);
1925
1926 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
1927 {
1928 edge_info->lhs = op1;
1929 edge_info->rhs = op0;
1930 }
1931 }
1932
1933 else if (TREE_CODE (op0) == SSA_NAME
1934 && (TREE_CODE (op1) == SSA_NAME
1935 || is_gimple_min_invariant (op1)))
1936 {
1937 tree cond = build2 (code, boolean_type_node, op0, op1);
1938 tree inverted = invert_truthvalue_loc (loc, cond);
1939 bool can_infer_simple_equiv
1940 = !(HONOR_SIGNED_ZEROS (op1)
1941 && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
1942 struct edge_info *edge_info;
1943
1944 edge_info = allocate_edge_info (true_edge);
1945 record_conditions (edge_info, cond, inverted);
1946
1947 if (can_infer_simple_equiv && code == EQ_EXPR)
1948 {
1949 edge_info->lhs = op0;
1950 edge_info->rhs = op1;
1951 }
1952
1953 edge_info = allocate_edge_info (false_edge);
1954 record_conditions (edge_info, inverted, cond);
1955
1956 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
1957 {
1958 edge_info->lhs = op0;
1959 edge_info->rhs = op1;
1960 }
1961 }
1962 }
1963
1964 /* ??? TRUTH_NOT_EXPR can create an equivalence too. */
1965 }
1966 }
1967
1968 void
1969 dom_opt_dom_walker::before_dom_children (basic_block bb)
1970 {
1971 gimple_stmt_iterator gsi;
1972
1973 if (dump_file && (dump_flags & TDF_DETAILS))
1974 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1975
1976 /* Push a marker on the stacks of local information so that we know how
1977 far to unwind when we finalize this block. */
1978 avail_exprs_stack.safe_push
1979 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
1980 const_and_copies_stack.safe_push (NULL_TREE);
1981
1982 record_equivalences_from_incoming_edge (bb);
1983
1984 /* PHI nodes can create equivalences too. */
1985 record_equivalences_from_phis (bb);
1986
1987 /* Create equivalences from redundant PHIs. PHIs are only truly
1988 redundant when they exist in the same block, so push another
1989 marker and unwind right afterwards. */
1990 avail_exprs_stack.safe_push
1991 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
1992 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1993 eliminate_redundant_computations (&gsi);
1994 remove_local_expressions_from_table ();
1995
1996 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1997 optimize_stmt (bb, gsi);
1998
1999 /* Now prepare to process dominated blocks. */
2000 record_edge_info (bb);
2001 cprop_into_successor_phis (bb);
2002 }
2003
2004 /* We have finished processing the dominator children of BB, perform
2005 any finalization actions in preparation for leaving this node in
2006 the dominator tree. */
2007
2008 void
2009 dom_opt_dom_walker::after_dom_children (basic_block bb)
2010 {
2011 gimple last;
2012
2013 /* If we have an outgoing edge to a block with multiple incoming and
2014 outgoing edges, then we may be able to thread the edge, i.e., we
2015 may be able to statically determine which of the outgoing edges
2016 will be traversed when the incoming edge from BB is traversed. */
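/* A hypothetical example: if BB ends with a_1 = 0 and its single
   successor ends with
     if (a_1 == 0)
   then traversing BB's outgoing edge always takes the THEN arm,
   and the jump can be threaded directly to that destination.  */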
2017 if (single_succ_p (bb)
2018 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
2019 && potentially_threadable_block (single_succ (bb)))
2020 {
2021 thread_across_edge (single_succ_edge (bb));
2022 }
2023 else if ((last = last_stmt (bb))
2024 && gimple_code (last) == GIMPLE_COND
2025 && EDGE_COUNT (bb->succs) == 2
2026 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
2027 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
2028 {
2029 edge true_edge, false_edge;
2030
2031 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
2032
2033 /* Only try to thread the edge if it reaches a target block with
2034 more than one predecessor and more than one successor. */
2035 if (potentially_threadable_block (true_edge->dest))
2036 thread_across_edge (true_edge);
2037
2038 /* Similarly for the ELSE arm. */
2039 if (potentially_threadable_block (false_edge->dest))
2040 thread_across_edge (false_edge);
2041
2042 }
2043
2044 /* These remove expressions local to BB from the tables. */
2045 remove_local_expressions_from_table ();
2046 restore_vars_to_original_value ();
2047 }
2048
2049 /* Search for redundant computations in STMT. If any are found, then
2050 replace them with the variable holding the result of the computation.
2051
2052 If safe, record this expression into the available expression hash
2053 table. */
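/* For example (SSA names illustrative), given
     a_1 = b_2 + c_3;
     ...
     d_4 = b_2 + c_3;
   the second computation is redundant and may be replaced by
     d_4 = a_1;  */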
2054
2055 static void
2056 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
2057 {
2058 tree expr_type;
2059 tree cached_lhs;
2060 tree def;
2061 bool insert = true;
2062 bool assigns_var_p = false;
2063
2064 gimple stmt = gsi_stmt (*gsi);
2065
2066 if (gimple_code (stmt) == GIMPLE_PHI)
2067 def = gimple_phi_result (stmt);
2068 else
2069 def = gimple_get_lhs (stmt);
2070
2071 /* Certain expressions on the RHS can be optimized away, but cannot
2072 themselves be entered into the hash tables. */
2073 if (! def
2074 || TREE_CODE (def) != SSA_NAME
2075 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
2076 || gimple_vdef (stmt)
2077 /* Do not record equivalences for increments of ivs. This would create
2078 overlapping live ranges for a very questionable gain. */
2079 || simple_iv_increment_p (stmt))
2080 insert = false;
2081
2082 /* Check if the expression has been computed before. */
2083 cached_lhs = lookup_avail_expr (stmt, insert);
2084
2085 opt_stats.num_exprs_considered++;
2086
2087 /* Get the type of the expression we are trying to optimize. */
2088 if (is_gimple_assign (stmt))
2089 {
2090 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
2091 assigns_var_p = true;
2092 }
2093 else if (gimple_code (stmt) == GIMPLE_COND)
2094 expr_type = boolean_type_node;
2095 else if (is_gimple_call (stmt))
2096 {
2097 gcc_assert (gimple_call_lhs (stmt));
2098 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
2099 assigns_var_p = true;
2100 }
2101 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2102 expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
2103 else if (gimple_code (stmt) == GIMPLE_PHI)
2104 /* We can't propagate into a phi, so the logic below doesn't apply.
2105 Instead record an equivalence between the cached LHS and the
2106 PHI result of this statement, provided they are in the same block.
2107 This should be sufficient to kill the redundant phi. */
2108 {
2109 if (def && cached_lhs)
2110 record_const_or_copy (def, cached_lhs);
2111 return;
2112 }
2113 else
2114 gcc_unreachable ();
2115
2116 if (!cached_lhs)
2117 return;
2118
2119 /* It is safe to ignore types here since we have already done
2120 type checking in the hashing and equality routines. In fact
2121 type checking here merely gets in the way of constant
2122 propagation. Also, make sure that it is safe to propagate
2123 CACHED_LHS into the expression in STMT. */
2124 if ((TREE_CODE (cached_lhs) != SSA_NAME
2125 && (assigns_var_p
2126 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
2127 || may_propagate_copy_into_stmt (stmt, cached_lhs))
2128 {
2129 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
2130 || is_gimple_min_invariant (cached_lhs));
2131
2132 if (dump_file && (dump_flags & TDF_DETAILS))
2133 {
2134 fprintf (dump_file, " Replaced redundant expr '");
2135 print_gimple_expr (dump_file, stmt, 0, dump_flags);
2136 fprintf (dump_file, "' with '");
2137 print_generic_expr (dump_file, cached_lhs, dump_flags);
2138 fprintf (dump_file, "'\n");
2139 }
2140
2141 opt_stats.num_re++;
2142
2143 if (assigns_var_p
2144 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
2145 cached_lhs = fold_convert (expr_type, cached_lhs);
2146
2147 propagate_tree_value_into_stmt (gsi, cached_lhs);
2148
2149 /* Since it is always necessary to mark the result as modified,
2150 perhaps we should move this into propagate_tree_value_into_stmt
2151 itself. */
2152 gimple_set_modified (gsi_stmt (*gsi), true);
2153 }
2154 }
2155
2156 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2157 the available expressions table or the const_and_copies table.
2158 Detect and record those equivalences. */
2159 /* We handle only very simple copy equivalences here. The heavy
2160 lifting is done by eliminate_redundant_computations. */
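/* For example, a simple copy such as
     x_3 = y_4;
   registers y_4 as the current value of x_3 in the const/copies
   table (SSA names illustrative).  */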
2161
2162 static void
2163 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
2164 {
2165 tree lhs;
2166 enum tree_code lhs_code;
2167
2168 gcc_assert (is_gimple_assign (stmt));
2169
2170 lhs = gimple_assign_lhs (stmt);
2171 lhs_code = TREE_CODE (lhs);
2172
2173 if (lhs_code == SSA_NAME
2174 && gimple_assign_single_p (stmt))
2175 {
2176 tree rhs = gimple_assign_rhs1 (stmt);
2177
2178 /* If the RHS of the assignment is a constant or another variable that
2179 may be propagated, register it in the CONST_AND_COPIES table. We
2180 do not need to record unwind data for this, since this is a true
2181 assignment and not an equivalence inferred from a comparison. All
2182 uses of this ssa name are dominated by this assignment, so unwinding
2183 just costs time and space. */
2184 if (may_optimize_p
2185 && (TREE_CODE (rhs) == SSA_NAME
2186 || is_gimple_min_invariant (rhs)))
2187 {
2188 if (dump_file && (dump_flags & TDF_DETAILS))
2189 {
2190 fprintf (dump_file, "==== ASGN ");
2191 print_generic_expr (dump_file, lhs, 0);
2192 fprintf (dump_file, " = ");
2193 print_generic_expr (dump_file, rhs, 0);
2194 fprintf (dump_file, "\n");
2195 }
2196
2197 set_ssa_name_value (lhs, rhs);
2198 }
2199 }
2200
2201 /* Make sure we can propagate &x + CST. */
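/* E.g. given (illustrative)
     p_1 = &x + 4;       <-- POINTER_PLUS_EXPR of &x and 4
   we record p_1 as equivalent to the invariant address
   &MEM[(void *)&x + 4B] (roughly; the exact tree is built below),
   which can then be propagated into uses of p_1.  */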
2202 if (lhs_code == SSA_NAME
2203 && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
2204 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
2205 && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
2206 {
2207 tree op0 = gimple_assign_rhs1 (stmt);
2208 tree op1 = gimple_assign_rhs2 (stmt);
2209 tree new_rhs
2210 = build_fold_addr_expr (fold_build2 (MEM_REF,
2211 TREE_TYPE (TREE_TYPE (op0)),
2212 unshare_expr (op0),
2213 fold_convert (ptr_type_node,
2214 op1)));
2215 if (dump_file && (dump_flags & TDF_DETAILS))
2216 {
2217 fprintf (dump_file, "==== ASGN ");
2218 print_generic_expr (dump_file, lhs, 0);
2219 fprintf (dump_file, " = ");
2220 print_generic_expr (dump_file, new_rhs, 0);
2221 fprintf (dump_file, "\n");
2222 }
2223
2224 set_ssa_name_value (lhs, new_rhs);
2225 }
2226
2227 /* A memory store, even an aliased store, creates a useful
2228 equivalence. By exchanging the LHS and RHS, creating suitable
2229 vops and recording the result in the available expression table,
2230 we may be able to expose more redundant loads. */
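/* For example, after the store
     *p_6 = x_2;
   the artificial statement
     x_2 = *p_6;
   is entered into the table, so a later load from *p_6 with the
   same memory state can simply reuse x_2 (names illustrative).  */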
2231 if (!gimple_has_volatile_ops (stmt)
2232 && gimple_references_memory_p (stmt)
2233 && gimple_assign_single_p (stmt)
2234 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2235 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2236 && !is_gimple_reg (lhs))
2237 {
2238 tree rhs = gimple_assign_rhs1 (stmt);
2239 gassign *new_stmt;
2240
2241 /* Build a new statement with the RHS and LHS exchanged. */
2242 if (TREE_CODE (rhs) == SSA_NAME)
2243 {
2244 /* NOTE tuples. The call to gimple_build_assign below replaced
2245 a call to build_gimple_modify_stmt, which did not set the
2246 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
2247 may cause an SSA validation failure, as the LHS may be a
2248 default-initialized name and should have no definition. I'm
2249 a bit dubious of this, as the artificial statement that we
2250 generate here may in fact be ill-formed, but it is simply
2251 used as an internal device in this pass, and never becomes
2252 part of the CFG. */
2253 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2254 new_stmt = gimple_build_assign (rhs, lhs);
2255 SSA_NAME_DEF_STMT (rhs) = defstmt;
2256 }
2257 else
2258 new_stmt = gimple_build_assign (rhs, lhs);
2259
2260 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2261
2262 /* Finally enter the statement into the available expression
2263 table. */
2264 lookup_avail_expr (new_stmt, true);
2265 }
2266 }
2267
2268 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2269 CONST_AND_COPIES. */
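/* For instance, if CONST_AND_COPIES maps j_5 to the constant 10,
   then the use of j_5 in
     a_6 = j_5 + 1;
   is rewritten to a_6 = 10 + 1 (names illustrative).  */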
2270
2271 static void
2272 cprop_operand (gimple stmt, use_operand_p op_p)
2273 {
2274 tree val;
2275 tree op = USE_FROM_PTR (op_p);
2276
2277 /* If the operand has a known constant value or it is known to be a
2278 copy of some other variable, use the value or copy stored in
2279 CONST_AND_COPIES. */
2280 val = SSA_NAME_VALUE (op);
2281 if (val && val != op)
2282 {
2283 /* Do not replace hard register operands in asm statements. */
2284 if (gimple_code (stmt) == GIMPLE_ASM
2285 && !may_propagate_copy_into_asm (op))
2286 return;
2287
2288 /* Certain operands are not allowed to be copy propagated due
2289 to their interaction with exception handling and some GCC
2290 extensions. */
2291 if (!may_propagate_copy (op, val))
2292 return;
2293
2294 /* Do not propagate copies into simple IV increment statements.
2295 See PR23821 for how this can disturb IV analysis. */
2296 if (TREE_CODE (val) != INTEGER_CST
2297 && simple_iv_increment_p (stmt))
2298 return;
2299
2300 /* Dump details. */
2301 if (dump_file && (dump_flags & TDF_DETAILS))
2302 {
2303 fprintf (dump_file, " Replaced '");
2304 print_generic_expr (dump_file, op, dump_flags);
2305 fprintf (dump_file, "' with %s '",
2306 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2307 print_generic_expr (dump_file, val, dump_flags);
2308 fprintf (dump_file, "'\n");
2309 }
2310
2311 if (TREE_CODE (val) != SSA_NAME)
2312 opt_stats.num_const_prop++;
2313 else
2314 opt_stats.num_copy_prop++;
2315
2316 propagate_value (op_p, val);
2317
2318 /* And note that we modified this statement. This is now
2319 safe, even if we changed virtual operands since we will
2320 rescan the statement and rewrite its operands again. */
2321 gimple_set_modified (stmt, true);
2322 }
2323 }
2324
2325 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2326 known value for that SSA_NAME (or NULL if no value is known).
2327
2328 Propagate values from CONST_AND_COPIES into the uses, vuses and
2329 vdef_ops of STMT. */
2330
2331 static void
2332 cprop_into_stmt (gimple stmt)
2333 {
2334 use_operand_p op_p;
2335 ssa_op_iter iter;
2336
2337 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2338 cprop_operand (stmt, op_p);
2339 }
2340
2341 /* Optimize the statement pointed to by iterator SI.
2342
2343 We try to perform some simplistic global redundancy elimination and
2344 constant propagation:
2345
2346 1- To detect global redundancy, we keep track of expressions that have
2347 been computed in this block and its dominators. If we find that the
2348 same expression is computed more than once, we eliminate repeated
2349 computations by using the target of the first one.
2350
2351 2- Constant values and copy assignments. This is used to do very
2352 simplistic constant and copy propagation. When a constant or copy
2353 assignment is found, we map the value on the RHS of the assignment to
2354 the variable in the LHS in the CONST_AND_COPIES table. */
2355
2356 static void
2357 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2358 {
2359 gimple stmt, old_stmt;
2360 bool may_optimize_p;
2361 bool modified_p = false;
2362
2363 old_stmt = stmt = gsi_stmt (si);
2364
2365 if (dump_file && (dump_flags & TDF_DETAILS))
2366 {
2367 fprintf (dump_file, "Optimizing statement ");
2368 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2369 }
2370
2371 if (gimple_code (stmt) == GIMPLE_COND)
2372 canonicalize_comparison (as_a <gcond *> (stmt));
2373
2374 update_stmt_if_modified (stmt);
2375 opt_stats.num_stmts++;
2376
2377 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2378 cprop_into_stmt (stmt);
2379
2380 /* If the statement has been modified with constant replacements,
2381 fold its RHS before checking for redundant computations. */
2382 if (gimple_modified_p (stmt))
2383 {
2384 tree rhs = NULL;
2385
2386 /* Try to fold the statement making sure that STMT is kept
2387 up to date. */
2388 if (fold_stmt (&si))
2389 {
2390 stmt = gsi_stmt (si);
2391 gimple_set_modified (stmt, true);
2392
2393 if (dump_file && (dump_flags & TDF_DETAILS))
2394 {
2395 fprintf (dump_file, " Folded to: ");
2396 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2397 }
2398 }
2399
2400 /* We only need to consider cases that can yield a gimple operand. */
2401 if (gimple_assign_single_p (stmt))
2402 rhs = gimple_assign_rhs1 (stmt);
2403 else if (gimple_code (stmt) == GIMPLE_GOTO)
2404 rhs = gimple_goto_dest (stmt);
2405 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2406 /* This should never be an ADDR_EXPR. */
2407 rhs = gimple_switch_index (swtch_stmt);
2408
2409 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2410 recompute_tree_invariant_for_addr_expr (rhs);
2411
2412 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2413 even if fold_stmt updated the stmt already and thus cleared
2414 gimple_modified_p flag on it. */
2415 modified_p = true;
2416 }
2417
2418 /* Check for redundant computations. Do this optimization only for
2419 side-effect-free assignments, calls with an LHS, conditionals and switches. */
2420 may_optimize_p = (!gimple_has_side_effects (stmt)
2421 && (is_gimple_assign (stmt)
2422 || (is_gimple_call (stmt)
2423 && gimple_call_lhs (stmt) != NULL_TREE)
2424 || gimple_code (stmt) == GIMPLE_COND
2425 || gimple_code (stmt) == GIMPLE_SWITCH));
2426
2427 if (may_optimize_p)
2428 {
2429 if (gimple_code (stmt) == GIMPLE_CALL)
2430 {
2431 /* Resolve __builtin_constant_p. If it hasn't been
2432 folded to integer_one_node by now, it's fairly
2433 certain that the value simply isn't constant. */
2434 tree callee = gimple_call_fndecl (stmt);
2435 if (callee
2436 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2437 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2438 {
2439 propagate_tree_value_into_stmt (&si, integer_zero_node);
2440 stmt = gsi_stmt (si);
2441 }
2442 }
2443
2444 update_stmt_if_modified (stmt);
2445 eliminate_redundant_computations (&si);
2446 stmt = gsi_stmt (si);
2447
2448 /* Perform simple redundant store elimination. */
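/* E.g. given (illustrative SSA names)
     x_2 = *p_1;
     ...
     *p_1 = x_2;
   the store writes back the value the location is already known
   to hold and can be removed.  */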
2449 if (gimple_assign_single_p (stmt)
2450 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2451 {
2452 tree lhs = gimple_assign_lhs (stmt);
2453 tree rhs = gimple_assign_rhs1 (stmt);
2454 tree cached_lhs;
2455 gassign *new_stmt;
2456 if (TREE_CODE (rhs) == SSA_NAME)
2457 {
2458 tree tem = SSA_NAME_VALUE (rhs);
2459 if (tem)
2460 rhs = tem;
2461 }
2462 /* Build a new statement with the RHS and LHS exchanged. */
2463 if (TREE_CODE (rhs) == SSA_NAME)
2464 {
2465 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2466 new_stmt = gimple_build_assign (rhs, lhs);
2467 SSA_NAME_DEF_STMT (rhs) = defstmt;
2468 }
2469 else
2470 new_stmt = gimple_build_assign (rhs, lhs);
2471 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2472 cached_lhs = lookup_avail_expr (new_stmt, false);
2473 if (cached_lhs
2474 && rhs == cached_lhs)
2475 {
2476 basic_block bb = gimple_bb (stmt);
2477 unlink_stmt_vdef (stmt);
2478 if (gsi_remove (&si, true))
2479 {
2480 bitmap_set_bit (need_eh_cleanup, bb->index);
2481 if (dump_file && (dump_flags & TDF_DETAILS))
2482 fprintf (dump_file, " Flagged to clear EH edges.\n");
2483 }
2484 release_defs (stmt);
2485 return;
2486 }
2487 }
2488 }
2489
2490 /* Record any additional equivalences created by this statement. */
2491 if (is_gimple_assign (stmt))
2492 record_equivalences_from_stmt (stmt, may_optimize_p);
2493
2494 /* If STMT is a COND_EXPR and it was modified, then we may know
2495 where it goes. If that is the case, then mark the CFG as altered.
2496
2497 This will cause us to later call remove_unreachable_blocks and
2498 cleanup_tree_cfg when it is safe to do so. It is not safe to
2499 clean things up here since removal of edges and such can trigger
2500 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2501 the manager.
2502
2503 That's all fine and good, except that once SSA_NAMEs are released
2504 to the manager, we must not call create_ssa_name until all references
2505 to released SSA_NAMEs have been eliminated.
2506
2507 All references to the deleted SSA_NAMEs cannot be eliminated until
2508 we remove unreachable blocks.
2509
2510 We cannot remove unreachable blocks until after we have completed
2511 any queued jump threading.
2512
2513 We cannot complete any queued jump threads until we have taken
2514 appropriate variables out of SSA form. Taking variables out of
2515 SSA form can call create_ssa_name and thus we lose.
2516
2517 Ultimately I suspect we're going to need to change the interface
2518 into the SSA_NAME manager. */
2519 if (gimple_modified_p (stmt) || modified_p)
2520 {
2521 tree val = NULL;
2522
2523 update_stmt_if_modified (stmt);
2524
2525 if (gimple_code (stmt) == GIMPLE_COND)
2526 val = fold_binary_loc (gimple_location (stmt),
2527 gimple_cond_code (stmt), boolean_type_node,
2528 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2529 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2530 val = gimple_switch_index (swtch_stmt);
2531
2532 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2533 cfg_altered = true;
2534
2535 /* If we simplified a statement in such a way that it can be shown
2536 not to trap, update the EH information and the CFG to match. */
2537 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2538 {
2539 bitmap_set_bit (need_eh_cleanup, bb->index);
2540 if (dump_file && (dump_flags & TDF_DETAILS))
2541 fprintf (dump_file, " Flagged to clear EH edges.\n");
2542 }
2543 }
2544 }
2545
2546 /* Helper for walk_non_aliased_vuses. Determine if we arrived at
2547 the desired memory state. */
2548
2549 static void *
2550 vuse_eq (ao_ref *, tree vuse1, unsigned int cnt, void *data)
2551 {
2552 tree vuse2 = (tree) data;
2553 if (vuse1 == vuse2)
2554 return data;
2555
2556 /* This bounds the stmt walks we perform on reference lookups
2557 to O(1) instead of O(N) where N is the number of dominating
2558 stores leading to a candidate. We re-use the SCCVN param
2559 for this as it is basically the same complexity. */
2560 if (cnt > (unsigned) PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS))
2561 return (void *)-1;
2562
2563 return NULL;
2564 }
2565
2566 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2567 If found, return its LHS. Otherwise insert STMT in the table and
2568 return NULL_TREE.
2569
2570 Also, when an expression is first inserted in the table, it is
2571 added to AVAIL_EXPRS_STACK, so that it can be removed when
2572 we finish processing this block and its children. */
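/* For example, the first time
     a_1 = b_2 + c_3;
   is seen, lookup_avail_expr returns NULL_TREE and records the
   expression; a later
     d_4 = b_2 + c_3;
   then returns a_1 (names illustrative).  */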
2573
2574 static tree
2575 lookup_avail_expr (gimple stmt, bool insert)
2576 {
2577 expr_hash_elt **slot;
2578 tree lhs;
2579 tree temp;
2580 struct expr_hash_elt element;
2581
2582 /* Get LHS of phi, assignment, or call; else NULL_TREE. */
2583 if (gimple_code (stmt) == GIMPLE_PHI)
2584 lhs = gimple_phi_result (stmt);
2585 else
2586 lhs = gimple_get_lhs (stmt);
2587
2588 initialize_hash_element (stmt, lhs, &element);
2589
2590 if (dump_file && (dump_flags & TDF_DETAILS))
2591 {
2592 fprintf (dump_file, "LKUP ");
2593 print_expr_hash_elt (dump_file, &element);
2594 }
2595
2596 /* Don't bother remembering constant assignments and copy operations.
2597 Constants and copy operations are handled by the constant/copy propagator
2598 in optimize_stmt. */
2599 if (element.expr.kind == EXPR_SINGLE
2600 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2601 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2602 return NULL_TREE;
2603
2604 /* Finally try to find the expression in the main expression hash table. */
2605 slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
2606 if (slot == NULL)
2607 {
2608 free_expr_hash_elt_contents (&element);
2609 return NULL_TREE;
2610 }
2611 else if (*slot == NULL)
2612 {
2613 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2614 *element2 = element;
2615 element2->stamp = element2;
2616 *slot = element2;
2617
2618 if (dump_file && (dump_flags & TDF_DETAILS))
2619 {
2620 fprintf (dump_file, "2>>> ");
2621 print_expr_hash_elt (dump_file, element2);
2622 }
2623
2624 avail_exprs_stack.safe_push
2625 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (element2, NULL));
2626 return NULL_TREE;
2627 }
2628
2629 /* If we found a redundant memory operation, do an alias walk to
2630 check whether we can re-use it. */
2631 if (gimple_vuse (stmt) != (*slot)->vop)
2632 {
2633 tree vuse1 = (*slot)->vop;
2634 tree vuse2 = gimple_vuse (stmt);
2635 /* If we have a load of a register and a candidate in the
2636 hash with vuse1 then try to reach its stmt by walking
2637 up the virtual use-def chain using walk_non_aliased_vuses.
2638 But don't do this when removing expressions from the hash. */
2639 ao_ref ref;
2640 if (!(vuse1 && vuse2
2641 && gimple_assign_single_p (stmt)
2642 && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
2643 && (ao_ref_init (&ref, gimple_assign_rhs1 (stmt)), true)
2644 && walk_non_aliased_vuses (&ref, vuse2,
2645 vuse_eq, NULL, NULL, vuse1) != NULL))
2646 {
2647 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2648 *element2 = element;
2649 element2->stamp = element2;
2650
2651 /* Insert the expr into the hash by replacing the current
2652 entry and recording the value to restore in the
2653 avail_exprs_stack. */
2654 avail_exprs_stack.safe_push (std::make_pair (element2, *slot));
2655 *slot = element2;
2656 if (dump_file && (dump_flags & TDF_DETAILS))
2657 {
2658 fprintf (dump_file, "2>>> ");
2659 print_expr_hash_elt (dump_file, *slot);
2660 }
2661 return NULL_TREE;
2662 }
2663 }
2664
2665 free_expr_hash_elt_contents (&element);
2666
2667 /* Extract the LHS of the assignment so that it can be used as the current
2668 definition of another variable. */
2669 lhs = (*slot)->lhs;
2670
2671 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2672 use the value from the const_and_copies table. */
2673 if (TREE_CODE (lhs) == SSA_NAME)
2674 {
2675 temp = SSA_NAME_VALUE (lhs);
2676 if (temp)
2677 lhs = temp;
2678 }
2679
2680 if (dump_file && (dump_flags & TDF_DETAILS))
2681 {
2682 fprintf (dump_file, "FIND: ");
2683 print_generic_expr (dump_file, lhs, 0);
2684 fprintf (dump_file, "\n");
2685 }
2686
2687 return lhs;
2688 }
2689
2690 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2691 for expressions using the code of the expression and the SSA numbers of
2692 its operands. */
2693
2694 static hashval_t
2695 avail_expr_hash (const void *p)
2696 {
2697 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2698 inchash::hash hstate;
2699
2700 inchash::add_hashable_expr (expr, hstate);
2701
2702 return hstate.end ();
2703 }
2704
2705 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2706 up degenerate PHIs created by or exposed by jump threading. */
2707
2708 /* Given a statement STMT, which is either a PHI node or an assignment,
2709 remove it from the IL. */
2710
2711 static void
2712 remove_stmt_or_phi (gimple stmt)
2713 {
2714 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2715
2716 if (gimple_code (stmt) == GIMPLE_PHI)
2717 remove_phi_node (&gsi, true);
2718 else
2719 {
2720 gsi_remove (&gsi, true);
2721 release_defs (stmt);
2722 }
2723 }
2724
2725 /* Given a statement STMT, which is either a PHI node or an assignment,
2726 return the "rhs" of the node. In the case of a non-degenerate
2727 PHI, NULL is returned. */
2728
2729 static tree
2730 get_rhs_or_phi_arg (gimple stmt)
2731 {
2732 if (gimple_code (stmt) == GIMPLE_PHI)
2733 return degenerate_phi_result (as_a <gphi *> (stmt));
2734 else if (gimple_assign_single_p (stmt))
2735 return gimple_assign_rhs1 (stmt);
2736 else
2737 gcc_unreachable ();
2738 }
2739
2740
2741 /* Given a statement STMT, which is either a PHI node or an assignment,
2742 return the "lhs" of the node. */
2743
2744 static tree
2745 get_lhs_or_phi_result (gimple stmt)
2746 {
2747 if (gimple_code (stmt) == GIMPLE_PHI)
2748 return gimple_phi_result (stmt);
2749 else if (is_gimple_assign (stmt))
2750 return gimple_assign_lhs (stmt);
2751 else
2752 gcc_unreachable ();
2753 }
2754
2755 /* Propagate RHS into all uses of LHS (when possible).
2756
2757 RHS and LHS are derived from STMT, which is passed in solely so
2758 that we can remove it if propagation is successful.
2759
2760 When propagating into a PHI node or into a statement which turns
2761 into a trivial copy or constant initialization, set the
2762 appropriate bit in INTERESTING_NAMEs so that we will visit those
2763 nodes as well in an effort to pick up secondary optimization
2764 opportunities. */
2765
2766 static void
2767 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2768 {
2769 /* First verify that propagation is valid. */
2770 if (may_propagate_copy (lhs, rhs))
2771 {
2772 use_operand_p use_p;
2773 imm_use_iterator iter;
2774 gimple use_stmt;
2775 bool all = true;
2776
2777 /* Dump details. */
2778 if (dump_file && (dump_flags & TDF_DETAILS))
2779 {
2780 fprintf (dump_file, " Replacing '");
2781 print_generic_expr (dump_file, lhs, dump_flags);
2782 fprintf (dump_file, "' with %s '",
2783 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2784 print_generic_expr (dump_file, rhs, dump_flags);
2785 fprintf (dump_file, "'\n");
2786 }
2787
2788 /* Walk over every use of LHS and try to replace the use with RHS.
2789 At this point the only reason why such a propagation would not
2790 be successful would be if the use occurs in an ASM_EXPR. */
2791 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2792 {
2793 /* Leave debug stmts alone. If we succeed in propagating
2794 all non-debug uses, we'll drop the DEF, and propagation
2795 into debug stmts will occur then. */
2796 if (gimple_debug_bind_p (use_stmt))
2797 continue;
2798
2799 /* It's not always safe to propagate into an ASM_EXPR. */
2800 if (gimple_code (use_stmt) == GIMPLE_ASM
2801 && ! may_propagate_copy_into_asm (lhs))
2802 {
2803 all = false;
2804 continue;
2805 }
2806
2807 /* It's not ok to propagate into the definition stmt of RHS.
2808 <bb 9>:
2809 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2810 g_67.1_6 = prephitmp.12_36;
2811 goto <bb 9>;
2812 While this is strictly all dead code, we do not want to
2813 deal with it here. */
2814 if (TREE_CODE (rhs) == SSA_NAME
2815 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2816 {
2817 all = false;
2818 continue;
2819 }
2820
2821 /* Dump details. */
2822 if (dump_file && (dump_flags & TDF_DETAILS))
2823 {
2824 fprintf (dump_file, " Original statement:");
2825 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2826 }
2827
2828 /* Propagate the RHS into this use of the LHS. */
2829 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2830 propagate_value (use_p, rhs);
2831
2832 /* Special cases to avoid useless calls into the folding
2833 routines, operand scanning, etc.
2834
2835 Propagation into a PHI may cause the PHI to become
2836 a degenerate, so mark the PHI as interesting. No other
2837 actions are necessary. */
2838 if (gimple_code (use_stmt) == GIMPLE_PHI)
2839 {
2840 tree result;
2841
2842 /* Dump details. */
2843 if (dump_file && (dump_flags & TDF_DETAILS))
2844 {
2845 fprintf (dump_file, " Updated statement:");
2846 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2847 }
2848
2849 result = get_lhs_or_phi_result (use_stmt);
2850 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2851 continue;
2852 }
2853
2854 /* From this point onward we are propagating into a
2855 real statement. Folding may (or may not) be possible,
2856 we may expose new operands, expose dead EH edges,
2857 etc. */
2858 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2859 cannot fold a call that simplifies to a constant,
2860 because the GIMPLE_CALL must be replaced by a
2861 GIMPLE_ASSIGN, and there is no way to effect such a
2862 transformation in-place. We might want to consider
2863 using the more general fold_stmt here. */
2864 {
2865 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2866 fold_stmt_inplace (&gsi);
2867 }
2868
2869 /* Sometimes propagation can expose new operands to the
2870 renamer. */
2871 update_stmt (use_stmt);
2872
2873 /* Dump details. */
2874 if (dump_file && (dump_flags & TDF_DETAILS))
2875 {
2876 fprintf (dump_file, " Updated statement:");
2877 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2878 }
2879
2880 /* If we replaced a variable index with a constant, then
2881 we would need to update the invariant flag for ADDR_EXPRs. */
2882 if (gimple_assign_single_p (use_stmt)
2883 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2884 recompute_tree_invariant_for_addr_expr
2885 (gimple_assign_rhs1 (use_stmt));
2886
2887 /* If we cleaned up EH information from the statement,
2888 mark its containing block as needing EH cleanups. */
2889 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2890 {
2891 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2892 if (dump_file && (dump_flags & TDF_DETAILS))
2893 fprintf (dump_file, " Flagged to clear EH edges.\n");
2894 }
2895
2896 /* Propagation may expose new trivial copy/constant propagation
2897 opportunities. */
2898 if (gimple_assign_single_p (use_stmt)
2899 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2900 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2901 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2902 {
2903 tree result = get_lhs_or_phi_result (use_stmt);
2904 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2905 }
2906
2907 /* Propagation into these nodes may make certain edges in
2908 the CFG unexecutable. We want to identify them, since PHI
2909 nodes at the destinations of those unexecutable edges may
2910 become degenerate. */
2911 else if (gimple_code (use_stmt) == GIMPLE_COND
2912 || gimple_code (use_stmt) == GIMPLE_SWITCH
2913 || gimple_code (use_stmt) == GIMPLE_GOTO)
2914 {
2915 tree val;
2916
2917 if (gimple_code (use_stmt) == GIMPLE_COND)
2918 val = fold_binary_loc (gimple_location (use_stmt),
2919 gimple_cond_code (use_stmt),
2920 boolean_type_node,
2921 gimple_cond_lhs (use_stmt),
2922 gimple_cond_rhs (use_stmt));
2923 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2924 val = gimple_switch_index (as_a <gswitch *> (use_stmt));
2925 else
2926 val = gimple_goto_dest (use_stmt);
2927
2928 if (val && is_gimple_min_invariant (val))
2929 {
2930 basic_block bb = gimple_bb (use_stmt);
2931 edge te = find_taken_edge (bb, val);
2932 edge_iterator ei;
2933 edge e;
2934 gimple_stmt_iterator gsi;
2935 gphi_iterator psi;
2936
2937 /* Remove all outgoing edges except TE. */
2938 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2939 {
2940 if (e != te)
2941 {
2942 /* Mark all the PHI nodes at the destination of
2943 the unexecutable edge as interesting. */
2944 for (psi = gsi_start_phis (e->dest);
2945 !gsi_end_p (psi);
2946 gsi_next (&psi))
2947 {
2948 gphi *phi = psi.phi ();
2949
2950 tree result = gimple_phi_result (phi);
2951 int version = SSA_NAME_VERSION (result);
2952
2953 bitmap_set_bit (interesting_names, version);
2954 }
2955
2956 te->probability += e->probability;
2957
2958 te->count += e->count;
2959 remove_edge (e);
2960 cfg_altered = true;
2961 }
2962 else
2963 ei_next (&ei);
2964 }
2965
2966 gsi = gsi_last_bb (gimple_bb (use_stmt));
2967 gsi_remove (&gsi, true);
2968
2969 /* And fixup the flags on the single remaining edge. */
2970 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2971 te->flags &= ~EDGE_ABNORMAL;
2972 te->flags |= EDGE_FALLTHRU;
2973 if (te->probability > REG_BR_PROB_BASE)
2974 te->probability = REG_BR_PROB_BASE;
2975 }
2976 }
2977 }
2978
2979 /* Ensure there is nothing else to do. */
2980 gcc_assert (!all || has_zero_uses (lhs));
2981
2982 /* If we were able to propagate away all uses of LHS, then
2983 we can remove STMT. */
2984 if (all)
2985 remove_stmt_or_phi (stmt);
2986 }
2987 }
2988
2989 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2990 a statement that is a trivial copy or constant initialization.
2991
2992 Attempt to eliminate STMT by propagating its RHS into all uses of
2993 its LHS. This may in turn set new bits in INTERESTING_NAMES
2994 for nodes we want to revisit later.
2995
2996 All exit paths should clear INTERESTING_NAMES for the result
2997 of STMT. */
2998
2999 static void
3000 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
3001 {
3002 tree lhs = get_lhs_or_phi_result (stmt);
3003 tree rhs;
3004 int version = SSA_NAME_VERSION (lhs);
3005
3006 /* If the LHS of this statement or PHI has no uses, then we can
3007 just eliminate it. This can occur if, for example, the PHI
3008 was created by block duplication due to threading and its only
3009 use was in the conditional at the end of the block which was
3010 deleted. */
3011 if (has_zero_uses (lhs))
3012 {
3013 bitmap_clear_bit (interesting_names, version);
3014 remove_stmt_or_phi (stmt);
3015 return;
3016 }
3017
3018 /* Get the RHS of the assignment or PHI node if the PHI is a
3019 degenerate. */
3020 rhs = get_rhs_or_phi_arg (stmt);
3021 if (!rhs)
3022 {
3023 bitmap_clear_bit (interesting_names, version);
3024 return;
3025 }
3026
3027 if (!virtual_operand_p (lhs))
3028 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
3029 else
3030 {
3031 gimple use_stmt;
3032 imm_use_iterator iter;
3033 use_operand_p use_p;
3034 /* For virtual operands we have to propagate into all uses, as
3035 otherwise we would create overlapping live ranges. */
3036 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3037 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3038 SET_USE (use_p, rhs);
3039 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
3040 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
3041 remove_stmt_or_phi (stmt);
3042 }
3043
3044 /* Note that STMT may well have been deleted by now, so do
3045 not access it; instead use the saved version number to clear
3046 STMT's entry in the worklist. */
3047 bitmap_clear_bit (interesting_names, version);
3048 }
3049
3050 /* The first phase in degenerate PHI elimination.
3051
3052 Eliminate the degenerate PHIs in BB, then recurse on the
3053 dominator children of BB. */
3054
3055 static void
3056 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
3057 {
3058 gphi_iterator gsi;
3059 basic_block son;
3060
3061 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3062 {
3063 gphi *phi = gsi.phi ();
3064
3065 eliminate_const_or_copy (phi, interesting_names);
3066 }
3067
3068 /* Recurse into the dominator children of BB. */
3069 for (son = first_dom_son (CDI_DOMINATORS, bb);
3070 son;
3071 son = next_dom_son (CDI_DOMINATORS, son))
3072 eliminate_degenerate_phis_1 (son, interesting_names);
3073 }
3074
3075
3076 /* A very simple pass to eliminate degenerate PHI nodes from the
3077 IL. This is meant to be fast enough to be able to be run several
3078 times in the optimization pipeline.
3079
3080 Certain optimizations, particularly those which duplicate blocks
3081 or remove edges from the CFG, can create or expose PHIs which are
3082 trivial copies or constant initializations.
3083
3084 While we could pick up these optimizations in DOM or with the
3085 combination of copy-prop and CCP, those solutions are far too
3086 heavy-weight for our needs.
3087
3088 This implementation has two phases so that we can efficiently
3089 eliminate the first order degenerate PHIs and second order
3090 degenerate PHIs.
3091
3092 The first phase performs a dominator walk to identify and eliminate
3093 the vast majority of the degenerate PHIs. When a degenerate PHI
3094 is identified and eliminated any affected statements or PHIs
3095 are put on a worklist.
3096
3097 The second phase eliminates degenerate PHIs and trivial copies
3098 or constant initializations using the worklist. This is how we
3099 pick up the secondary optimization opportunities with minimal
3100 cost. */
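/* A hypothetical example (SSA names illustrative): after jump
   threading,
     x_2 = PHI <x_1(3), x_1(4)>
   is a first order degenerate PHI; replacing x_2 with x_1 may turn
     y_5 = PHI <x_2(5), x_1(6)>
   into y_5 = PHI <x_1(5), x_1(6)>, a second order degenerate PHI
   which the worklist phase then picks up.  */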
3101
3102 namespace {
3103
3104 const pass_data pass_data_phi_only_cprop =
3105 {
3106 GIMPLE_PASS, /* type */
3107 "phicprop", /* name */
3108 OPTGROUP_NONE, /* optinfo_flags */
3109 TV_TREE_PHI_CPROP, /* tv_id */
3110 ( PROP_cfg | PROP_ssa ), /* properties_required */
3111 0, /* properties_provided */
3112 0, /* properties_destroyed */
3113 0, /* todo_flags_start */
3114 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
3115 };
3116
3117 class pass_phi_only_cprop : public gimple_opt_pass
3118 {
3119 public:
3120 pass_phi_only_cprop (gcc::context *ctxt)
3121 : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
3122 {}
3123
3124 /* opt_pass methods: */
3125 opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
3126 virtual bool gate (function *) { return flag_tree_dom != 0; }
3127 virtual unsigned int execute (function *);
3128
3129 }; // class pass_phi_only_cprop
3130
3131 unsigned int
3132 pass_phi_only_cprop::execute (function *fun)
3133 {
3134 bitmap interesting_names;
3135 bitmap interesting_names1;
3136
3137 /* Bitmap of blocks which need EH information updated. We cannot
3138 update it on the fly, as doing so invalidates the dominator tree. */
3139 need_eh_cleanup = BITMAP_ALLOC (NULL);
3140
3141 /* INTERESTING_NAMES is effectively our worklist, indexed by
3142 SSA_NAME_VERSION.
3143
3144 A set bit indicates that the statement or PHI node which
3145 defines the SSA_NAME should be (re)examined to determine if
3146 it has become a degenerate PHI or trivial const/copy propagation
3147 opportunity.
3148
3149 Experiments have shown we generally get better compile-time
3150 behavior with bitmaps rather than with sbitmaps. */
3151 interesting_names = BITMAP_ALLOC (NULL);
3152 interesting_names1 = BITMAP_ALLOC (NULL);
3153
3154 calculate_dominance_info (CDI_DOMINATORS);
3155 cfg_altered = false;
3156
3157 /* First phase. Eliminate degenerate PHIs via a dominator
3158 walk of the CFG.
3159
3160 Experiments have indicated that we generally get better
3161 compile-time behavior by visiting blocks in the first
3162 phase in dominator order. Presumably this is because walking
3163 in dominator order leaves fewer PHIs for later examination
3164 by the worklist phase. */
3165 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (fun),
3166 interesting_names);
3167
3168 /* Second phase. Eliminate second order degenerate PHIs as well
3169 as trivial copies or constant initializations identified by
3170 the first phase or this phase. Basically we keep iterating
3171 until our set of INTERESTING_NAMEs is empty. */
3172 while (!bitmap_empty_p (interesting_names))
3173 {
3174 unsigned int i;
3175 bitmap_iterator bi;
3176
3177 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
3178 changed during the loop. Copy it to another bitmap and
3179 use that. */
3180 bitmap_copy (interesting_names1, interesting_names);
3181
3182 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
3183 {
3184 tree name = ssa_name (i);
3185
3186 /* Ignore SSA_NAMEs that have been released because
3187 their defining statement was deleted (unreachable). */
3188 if (name)
3189 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
3190 interesting_names);
3191 }
3192 }
3193
3194 if (cfg_altered)
3195 {
3196 free_dominance_info (CDI_DOMINATORS);
3197 /* If we changed the CFG schedule loops for fixup by cfgcleanup. */
3198 loops_state_set (LOOPS_NEED_FIXUP);
3199 }
3200
3201 /* Propagation of const and copies may make some EH edges dead. Purge
3202 such edges from the CFG as needed. */
3203 if (!bitmap_empty_p (need_eh_cleanup))
3204 {
3205 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
3206 BITMAP_FREE (need_eh_cleanup);
3207 }
3208
3209 BITMAP_FREE (interesting_names);
3210 BITMAP_FREE (interesting_names1);
3211 return 0;
3212 }
3213
3214 } // anon namespace
3215
3216 gimple_opt_pass *
3217 make_pass_phi_only_cprop (gcc::context *ctxt)
3218 {
3219 return new pass_phi_only_cprop (ctxt);
3220 }