gcc/tree-ssa-dom.c
1 /* SSA Dominator optimizations for trees
2 Copyright (C) 2001-2015 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "alias.h"
26 #include "symtab.h"
27 #include "tree.h"
28 #include "fold-const.h"
29 #include "stor-layout.h"
30 #include "flags.h"
31 #include "tm_p.h"
32 #include "predict.h"
33 #include "hard-reg-set.h"
34 #include "function.h"
35 #include "dominance.h"
36 #include "cfg.h"
37 #include "cfganal.h"
38 #include "basic-block.h"
39 #include "cfgloop.h"
40 #include "gimple-pretty-print.h"
41 #include "tree-ssa-alias.h"
42 #include "internal-fn.h"
43 #include "gimple-fold.h"
44 #include "tree-eh.h"
45 #include "gimple-expr.h"
46 #include "gimple.h"
47 #include "gimple-iterator.h"
48 #include "gimple-ssa.h"
49 #include "tree-cfg.h"
50 #include "tree-phinodes.h"
51 #include "ssa-iterators.h"
52 #include "stringpool.h"
53 #include "tree-ssanames.h"
54 #include "tree-into-ssa.h"
55 #include "domwalk.h"
56 #include "tree-pass.h"
57 #include "tree-ssa-propagate.h"
58 #include "tree-ssa-threadupdate.h"
59 #include "langhooks.h"
60 #include "params.h"
61 #include "tree-ssa-scopedtables.h"
62 #include "tree-ssa-threadedge.h"
63 #include "tree-ssa-dom.h"
64 #include "gimplify.h"
65 #include "tree-cfgcleanup.h"
66
67 /* This file implements optimizations on the dominator tree. */
68
69 /* Representation of a "naked" right-hand-side expression, to be used
70 in recording available expressions in the expression hash table. */
71
72 enum expr_kind
73 {
74 EXPR_SINGLE,
75 EXPR_UNARY,
76 EXPR_BINARY,
77 EXPR_TERNARY,
78 EXPR_CALL,
79 EXPR_PHI
80 };
81
82 struct hashable_expr
83 {
84 tree type;
85 enum expr_kind kind;
86 union {
87 struct { tree rhs; } single;
88 struct { enum tree_code op; tree opnd; } unary;
89 struct { enum tree_code op; tree opnd0, opnd1; } binary;
90 struct { enum tree_code op; tree opnd0, opnd1, opnd2; } ternary;
91 struct { gcall *fn_from; bool pure; size_t nargs; tree *args; } call;
92 struct { size_t nargs; tree *args; } phi;
93 } ops;
94 };
95
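/* As an illustrative sketch (the SSA names below are hypothetical,
   not taken from any particular dump): an assignment such as

     x_1 = a_2 + b_3;

   is recorded as an EXPR_BINARY hashable_expr with
   ops.binary.op == PLUS_EXPR, ops.binary.opnd0 == a_2 and
   ops.binary.opnd1 == b_3, while a call such as

     x_1 = foo (a_2);

   becomes an EXPR_CALL with ops.call.nargs == 1 and
   ops.call.args[0] == a_2.  */
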
96 /* Structure for recording known values of a conditional expression
97 at the exits from its block. */
98
99 typedef struct cond_equivalence_s
100 {
101 struct hashable_expr cond;
102 tree value;
103 } cond_equivalence;
104
105
106 /* Structure for recording edge equivalences as well as any pending
107 edge redirections during the dominator optimizer.
108
109 Computing and storing the edge equivalences instead of creating
110 them on-demand can save significant amounts of time, particularly
111 for pathological cases involving switch statements.
112
113 These structures live for a single iteration of the dominator
114 optimizer in the edge's AUX field. At the end of an iteration we
115 free each of these structures and update the AUX field to point
116 to any requested redirection target (the code for updating the
117 CFG and SSA graph for edge redirection expects redirection edge
118 targets to be in the AUX field for each edge). */
119
120 struct edge_info
121 {
122 /* If this edge creates a simple equivalence, the LHS and RHS of
123 the equivalence will be stored here. */
124 tree lhs;
125 tree rhs;
126
127 /* Traversing an edge may also indicate one or more particular conditions
128 are true or false. */
129 vec<cond_equivalence> cond_equivalences;
130 };
131
132 /* Stack of available expressions in AVAIL_EXPRs. Each block pushes any
133 expressions it enters into the hash table along with a marker entry
134 (null). When we finish processing the block, we pop off entries and
135 remove the expressions from the global hash table until we hit the
136 marker. */
137 typedef struct expr_hash_elt * expr_hash_elt_t;
138
139 static vec<std::pair<expr_hash_elt_t, expr_hash_elt_t> > avail_exprs_stack;
140
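/* A sketch of the unwinding mechanism (hypothetical names): when a
   block makes an expression such as x_1 = a_2 + b_3 available we push
   the pair (ELT, NULL); if the insertion displaced an existing
   element OLD we push (ELT, OLD) instead.  When the block has been
   processed we pop pairs until the (NULL, NULL) marker is reached,
   removing each inserted element from AVAIL_EXPRS or restoring the
   element it displaced.  */
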
141 /* Structure for entries in the expression hash table. */
142
143 struct expr_hash_elt
144 {
145 /* The value (lhs) of this expression. */
146 tree lhs;
147
148 /* The expression (rhs) we want to record. */
149 struct hashable_expr expr;
150
151 /* The virtual operand associated with the nearest dominating stmt
152 loading from or storing to expr. */
153 tree vop;
154
155 /* The hash value for RHS. */
156 hashval_t hash;
157
158 /* A unique stamp, typically the address of the hash
159 element itself, used in removing entries from the table. */
160 struct expr_hash_elt *stamp;
161 };
162
163 /* Hashtable helpers. */
164
165 static bool hashable_expr_equal_p (const struct hashable_expr *,
166 const struct hashable_expr *);
167 static void free_expr_hash_elt (void *);
168
169 struct expr_elt_hasher
170 {
171 typedef expr_hash_elt *value_type;
172 typedef expr_hash_elt *compare_type;
173 static inline hashval_t hash (const value_type &);
174 static inline bool equal (const value_type &, const compare_type &);
175 static inline void remove (value_type &);
176 };
177
178 inline hashval_t
179 expr_elt_hasher::hash (const value_type &p)
180 {
181 return p->hash;
182 }
183
184 inline bool
185 expr_elt_hasher::equal (const value_type &p1, const compare_type &p2)
186 {
187 const struct hashable_expr *expr1 = &p1->expr;
188 const struct expr_hash_elt *stamp1 = p1->stamp;
189 const struct hashable_expr *expr2 = &p2->expr;
190 const struct expr_hash_elt *stamp2 = p2->stamp;
191
192 /* This case should apply only when removing entries from the table. */
193 if (stamp1 == stamp2)
194 return true;
195
196 if (p1->hash != p2->hash)
197 return false;
198
199 /* In case of a collision, both RHS have to be identical and have the
200 same VUSE operands. */
201 if (hashable_expr_equal_p (expr1, expr2)
202 && types_compatible_p (expr1->type, expr2->type))
203 return true;
204
205 return false;
206 }
207
208 /* Delete an expr_hash_elt and reclaim its storage. */
209
210 inline void
211 expr_elt_hasher::remove (value_type &element)
212 {
213 free_expr_hash_elt (element);
214 }
215
216 /* Hash table with expressions made available during the renaming process.
217 When an assignment of the form X_i = EXPR is found, the statement is
218 stored in this table. If the same expression EXPR is later found on the
219 RHS of another statement, it is replaced with X_i (thus performing
220 global redundancy elimination). Similarly as we pass through conditionals
221 we record the conditional itself as having either a true or false value
222 in this table. */
223 static hash_table<expr_elt_hasher> *avail_exprs;
224
225 /* Unwindable const/copy equivalences. */
226 static const_and_copies *const_and_copies;
227
228 /* Track whether or not we have changed the control flow graph. */
229 static bool cfg_altered;
230
231 /* Bitmap of blocks that have had EH statements cleaned. We should
232 remove their dead edges eventually. */
233 static bitmap need_eh_cleanup;
234 static vec<gimple> need_noreturn_fixup;
235
236 /* Statistics for dominator optimizations. */
237 struct opt_stats_d
238 {
239 long num_stmts;
240 long num_exprs_considered;
241 long num_re;
242 long num_const_prop;
243 long num_copy_prop;
244 };
245
246 static struct opt_stats_d opt_stats;
247
248 /* Local functions. */
249 static void optimize_stmt (basic_block, gimple_stmt_iterator);
250 static tree lookup_avail_expr (gimple, bool);
251 static hashval_t avail_expr_hash (const void *);
252 static void htab_statistics (FILE *,
253 const hash_table<expr_elt_hasher> &);
254 static void record_cond (cond_equivalence *);
255 static void record_equality (tree, tree);
256 static void record_equivalences_from_phis (basic_block);
257 static void record_equivalences_from_incoming_edge (basic_block);
258 static void eliminate_redundant_computations (gimple_stmt_iterator *);
259 static void record_equivalences_from_stmt (gimple, int);
260 static void remove_local_expressions_from_table (void);
261 static edge single_incoming_edge_ignoring_loop_edges (basic_block);
262
263
264 /* Given a statement STMT, initialize the hash table element pointed to
265 by ELEMENT. */
266
267 static void
268 initialize_hash_element (gimple stmt, tree lhs,
269 struct expr_hash_elt *element)
270 {
271 enum gimple_code code = gimple_code (stmt);
272 struct hashable_expr *expr = &element->expr;
273
274 if (code == GIMPLE_ASSIGN)
275 {
276 enum tree_code subcode = gimple_assign_rhs_code (stmt);
277
278 switch (get_gimple_rhs_class (subcode))
279 {
280 case GIMPLE_SINGLE_RHS:
281 expr->kind = EXPR_SINGLE;
282 expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
283 expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
284 break;
285 case GIMPLE_UNARY_RHS:
286 expr->kind = EXPR_UNARY;
287 expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
288 if (CONVERT_EXPR_CODE_P (subcode))
289 subcode = NOP_EXPR;
290 expr->ops.unary.op = subcode;
291 expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
292 break;
293 case GIMPLE_BINARY_RHS:
294 expr->kind = EXPR_BINARY;
295 expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
296 expr->ops.binary.op = subcode;
297 expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
298 expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
299 break;
300 case GIMPLE_TERNARY_RHS:
301 expr->kind = EXPR_TERNARY;
302 expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
303 expr->ops.ternary.op = subcode;
304 expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
305 expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
306 expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
307 break;
308 default:
309 gcc_unreachable ();
310 }
311 }
312 else if (code == GIMPLE_COND)
313 {
314 expr->type = boolean_type_node;
315 expr->kind = EXPR_BINARY;
316 expr->ops.binary.op = gimple_cond_code (stmt);
317 expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
318 expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
319 }
320 else if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
321 {
322 size_t nargs = gimple_call_num_args (call_stmt);
323 size_t i;
324
325 gcc_assert (gimple_call_lhs (call_stmt));
326
327 expr->type = TREE_TYPE (gimple_call_lhs (call_stmt));
328 expr->kind = EXPR_CALL;
329 expr->ops.call.fn_from = call_stmt;
330
331 if (gimple_call_flags (call_stmt) & (ECF_CONST | ECF_PURE))
332 expr->ops.call.pure = true;
333 else
334 expr->ops.call.pure = false;
335
336 expr->ops.call.nargs = nargs;
337 expr->ops.call.args = XCNEWVEC (tree, nargs);
338 for (i = 0; i < nargs; i++)
339 expr->ops.call.args[i] = gimple_call_arg (call_stmt, i);
340 }
341 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
342 {
343 expr->type = TREE_TYPE (gimple_switch_index (swtch_stmt));
344 expr->kind = EXPR_SINGLE;
345 expr->ops.single.rhs = gimple_switch_index (swtch_stmt);
346 }
347 else if (code == GIMPLE_GOTO)
348 {
349 expr->type = TREE_TYPE (gimple_goto_dest (stmt));
350 expr->kind = EXPR_SINGLE;
351 expr->ops.single.rhs = gimple_goto_dest (stmt);
352 }
353 else if (code == GIMPLE_PHI)
354 {
355 size_t nargs = gimple_phi_num_args (stmt);
356 size_t i;
357
358 expr->type = TREE_TYPE (gimple_phi_result (stmt));
359 expr->kind = EXPR_PHI;
360 expr->ops.phi.nargs = nargs;
361 expr->ops.phi.args = XCNEWVEC (tree, nargs);
362
363 for (i = 0; i < nargs; i++)
364 expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
365 }
366 else
367 gcc_unreachable ();
368
369 element->lhs = lhs;
370 element->vop = gimple_vuse (stmt);
371 element->hash = avail_expr_hash (element);
372 element->stamp = element;
373 }
374
375 /* Given a conditional expression COND as a tree, initialize
376 a hashable_expr expression EXPR. The conditional must be a
377 comparison or logical negation. A constant or a variable is
378 not permitted. */
379
380 static void
381 initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
382 {
383 expr->type = boolean_type_node;
384
385 if (COMPARISON_CLASS_P (cond))
386 {
387 expr->kind = EXPR_BINARY;
388 expr->ops.binary.op = TREE_CODE (cond);
389 expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
390 expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
391 }
392 else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
393 {
394 expr->kind = EXPR_UNARY;
395 expr->ops.unary.op = TRUTH_NOT_EXPR;
396 expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
397 }
398 else
399 gcc_unreachable ();
400 }
401
402 /* Given a hashable_expr expression EXPR and an LHS,
403 initialize the hash table element pointed to by ELEMENT. */
404
405 static void
406 initialize_hash_element_from_expr (struct hashable_expr *expr,
407 tree lhs,
408 struct expr_hash_elt *element)
409 {
410 element->expr = *expr;
411 element->lhs = lhs;
412 element->vop = NULL_TREE;
413 element->hash = avail_expr_hash (element);
414 element->stamp = element;
415 }
416
417 /* Compare two hashable_expr structures for equivalence.
418 They are considered equivalent when the expressions
419 they denote must necessarily be equal. The logic is intended
420 to follow that of operand_equal_p in fold-const.c */
421
422 static bool
423 hashable_expr_equal_p (const struct hashable_expr *expr0,
424 const struct hashable_expr *expr1)
425 {
426 tree type0 = expr0->type;
427 tree type1 = expr1->type;
428
429 /* If either type is NULL, there is nothing to check. */
430 if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
431 return false;
432
433 /* If the types don't have the same signedness, precision, and mode,
434 then we can't consider them equal. */
435 if (type0 != type1
436 && (TREE_CODE (type0) == ERROR_MARK
437 || TREE_CODE (type1) == ERROR_MARK
438 || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
439 || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
440 || TYPE_MODE (type0) != TYPE_MODE (type1)))
441 return false;
442
443 if (expr0->kind != expr1->kind)
444 return false;
445
446 switch (expr0->kind)
447 {
448 case EXPR_SINGLE:
449 return operand_equal_p (expr0->ops.single.rhs,
450 expr1->ops.single.rhs, 0);
451
452 case EXPR_UNARY:
453 if (expr0->ops.unary.op != expr1->ops.unary.op)
454 return false;
455
456 if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
457 || expr0->ops.unary.op == NON_LVALUE_EXPR)
458 && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
459 return false;
460
461 return operand_equal_p (expr0->ops.unary.opnd,
462 expr1->ops.unary.opnd, 0);
463
464 case EXPR_BINARY:
465 if (expr0->ops.binary.op != expr1->ops.binary.op)
466 return false;
467
468 if (operand_equal_p (expr0->ops.binary.opnd0,
469 expr1->ops.binary.opnd0, 0)
470 && operand_equal_p (expr0->ops.binary.opnd1,
471 expr1->ops.binary.opnd1, 0))
472 return true;
473
474 /* For commutative ops, allow the other order. */
475 return (commutative_tree_code (expr0->ops.binary.op)
476 && operand_equal_p (expr0->ops.binary.opnd0,
477 expr1->ops.binary.opnd1, 0)
478 && operand_equal_p (expr0->ops.binary.opnd1,
479 expr1->ops.binary.opnd0, 0));
480
481 case EXPR_TERNARY:
482 if (expr0->ops.ternary.op != expr1->ops.ternary.op
483 || !operand_equal_p (expr0->ops.ternary.opnd2,
484 expr1->ops.ternary.opnd2, 0))
485 return false;
486
487 if (operand_equal_p (expr0->ops.ternary.opnd0,
488 expr1->ops.ternary.opnd0, 0)
489 && operand_equal_p (expr0->ops.ternary.opnd1,
490 expr1->ops.ternary.opnd1, 0))
491 return true;
492
493 /* For commutative ops, allow the other order. */
494 return (commutative_ternary_tree_code (expr0->ops.ternary.op)
495 && operand_equal_p (expr0->ops.ternary.opnd0,
496 expr1->ops.ternary.opnd1, 0)
497 && operand_equal_p (expr0->ops.ternary.opnd1,
498 expr1->ops.ternary.opnd0, 0));
499
500 case EXPR_CALL:
501 {
502 size_t i;
503
504 /* If the calls are to different functions, then they
505 clearly cannot be equal. */
506 if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
507 expr1->ops.call.fn_from))
508 return false;
509
510 if (! expr0->ops.call.pure)
511 return false;
512
513 if (expr0->ops.call.nargs != expr1->ops.call.nargs)
514 return false;
515
516 for (i = 0; i < expr0->ops.call.nargs; i++)
517 if (! operand_equal_p (expr0->ops.call.args[i],
518 expr1->ops.call.args[i], 0))
519 return false;
520
521 if (stmt_could_throw_p (expr0->ops.call.fn_from))
522 {
523 int lp0 = lookup_stmt_eh_lp (expr0->ops.call.fn_from);
524 int lp1 = lookup_stmt_eh_lp (expr1->ops.call.fn_from);
525 if ((lp0 > 0 || lp1 > 0) && lp0 != lp1)
526 return false;
527 }
528
529 return true;
530 }
531
532 case EXPR_PHI:
533 {
534 size_t i;
535
536 if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
537 return false;
538
539 for (i = 0; i < expr0->ops.phi.nargs; i++)
540 if (! operand_equal_p (expr0->ops.phi.args[i],
541 expr1->ops.phi.args[i], 0))
542 return false;
543
544 return true;
545 }
546
547 default:
548 gcc_unreachable ();
549 }
550 }
551
552 /* Generate a hash value for a pair of expressions. This can be used
553 iteratively by passing a previous result in HSTATE.
554
555 The same hash value is always returned for a given pair of expressions,
556 regardless of the order in which they are presented. This is useful in
557 hashing the operands of commutative functions. */
558
559 namespace inchash
560 {
561
562 static void
563 add_expr_commutative (const_tree t1, const_tree t2, hash &hstate)
564 {
565 hash one, two;
566
567 inchash::add_expr (t1, one);
568 inchash::add_expr (t2, two);
569 hstate.add_commutative (one, two);
570 }
571
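/* Informally, this means the expressions a_1 + b_2 and b_2 + a_1
   (hypothetical operands) hash identically, mirroring
   hashable_expr_equal_p, which accepts either operand order for
   commutative codes.  */
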
572 /* Compute a hash value for a hashable_expr value EXPR and a
573 hash accumulator HSTATE. If two hashable_expr values compare
574 equal with hashable_expr_equal_p, they must hash to the same
575 value, given an identical HSTATE.
576 The logic is intended to follow inchash::add_expr in tree.c. */
577
578 static void
579 add_hashable_expr (const struct hashable_expr *expr, hash &hstate)
580 {
581 switch (expr->kind)
582 {
583 case EXPR_SINGLE:
584 inchash::add_expr (expr->ops.single.rhs, hstate);
585 break;
586
587 case EXPR_UNARY:
588 hstate.add_object (expr->ops.unary.op);
589
590 /* Make sure to include signedness in the hash computation.
591 Don't hash the type, that can lead to having nodes which
592 compare equal according to operand_equal_p, but which
593 have different hash codes. */
594 if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
595 || expr->ops.unary.op == NON_LVALUE_EXPR)
596 hstate.add_int (TYPE_UNSIGNED (expr->type));
597
598 inchash::add_expr (expr->ops.unary.opnd, hstate);
599 break;
600
601 case EXPR_BINARY:
602 hstate.add_object (expr->ops.binary.op);
603 if (commutative_tree_code (expr->ops.binary.op))
604 inchash::add_expr_commutative (expr->ops.binary.opnd0,
605 expr->ops.binary.opnd1, hstate);
606 else
607 {
608 inchash::add_expr (expr->ops.binary.opnd0, hstate);
609 inchash::add_expr (expr->ops.binary.opnd1, hstate);
610 }
611 break;
612
613 case EXPR_TERNARY:
614 hstate.add_object (expr->ops.ternary.op);
615 if (commutative_ternary_tree_code (expr->ops.ternary.op))
616 inchash::add_expr_commutative (expr->ops.ternary.opnd0,
617 expr->ops.ternary.opnd1, hstate);
618 else
619 {
620 inchash::add_expr (expr->ops.ternary.opnd0, hstate);
621 inchash::add_expr (expr->ops.ternary.opnd1, hstate);
622 }
623 inchash::add_expr (expr->ops.ternary.opnd2, hstate);
624 break;
625
626 case EXPR_CALL:
627 {
628 size_t i;
629 enum tree_code code = CALL_EXPR;
630 gcall *fn_from;
631
632 hstate.add_object (code);
633 fn_from = expr->ops.call.fn_from;
634 if (gimple_call_internal_p (fn_from))
635 hstate.merge_hash ((hashval_t) gimple_call_internal_fn (fn_from));
636 else
637 inchash::add_expr (gimple_call_fn (fn_from), hstate);
638 for (i = 0; i < expr->ops.call.nargs; i++)
639 inchash::add_expr (expr->ops.call.args[i], hstate);
640 }
641 break;
642
643 case EXPR_PHI:
644 {
645 size_t i;
646
647 for (i = 0; i < expr->ops.phi.nargs; i++)
648 inchash::add_expr (expr->ops.phi.args[i], hstate);
649 }
650 break;
651
652 default:
653 gcc_unreachable ();
654 }
655 }
656
657 }
658
659 /* Print a diagnostic dump of an expression hash table entry. */
660
661 static void
662 print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
663 {
664 fprintf (stream, "STMT ");
665
666 if (element->lhs)
667 {
668 print_generic_expr (stream, element->lhs, 0);
669 fprintf (stream, " = ");
670 }
671
672 switch (element->expr.kind)
673 {
674 case EXPR_SINGLE:
675 print_generic_expr (stream, element->expr.ops.single.rhs, 0);
676 break;
677
678 case EXPR_UNARY:
679 fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
680 print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
681 break;
682
683 case EXPR_BINARY:
684 print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
685 fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
686 print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
687 break;
688
689 case EXPR_TERNARY:
690 fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
691 print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
692 fputs (", ", stream);
693 print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
694 fputs (", ", stream);
695 print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
696 fputs (">", stream);
697 break;
698
699 case EXPR_CALL:
700 {
701 size_t i;
702 size_t nargs = element->expr.ops.call.nargs;
703 gcall *fn_from;
704
705 fn_from = element->expr.ops.call.fn_from;
706 if (gimple_call_internal_p (fn_from))
707 fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
708 stream);
709 else
710 print_generic_expr (stream, gimple_call_fn (fn_from), 0);
711 fprintf (stream, " (");
712 for (i = 0; i < nargs; i++)
713 {
714 print_generic_expr (stream, element->expr.ops.call.args[i], 0);
715 if (i + 1 < nargs)
716 fprintf (stream, ", ");
717 }
718 fprintf (stream, ")");
719 }
720 break;
721
722 case EXPR_PHI:
723 {
724 size_t i;
725 size_t nargs = element->expr.ops.phi.nargs;
726
727 fprintf (stream, "PHI <");
728 for (i = 0; i < nargs; i++)
729 {
730 print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
731 if (i + 1 < nargs)
732 fprintf (stream, ", ");
733 }
734 fprintf (stream, ">");
735 }
736 break;
737 }
738
739 if (element->vop)
740 {
741 fprintf (stream, " with ");
742 print_generic_expr (stream, element->vop, 0);
743 }
744
745 fprintf (stream, "\n");
746 }
747
748 /* Delete variable sized pieces of the expr_hash_elt ELEMENT. */
749
750 static void
751 free_expr_hash_elt_contents (struct expr_hash_elt *element)
752 {
753 if (element->expr.kind == EXPR_CALL)
754 free (element->expr.ops.call.args);
755 else if (element->expr.kind == EXPR_PHI)
756 free (element->expr.ops.phi.args);
757 }
758
759 /* Delete an expr_hash_elt and reclaim its storage. */
760
761 static void
762 free_expr_hash_elt (void *elt)
763 {
764 struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
765 free_expr_hash_elt_contents (element);
766 free (element);
767 }
768
769 /* Allocate an EDGE_INFO for edge E and attach it to E.
770 Return the new EDGE_INFO structure. */
771
772 static struct edge_info *
773 allocate_edge_info (edge e)
774 {
775 struct edge_info *edge_info;
776
777 edge_info = XCNEW (struct edge_info);
778
779 e->aux = edge_info;
780 return edge_info;
781 }
782
783 /* Free all EDGE_INFO structures associated with edges in the CFG.
784 If a particular edge can be threaded, copy the redirection
785 target from the EDGE_INFO structure into the edge's AUX field
786 as required by code to update the CFG and SSA graph for
787 jump threading. */
788
789 static void
790 free_all_edge_infos (void)
791 {
792 basic_block bb;
793 edge_iterator ei;
794 edge e;
795
796 FOR_EACH_BB_FN (bb, cfun)
797 {
798 FOR_EACH_EDGE (e, ei, bb->preds)
799 {
800 struct edge_info *edge_info = (struct edge_info *) e->aux;
801
802 if (edge_info)
803 {
804 edge_info->cond_equivalences.release ();
805 free (edge_info);
806 e->aux = NULL;
807 }
808 }
809 }
810 }
811
812 /* Build a cond_equivalence record indicating that the comparison
813 CODE holds between operands OP0 and OP1 and push it onto *P. */
814
815 static void
816 build_and_record_new_cond (enum tree_code code,
817 tree op0, tree op1,
818 vec<cond_equivalence> *p)
819 {
820 cond_equivalence c;
821 struct hashable_expr *cond = &c.cond;
822
823 gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);
824
825 cond->type = boolean_type_node;
826 cond->kind = EXPR_BINARY;
827 cond->ops.binary.op = code;
828 cond->ops.binary.opnd0 = op0;
829 cond->ops.binary.opnd1 = op1;
830
831 c.value = boolean_true_node;
832 p->safe_push (c);
833 }
834
835 /* Record that COND is true and INVERTED is false into the edge information
836 structure. Also record that any conditions dominated by COND are true
837 as well.
838
839 For example, if a < b is true, then a <= b must also be true. */
840
841 static void
842 record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
843 {
844 tree op0, op1;
845 cond_equivalence c;
846
847 if (!COMPARISON_CLASS_P (cond))
848 return;
849
850 op0 = TREE_OPERAND (cond, 0);
851 op1 = TREE_OPERAND (cond, 1);
852
853 switch (TREE_CODE (cond))
854 {
855 case LT_EXPR:
856 case GT_EXPR:
857 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
858 {
859 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
860 &edge_info->cond_equivalences);
861 build_and_record_new_cond (LTGT_EXPR, op0, op1,
862 &edge_info->cond_equivalences);
863 }
864
865 build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
866 ? LE_EXPR : GE_EXPR),
867 op0, op1, &edge_info->cond_equivalences);
868 build_and_record_new_cond (NE_EXPR, op0, op1,
869 &edge_info->cond_equivalences);
870 break;
871
872 case GE_EXPR:
873 case LE_EXPR:
874 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
875 {
876 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
877 &edge_info->cond_equivalences);
878 }
879 break;
880
881 case EQ_EXPR:
882 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
883 {
884 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
885 &edge_info->cond_equivalences);
886 }
887 build_and_record_new_cond (LE_EXPR, op0, op1,
888 &edge_info->cond_equivalences);
889 build_and_record_new_cond (GE_EXPR, op0, op1,
890 &edge_info->cond_equivalences);
891 break;
892
893 case UNORDERED_EXPR:
894 build_and_record_new_cond (NE_EXPR, op0, op1,
895 &edge_info->cond_equivalences);
896 build_and_record_new_cond (UNLE_EXPR, op0, op1,
897 &edge_info->cond_equivalences);
898 build_and_record_new_cond (UNGE_EXPR, op0, op1,
899 &edge_info->cond_equivalences);
900 build_and_record_new_cond (UNEQ_EXPR, op0, op1,
901 &edge_info->cond_equivalences);
902 build_and_record_new_cond (UNLT_EXPR, op0, op1,
903 &edge_info->cond_equivalences);
904 build_and_record_new_cond (UNGT_EXPR, op0, op1,
905 &edge_info->cond_equivalences);
906 break;
907
908 case UNLT_EXPR:
909 case UNGT_EXPR:
910 build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
911 ? UNLE_EXPR : UNGE_EXPR),
912 op0, op1, &edge_info->cond_equivalences);
913 build_and_record_new_cond (NE_EXPR, op0, op1,
914 &edge_info->cond_equivalences);
915 break;
916
917 case UNEQ_EXPR:
918 build_and_record_new_cond (UNLE_EXPR, op0, op1,
919 &edge_info->cond_equivalences);
920 build_and_record_new_cond (UNGE_EXPR, op0, op1,
921 &edge_info->cond_equivalences);
922 break;
923
924 case LTGT_EXPR:
925 build_and_record_new_cond (NE_EXPR, op0, op1,
926 &edge_info->cond_equivalences);
927 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
928 &edge_info->cond_equivalences);
929 break;
930
931 default:
932 break;
933 }
934
935 /* Now store the original true and false conditions into the first
936 two slots. */
937 initialize_expr_from_cond (cond, &c.cond);
938 c.value = boolean_true_node;
939 edge_info->cond_equivalences.safe_push (c);
940
941 /* It is possible for INVERTED to be the negation of a comparison,
942 and not a valid RHS or GIMPLE_COND condition. This happens because
943 invert_truthvalue may return such an expression when asked to invert
944 a floating-point comparison. These comparisons are not assumed to
945 obey the trichotomy law. */
946 initialize_expr_from_cond (inverted, &c.cond);
947 c.value = boolean_false_node;
948 edge_info->cond_equivalences.safe_push (c);
949 }
950
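/* A sketch of what the above records (hypothetical operands): for an
   integer condition a_1 < b_2 on the edge where it is true, we push
   a_1 <= b_2 == true and a_1 != b_2 == true as dominated conditions,
   followed by a_1 < b_2 == true and a_1 >= b_2 == false for the
   original condition and its inversion.  */
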
951 /* We have finished optimizing BB, record any information implied by
952 taking a specific outgoing edge from BB. */
953
954 static void
955 record_edge_info (basic_block bb)
956 {
957 gimple_stmt_iterator gsi = gsi_last_bb (bb);
958 struct edge_info *edge_info;
959
960 if (! gsi_end_p (gsi))
961 {
962 gimple stmt = gsi_stmt (gsi);
963 location_t loc = gimple_location (stmt);
964
965 if (gimple_code (stmt) == GIMPLE_SWITCH)
966 {
967 gswitch *switch_stmt = as_a <gswitch *> (stmt);
968 tree index = gimple_switch_index (switch_stmt);
969
970 if (TREE_CODE (index) == SSA_NAME)
971 {
972 int i;
973 int n_labels = gimple_switch_num_labels (switch_stmt);
974 tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
975 edge e;
976 edge_iterator ei;
977
978 for (i = 0; i < n_labels; i++)
979 {
980 tree label = gimple_switch_label (switch_stmt, i);
981 basic_block target_bb = label_to_block (CASE_LABEL (label));
982 if (CASE_HIGH (label)
983 || !CASE_LOW (label)
984 || info[target_bb->index])
985 info[target_bb->index] = error_mark_node;
986 else
987 info[target_bb->index] = label;
988 }
989
990 FOR_EACH_EDGE (e, ei, bb->succs)
991 {
992 basic_block target_bb = e->dest;
993 tree label = info[target_bb->index];
994
995 if (label != NULL && label != error_mark_node)
996 {
997 tree x = fold_convert_loc (loc, TREE_TYPE (index),
998 CASE_LOW (label));
999 edge_info = allocate_edge_info (e);
1000 edge_info->lhs = index;
1001 edge_info->rhs = x;
1002 }
1003 }
1004 free (info);
1005 }
1006 }
1007
1008 /* A COND_EXPR may create equivalences too. */
1009 if (gimple_code (stmt) == GIMPLE_COND)
1010 {
1011 edge true_edge;
1012 edge false_edge;
1013
1014 tree op0 = gimple_cond_lhs (stmt);
1015 tree op1 = gimple_cond_rhs (stmt);
1016 enum tree_code code = gimple_cond_code (stmt);
1017
1018 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1019
1020 /* Special case comparing booleans against a constant as we
1021 know the value of OP0 on both arms of the branch; i.e., we
1022 can record an equivalence for OP0 rather than COND. */
1023 if ((code == EQ_EXPR || code == NE_EXPR)
1024 && TREE_CODE (op0) == SSA_NAME
1025 && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
1026 && is_gimple_min_invariant (op1))
1027 {
1028 if (code == EQ_EXPR)
1029 {
1030 edge_info = allocate_edge_info (true_edge);
1031 edge_info->lhs = op0;
1032 edge_info->rhs = (integer_zerop (op1)
1033 ? boolean_false_node
1034 : boolean_true_node);
1035
1036 edge_info = allocate_edge_info (false_edge);
1037 edge_info->lhs = op0;
1038 edge_info->rhs = (integer_zerop (op1)
1039 ? boolean_true_node
1040 : boolean_false_node);
1041 }
1042 else
1043 {
1044 edge_info = allocate_edge_info (true_edge);
1045 edge_info->lhs = op0;
1046 edge_info->rhs = (integer_zerop (op1)
1047 ? boolean_true_node
1048 : boolean_false_node);
1049
1050 edge_info = allocate_edge_info (false_edge);
1051 edge_info->lhs = op0;
1052 edge_info->rhs = (integer_zerop (op1)
1053 ? boolean_false_node
1054 : boolean_true_node);
1055 }
1056 }
1057 else if (is_gimple_min_invariant (op0)
1058 && (TREE_CODE (op1) == SSA_NAME
1059 || is_gimple_min_invariant (op1)))
1060 {
1061 tree cond = build2 (code, boolean_type_node, op0, op1);
1062 tree inverted = invert_truthvalue_loc (loc, cond);
1063 bool can_infer_simple_equiv
1064 = !(HONOR_SIGNED_ZEROS (op0)
1065 && real_zerop (op0));
1066 struct edge_info *edge_info;
1067
1068 edge_info = allocate_edge_info (true_edge);
1069 record_conditions (edge_info, cond, inverted);
1070
1071 if (can_infer_simple_equiv && code == EQ_EXPR)
1072 {
1073 edge_info->lhs = op1;
1074 edge_info->rhs = op0;
1075 }
1076
1077 edge_info = allocate_edge_info (false_edge);
1078 record_conditions (edge_info, inverted, cond);
1079
1080 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
1081 {
1082 edge_info->lhs = op1;
1083 edge_info->rhs = op0;
1084 }
1085 }
1086
1087 else if (TREE_CODE (op0) == SSA_NAME
1088 && (TREE_CODE (op1) == SSA_NAME
1089 || is_gimple_min_invariant (op1)))
1090 {
1091 tree cond = build2 (code, boolean_type_node, op0, op1);
1092 tree inverted = invert_truthvalue_loc (loc, cond);
1093 bool can_infer_simple_equiv
1094 = !(HONOR_SIGNED_ZEROS (op1)
1095 && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
1096 struct edge_info *edge_info;
1097
1098 edge_info = allocate_edge_info (true_edge);
1099 record_conditions (edge_info, cond, inverted);
1100
1101 if (can_infer_simple_equiv && code == EQ_EXPR)
1102 {
1103 edge_info->lhs = op0;
1104 edge_info->rhs = op1;
1105 }
1106
1107 edge_info = allocate_edge_info (false_edge);
1108 record_conditions (edge_info, inverted, cond);
1109
1110 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
1111 {
1112 edge_info->lhs = op0;
1113 edge_info->rhs = op1;
1114 }
1115 }
1116 }
1117
1118 /* ??? TRUTH_NOT_EXPR can create an equivalence too. */
1119 }
1120 }
1121
1122
1123 class dom_opt_dom_walker : public dom_walker
1124 {
1125 public:
1126 dom_opt_dom_walker (cdi_direction direction)
1127 : dom_walker (direction), m_dummy_cond (NULL) {}
1128
1129 virtual void before_dom_children (basic_block);
1130 virtual void after_dom_children (basic_block);
1131
1132 private:
1133 void thread_across_edge (edge);
1134
1135 gcond *m_dummy_cond;
1136 };
1137
1138 /* Jump threading, redundancy elimination and const/copy propagation.
1139
1140 This pass may expose new symbols that need to be renamed into SSA. For
1141 every new symbol exposed, its corresponding bit will be set in
1142 VARS_TO_RENAME. */
1143
1144 namespace {
1145
1146 const pass_data pass_data_dominator =
1147 {
1148 GIMPLE_PASS, /* type */
1149 "dom", /* name */
1150 OPTGROUP_NONE, /* optinfo_flags */
1151 TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
1152 ( PROP_cfg | PROP_ssa ), /* properties_required */
1153 0, /* properties_provided */
1154 0, /* properties_destroyed */
1155 0, /* todo_flags_start */
1156 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
1157 };
1158
1159 class pass_dominator : public gimple_opt_pass
1160 {
1161 public:
1162 pass_dominator (gcc::context *ctxt)
1163 : gimple_opt_pass (pass_data_dominator, ctxt)
1164 {}
1165
1166 /* opt_pass methods: */
1167 opt_pass * clone () { return new pass_dominator (m_ctxt); }
1168 virtual bool gate (function *) { return flag_tree_dom != 0; }
1169 virtual unsigned int execute (function *);
1170
1171 }; // class pass_dominator
1172
1173 unsigned int
1174 pass_dominator::execute (function *fun)
1175 {
1176 memset (&opt_stats, 0, sizeof (opt_stats));
1177
1178 /* Create our hash tables. */
1179 avail_exprs = new hash_table<expr_elt_hasher> (1024);
1180 avail_exprs_stack.create (20);
1181 const_and_copies = new class const_and_copies (dump_file, dump_flags);
1182 need_eh_cleanup = BITMAP_ALLOC (NULL);
1183 need_noreturn_fixup.create (0);
1184
1185 calculate_dominance_info (CDI_DOMINATORS);
1186 cfg_altered = false;
1187
1188 /* We need to know loop structures in order to avoid destroying them
1189 in jump threading. Note that we still can e.g. thread through loop
1190 headers to an exit edge, or through loop header to the loop body, assuming
1191 that we update the loop info.
1192
1193 TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
1194 to several overly conservative bail-outs in jump threading, the case
1195 gcc.dg/tree-ssa/pr21417.c can't be threaded if the loop preheader is
1196 missing. We should improve jump threading in the future; then
1197 LOOPS_HAVE_PREHEADERS won't be needed here. */
1198 loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);
1199
1200 /* Initialize the value-handle array. */
1201 threadedge_initialize_values ();
1202
1203 /* We need accurate information regarding back edges in the CFG
1204 for jump threading; this may include back edges that are not part of
1205 a single loop. */
1206 mark_dfs_back_edges ();
1207
1208 /* Recursively walk the dominator tree optimizing statements. */
1209 dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);
1210
1211 {
1212 gimple_stmt_iterator gsi;
1213 basic_block bb;
1214 FOR_EACH_BB_FN (bb, fun)
1215 {
1216 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1217 update_stmt_if_modified (gsi_stmt (gsi));
1218 }
1219 }
1220
1221 /* If we exposed any new variables, go ahead and put them into
1222 SSA form now, before we handle jump threading. This simplifies
1223 interactions between rewriting of _DECL nodes into SSA form
1224 and rewriting SSA_NAME nodes into SSA form after block
1225 duplication and CFG manipulation. */
1226 update_ssa (TODO_update_ssa);
1227
1228 free_all_edge_infos ();
1229
1230 /* Thread jumps, creating duplicate blocks as needed. */
1231 cfg_altered |= thread_through_all_blocks (first_pass_instance);
1232
1233 if (cfg_altered)
1234 free_dominance_info (CDI_DOMINATORS);
1235
1236 /* Removal of statements may make some EH edges dead. Purge
1237 such edges from the CFG as needed. */
1238 if (!bitmap_empty_p (need_eh_cleanup))
1239 {
1240 unsigned i;
1241 bitmap_iterator bi;
1242
1243 /* Jump threading may have created forwarder blocks from blocks
1244 needing EH cleanup; the new successor of these blocks, which
1245 has inherited from the original block, needs the cleanup.
1246 Don't clear bits in the bitmap, as that can break the bitmap
1247 iterator. */
1248 EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
1249 {
1250 basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
1251 if (bb == NULL)
1252 continue;
1253 while (single_succ_p (bb)
1254 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
1255 bb = single_succ (bb);
1256 if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
1257 continue;
1258 if ((unsigned) bb->index != i)
1259 bitmap_set_bit (need_eh_cleanup, bb->index);
1260 }
1261
1262 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
1263 bitmap_clear (need_eh_cleanup);
1264 }
1265
1266 /* Fixup stmts that became noreturn calls. This may require splitting
1267 blocks and thus isn't possible during the dominator walk or before
1268 jump threading finished. Do this in reverse order so we don't
1269 inadvertedly remove a stmt we want to fixup by visiting a dominating
1270 now noreturn call first. */
1271 while (!need_noreturn_fixup.is_empty ())
1272 {
1273 gimple stmt = need_noreturn_fixup.pop ();
1274 if (dump_file && dump_flags & TDF_DETAILS)
1275 {
1276 fprintf (dump_file, "Fixing up noreturn call ");
1277 print_gimple_stmt (dump_file, stmt, 0, 0);
1278 fprintf (dump_file, "\n");
1279 }
1280 fixup_noreturn_call (stmt);
1281 }
1282
1283 statistics_counter_event (fun, "Redundant expressions eliminated",
1284 opt_stats.num_re);
1285 statistics_counter_event (fun, "Constants propagated",
1286 opt_stats.num_const_prop);
1287 statistics_counter_event (fun, "Copies propagated",
1288 opt_stats.num_copy_prop);
1289
1290 /* Debugging dumps. */
1291 if (dump_file && (dump_flags & TDF_STATS))
1292 dump_dominator_optimization_stats (dump_file);
1293
1294 loop_optimizer_finalize ();
1295
1296 /* Delete our main hashtable. */
1297 delete avail_exprs;
1298 avail_exprs = NULL;
1299
1300 /* Free asserted bitmaps and stacks. */
1301 BITMAP_FREE (need_eh_cleanup);
1302 need_noreturn_fixup.release ();
1303 avail_exprs_stack.release ();
1304 delete const_and_copies;
1305
1306 /* Free the value-handle array. */
1307 threadedge_finalize_values ();
1308
1309 return 0;
1310 }
1311
1312 } // anon namespace
1313
1314 gimple_opt_pass *
1315 make_pass_dominator (gcc::context *ctxt)
1316 {
1317 return new pass_dominator (ctxt);
1318 }
1319
1320
1321 /* Given a conditional statement CONDSTMT, convert the
1322 condition to a canonical form. */
1323
1324 static void
1325 canonicalize_comparison (gcond *condstmt)
1326 {
1327 tree op0;
1328 tree op1;
1329 enum tree_code code;
1330
1331 gcc_assert (gimple_code (condstmt) == GIMPLE_COND);
1332
1333 op0 = gimple_cond_lhs (condstmt);
1334 op1 = gimple_cond_rhs (condstmt);
1335
1336 code = gimple_cond_code (condstmt);
1337
1338 /* If it would be profitable to swap the operands, then do so to
1339 canonicalize the statement, enabling better optimization.
1340
1341 By placing canonicalization of such expressions here we
1342 transparently keep statements in canonical form, even
1343 when the statement is modified. */
1344 if (tree_swap_operands_p (op0, op1, false))
1345 {
1346 /* For relationals we need to swap the operands
1347 and change the code. */
1348 if (code == LT_EXPR
1349 || code == GT_EXPR
1350 || code == LE_EXPR
1351 || code == GE_EXPR)
1352 {
1353 code = swap_tree_comparison (code);
1354
1355 gimple_cond_set_code (condstmt, code);
1356 gimple_cond_set_lhs (condstmt, op1);
1357 gimple_cond_set_rhs (condstmt, op0);
1358
1359 update_stmt (condstmt);
1360 }
1361 }
1362 }
1363
1364 /* Remove the expressions made available in the current block from the
1365 global hash table AVAIL_EXPRS, popping entries off AVAIL_EXPRS_STACK
1366 until the marker for this block is reached. */
1370
1371 static void
1372 remove_local_expressions_from_table (void)
1373 {
1374 /* Remove all the expressions made available in this block. */
1375 while (avail_exprs_stack.length () > 0)
1376 {
1377 std::pair<expr_hash_elt_t, expr_hash_elt_t> victim
1378 = avail_exprs_stack.pop ();
1379 expr_hash_elt **slot;
1380
1381 if (victim.first == NULL)
1382 break;
1383
1384 /* This must precede the actual removal from the hash table,
1385 as ELEMENT and the table entry may share a call argument
1386 vector which will be freed during removal. */
1387 if (dump_file && (dump_flags & TDF_DETAILS))
1388 {
1389 fprintf (dump_file, "<<<< ");
1390 print_expr_hash_elt (dump_file, victim.first);
1391 }
1392
1393 slot = avail_exprs->find_slot (victim.first, NO_INSERT);
1394 gcc_assert (slot && *slot == victim.first);
1395 if (victim.second != NULL)
1396 {
1397 free_expr_hash_elt (*slot);
1398 *slot = victim.second;
1399 }
1400 else
1401 avail_exprs->clear_slot (slot);
1402 }
1403 }
1404
1405 /* A trivial wrapper so that we can present the generic jump
1406 threading code with a simple API for simplifying statements. */
1407 static tree
1408 simplify_stmt_for_jump_threading (gimple stmt,
1409 gimple within_stmt ATTRIBUTE_UNUSED)
1410 {
1411 return lookup_avail_expr (stmt, false);
1412 }
1413
1414 /* Record into the equivalence tables any equivalences implied by
1415 traversing edge E (which are cached in E->aux).
1416
1417 Callers are responsible for managing the unwinding markers. */
1418 static void
1419 record_temporary_equivalences (edge e)
1420 {
1421 int i;
1422 struct edge_info *edge_info = (struct edge_info *) e->aux;
1423
1424 /* If we have info associated with this edge, record it into
1425 our equivalence tables. */
1426 if (edge_info)
1427 {
1428 cond_equivalence *eq;
1429 tree lhs = edge_info->lhs;
1430 tree rhs = edge_info->rhs;
1431
1432 /* If we have a simple NAME = VALUE equivalence, record it. */
1433 if (lhs && TREE_CODE (lhs) == SSA_NAME)
1434 const_and_copies->record_const_or_copy (lhs, rhs);
1435
1436 /* If we have 0 = COND or 1 = COND equivalences, record them
1437 into our expression hash tables. */
1438 for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
1439 record_cond (eq);
1440 }
1441 }
1442
1443 /* Wrapper for common code to attempt to thread an edge. For example,
1444 it handles lazily building the dummy condition and the bookkeeping
1445 when jump threading is successful. */
1446
1447 void
1448 dom_opt_dom_walker::thread_across_edge (edge e)
1449 {
1450 if (! m_dummy_cond)
1451 m_dummy_cond =
1452 gimple_build_cond (NE_EXPR,
1453 integer_zero_node, integer_zero_node,
1454 NULL, NULL);
1455
1456 /* Push a marker on both stacks so we can unwind the tables back to their
1457 current state. */
1458 avail_exprs_stack.safe_push
1459 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
1460 const_and_copies->push_marker ();
1461
1462 /* Traversing E may result in equivalences we can utilize. */
1463 record_temporary_equivalences (e);
1464
1465 /* With all the edge equivalences in the tables, go ahead and attempt
1466 to thread through E->dest. */
1467 ::thread_across_edge (m_dummy_cond, e, false,
1468 const_and_copies,
1469 simplify_stmt_for_jump_threading);
1470
1471 /* And restore the various tables to their state before
1472 we threaded this edge.
1473
1474 XXX The code in tree-ssa-threadedge.c will restore the state of
1475 the const_and_copies table. We just have to restore the expression
1476 table. */
1477 remove_local_expressions_from_table ();
1478 }
1479
1480 /* PHI nodes can create equivalences too.
1481
1482 Ignoring any alternatives which are the same as the result, if
1483 all the alternatives are equal, then the PHI node creates an
1484 equivalence. */
1485
1486 static void
1487 record_equivalences_from_phis (basic_block bb)
1488 {
1489 gphi_iterator gsi;
1490
1491 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1492 {
1493 gphi *phi = gsi.phi ();
1494
1495 tree lhs = gimple_phi_result (phi);
1496 tree rhs = NULL;
1497 size_t i;
1498
1499 for (i = 0; i < gimple_phi_num_args (phi); i++)
1500 {
1501 tree t = gimple_phi_arg_def (phi, i);
1502
1503 /* Ignore alternatives which are the same as our LHS. Since
1504 LHS is a PHI_RESULT, it is known to be an SSA_NAME, so we
1505 can simply compare pointers. */
1506 if (lhs == t)
1507 continue;
1508
1509 /* Valueize t. */
1510 if (TREE_CODE (t) == SSA_NAME)
1511 {
1512 tree tmp = SSA_NAME_VALUE (t);
1513 t = tmp ? tmp : t;
1514 }
1515
1516 /* If we have not processed an alternative yet, then set
1517 RHS to this alternative. */
1518 if (rhs == NULL)
1519 rhs = t;
1520 /* If we have processed an alternative (stored in RHS), then
1521 see if it is equal to this one. If it isn't, then stop
1522 the search. */
1523 else if (! operand_equal_for_phi_arg_p (rhs, t))
1524 break;
1525 }
1526
1527 /* If we had no interesting alternatives, then all the RHS alternatives
1528 must have been the same as LHS. */
1529 if (!rhs)
1530 rhs = lhs;
1531
1532 /* If we managed to iterate through each PHI alternative without
1533 breaking out of the loop, then we have a PHI which may create
1534 a useful equivalence. We do not need to record unwind data for
1535 this, since this is a true assignment and not an equivalence
1536 inferred from a comparison. All uses of this ssa name are dominated
1537 by this assignment, so unwinding just costs time and space. */
1538 if (i == gimple_phi_num_args (phi)
1539 && may_propagate_copy (lhs, rhs))
1540 set_ssa_name_value (lhs, rhs);
1541 }
1542 }
1543
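/* For instance (a hypothetical PHI): given x_3 = PHI <a_1, a_1>,
   every alternative valueizes to a_1, so we record x_3's value as
   a_1 with no unwinding entry.  */
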
1544 /* Ignoring loop backedges, if BB has precisely one incoming edge then
1545 return that edge. Otherwise return NULL. */
1546 static edge
1547 single_incoming_edge_ignoring_loop_edges (basic_block bb)
1548 {
1549 edge retval = NULL;
1550 edge e;
1551 edge_iterator ei;
1552
1553 FOR_EACH_EDGE (e, ei, bb->preds)
1554 {
1555 /* A loop back edge can be identified by the destination of
1556 the edge dominating the source of the edge. */
1557 if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
1558 continue;
1559
1560 /* If we have already seen a non-loop edge, then we must have
1561 multiple incoming non-loop edges and thus we return NULL. */
1562 if (retval)
1563 return NULL;
1564
1565 /* This is the first non-loop incoming edge we have found. Record
1566 it. */
1567 retval = e;
1568 }
1569
1570 return retval;
1571 }
1572
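/* E.g., for a loop header reached by a preheader edge and a latch
   edge, the latch edge is ignored because its destination dominates
   its source, so the preheader edge is what gets returned.  */
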
1573 /* Record any equivalences created by the incoming edge to BB. If BB
1574 has more than one incoming edge, then no equivalence is created. */
1575
1576 static void
1577 record_equivalences_from_incoming_edge (basic_block bb)
1578 {
1579 edge e;
1580 basic_block parent;
1581 struct edge_info *edge_info;
1582
1583 /* If our parent block ended with a control statement, then we may be
1584 able to record some equivalences based on which outgoing edge from
1585 the parent was followed. */
1586 parent = get_immediate_dominator (CDI_DOMINATORS, bb);
1587
1588 e = single_incoming_edge_ignoring_loop_edges (bb);
1589
1590 /* If we had a single incoming edge from our parent block, then enter
1591 any data associated with the edge into our tables. */
1592 if (e && e->src == parent)
1593 {
1594 unsigned int i;
1595
1596 edge_info = (struct edge_info *) e->aux;
1597
1598 if (edge_info)
1599 {
1600 tree lhs = edge_info->lhs;
1601 tree rhs = edge_info->rhs;
1602 cond_equivalence *eq;
1603
1604 if (lhs)
1605 record_equality (lhs, rhs);
1606
1607 /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
1608 set via a widening type conversion, then we may be able to record
1609 additional equivalences. */
1610 if (lhs
1611 && TREE_CODE (lhs) == SSA_NAME
1612 && is_gimple_constant (rhs)
1613 && TREE_CODE (rhs) == INTEGER_CST)
1614 {
1615 gimple defstmt = SSA_NAME_DEF_STMT (lhs);
1616
1617 if (defstmt
1618 && is_gimple_assign (defstmt)
1619 && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
1620 {
1621 tree old_rhs = gimple_assign_rhs1 (defstmt);
1622
1623 /* If the conversion widens the original value and
1624 the constant is in the range of the type of OLD_RHS,
1625 then convert the constant and record the equivalence.
1626
1627 Note that int_fits_type_p does not check the precision
1628 if the upper and lower bounds are OK. */
1629 if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
1630 && (TYPE_PRECISION (TREE_TYPE (lhs))
1631 > TYPE_PRECISION (TREE_TYPE (old_rhs)))
1632 && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
1633 {
1634 tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
1635 record_equality (old_rhs, newval);
1636 }
1637 }
1638 }
1639
1640 for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
1641 record_cond (eq);
1642 }
1643 }
1644 }
1645
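/* A hypothetical example of the widening-conversion case above: if
   LHS was set by x_1 = (int) c_2 where c_2 has type char, then an
   edge equivalence x_1 == 5 also lets us record c_2 == 5, because 5
   fits in the type of c_2.  */
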
1646 /* Dump SSA statistics on FILE. */
1647
1648 void
1649 dump_dominator_optimization_stats (FILE *file)
1650 {
1651 fprintf (file, "Total number of statements: %6ld\n\n",
1652 opt_stats.num_stmts);
1653 fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
1654 opt_stats.num_exprs_considered);
1655
1656 fprintf (file, "\nHash table statistics:\n");
1657
1658 fprintf (file, " avail_exprs: ");
1659 htab_statistics (file, *avail_exprs);
1660 }
1661
1662
1663 /* Dump SSA statistics on stderr. */
1664
1665 DEBUG_FUNCTION void
1666 debug_dominator_optimization_stats (void)
1667 {
1668 dump_dominator_optimization_stats (stderr);
1669 }
1670
1671
1672 /* Dump statistics for the hash table HTAB. */
1673
1674 static void
1675 htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
1676 {
1677 fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
1678 (long) htab.size (),
1679 (long) htab.elements (),
1680 htab.collisions ());
1681 }
1682
1683
1684 /* Enter condition equivalence into the expression hash table.
1685 This indicates that a conditional expression has a known
1686 boolean value. */
1687
1688 static void
1689 record_cond (cond_equivalence *p)
1690 {
1691 struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
1692 expr_hash_elt **slot;
1693
1694 initialize_hash_element_from_expr (&p->cond, p->value, element);
1695
1696 slot = avail_exprs->find_slot_with_hash (element, element->hash, INSERT);
1697 if (*slot == NULL)
1698 {
1699 *slot = element;
1700
1701 if (dump_file && (dump_flags & TDF_DETAILS))
1702 {
1703 fprintf (dump_file, "1>>> ");
1704 print_expr_hash_elt (dump_file, element);
1705 }
1706
1707 avail_exprs_stack.safe_push
1708 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (element, NULL));
1709 }
1710 else
1711 free_expr_hash_elt (element);
1712 }
1713
1714 /* Return the loop depth of the basic block of the defining statement of X.
1715 This number should not be treated as absolutely correct because the loop
1716 information may not be completely up-to-date when dom runs. However, it
1717 will be relatively correct, and as more passes are taught to keep loop info
1718 up to date, the result will become more and more accurate. */
1719
1720 static int
1721 loop_depth_of_name (tree x)
1722 {
1723 gimple defstmt;
1724 basic_block defbb;
1725
1726 /* If it's not an SSA_NAME, we have no clue where the definition is. */
1727 if (TREE_CODE (x) != SSA_NAME)
1728 return 0;
1729
1730 /* Otherwise return the loop depth of the defining statement's bb.
1731 Note that there may not actually be a bb for this statement, if the
1732 ssa_name is live on entry. */
1733 defstmt = SSA_NAME_DEF_STMT (x);
1734 defbb = gimple_bb (defstmt);
1735 if (!defbb)
1736 return 0;
1737
1738 return bb_loop_depth (defbb);
1739 }
1740
1741 /* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
1742 This constrains the cases in which we may treat this as assignment. */
1743
1744 static void
1745 record_equality (tree x, tree y)
1746 {
1747 tree prev_x = NULL, prev_y = NULL;
1748
1749 if (tree_swap_operands_p (x, y, false))
1750 std::swap (x, y);
1751
1752 /* Most of the time tree_swap_operands_p does what we want. But there
1753 are cases where we know one operand is better for copy propagation than
1754 the other. Since no other code cares about the ordering of equality
1755 comparison operators for that purpose, we just handle the special cases
1756 here. */
1757 if (TREE_CODE (x) == SSA_NAME && TREE_CODE (y) == SSA_NAME)
1758 {
1759 /* If one operand is a single use operand, then make it
1760 X. This will preserve its single use properly and if this
1761 conditional is eliminated, the computation of X can be
1762 eliminated as well. */
1763 if (has_single_use (y) && ! has_single_use (x))
1764 std::swap (x, y);
1765 }
1766 if (TREE_CODE (x) == SSA_NAME)
1767 prev_x = SSA_NAME_VALUE (x);
1768 if (TREE_CODE (y) == SSA_NAME)
1769 prev_y = SSA_NAME_VALUE (y);
1770
1771 /* If one of the previous values is invariant, or invariant in more loops
1772 (by depth), then use that.
1773 Otherwise it doesn't matter which value we choose, just so
1774 long as we canonicalize on one value. */
1775 if (is_gimple_min_invariant (y))
1776 ;
1777 else if (is_gimple_min_invariant (x)
1778 /* ??? When threading over backedges the following is important
1779 for correctness. See PR61757. */
1780 || (loop_depth_of_name (x) < loop_depth_of_name (y)))
1781 prev_x = x, x = y, y = prev_x, prev_x = prev_y;
1782 else if (prev_x && is_gimple_min_invariant (prev_x))
1783 x = y, y = prev_x, prev_x = prev_y;
1784 else if (prev_y)
1785 y = prev_y;
1786
1787 /* After the swapping, we must have one SSA_NAME. */
1788 if (TREE_CODE (x) != SSA_NAME)
1789 return;
1790
1791 /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
1792 variable compared against zero. If we're honoring signed zeros,
1793 then we cannot record this value unless we know that the value is
1794 nonzero. */
1795 if (HONOR_SIGNED_ZEROS (x)
1796 && (TREE_CODE (y) != REAL_CST
1797 || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
1798 return;
1799
1800 const_and_copies->record_const_or_copy (x, y, prev_x);
1801 }
1802
1803 /* Returns true when STMT is a simple iv increment. It detects the
1804 following situation:
1805
1806 i_1 = phi (..., i_2)
1807 i_2 = i_1 +/- ... */
1808
1809 bool
1810 simple_iv_increment_p (gimple stmt)
1811 {
1812 enum tree_code code;
1813 tree lhs, preinc;
1814 gimple phi;
1815 size_t i;
1816
1817 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1818 return false;
1819
1820 lhs = gimple_assign_lhs (stmt);
1821 if (TREE_CODE (lhs) != SSA_NAME)
1822 return false;
1823
1824 code = gimple_assign_rhs_code (stmt);
1825 if (code != PLUS_EXPR
1826 && code != MINUS_EXPR
1827 && code != POINTER_PLUS_EXPR)
1828 return false;
1829
1830 preinc = gimple_assign_rhs1 (stmt);
1831 if (TREE_CODE (preinc) != SSA_NAME)
1832 return false;
1833
1834 phi = SSA_NAME_DEF_STMT (preinc);
1835 if (gimple_code (phi) != GIMPLE_PHI)
1836 return false;
1837
1838 for (i = 0; i < gimple_phi_num_args (phi); i++)
1839 if (gimple_phi_arg_def (phi, i) == lhs)
1840 return true;
1841
1842 return false;
1843 }
1844
1845 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
1846 known value for that SSA_NAME (or NULL if no value is known).
1847
1848 Propagate values from CONST_AND_COPIES into the PHI nodes of the
1849 successors of BB. */
1850
1851 static void
1852 cprop_into_successor_phis (basic_block bb)
1853 {
1854 edge e;
1855 edge_iterator ei;
1856
1857 FOR_EACH_EDGE (e, ei, bb->succs)
1858 {
1859 int indx;
1860 gphi_iterator gsi;
1861
1862 /* If this is an abnormal edge, then we do not want to copy propagate
1863 into the PHI alternative associated with this edge. */
1864 if (e->flags & EDGE_ABNORMAL)
1865 continue;
1866
1867 gsi = gsi_start_phis (e->dest);
1868 if (gsi_end_p (gsi))
1869 continue;
1870
1871 /* We may have an equivalence associated with this edge. While
1872 we can not propagate it into non-dominated blocks, we can
1873 propagate it into PHIs in non-dominated blocks. */
1874
1875 /* Push the unwind marker so we can reset the const and copies
1876 table back to its original state after processing this edge. */
1877 const_and_copies->push_marker ();
1878
1879 /* Extract and record any simple NAME = VALUE equivalences.
1880
1881 Don't bother with [01] = COND equivalences, they're not useful
1882 here. */
1883 struct edge_info *edge_info = (struct edge_info *) e->aux;
1884 if (edge_info)
1885 {
1886 tree lhs = edge_info->lhs;
1887 tree rhs = edge_info->rhs;
1888
1889 if (lhs && TREE_CODE (lhs) == SSA_NAME)
1890 const_and_copies->record_const_or_copy (lhs, rhs);
1891 }
1892
1893 indx = e->dest_idx;
1894 for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
1895 {
1896 tree new_val;
1897 use_operand_p orig_p;
1898 tree orig_val;
1899 gphi *phi = gsi.phi ();
1900
1901 /* The alternative may be associated with a constant, so verify
1902 it is an SSA_NAME before doing anything with it. */
1903 orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
1904 orig_val = get_use_from_ptr (orig_p);
1905 if (TREE_CODE (orig_val) != SSA_NAME)
1906 continue;
1907
1908 /* If we have *ORIG_P in our constant/copy table, then replace
1909 ORIG_P with its value in our constant/copy table. */
1910 new_val = SSA_NAME_VALUE (orig_val);
1911 if (new_val
1912 && new_val != orig_val
1913 && (TREE_CODE (new_val) == SSA_NAME
1914 || is_gimple_min_invariant (new_val))
1915 && may_propagate_copy (orig_val, new_val))
1916 propagate_value (orig_p, new_val);
1917 }
1918
1919 const_and_copies->pop_to_marker ();
1920 }
1921 }
1922
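/* Called once for each basic block BB as we walk down the dominator
   tree.  We push unwind markers, record equivalences implied by the
   incoming edge and by the PHIs in BB, then look for redundant
   computations and const/copy propagation opportunities in each
   statement before visiting BB's dominated children.  */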
1923 void
1924 dom_opt_dom_walker::before_dom_children (basic_block bb)
1925 {
1926 gimple_stmt_iterator gsi;
1927
1928 if (dump_file && (dump_flags & TDF_DETAILS))
1929 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1930
1931 /* Push a marker on the stacks of local information so that we know how
1932 far to unwind when we finalize this block. */
1933 avail_exprs_stack.safe_push
1934 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
1935 const_and_copies->push_marker ();
1936
1937 record_equivalences_from_incoming_edge (bb);
1938
1939 /* PHI nodes can create equivalences too. */
1940 record_equivalences_from_phis (bb);
1941
1942 /* Create equivalences from redundant PHIs. PHIs are only truly
1943 redundant when they exist in the same block, so push another
1944 marker and unwind right afterwards. */
1945 avail_exprs_stack.safe_push
1946 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
1947 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1948 eliminate_redundant_computations (&gsi);
1949 remove_local_expressions_from_table ();
1950
1951 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1952 optimize_stmt (bb, gsi);
1953
1954 /* Now prepare to process dominated blocks. */
1955 record_edge_info (bb);
1956 cprop_into_successor_phis (bb);
1957 }
1958
1959 /* We have finished processing the dominator children of BB, perform
1960 any finalization actions in preparation for leaving this node in
1961 the dominator tree. */
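
/* A schematic illustration of the threading attempted here (names
   invented): if BB's successor tests "if (a_1 == 0)" and the
   equivalences recorded along the path through BB already establish
   a_1 == 0, then thread_across_edge can queue a block duplication
   that routes this path straight to the THEN destination, bypassing
   the runtime test.  */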
1962
1963 void
1964 dom_opt_dom_walker::after_dom_children (basic_block bb)
1965 {
1966 gimple last;
1967
1968 /* If we have an outgoing edge to a block with multiple incoming and
1969 outgoing edges, then we may be able to thread the edge, i.e., we
1970 may be able to statically determine which of the outgoing edges
1971 will be traversed when the incoming edge from BB is traversed. */
1972 if (single_succ_p (bb)
1973 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
1974 && potentially_threadable_block (single_succ (bb)))
1975 {
1976 thread_across_edge (single_succ_edge (bb));
1977 }
1978 else if ((last = last_stmt (bb))
1979 && gimple_code (last) == GIMPLE_COND
1980 && EDGE_COUNT (bb->succs) == 2
1981 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
1982 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
1983 {
1984 edge true_edge, false_edge;
1985
1986 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1987
1988 /* Only try to thread the edge if it reaches a target block with
1989 more than one predecessor and more than one successor. */
1990 if (potentially_threadable_block (true_edge->dest))
1991 thread_across_edge (true_edge);
1992
1993 /* Similarly for the ELSE arm. */
1994 if (potentially_threadable_block (false_edge->dest))
1995 thread_across_edge (false_edge);
1996
1997 }
1998
1999 /* These remove expressions local to BB from the tables. */
2000 remove_local_expressions_from_table ();
2001 const_and_copies->pop_to_marker ();
2002 }
2003
2004 /* Search for redundant computations in STMT. If any are found, then
2005 replace them with the variable holding the result of the computation.
2006
2007 If safe, record this expression into the available expression hash
2008 table. */
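
/* A small illustrative example (SSA names invented):

     a_4 = b_1 + c_2;
     ...
     d_7 = b_1 + c_2;

   The second computation is redundant; its RHS is replaced with the
   cached result, giving d_7 = a_4, which const/copy propagation can
   then clean up.  */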
2009
2010 static void
2011 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
2012 {
2013 tree expr_type;
2014 tree cached_lhs;
2015 tree def;
2016 bool insert = true;
2017 bool assigns_var_p = false;
2018
2019 gimple stmt = gsi_stmt (*gsi);
2020
2021 if (gimple_code (stmt) == GIMPLE_PHI)
2022 def = gimple_phi_result (stmt);
2023 else
2024 def = gimple_get_lhs (stmt);
2025
2026 /* Certain expressions on the RHS can be optimized away, but can not
2027 themselves be entered into the hash tables. */
2028 if (! def
2029 || TREE_CODE (def) != SSA_NAME
2030 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
2031 || gimple_vdef (stmt)
2032 /* Do not record equivalences for increments of ivs. This would create
2033 overlapping live ranges for a very questionable gain. */
2034 || simple_iv_increment_p (stmt))
2035 insert = false;
2036
2037 /* Check if the expression has been computed before. */
2038 cached_lhs = lookup_avail_expr (stmt, insert);
2039
2040 opt_stats.num_exprs_considered++;
2041
2042 /* Get the type of the expression we are trying to optimize. */
2043 if (is_gimple_assign (stmt))
2044 {
2045 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
2046 assigns_var_p = true;
2047 }
2048 else if (gimple_code (stmt) == GIMPLE_COND)
2049 expr_type = boolean_type_node;
2050 else if (is_gimple_call (stmt))
2051 {
2052 gcc_assert (gimple_call_lhs (stmt));
2053 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
2054 assigns_var_p = true;
2055 }
2056 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2057 expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
2058 else if (gimple_code (stmt) == GIMPLE_PHI)
2059 /* We can't propagate into a phi, so the logic below doesn't apply.
2060 Instead record an equivalence between the cached LHS and the
2061 PHI result of this statement, provided they are in the same block.
2062 This should be sufficient to kill the redundant phi. */
2063 {
2064 if (def && cached_lhs)
2065 const_and_copies->record_const_or_copy (def, cached_lhs);
2066 return;
2067 }
2068 else
2069 gcc_unreachable ();
2070
2071 if (!cached_lhs)
2072 return;
2073
2074 /* It is safe to ignore types here since we have already done
2075 type checking in the hashing and equality routines. In fact
2076 type checking here merely gets in the way of constant
2077 propagation. Also, make sure that it is safe to propagate
2078 CACHED_LHS into the expression in STMT. */
2079 if ((TREE_CODE (cached_lhs) != SSA_NAME
2080 && (assigns_var_p
2081 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
2082 || may_propagate_copy_into_stmt (stmt, cached_lhs))
2083 {
2084 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
2085 || is_gimple_min_invariant (cached_lhs));
2086
2087 if (dump_file && (dump_flags & TDF_DETAILS))
2088 {
2089 fprintf (dump_file, " Replaced redundant expr '");
2090 print_gimple_expr (dump_file, stmt, 0, dump_flags);
2091 fprintf (dump_file, "' with '");
2092 print_generic_expr (dump_file, cached_lhs, dump_flags);
2093 fprintf (dump_file, "'\n");
2094 }
2095
2096 opt_stats.num_re++;
2097
2098 if (assigns_var_p
2099 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
2100 cached_lhs = fold_convert (expr_type, cached_lhs);
2101
2102 propagate_tree_value_into_stmt (gsi, cached_lhs);
2103
2104 /* Since it is always necessary to mark the result as modified,
2105 perhaps we should move this into propagate_tree_value_into_stmt
2106 itself. */
2107 gimple_set_modified (gsi_stmt (*gsi), true);
2108 }
2109 }
2110
2111 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2112 the available expressions table or the const_and_copies table.
2113 Detect and record those equivalences. */
2114 /* We handle only very simple copy equivalences here. The heavy
2115 lifting is done by eliminate_redundant_computations. */
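
/* For example (illustrative only), given the store

     *p_1 = x_2;

   we enter the artificial statement "x_2 = *p_1" into the available
   expression table, so that a later load of *p_1 reachable through the
   store's virtual operands can be replaced by x_2.  */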
2116
2117 static void
2118 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
2119 {
2120 tree lhs;
2121 enum tree_code lhs_code;
2122
2123 gcc_assert (is_gimple_assign (stmt));
2124
2125 lhs = gimple_assign_lhs (stmt);
2126 lhs_code = TREE_CODE (lhs);
2127
2128 if (lhs_code == SSA_NAME
2129 && gimple_assign_single_p (stmt))
2130 {
2131 tree rhs = gimple_assign_rhs1 (stmt);
2132
2133 /* If the RHS of the assignment is a constant or another variable that
2134 may be propagated, register it in the CONST_AND_COPIES table. We
2135 do not need to record unwind data for this, since this is a true
2136 assignment and not an equivalence inferred from a comparison. All
2137 uses of this ssa name are dominated by this assignment, so unwinding
2138 just costs time and space. */
2139 if (may_optimize_p
2140 && (TREE_CODE (rhs) == SSA_NAME
2141 || is_gimple_min_invariant (rhs)))
2142 {
2143 /* Valueize rhs. */
2144 if (TREE_CODE (rhs) == SSA_NAME)
2145 {
2146 tree tmp = SSA_NAME_VALUE (rhs);
2147 rhs = tmp ? tmp : rhs;
2148 }
2149
2150 if (dump_file && (dump_flags & TDF_DETAILS))
2151 {
2152 fprintf (dump_file, "==== ASGN ");
2153 print_generic_expr (dump_file, lhs, 0);
2154 fprintf (dump_file, " = ");
2155 print_generic_expr (dump_file, rhs, 0);
2156 fprintf (dump_file, "\n");
2157 }
2158
2159 set_ssa_name_value (lhs, rhs);
2160 }
2161 }
2162
2163 /* Make sure we can propagate &x + CST. */
2164 if (lhs_code == SSA_NAME
2165 && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
2166 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
2167 && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
2168 {
2169 tree op0 = gimple_assign_rhs1 (stmt);
2170 tree op1 = gimple_assign_rhs2 (stmt);
2171 tree new_rhs
2172 = build_fold_addr_expr (fold_build2 (MEM_REF,
2173 TREE_TYPE (TREE_TYPE (op0)),
2174 unshare_expr (op0),
2175 fold_convert (ptr_type_node,
2176 op1)));
2177 if (dump_file && (dump_flags & TDF_DETAILS))
2178 {
2179 fprintf (dump_file, "==== ASGN ");
2180 print_generic_expr (dump_file, lhs, 0);
2181 fprintf (dump_file, " = ");
2182 print_generic_expr (dump_file, new_rhs, 0);
2183 fprintf (dump_file, "\n");
2184 }
2185
2186 set_ssa_name_value (lhs, new_rhs);
2187 }
2188
2189 /* A memory store, even an aliased store, creates a useful
2190 equivalence. By exchanging the LHS and RHS, creating suitable
2191 vops and recording the result in the available expression table,
2192 we may be able to expose more redundant loads. */
2193 if (!gimple_has_volatile_ops (stmt)
2194 && gimple_references_memory_p (stmt)
2195 && gimple_assign_single_p (stmt)
2196 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2197 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2198 && !is_gimple_reg (lhs))
2199 {
2200 tree rhs = gimple_assign_rhs1 (stmt);
2201 gassign *new_stmt;
2202
2203 /* Build a new statement with the RHS and LHS exchanged. */
2204 if (TREE_CODE (rhs) == SSA_NAME)
2205 {
2206 /* NOTE tuples. The call to gimple_build_assign below replaced
2207 a call to build_gimple_modify_stmt, which did not set the
2208 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
2209 may cause an SSA validation failure, as the LHS may be a
2210 default-initialized name and should have no definition. I'm
2211 a bit dubious of this, as the artificial statement that we
2212 generate here may in fact be ill-formed, but it is simply
2213 used as an internal device in this pass, and never becomes
2214 part of the CFG. */
2215 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2216 new_stmt = gimple_build_assign (rhs, lhs);
2217 SSA_NAME_DEF_STMT (rhs) = defstmt;
2218 }
2219 else
2220 new_stmt = gimple_build_assign (rhs, lhs);
2221
2222 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2223
2224 /* Finally enter the statement into the available expression
2225 table. */
2226 lookup_avail_expr (new_stmt, true);
2227 }
2228 }
2229
2230 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2231 CONST_AND_COPIES. */
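
/* E.g. if CONST_AND_COPIES records j_3 = 10 (names invented), then a
   use of j_3 in "k_5 = j_3 * 2" becomes "k_5 = 10 * 2", subject to the
   propagation restrictions checked below.  */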
2232
2233 static void
2234 cprop_operand (gimple stmt, use_operand_p op_p)
2235 {
2236 tree val;
2237 tree op = USE_FROM_PTR (op_p);
2238
2239 /* If the operand has a known constant value or it is known to be a
2240 copy of some other variable, use the value or copy stored in
2241 CONST_AND_COPIES. */
2242 val = SSA_NAME_VALUE (op);
2243 if (val && val != op)
2244 {
2245 /* Do not replace hard register operands in asm statements. */
2246 if (gimple_code (stmt) == GIMPLE_ASM
2247 && !may_propagate_copy_into_asm (op))
2248 return;
2249
2250 /* Certain operands are not allowed to be copy propagated due
2251 to their interaction with exception handling and some GCC
2252 extensions. */
2253 if (!may_propagate_copy (op, val))
2254 return;
2255
2256 /* Do not propagate copies into BIVs.
2257 See PR23821 and PR62217 for how this can disturb IV and
2258 number of iteration analysis. */
2259 if (TREE_CODE (val) != INTEGER_CST)
2260 {
2261 gimple def = SSA_NAME_DEF_STMT (op);
2262 if (gimple_code (def) == GIMPLE_PHI
2263 && gimple_bb (def)->loop_father->header == gimple_bb (def))
2264 return;
2265 }
2266
2267 /* Dump details. */
2268 if (dump_file && (dump_flags & TDF_DETAILS))
2269 {
2270 fprintf (dump_file, " Replaced '");
2271 print_generic_expr (dump_file, op, dump_flags);
2272 fprintf (dump_file, "' with %s '",
2273 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2274 print_generic_expr (dump_file, val, dump_flags);
2275 fprintf (dump_file, "'\n");
2276 }
2277
2278 if (TREE_CODE (val) != SSA_NAME)
2279 opt_stats.num_const_prop++;
2280 else
2281 opt_stats.num_copy_prop++;
2282
2283 propagate_value (op_p, val);
2284
2285 /* And note that we modified this statement. This is now
2286 safe, even if we changed virtual operands since we will
2287 rescan the statement and rewrite its operands again. */
2288 gimple_set_modified (stmt, true);
2289 }
2290 }
2291
2292 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2293 known value for that SSA_NAME (or NULL if no value is known).
2294
2295 Propagate values from CONST_AND_COPIES into the uses, vuses and
2296 vdef_ops of STMT. */
2297
2298 static void
2299 cprop_into_stmt (gimple stmt)
2300 {
2301 use_operand_p op_p;
2302 ssa_op_iter iter;
2303
2304 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2305 cprop_operand (stmt, op_p);
2306 }
2307
2308 /* Optimize the statement pointed to by iterator SI.
2309
2310 We try to perform some simplistic global redundancy elimination and
2311 constant propagation:
2312
2313 1- To detect global redundancy, we keep track of expressions that have
2314 been computed in this block and its dominators. If we find that the
2315 same expression is computed more than once, we eliminate repeated
2316 computations by using the target of the first one.
2317
2318 2- Constant values and copy assignments. This is used to do very
2319 simplistic constant and copy propagation. When a constant or copy
2320 assignment is found, we map the value on the RHS of the assignment to
2321 the variable in the LHS in the CONST_AND_COPIES table. */
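
/* In addition to the two items above, this function also removes
   stores that merely write back the value just loaded, e.g.
   (illustrative)

     x_3 = *p_1;
     ...
     *p_1 = x_3;    <-- removed as redundant

   provided the intervening code cannot have clobbered *p_1.  */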
2322
2323 static void
2324 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2325 {
2326 gimple stmt, old_stmt;
2327 bool may_optimize_p;
2328 bool modified_p = false;
2329 bool was_noreturn;
2330
2331 old_stmt = stmt = gsi_stmt (si);
2332 was_noreturn = is_gimple_call (stmt) && gimple_call_noreturn_p (stmt);
2333
2334 if (dump_file && (dump_flags & TDF_DETAILS))
2335 {
2336 fprintf (dump_file, "Optimizing statement ");
2337 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2338 }
2339
2340 if (gimple_code (stmt) == GIMPLE_COND)
2341 canonicalize_comparison (as_a <gcond *> (stmt));
2342
2343 update_stmt_if_modified (stmt);
2344 opt_stats.num_stmts++;
2345
2346 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2347 cprop_into_stmt (stmt);
2348
2349 /* If the statement has been modified with constant replacements,
2350 fold its RHS before checking for redundant computations. */
2351 if (gimple_modified_p (stmt))
2352 {
2353 tree rhs = NULL;
2354
2355 /* Try to fold the statement making sure that STMT is kept
2356 up to date. */
2357 if (fold_stmt (&si))
2358 {
2359 stmt = gsi_stmt (si);
2360 gimple_set_modified (stmt, true);
2361
2362 if (dump_file && (dump_flags & TDF_DETAILS))
2363 {
2364 fprintf (dump_file, " Folded to: ");
2365 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2366 }
2367 }
2368
2369 /* We only need to consider cases that can yield a gimple operand. */
2370 if (gimple_assign_single_p (stmt))
2371 rhs = gimple_assign_rhs1 (stmt);
2372 else if (gimple_code (stmt) == GIMPLE_GOTO)
2373 rhs = gimple_goto_dest (stmt);
2374 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2375 /* This should never be an ADDR_EXPR. */
2376 rhs = gimple_switch_index (swtch_stmt);
2377
2378 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2379 recompute_tree_invariant_for_addr_expr (rhs);
2380
2381 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2382 even if fold_stmt updated the stmt already and thus cleared
2383 gimple_modified_p flag on it. */
2384 modified_p = true;
2385 }
2386
2387 /* Check for redundant computations. Do this optimization only
2388 for side-effect-free assignments, calls with a LHS, conditionals and switches. */
2389 may_optimize_p = (!gimple_has_side_effects (stmt)
2390 && (is_gimple_assign (stmt)
2391 || (is_gimple_call (stmt)
2392 && gimple_call_lhs (stmt) != NULL_TREE)
2393 || gimple_code (stmt) == GIMPLE_COND
2394 || gimple_code (stmt) == GIMPLE_SWITCH));
2395
2396 if (may_optimize_p)
2397 {
2398 if (gimple_code (stmt) == GIMPLE_CALL)
2399 {
2400 /* Resolve __builtin_constant_p. If it hasn't been
2401 folded to integer_one_node by now, it's fairly
2402 certain that the value simply isn't constant. */
2403 tree callee = gimple_call_fndecl (stmt);
2404 if (callee
2405 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2406 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2407 {
2408 propagate_tree_value_into_stmt (&si, integer_zero_node);
2409 stmt = gsi_stmt (si);
2410 }
2411 }
2412
2413 update_stmt_if_modified (stmt);
2414 eliminate_redundant_computations (&si);
2415 stmt = gsi_stmt (si);
2416
2417 /* Perform simple redundant store elimination. */
2418 if (gimple_assign_single_p (stmt)
2419 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2420 {
2421 tree lhs = gimple_assign_lhs (stmt);
2422 tree rhs = gimple_assign_rhs1 (stmt);
2423 tree cached_lhs;
2424 gassign *new_stmt;
2425 if (TREE_CODE (rhs) == SSA_NAME)
2426 {
2427 tree tem = SSA_NAME_VALUE (rhs);
2428 if (tem)
2429 rhs = tem;
2430 }
2431 /* Build a new statement with the RHS and LHS exchanged. */
2432 if (TREE_CODE (rhs) == SSA_NAME)
2433 {
2434 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2435 new_stmt = gimple_build_assign (rhs, lhs);
2436 SSA_NAME_DEF_STMT (rhs) = defstmt;
2437 }
2438 else
2439 new_stmt = gimple_build_assign (rhs, lhs);
2440 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2441 cached_lhs = lookup_avail_expr (new_stmt, false);
2442 if (cached_lhs
2443 && rhs == cached_lhs)
2444 {
2445 basic_block bb = gimple_bb (stmt);
2446 unlink_stmt_vdef (stmt);
2447 if (gsi_remove (&si, true))
2448 {
2449 bitmap_set_bit (need_eh_cleanup, bb->index);
2450 if (dump_file && (dump_flags & TDF_DETAILS))
2451 fprintf (dump_file, " Flagged to clear EH edges.\n");
2452 }
2453 release_defs (stmt);
2454 return;
2455 }
2456 }
2457 }
2458
2459 /* Record any additional equivalences created by this statement. */
2460 if (is_gimple_assign (stmt))
2461 record_equivalences_from_stmt (stmt, may_optimize_p);
2462
2463 /* If STMT is a COND_EXPR and it was modified, then we may know
2464 where it goes. If that is the case, then mark the CFG as altered.
2465
2466 This will cause us to later call remove_unreachable_blocks and
2467 cleanup_tree_cfg when it is safe to do so. It is not safe to
2468 clean things up here since removal of edges and such can trigger
2469 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2470 the manager.
2471
2472 That's all fine and good, except that once SSA_NAMEs are released
2473 to the manager, we must not call create_ssa_name until all references
2474 to released SSA_NAMEs have been eliminated.
2475
2476 All references to the deleted SSA_NAMEs can not be eliminated until
2477 we remove unreachable blocks.
2478
2479 We can not remove unreachable blocks until after we have completed
2480 any queued jump threading.
2481
2482 We can not complete any queued jump threads until we have taken
2483 appropriate variables out of SSA form. Taking variables out of
2484 SSA form can call create_ssa_name and thus we lose.
2485
2486 Ultimately I suspect we're going to need to change the interface
2487 into the SSA_NAME manager. */
2488 if (gimple_modified_p (stmt) || modified_p)
2489 {
2490 tree val = NULL;
2491
2492 update_stmt_if_modified (stmt);
2493
2494 if (gimple_code (stmt) == GIMPLE_COND)
2495 val = fold_binary_loc (gimple_location (stmt),
2496 gimple_cond_code (stmt), boolean_type_node,
2497 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2498 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2499 val = gimple_switch_index (swtch_stmt);
2500
2501 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2502 cfg_altered = true;
2503
2504 /* If we simplified a statement in such a way that we can show it
2505 cannot trap, update the eh information and the cfg to match. */
2506 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2507 {
2508 bitmap_set_bit (need_eh_cleanup, bb->index);
2509 if (dump_file && (dump_flags & TDF_DETAILS))
2510 fprintf (dump_file, " Flagged to clear EH edges.\n");
2511 }
2512
2513 if (!was_noreturn
2514 && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
2515 need_noreturn_fixup.safe_push (stmt);
2516 }
2517 }
2518
2519 /* Helper for walk_non_aliased_vuses. Determine if we arrived at
2520 the desired memory state. */
2521
2522 static void *
2523 vuse_eq (ao_ref *, tree vuse1, unsigned int cnt, void *data)
2524 {
2525 tree vuse2 = (tree) data;
2526 if (vuse1 == vuse2)
2527 return data;
2528
2529 /* This bounds the stmt walks we perform on reference lookups
2530 to O(1) instead of O(N) where N is the number of dominating
2531 stores leading to a candidate. We re-use the SCCVN param
2532 for this as it is basically the same complexity. */
2533 if (cnt > (unsigned) PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS))
2534 return (void *)-1;
2535
2536 return NULL;
2537 }
2538
2539 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2540 If found, return its LHS. Otherwise insert STMT in the table and
2541 return NULL_TREE.
2542
2543 Also, when an expression is first inserted in the table, it is
2544 also added to AVAIL_EXPRS_STACK, so that it can be removed when
2545 we finish processing this block and its children. */
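
/* For memory expressions the VUSE stored with the hash table entry may
   differ from the VUSE at STMT; in that case we conservatively walk the
   virtual use-def chain (see vuse_eq below) to verify that no
   intervening store could have changed the value before re-using the
   cached result.  */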
2546
2547 static tree
2548 lookup_avail_expr (gimple stmt, bool insert)
2549 {
2550 expr_hash_elt **slot;
2551 tree lhs;
2552 tree temp;
2553 struct expr_hash_elt element;
2554
2555 /* Get LHS of phi, assignment, or call; else NULL_TREE. */
2556 if (gimple_code (stmt) == GIMPLE_PHI)
2557 lhs = gimple_phi_result (stmt);
2558 else
2559 lhs = gimple_get_lhs (stmt);
2560
2561 initialize_hash_element (stmt, lhs, &element);
2562
2563 if (dump_file && (dump_flags & TDF_DETAILS))
2564 {
2565 fprintf (dump_file, "LKUP ");
2566 print_expr_hash_elt (dump_file, &element);
2567 }
2568
2569 /* Don't bother remembering constant assignments and copy operations.
2570 Constants and copy operations are handled by the constant/copy propagator
2571 in optimize_stmt. */
2572 if (element.expr.kind == EXPR_SINGLE
2573 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2574 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2575 return NULL_TREE;
2576
2577 /* Finally try to find the expression in the main expression hash table. */
2578 slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
2579 if (slot == NULL)
2580 {
2581 free_expr_hash_elt_contents (&element);
2582 return NULL_TREE;
2583 }
2584 else if (*slot == NULL)
2585 {
2586 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2587 *element2 = element;
2588 element2->stamp = element2;
2589 *slot = element2;
2590
2591 if (dump_file && (dump_flags & TDF_DETAILS))
2592 {
2593 fprintf (dump_file, "2>>> ");
2594 print_expr_hash_elt (dump_file, element2);
2595 }
2596
2597 avail_exprs_stack.safe_push
2598 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (element2, NULL));
2599 return NULL_TREE;
2600 }
2601
2602 /* If we found a redundant memory operation, do an alias walk to
2603 check if we can re-use it. */
2604 if (gimple_vuse (stmt) != (*slot)->vop)
2605 {
2606 tree vuse1 = (*slot)->vop;
2607 tree vuse2 = gimple_vuse (stmt);
2608 /* If we have a load of a register and a candidate in the
2609 hash with vuse1 then try to reach its stmt by walking
2610 up the virtual use-def chain using walk_non_aliased_vuses.
2611 But don't do this when removing expressions from the hash. */
2612 ao_ref ref;
2613 if (!(vuse1 && vuse2
2614 && gimple_assign_single_p (stmt)
2615 && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
2616 && (ao_ref_init (&ref, gimple_assign_rhs1 (stmt)), true)
2617 && walk_non_aliased_vuses (&ref, vuse2,
2618 vuse_eq, NULL, NULL, vuse1) != NULL))
2619 {
2620 if (insert)
2621 {
2622 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2623 *element2 = element;
2624 element2->stamp = element2;
2625
2626 /* Insert the expr into the hash by replacing the current
2627 entry and recording the value to restore in the
2628 avail_exprs_stack. */
2629 avail_exprs_stack.safe_push (std::make_pair (element2, *slot));
2630 *slot = element2;
2631 if (dump_file && (dump_flags & TDF_DETAILS))
2632 {
2633 fprintf (dump_file, "2>>> ");
2634 print_expr_hash_elt (dump_file, *slot);
2635 }
2636 }
2637 return NULL_TREE;
2638 }
2639 }
2640
2641 free_expr_hash_elt_contents (&element);
2642
2643 /* Extract the LHS of the assignment so that it can be used as the current
2644 definition of another variable. */
2645 lhs = (*slot)->lhs;
2646
2647 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2648 use the value from the const_and_copies table. */
2649 if (TREE_CODE (lhs) == SSA_NAME)
2650 {
2651 temp = SSA_NAME_VALUE (lhs);
2652 if (temp)
2653 lhs = temp;
2654 }
2655
2656 if (dump_file && (dump_flags & TDF_DETAILS))
2657 {
2658 fprintf (dump_file, "FIND: ");
2659 print_generic_expr (dump_file, lhs, 0);
2660 fprintf (dump_file, "\n");
2661 }
2662
2663 return lhs;
2664 }
2665
2666 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2667 for expressions using the code of the expression and the SSA numbers of
2668 its operands. */
2669
2670 static hashval_t
2671 avail_expr_hash (const void *p)
2672 {
2673 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2674 inchash::hash hstate;
2675
2676 inchash::add_hashable_expr (expr, hstate);
2677
2678 return hstate.end ();
2679 }
2680
2681 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2682 up degenerate PHIs created by or exposed by jump threading. */
2683
2684 /* Given a statement STMT, which is either a PHI node or an assignment,
2685 remove it from the IL. */
2686
2687 static void
2688 remove_stmt_or_phi (gimple stmt)
2689 {
2690 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2691
2692 if (gimple_code (stmt) == GIMPLE_PHI)
2693 remove_phi_node (&gsi, true);
2694 else
2695 {
2696 gsi_remove (&gsi, true);
2697 release_defs (stmt);
2698 }
2699 }
2700
2701 /* Given a statement STMT, which is either a PHI node or an assignment,
2702 return the "rhs" of the node. In the case of a non-degenerate
2703 PHI, NULL is returned. */
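
/* A PHI is degenerate when all of its arguments are the same value,
   ignoring arguments equal to its own result.  For instance
   (illustrative) x_4 = PHI <y_2 (3), y_2 (5), x_4 (6)> is degenerate
   with "rhs" y_2.  */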
2704
2705 static tree
2706 get_rhs_or_phi_arg (gimple stmt)
2707 {
2708 if (gimple_code (stmt) == GIMPLE_PHI)
2709 return degenerate_phi_result (as_a <gphi *> (stmt));
2710 else if (gimple_assign_single_p (stmt))
2711 return gimple_assign_rhs1 (stmt);
2712 else
2713 gcc_unreachable ();
2714 }
2715
2716
2717 /* Given a statement STMT, which is either a PHI node or an assignment,
2718 return the "lhs" of the node. */
2719
2720 static tree
2721 get_lhs_or_phi_result (gimple stmt)
2722 {
2723 if (gimple_code (stmt) == GIMPLE_PHI)
2724 return gimple_phi_result (stmt);
2725 else if (is_gimple_assign (stmt))
2726 return gimple_assign_lhs (stmt);
2727 else
2728 gcc_unreachable ();
2729 }
2730
2731 /* Propagate RHS into all uses of LHS (when possible).
2732
2733 RHS and LHS are derived from STMT, which is passed in solely so
2734 that we can remove it if propagation is successful.
2735
2736 When propagating into a PHI node or into a statement which turns
2737 into a trivial copy or constant initialization, set the
2738 appropriate bit in INTERESTING_NAMEs so that we will visit those
2739 nodes as well in an effort to pick up secondary optimization
2740 opportunities. */
2741
2742 static void
2743 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2744 {
2745 /* First verify that propagation is valid. */
2746 if (may_propagate_copy (lhs, rhs))
2747 {
2748 use_operand_p use_p;
2749 imm_use_iterator iter;
2750 gimple use_stmt;
2751 bool all = true;
2752
2753 /* Dump details. */
2754 if (dump_file && (dump_flags & TDF_DETAILS))
2755 {
2756 fprintf (dump_file, " Replacing '");
2757 print_generic_expr (dump_file, lhs, dump_flags);
2758 fprintf (dump_file, "' with %s '",
2759 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2760 print_generic_expr (dump_file, rhs, dump_flags);
2761 fprintf (dump_file, "'\n");
2762 }
2763
2764 /* Walk over every use of LHS and try to replace the use with RHS.
2765 At this point the only reason why such a propagation would not
2766 be successful would be if the use occurs in an ASM_EXPR. */
2767 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2768 {
2769 /* Leave debug stmts alone. If we succeed in propagating
2770 all non-debug uses, we'll drop the DEF, and propagation
2771 into debug stmts will occur then. */
2772 if (gimple_debug_bind_p (use_stmt))
2773 continue;
2774
2775 /* It's not always safe to propagate into an ASM_EXPR. */
2776 if (gimple_code (use_stmt) == GIMPLE_ASM
2777 && ! may_propagate_copy_into_asm (lhs))
2778 {
2779 all = false;
2780 continue;
2781 }
2782
2783 /* It's not ok to propagate into the definition stmt of RHS.
2784 <bb 9>:
2785 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2786 g_67.1_6 = prephitmp.12_36;
2787 goto <bb 9>;
2788 While this is strictly all dead code we do not want to
2789 deal with this here. */
2790 if (TREE_CODE (rhs) == SSA_NAME
2791 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2792 {
2793 all = false;
2794 continue;
2795 }
2796
2797 /* Dump details. */
2798 if (dump_file && (dump_flags & TDF_DETAILS))
2799 {
2800 fprintf (dump_file, " Original statement:");
2801 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2802 }
2803
2804 /* Propagate the RHS into this use of the LHS. */
2805 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2806 propagate_value (use_p, rhs);
2807
2808 /* Special cases to avoid useless calls into the folding
2809 routines, operand scanning, etc.
2810
2811 Propagation into a PHI may cause the PHI to become
2812 a degenerate, so mark the PHI as interesting. No other
2813 actions are necessary. */
2814 if (gimple_code (use_stmt) == GIMPLE_PHI)
2815 {
2816 tree result;
2817
2818 /* Dump details. */
2819 if (dump_file && (dump_flags & TDF_DETAILS))
2820 {
2821 fprintf (dump_file, " Updated statement:");
2822 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2823 }
2824
2825 result = get_lhs_or_phi_result (use_stmt);
2826 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2827 continue;
2828 }
2829
2830 /* From this point onward we are propagating into a
2831 real statement. Folding may (or may not) be possible,
2832 we may expose new operands, expose dead EH edges,
2833 etc. */
2834 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2835 cannot fold a call that simplifies to a constant,
2836 because the GIMPLE_CALL must be replaced by a
2837 GIMPLE_ASSIGN, and there is no way to effect such a
2838 transformation in-place. We might want to consider
2839 using the more general fold_stmt here. */
2840 {
2841 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2842 fold_stmt_inplace (&gsi);
2843 }
2844
2845 /* Sometimes propagation can expose new operands to the
2846 renamer. */
2847 update_stmt (use_stmt);
2848
2849 /* Dump details. */
2850 if (dump_file && (dump_flags & TDF_DETAILS))
2851 {
2852 fprintf (dump_file, " Updated statement:");
2853 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2854 }
2855
2856 /* If we replaced a variable index with a constant, then
2857 we would need to update the invariant flag for ADDR_EXPRs. */
2858 if (gimple_assign_single_p (use_stmt)
2859 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2860 recompute_tree_invariant_for_addr_expr
2861 (gimple_assign_rhs1 (use_stmt));
2862
2863 /* If we cleaned up EH information from the statement,
2864 mark its containing block as needing EH cleanups. */
2865 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2866 {
2867 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2868 if (dump_file && (dump_flags & TDF_DETAILS))
2869 fprintf (dump_file, " Flagged to clear EH edges.\n");
2870 }
2871
2872 /* Propagation may expose new trivial copy/constant propagation
2873 opportunities. */
2874 if (gimple_assign_single_p (use_stmt)
2875 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2876 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2877 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2878 {
2879 tree result = get_lhs_or_phi_result (use_stmt);
2880 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2881 }
2882
2883 /* Propagation into these nodes may make certain edges in
2884 the CFG unexecutable. We want to identify them, as PHI nodes
2885 at the destination of those unexecutable edges may become
2886 degenerate. */
2887 else if (gimple_code (use_stmt) == GIMPLE_COND
2888 || gimple_code (use_stmt) == GIMPLE_SWITCH
2889 || gimple_code (use_stmt) == GIMPLE_GOTO)
2890 {
2891 tree val;
2892
2893 if (gimple_code (use_stmt) == GIMPLE_COND)
2894 val = fold_binary_loc (gimple_location (use_stmt),
2895 gimple_cond_code (use_stmt),
2896 boolean_type_node,
2897 gimple_cond_lhs (use_stmt),
2898 gimple_cond_rhs (use_stmt));
2899 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2900 val = gimple_switch_index (as_a <gswitch *> (use_stmt));
2901 else
2902 val = gimple_goto_dest (use_stmt);
2903
2904 if (val && is_gimple_min_invariant (val))
2905 {
2906 basic_block bb = gimple_bb (use_stmt);
2907 edge te = find_taken_edge (bb, val);
2908 if (!te)
2909 continue;
2910
2911 edge_iterator ei;
2912 edge e;
2913 gimple_stmt_iterator gsi;
2914 gphi_iterator psi;
2915
2916 /* Remove all outgoing edges except TE. */
2917 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2918 {
2919 if (e != te)
2920 {
2921 /* Mark all the PHI nodes at the destination of
2922 the unexecutable edge as interesting. */
2923 for (psi = gsi_start_phis (e->dest);
2924 !gsi_end_p (psi);
2925 gsi_next (&psi))
2926 {
2927 gphi *phi = psi.phi ();
2928
2929 tree result = gimple_phi_result (phi);
2930 int version = SSA_NAME_VERSION (result);
2931
2932 bitmap_set_bit (interesting_names, version);
2933 }
2934
2935 te->probability += e->probability;
2936
2937 te->count += e->count;
2938 remove_edge (e);
2939 cfg_altered = true;
2940 }
2941 else
2942 ei_next (&ei);
2943 }
2944
2945 gsi = gsi_last_bb (gimple_bb (use_stmt));
2946 gsi_remove (&gsi, true);
2947
2948 /* And fixup the flags on the single remaining edge. */
2949 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2950 te->flags &= ~EDGE_ABNORMAL;
2951 te->flags |= EDGE_FALLTHRU;
2952 if (te->probability > REG_BR_PROB_BASE)
2953 te->probability = REG_BR_PROB_BASE;
2954 }
2955 }
2956 }
2957
2958 /* Ensure there is nothing else to do. */
2959 gcc_assert (!all || has_zero_uses (lhs));
2960
2961 /* If we were able to propagate away all uses of LHS, then
2962 we can remove STMT. */
2963 if (all)
2964 remove_stmt_or_phi (stmt);
2965 }
2966 }
2967
2968 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2969 a statement that is a trivial copy or constant initialization.
2970
2971 Attempt to eliminate STMT by propagating its RHS into all uses of
2972 its LHS. This may in turn set new bits in INTERESTING_NAMES
2973 for nodes we want to revisit later.
2974
2975 All exit paths should clear INTERESTING_NAMES for the result
2976 of STMT. */
2977
2978 static void
2979 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2980 {
2981 tree lhs = get_lhs_or_phi_result (stmt);
2982 tree rhs;
2983 int version = SSA_NAME_VERSION (lhs);
2984
2985 /* If the LHS of this statement or PHI has no uses, then we can
2986 just eliminate it. This can occur if, for example, the PHI
2987 was created by block duplication due to threading and its only
2988 use was in the conditional at the end of the block which was
2989 deleted. */
2990 if (has_zero_uses (lhs))
2991 {
2992 bitmap_clear_bit (interesting_names, version);
2993 remove_stmt_or_phi (stmt);
2994 return;
2995 }
2996
2997 /* Get the RHS of the assignment or PHI node if the PHI is a
2998 degenerate. */
2999 rhs = get_rhs_or_phi_arg (stmt);
3000 if (!rhs)
3001 {
3002 bitmap_clear_bit (interesting_names, version);
3003 return;
3004 }
3005
3006 if (!virtual_operand_p (lhs))
3007 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
3008 else
3009 {
3010 gimple use_stmt;
3011 imm_use_iterator iter;
3012 use_operand_p use_p;
3013 /* For virtual operands we have to propagate into all uses as
3014 otherwise we will create overlapping live ranges. */
3015 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3016 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3017 SET_USE (use_p, rhs);
3018 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
3019 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
3020 remove_stmt_or_phi (stmt);
3021 }
3022
3023 /* Note that STMT may well have been deleted by now, so do
3024 not access it; instead use the saved version # to clear
3025 STMT's entry in the worklist. */
3026 bitmap_clear_bit (interesting_names, version);
3027 }
3028
3029 /* The first phase in degenerate PHI elimination.
3030
3031 Eliminate the degenerate PHIs in BB, then recurse on the
3032 dominator children of BB. */
3033
3034 static void
3035 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
3036 {
3037 gphi_iterator gsi;
3038 basic_block son;
3039
3040 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3041 {
3042 gphi *phi = gsi.phi ();
3043
3044 eliminate_const_or_copy (phi, interesting_names);
3045 }
3046
3047 /* Recurse into the dominator children of BB. */
3048 for (son = first_dom_son (CDI_DOMINATORS, bb);
3049 son;
3050 son = next_dom_son (CDI_DOMINATORS, son))
3051 eliminate_degenerate_phis_1 (son, interesting_names);
3052 }
3053
3054
3055 /* A very simple pass to eliminate degenerate PHI nodes from the
3056 IL. This is meant to be fast enough to be run several
3057 times in the optimization pipeline.
3058
3059 Certain optimizations, particularly those which duplicate blocks
3060 or remove edges from the CFG can create or expose PHIs which are
3061 trivial copies or constant initializations.
3062
3063 While we could pick up these optimizations in DOM or with the
3064 combination of copy-prop and CCP, those solutions are far too
3065 heavy-weight for our needs.
3066
3067 This implementation has two phases so that we can efficiently
3068 eliminate the first order degenerate PHIs and second order
3069 degenerate PHIs.
3070
3071 The first phase performs a dominator walk to identify and eliminate
3072 the vast majority of the degenerate PHIs. When a degenerate PHI
3073 is identified and eliminated any affected statements or PHIs
3074 are put on a worklist.
3075
3076 The second phase eliminates degenerate PHIs and trivial copies
3077 or constant initializations using the worklist. This is how we
3078 pick up the secondary optimization opportunities with minimal
3079 cost. */
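
/* For example (block numbers and names invented):

     x_2 = PHI <x_1 (3), x_1 (4)>       <- first order degenerate
     y_5 = PHI <x_2 (6), x_1 (7)>

   Replacing x_2 with x_1 makes the second PHI degenerate as well; its
   result is queued via INTERESTING_NAMES and handled in the second
   phase.  */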
3080
3081 namespace {
3082
3083 const pass_data pass_data_phi_only_cprop =
3084 {
3085 GIMPLE_PASS, /* type */
3086 "phicprop", /* name */
3087 OPTGROUP_NONE, /* optinfo_flags */
3088 TV_TREE_PHI_CPROP, /* tv_id */
3089 ( PROP_cfg | PROP_ssa ), /* properties_required */
3090 0, /* properties_provided */
3091 0, /* properties_destroyed */
3092 0, /* todo_flags_start */
3093 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
3094 };
3095
3096 class pass_phi_only_cprop : public gimple_opt_pass
3097 {
3098 public:
3099 pass_phi_only_cprop (gcc::context *ctxt)
3100 : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
3101 {}
3102
3103 /* opt_pass methods: */
3104 opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
3105 virtual bool gate (function *) { return flag_tree_dom != 0; }
3106 virtual unsigned int execute (function *);
3107
3108 }; // class pass_phi_only_cprop
3109
3110 unsigned int
3111 pass_phi_only_cprop::execute (function *fun)
3112 {
3113 bitmap interesting_names;
3114 bitmap interesting_names1;
3115
3116 /* Bitmap of blocks which need EH information updated. We can not
3117 update it on-the-fly as doing so invalidates the dominator tree. */
3118 need_eh_cleanup = BITMAP_ALLOC (NULL);
3119
3120 /* INTERESTING_NAMES is effectively our worklist, indexed by
3121 SSA_NAME_VERSION.
3122
3123 A set bit indicates that the statement or PHI node which
3124 defines the SSA_NAME should be (re)examined to determine if
3125 it has become a degenerate PHI or trivial const/copy propagation
3126 opportunity.
3127
3128 Experiments have shown we generally get better compilation
3129 time behavior with bitmaps rather than sbitmaps. */
3130 interesting_names = BITMAP_ALLOC (NULL);
3131 interesting_names1 = BITMAP_ALLOC (NULL);
3132
3133 calculate_dominance_info (CDI_DOMINATORS);
3134 cfg_altered = false;
3135
3136 /* First phase. Eliminate degenerate PHIs via a dominator
3137 walk of the CFG.
3138
3139 Experiments have indicated that we generally get better
3140 compile-time behavior by visiting blocks in the first
3141 phase in dominator order. Presumably this is because walking
3142 in dominator order leaves fewer PHIs for later examination
3143 by the worklist phase. */
3144 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (fun),
3145 interesting_names);
3146
3147 /* Second phase. Eliminate second order degenerate PHIs as well
3148 as trivial copies or constant initializations identified by
3149 the first phase or this phase. Basically we keep iterating
3150 until our set of INTERESTING_NAMEs is empty. */
3151 while (!bitmap_empty_p (interesting_names))
3152 {
3153 unsigned int i;
3154 bitmap_iterator bi;
3155
3156 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
3157 changed during the loop. Copy it to another bitmap and
3158 use that. */
3159 bitmap_copy (interesting_names1, interesting_names);
3160
3161 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
3162 {
3163 tree name = ssa_name (i);
3164
3165 /* Ignore SSA_NAMEs that have been released because
3166 their defining statement was deleted (unreachable). */
3167 if (name)
3168 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
3169 interesting_names);
3170 }
3171 }
3172
3173 if (cfg_altered)
3174 {
3175 free_dominance_info (CDI_DOMINATORS);
3176 /* If we changed the CFG schedule loops for fixup by cfgcleanup. */
3177 loops_state_set (LOOPS_NEED_FIXUP);
3178 }
3179
3180 /* Propagation of const and copies may make some EH edges dead. Purge
3181 such edges from the CFG as needed. */
3182 if (!bitmap_empty_p (need_eh_cleanup))
3183 {
3184 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
3185 BITMAP_FREE (need_eh_cleanup);
3186 }
3187
3188 BITMAP_FREE (interesting_names);
3189 BITMAP_FREE (interesting_names1);
3190 return 0;
3191 }
3192
3193 } // anon namespace
3194
3195 gimple_opt_pass *
3196 make_pass_phi_only_cprop (gcc::context *ctxt)
3197 {
3198 return new pass_phi_only_cprop (ctxt);
3199 }