1 /* SSA Dominator optimizations for trees
2 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "tree.h"
27 #include "flags.h"
28 #include "tm_p.h"
29 #include "basic-block.h"
30 #include "cfgloop.h"
31 #include "output.h"
32 #include "function.h"
33 #include "tree-pretty-print.h"
34 #include "gimple-pretty-print.h"
35 #include "timevar.h"
36 #include "tree-dump.h"
37 #include "tree-flow.h"
38 #include "domwalk.h"
39 #include "tree-pass.h"
40 #include "tree-ssa-propagate.h"
41 #include "langhooks.h"
42 #include "params.h"
43
44 /* This file implements optimizations on the dominator tree. */
45
46 /* Representation of a "naked" right-hand-side expression, to be used
47 in recording available expressions in the expression hash table. */
48
49 enum expr_kind
50 {
51 EXPR_SINGLE,
52 EXPR_UNARY,
53 EXPR_BINARY,
54 EXPR_TERNARY,
55 EXPR_CALL
56 };
57
58 struct hashable_expr
59 {
60 tree type;
61 enum expr_kind kind;
62 union {
63 struct { tree rhs; } single;
64 struct { enum tree_code op; tree opnd; } unary;
65 struct { enum tree_code op; tree opnd0, opnd1; } binary;
66 struct { enum tree_code op; tree opnd0, opnd1, opnd2; } ternary;
67 struct { tree fn; bool pure; size_t nargs; tree *args; } call;
68 } ops;
69 };
70
71 /* Structure for recording known values of a conditional expression
72 at the exits from its block. */
73
74 typedef struct cond_equivalence_s
75 {
76 struct hashable_expr cond;
77 tree value;
78 } cond_equivalence;
79
80 DEF_VEC_O(cond_equivalence);
81 DEF_VEC_ALLOC_O(cond_equivalence,heap);
82
83 /* Structure for recording edge equivalences as well as any pending
84 edge redirections during the dominator optimizer.
85
86 Computing and storing the edge equivalences instead of creating
87 them on-demand can save significant amounts of time, particularly
88 for pathological cases involving switch statements.
89
90 These structures live for a single iteration of the dominator
91 optimizer in the edge's AUX field. At the end of an iteration we
92 free each of these structures and update the AUX field to point
93 to any requested redirection target (the code for updating the
94 CFG and SSA graph for edge redirection expects redirection edge
95 targets to be in the AUX field for each edge). */
96
97 struct edge_info
98 {
99 /* If this edge creates a simple equivalence, the LHS and RHS of
100 the equivalence will be stored here. */
101 tree lhs;
102 tree rhs;
103
104 /* Traversing an edge may also indicate one or more particular conditions
105 are true or false. */
106 VEC(cond_equivalence, heap) *cond_equivalences;
107 };
108
109 /* Hash table with expressions made available during the renaming process.
110 When an assignment of the form X_i = EXPR is found, the statement is
111 stored in this table. If the same expression EXPR is later found on the
112 RHS of another statement, it is replaced with X_i (thus performing
113 global redundancy elimination). Similarly as we pass through conditionals
114 we record the conditional itself as having either a true or false value
115 in this table. */
116 static htab_t avail_exprs;
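
/* For instance (with purely illustrative SSA names), given the sequence

     a_1 = b_2 + c_3;
     ...
     d_4 = b_2 + c_3;

   the first assignment makes "b_2 + c_3" available with value a_1, so the
   second statement may later be rewritten as d_4 = a_1.  */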
117
118 /* Stack of available expressions in AVAIL_EXPRs. Each block pushes any
119 expressions it enters into the hash table along with a marker entry
120 (null). When we finish processing the block, we pop off entries and
121 remove the expressions from the global hash table until we hit the
122 marker. */
123 typedef struct expr_hash_elt * expr_hash_elt_t;
124 DEF_VEC_P(expr_hash_elt_t);
125 DEF_VEC_ALLOC_P(expr_hash_elt_t,heap);
126
127 static VEC(expr_hash_elt_t,heap) *avail_exprs_stack;
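
/* A sketch of the unwinding scheme (illustrative entries only): after
   pushing the NULL marker for a block and then entries for "b_2 + c_3"
   and "x_4 * y_5", finalizing the block pops those two entries, removes
   them from AVAIL_EXPRS and stops at the marker, leaving expressions
   recorded by dominating blocks untouched.  */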
128
129 /* Structure for entries in the expression hash table. */
130
131 struct expr_hash_elt
132 {
133 /* The value (lhs) of this expression. */
134 tree lhs;
135
136 /* The expression (rhs) we want to record. */
137 struct hashable_expr expr;
138
139 /* The stmt pointer if this element corresponds to a statement. */
140 gimple stmt;
141
142 /* The hash value for RHS. */
143 hashval_t hash;
144
145 /* A unique stamp, typically the address of the hash
146 element itself, used in removing entries from the table. */
147 struct expr_hash_elt *stamp;
148 };
149
150 /* Stack of dest,src pairs that need to be restored during finalization.
151
152 A NULL entry is used to mark the end of pairs which need to be
153 restored during finalization of this block. */
154 static VEC(tree,heap) *const_and_copies_stack;
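
/* For example (illustrative names), recording the equivalence x_1 = 7 in a
   block pushes the previous value of x_1 (usually NULL_TREE) followed by
   x_1 itself; restore_vars_to_original_value later pops the pair and
   reinstates the previous value, stopping at the block's NULL marker.  */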
155
156 /* Track whether or not we have changed the control flow graph. */
157 static bool cfg_altered;
158
159 /* Bitmap of blocks that have had EH statements cleaned. We should
160 remove their dead edges eventually. */
161 static bitmap need_eh_cleanup;
162
163 /* Statistics for dominator optimizations. */
164 struct opt_stats_d
165 {
166 long num_stmts;
167 long num_exprs_considered;
168 long num_re;
169 long num_const_prop;
170 long num_copy_prop;
171 };
172
173 static struct opt_stats_d opt_stats;
174
175 /* Local functions. */
176 static void optimize_stmt (basic_block, gimple_stmt_iterator);
177 static tree lookup_avail_expr (gimple, bool);
178 static hashval_t avail_expr_hash (const void *);
179 static hashval_t real_avail_expr_hash (const void *);
180 static int avail_expr_eq (const void *, const void *);
181 static void htab_statistics (FILE *, htab_t);
182 static void record_cond (cond_equivalence *);
183 static void record_const_or_copy (tree, tree);
184 static void record_equality (tree, tree);
185 static void record_equivalences_from_phis (basic_block);
186 static void record_equivalences_from_incoming_edge (basic_block);
187 static void eliminate_redundant_computations (gimple_stmt_iterator *);
188 static void record_equivalences_from_stmt (gimple, int);
189 static void dom_thread_across_edge (struct dom_walk_data *, edge);
190 static void dom_opt_leave_block (struct dom_walk_data *, basic_block);
191 static void dom_opt_enter_block (struct dom_walk_data *, basic_block);
192 static void remove_local_expressions_from_table (void);
193 static void restore_vars_to_original_value (void);
194 static edge single_incoming_edge_ignoring_loop_edges (basic_block);
195
196
197 /* Given a statement STMT, initialize the hash table element pointed to
198 by ELEMENT. */
199
200 static void
201 initialize_hash_element (gimple stmt, tree lhs,
202 struct expr_hash_elt *element)
203 {
204 enum gimple_code code = gimple_code (stmt);
205 struct hashable_expr *expr = &element->expr;
206
207 if (code == GIMPLE_ASSIGN)
208 {
209 enum tree_code subcode = gimple_assign_rhs_code (stmt);
210
211 expr->type = NULL_TREE;
212
213 switch (get_gimple_rhs_class (subcode))
214 {
215 case GIMPLE_SINGLE_RHS:
216 expr->kind = EXPR_SINGLE;
217 expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
218 break;
219 case GIMPLE_UNARY_RHS:
220 expr->kind = EXPR_UNARY;
221 expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
222 expr->ops.unary.op = subcode;
223 expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
224 break;
225 case GIMPLE_BINARY_RHS:
226 expr->kind = EXPR_BINARY;
227 expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
228 expr->ops.binary.op = subcode;
229 expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
230 expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
231 break;
232 case GIMPLE_TERNARY_RHS:
233 expr->kind = EXPR_TERNARY;
234 expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
235 expr->ops.ternary.op = subcode;
236 expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
237 expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
238 expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
239 break;
240 default:
241 gcc_unreachable ();
242 }
243 }
244 else if (code == GIMPLE_COND)
245 {
246 expr->type = boolean_type_node;
247 expr->kind = EXPR_BINARY;
248 expr->ops.binary.op = gimple_cond_code (stmt);
249 expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
250 expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
251 }
252 else if (code == GIMPLE_CALL)
253 {
254 size_t nargs = gimple_call_num_args (stmt);
255 size_t i;
256
257 gcc_assert (gimple_call_lhs (stmt));
258
259 expr->type = TREE_TYPE (gimple_call_lhs (stmt));
260 expr->kind = EXPR_CALL;
261 expr->ops.call.fn = gimple_call_fn (stmt);
262
263 if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
264 expr->ops.call.pure = true;
265 else
266 expr->ops.call.pure = false;
267
268 expr->ops.call.nargs = nargs;
269 expr->ops.call.args = (tree *) xcalloc (nargs, sizeof (tree));
270 for (i = 0; i < nargs; i++)
271 expr->ops.call.args[i] = gimple_call_arg (stmt, i);
272 }
273 else if (code == GIMPLE_SWITCH)
274 {
275 expr->type = TREE_TYPE (gimple_switch_index (stmt));
276 expr->kind = EXPR_SINGLE;
277 expr->ops.single.rhs = gimple_switch_index (stmt);
278 }
279 else if (code == GIMPLE_GOTO)
280 {
281 expr->type = TREE_TYPE (gimple_goto_dest (stmt));
282 expr->kind = EXPR_SINGLE;
283 expr->ops.single.rhs = gimple_goto_dest (stmt);
284 }
285 else
286 gcc_unreachable ();
287
288 element->lhs = lhs;
289 element->stmt = stmt;
290 element->hash = avail_expr_hash (element);
291 element->stamp = element;
292 }
293
294 /* Given a conditional expression COND as a tree, initialize
295 a hashable_expr expression EXPR. The conditional must be a
296 comparison or logical negation. A constant or a variable is
297 not permitted. */
298
299 static void
300 initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
301 {
302 expr->type = boolean_type_node;
303
304 if (COMPARISON_CLASS_P (cond))
305 {
306 expr->kind = EXPR_BINARY;
307 expr->ops.binary.op = TREE_CODE (cond);
308 expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
309 expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
310 }
311 else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
312 {
313 expr->kind = EXPR_UNARY;
314 expr->ops.unary.op = TRUTH_NOT_EXPR;
315 expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
316 }
317 else
318 gcc_unreachable ();
319 }
320
321 /* Given a hashable_expr expression EXPR and an LHS,
322 initialize the hash table element pointed to by ELEMENT. */
323
324 static void
325 initialize_hash_element_from_expr (struct hashable_expr *expr,
326 tree lhs,
327 struct expr_hash_elt *element)
328 {
329 element->expr = *expr;
330 element->lhs = lhs;
331 element->stmt = NULL;
332 element->hash = avail_expr_hash (element);
333 element->stamp = element;
334 }
335
336 /* Compare two hashable_expr structures for equivalence.
337 They are considered equivalent when the expressions
338 they denote must necessarily be equal. The logic is intended
339 to follow that of operand_equal_p in fold-const.c */
340
341 static bool
342 hashable_expr_equal_p (const struct hashable_expr *expr0,
343 const struct hashable_expr *expr1)
344 {
345 tree type0 = expr0->type;
346 tree type1 = expr1->type;
347
348 /* If either type is NULL, there is nothing to check. */
349 if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
350 return false;
351
352 /* If both types don't have the same signedness, precision, and mode,
353 then we can't consider them equal. */
354 if (type0 != type1
355 && (TREE_CODE (type0) == ERROR_MARK
356 || TREE_CODE (type1) == ERROR_MARK
357 || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
358 || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
359 || TYPE_MODE (type0) != TYPE_MODE (type1)))
360 return false;
361
362 if (expr0->kind != expr1->kind)
363 return false;
364
365 switch (expr0->kind)
366 {
367 case EXPR_SINGLE:
368 return operand_equal_p (expr0->ops.single.rhs,
369 expr1->ops.single.rhs, 0);
370
371 case EXPR_UNARY:
372 if (expr0->ops.unary.op != expr1->ops.unary.op)
373 return false;
374
375 if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
376 || expr0->ops.unary.op == NON_LVALUE_EXPR)
377 && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
378 return false;
379
380 return operand_equal_p (expr0->ops.unary.opnd,
381 expr1->ops.unary.opnd, 0);
382
383 case EXPR_BINARY:
384 if (expr0->ops.binary.op != expr1->ops.binary.op)
385 return false;
386
387 if (operand_equal_p (expr0->ops.binary.opnd0,
388 expr1->ops.binary.opnd0, 0)
389 && operand_equal_p (expr0->ops.binary.opnd1,
390 expr1->ops.binary.opnd1, 0))
391 return true;
392
393 /* For commutative ops, allow the other order. */
394 return (commutative_tree_code (expr0->ops.binary.op)
395 && operand_equal_p (expr0->ops.binary.opnd0,
396 expr1->ops.binary.opnd1, 0)
397 && operand_equal_p (expr0->ops.binary.opnd1,
398 expr1->ops.binary.opnd0, 0));
399
400 case EXPR_TERNARY:
401 if (expr0->ops.ternary.op != expr1->ops.ternary.op
402 || !operand_equal_p (expr0->ops.ternary.opnd2,
403 expr1->ops.ternary.opnd2, 0))
404 return false;
405
406 if (operand_equal_p (expr0->ops.ternary.opnd0,
407 expr1->ops.ternary.opnd0, 0)
408 && operand_equal_p (expr0->ops.ternary.opnd1,
409 expr1->ops.ternary.opnd1, 0))
410 return true;
411
412 /* For commutative ops, allow the other order. */
413 return (commutative_ternary_tree_code (expr0->ops.ternary.op)
414 && operand_equal_p (expr0->ops.ternary.opnd0,
415 expr1->ops.ternary.opnd1, 0)
416 && operand_equal_p (expr0->ops.ternary.opnd1,
417 expr1->ops.ternary.opnd0, 0));
418
419 case EXPR_CALL:
420 {
421 size_t i;
422
423 /* If the calls are to different functions, then they
424 clearly cannot be equal. */
425 if (! operand_equal_p (expr0->ops.call.fn,
426 expr1->ops.call.fn, 0))
427 return false;
428
429 if (! expr0->ops.call.pure)
430 return false;
431
432 if (expr0->ops.call.nargs != expr1->ops.call.nargs)
433 return false;
434
435 for (i = 0; i < expr0->ops.call.nargs; i++)
436 if (! operand_equal_p (expr0->ops.call.args[i],
437 expr1->ops.call.args[i], 0))
438 return false;
439
440 return true;
441 }
442
443 default:
444 gcc_unreachable ();
445 }
446 }
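
/* For example (illustrative operands), the binary expressions b_2 + c_3
   and c_3 + b_2 are considered equal here because PLUS_EXPR is
   commutative, whereas b_2 - c_3 and c_3 - b_2 are not.  */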
447
448 /* Compute a hash value for a hashable_expr value EXPR and a
449 previously accumulated hash value VAL. If two hashable_expr
450 values compare equal with hashable_expr_equal_p, they must
451 hash to the same value, given an identical value of VAL.
452 The logic is intended to follow iterative_hash_expr in tree.c. */
453
454 static hashval_t
455 iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
456 {
457 switch (expr->kind)
458 {
459 case EXPR_SINGLE:
460 val = iterative_hash_expr (expr->ops.single.rhs, val);
461 break;
462
463 case EXPR_UNARY:
464 val = iterative_hash_object (expr->ops.unary.op, val);
465
466 /* Make sure to include signedness in the hash computation.
467 Don't hash the type, that can lead to having nodes which
468 compare equal according to operand_equal_p, but which
469 have different hash codes. */
470 if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
471 || expr->ops.unary.op == NON_LVALUE_EXPR)
472 val += TYPE_UNSIGNED (expr->type);
473
474 val = iterative_hash_expr (expr->ops.unary.opnd, val);
475 break;
476
477 case EXPR_BINARY:
478 val = iterative_hash_object (expr->ops.binary.op, val);
479 if (commutative_tree_code (expr->ops.binary.op))
480 val = iterative_hash_exprs_commutative (expr->ops.binary.opnd0,
481 expr->ops.binary.opnd1, val);
482 else
483 {
484 val = iterative_hash_expr (expr->ops.binary.opnd0, val);
485 val = iterative_hash_expr (expr->ops.binary.opnd1, val);
486 }
487 break;
488
489 case EXPR_TERNARY:
490 val = iterative_hash_object (expr->ops.ternary.op, val);
491 if (commutative_ternary_tree_code (expr->ops.ternary.op))
492 val = iterative_hash_exprs_commutative (expr->ops.ternary.opnd0,
493 expr->ops.ternary.opnd1, val);
494 else
495 {
496 val = iterative_hash_expr (expr->ops.ternary.opnd0, val);
497 val = iterative_hash_expr (expr->ops.ternary.opnd1, val);
498 }
499 val = iterative_hash_expr (expr->ops.ternary.opnd2, val);
500 break;
501
502 case EXPR_CALL:
503 {
504 size_t i;
505 enum tree_code code = CALL_EXPR;
506
507 val = iterative_hash_object (code, val);
508 val = iterative_hash_expr (expr->ops.call.fn, val);
509 for (i = 0; i < expr->ops.call.nargs; i++)
510 val = iterative_hash_expr (expr->ops.call.args[i], val);
511 }
512 break;
513
514 default:
515 gcc_unreachable ();
516 }
517
518 return val;
519 }
520
521 /* Print a diagnostic dump of an expression hash table entry. */
522
523 static void
524 print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
525 {
526 if (element->stmt)
527 fprintf (stream, "STMT ");
528 else
529 fprintf (stream, "COND ");
530
531 if (element->lhs)
532 {
533 print_generic_expr (stream, element->lhs, 0);
534 fprintf (stream, " = ");
535 }
536
537 switch (element->expr.kind)
538 {
539 case EXPR_SINGLE:
540 print_generic_expr (stream, element->expr.ops.single.rhs, 0);
541 break;
542
543 case EXPR_UNARY:
544 fprintf (stream, "%s ", tree_code_name[element->expr.ops.unary.op]);
545 print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
546 break;
547
548 case EXPR_BINARY:
549 print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
550 fprintf (stream, " %s ", tree_code_name[element->expr.ops.binary.op]);
551 print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
552 break;
553
554 case EXPR_TERNARY:
555 fprintf (stream, " %s <", tree_code_name[element->expr.ops.ternary.op]);
556 print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
557 fputs (", ", stream);
558 print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
559 fputs (", ", stream);
560 print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
561 fputs (">", stream);
562 break;
563
564 case EXPR_CALL:
565 {
566 size_t i;
567 size_t nargs = element->expr.ops.call.nargs;
568
569 print_generic_expr (stream, element->expr.ops.call.fn, 0);
570 fprintf (stream, " (");
571 for (i = 0; i < nargs; i++)
572 {
573 print_generic_expr (stream, element->expr.ops.call.args[i], 0);
574 if (i + 1 < nargs)
575 fprintf (stream, ", ");
576 }
577 fprintf (stream, ")");
578 }
579 break;
580 }
581 fprintf (stream, "\n");
582
583 if (element->stmt)
584 {
585 fprintf (stream, " ");
586 print_gimple_stmt (stream, element->stmt, 0, 0);
587 }
588 }
589
590 /* Delete an expr_hash_elt and reclaim its storage. */
591
592 static void
593 free_expr_hash_elt (void *elt)
594 {
595 struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
596
597 if (element->expr.kind == EXPR_CALL)
598 free (element->expr.ops.call.args);
599
600 free (element);
601 }
602
603 /* Allocate an EDGE_INFO for edge E and attach it to E.
604 Return the new EDGE_INFO structure. */
605
606 static struct edge_info *
607 allocate_edge_info (edge e)
608 {
609 struct edge_info *edge_info;
610
611 edge_info = XCNEW (struct edge_info);
612
613 e->aux = edge_info;
614 return edge_info;
615 }
616
617 /* Free all EDGE_INFO structures associated with edges in the CFG.
618 If a particular edge can be threaded, copy the redirection
619 target from the EDGE_INFO structure into the edge's AUX field
620 as required by code to update the CFG and SSA graph for
621 jump threading. */
622
623 static void
624 free_all_edge_infos (void)
625 {
626 basic_block bb;
627 edge_iterator ei;
628 edge e;
629
630 FOR_EACH_BB (bb)
631 {
632 FOR_EACH_EDGE (e, ei, bb->preds)
633 {
634 struct edge_info *edge_info = (struct edge_info *) e->aux;
635
636 if (edge_info)
637 {
638 if (edge_info->cond_equivalences)
639 VEC_free (cond_equivalence, heap, edge_info->cond_equivalences);
640 free (edge_info);
641 e->aux = NULL;
642 }
643 }
644 }
645 }
646
647 /* Jump threading, redundancy elimination and const/copy propagation.
648
649 This pass may expose new symbols that need to be renamed into SSA. For
650 every new symbol exposed, its corresponding bit will be set in
651 VARS_TO_RENAME. */
652
653 static unsigned int
654 tree_ssa_dominator_optimize (void)
655 {
656 struct dom_walk_data walk_data;
657
658 memset (&opt_stats, 0, sizeof (opt_stats));
659
660 /* Create our hash tables. */
661 avail_exprs = htab_create (1024, real_avail_expr_hash, avail_expr_eq, free_expr_hash_elt);
662 avail_exprs_stack = VEC_alloc (expr_hash_elt_t, heap, 20);
663 const_and_copies_stack = VEC_alloc (tree, heap, 20);
664 need_eh_cleanup = BITMAP_ALLOC (NULL);
665
666 /* Setup callbacks for the generic dominator tree walker. */
667 walk_data.dom_direction = CDI_DOMINATORS;
668 walk_data.initialize_block_local_data = NULL;
669 walk_data.before_dom_children = dom_opt_enter_block;
670 walk_data.after_dom_children = dom_opt_leave_block;
671 /* Right now we only attach a dummy COND_EXPR to the global data pointer.
672 When we attach more stuff we'll need to fill this out with a real
673 structure. */
674 walk_data.global_data = NULL;
675 walk_data.block_local_data_size = 0;
676
677 /* Now initialize the dominator walker. */
678 init_walk_dominator_tree (&walk_data);
679
680 calculate_dominance_info (CDI_DOMINATORS);
681 cfg_altered = false;
682
683 /* We need to know loop structures in order to avoid destroying them
684 in jump threading. Note that we still can e.g. thread through loop
685 headers to an exit edge, or through a loop header to the loop body, assuming
686 that we update the loop info. */
687 loop_optimizer_init (LOOPS_HAVE_SIMPLE_LATCHES);
688
689 /* Initialize the value-handle array. */
690 threadedge_initialize_values ();
691
692 /* We need accurate information regarding back edges in the CFG
693 for jump threading; this may include back edges that are not part of
694 a single loop. */
695 mark_dfs_back_edges ();
696
697 /* Recursively walk the dominator tree optimizing statements. */
698 walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);
699
700 {
701 gimple_stmt_iterator gsi;
702 basic_block bb;
703 FOR_EACH_BB (bb)
704 {for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
705 update_stmt_if_modified (gsi_stmt (gsi));
706 }
707 }
708
709 /* If we exposed any new variables, go ahead and put them into
710 SSA form now, before we handle jump threading. This simplifies
711 interactions between rewriting of _DECL nodes into SSA form
712 and rewriting SSA_NAME nodes into SSA form after block
713 duplication and CFG manipulation. */
714 update_ssa (TODO_update_ssa);
715
716 free_all_edge_infos ();
717
718 /* Thread jumps, creating duplicate blocks as needed. */
719 cfg_altered |= thread_through_all_blocks (first_pass_instance);
720
721 if (cfg_altered)
722 free_dominance_info (CDI_DOMINATORS);
723
724 /* Removal of statements may make some EH edges dead. Purge
725 such edges from the CFG as needed. */
726 if (!bitmap_empty_p (need_eh_cleanup))
727 {
728 unsigned i;
729 bitmap_iterator bi;
730
731 /* Jump threading may have created forwarder blocks from blocks
732 needing EH cleanup; the new successor of these blocks, which
733 has inherited the original block's statements, needs the cleanup. */
734 EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
735 {
736 basic_block bb = BASIC_BLOCK (i);
737 if (single_succ_p (bb)
738 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
739 {
740 bitmap_clear_bit (need_eh_cleanup, i);
741 bitmap_set_bit (need_eh_cleanup, single_succ (bb)->index);
742 }
743 }
744
745 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
746 bitmap_zero (need_eh_cleanup);
747 }
748
749 statistics_counter_event (cfun, "Redundant expressions eliminated",
750 opt_stats.num_re);
751 statistics_counter_event (cfun, "Constants propagated",
752 opt_stats.num_const_prop);
753 statistics_counter_event (cfun, "Copies propagated",
754 opt_stats.num_copy_prop);
755
756 /* Debugging dumps. */
757 if (dump_file && (dump_flags & TDF_STATS))
758 dump_dominator_optimization_stats (dump_file);
759
760 loop_optimizer_finalize ();
761
762 /* Delete our main hashtable. */
763 htab_delete (avail_exprs);
764
765 /* And finalize the dominator walker. */
766 fini_walk_dominator_tree (&walk_data);
767
768 /* Free allocated bitmaps and stacks. */
769 BITMAP_FREE (need_eh_cleanup);
770
771 VEC_free (expr_hash_elt_t, heap, avail_exprs_stack);
772 VEC_free (tree, heap, const_and_copies_stack);
773
774 /* Free the value-handle array. */
775 threadedge_finalize_values ();
776 ssa_name_values = NULL;
777
778 return 0;
779 }
780
781 static bool
782 gate_dominator (void)
783 {
784 return flag_tree_dom != 0;
785 }
786
787 struct gimple_opt_pass pass_dominator =
788 {
789 {
790 GIMPLE_PASS,
791 "dom", /* name */
792 gate_dominator, /* gate */
793 tree_ssa_dominator_optimize, /* execute */
794 NULL, /* sub */
795 NULL, /* next */
796 0, /* static_pass_number */
797 TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
798 PROP_cfg | PROP_ssa, /* properties_required */
799 0, /* properties_provided */
800 0, /* properties_destroyed */
801 0, /* todo_flags_start */
802 TODO_cleanup_cfg
803 | TODO_update_ssa
804 | TODO_verify_ssa
805 | TODO_verify_flow
806 | TODO_dump_func /* todo_flags_finish */
807 }
808 };
809
810
811 /* Given a conditional statement CONDSTMT, convert the
812 condition to a canonical form. */
813
814 static void
815 canonicalize_comparison (gimple condstmt)
816 {
817 tree op0;
818 tree op1;
819 enum tree_code code;
820
821 gcc_assert (gimple_code (condstmt) == GIMPLE_COND);
822
823 op0 = gimple_cond_lhs (condstmt);
824 op1 = gimple_cond_rhs (condstmt);
825
826 code = gimple_cond_code (condstmt);
827
828 /* If it would be profitable to swap the operands, then do so to
829 canonicalize the statement, enabling better optimization.
830
831 By placing canonicalization of such expressions here we
832 transparently keep statements in canonical form, even
833 when the statement is modified. */
834 if (tree_swap_operands_p (op0, op1, false))
835 {
836 /* For relationals we need to swap the operands
837 and change the code. */
838 if (code == LT_EXPR
839 || code == GT_EXPR
840 || code == LE_EXPR
841 || code == GE_EXPR)
842 {
843 code = swap_tree_comparison (code);
844
845 gimple_cond_set_code (condstmt, code);
846 gimple_cond_set_lhs (condstmt, op1);
847 gimple_cond_set_rhs (condstmt, op0);
848
849 update_stmt (condstmt);
850 }
851 }
852 }
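
/* For instance (illustrative operands), if tree_swap_operands_p prefers
   the operands in the other order, a condition such as 7 > a_1 would be
   rewritten as a_1 < 7.  */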
853
854 /* Initialize local stacks for this optimizer and record equivalences
855 upon entry to BB. Equivalences can come from the edge traversed to
856 reach BB or they may come from PHI nodes at the start of BB. */
857
858 /* Unwind AVAIL_EXPRS_STACK, removing the expressions made available by the
859 current block from the AVAIL_EXPRS hash table, stopping at the block's NULL marker. */
860
861 static void
862 remove_local_expressions_from_table (void)
863 {
864 /* Remove all the expressions made available in this block. */
865 while (VEC_length (expr_hash_elt_t, avail_exprs_stack) > 0)
866 {
867 expr_hash_elt_t victim = VEC_pop (expr_hash_elt_t, avail_exprs_stack);
868 void **slot;
869
870 if (victim == NULL)
871 break;
872
873 /* This must precede the actual removal from the hash table,
874 as ELEMENT and the table entry may share a call argument
875 vector which will be freed during removal. */
876 if (dump_file && (dump_flags & TDF_DETAILS))
877 {
878 fprintf (dump_file, "<<<< ");
879 print_expr_hash_elt (dump_file, victim);
880 }
881
882 slot = htab_find_slot_with_hash (avail_exprs,
883 victim, victim->hash, NO_INSERT);
884 gcc_assert (slot && *slot == (void *) victim);
885 htab_clear_slot (avail_exprs, slot);
886 }
887 }
888
889 /* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
890 CONST_AND_COPIES to its original state, stopping when we hit a
891 NULL marker. */
892
893 static void
894 restore_vars_to_original_value (void)
895 {
896 while (VEC_length (tree, const_and_copies_stack) > 0)
897 {
898 tree prev_value, dest;
899
900 dest = VEC_pop (tree, const_and_copies_stack);
901
902 if (dest == NULL)
903 break;
904
905 if (dump_file && (dump_flags & TDF_DETAILS))
906 {
907 fprintf (dump_file, "<<<< COPY ");
908 print_generic_expr (dump_file, dest, 0);
909 fprintf (dump_file, " = ");
910 print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
911 fprintf (dump_file, "\n");
912 }
913
914 prev_value = VEC_pop (tree, const_and_copies_stack);
915 set_ssa_name_value (dest, prev_value);
916 }
917 }
918
919 /* A trivial wrapper so that we can present the generic jump
920 threading code with a simple API for simplifying statements. */
921 static tree
922 simplify_stmt_for_jump_threading (gimple stmt,
923 gimple within_stmt ATTRIBUTE_UNUSED)
924 {
925 return lookup_avail_expr (stmt, false);
926 }
927
928 /* Wrapper for common code to attempt to thread an edge. For example,
929 it handles lazily building the dummy condition and the bookkeeping
930 when jump threading is successful. */
931
932 static void
933 dom_thread_across_edge (struct dom_walk_data *walk_data, edge e)
934 {
935 if (! walk_data->global_data)
936 {
937 gimple dummy_cond =
938 gimple_build_cond (NE_EXPR,
939 integer_zero_node, integer_zero_node,
940 NULL, NULL);
941 walk_data->global_data = dummy_cond;
942 }
943
944 thread_across_edge ((gimple) walk_data->global_data, e, false,
945 &const_and_copies_stack,
946 simplify_stmt_for_jump_threading);
947 }
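
/* For example (illustrative names), if x_1 is known to equal 0 on edge E
   and E's destination ends with if (x_1 == 0), the condition can be
   simplified and the jump threaded directly to the true successor.  */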
948
949 /* PHI nodes can create equivalences too.
950
951 Ignoring any alternatives which are the same as the result, if
952 all the alternatives are equal, then the PHI node creates an
953 equivalence. */
954
955 static void
956 record_equivalences_from_phis (basic_block bb)
957 {
958 gimple_stmt_iterator gsi;
959
960 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
961 {
962 gimple phi = gsi_stmt (gsi);
963
964 tree lhs = gimple_phi_result (phi);
965 tree rhs = NULL;
966 size_t i;
967
968 for (i = 0; i < gimple_phi_num_args (phi); i++)
969 {
970 tree t = gimple_phi_arg_def (phi, i);
971
972 /* Ignore alternatives which are the same as our LHS. Since
973 LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
974 can simply compare pointers. */
975 if (lhs == t)
976 continue;
977
978 /* If we have not processed an alternative yet, then set
979 RHS to this alternative. */
980 if (rhs == NULL)
981 rhs = t;
982 /* If we have processed an alternative (stored in RHS), then
983 see if it is equal to this one. If it isn't, then stop
984 the search. */
985 else if (! operand_equal_for_phi_arg_p (rhs, t))
986 break;
987 }
988
989 /* If we had no interesting alternatives, then all the RHS alternatives
990 must have been the same as LHS. */
991 if (!rhs)
992 rhs = lhs;
993
994 /* If we managed to iterate through each PHI alternative without
995 breaking out of the loop, then we have a PHI which may create
996 a useful equivalence. We do not need to record unwind data for
997 this, since this is a true assignment and not an equivalence
998 inferred from a comparison. All uses of this ssa name are dominated
999 by this assignment, so unwinding just costs time and space. */
1000 if (i == gimple_phi_num_args (phi) && may_propagate_copy (lhs, rhs))
1001 set_ssa_name_value (lhs, rhs);
1002 }
1003 }
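
/* For example (illustrative names), for the PHI node

     x_3 = PHI <y_7 (2), y_7 (3)>

   every argument is y_7, so x_3 is recorded as equivalent to y_7,
   provided the copy may be propagated.  */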
1004
1005 /* Ignoring loop backedges, if BB has precisely one incoming edge then
1006 return that edge. Otherwise return NULL. */
1007 static edge
1008 single_incoming_edge_ignoring_loop_edges (basic_block bb)
1009 {
1010 edge retval = NULL;
1011 edge e;
1012 edge_iterator ei;
1013
1014 FOR_EACH_EDGE (e, ei, bb->preds)
1015 {
1016 /* A loop back edge can be identified by the destination of
1017 the edge dominating the source of the edge. */
1018 if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
1019 continue;
1020
1021 /* If we have already seen a non-loop edge, then we must have
1022 multiple incoming non-loop edges and thus we return NULL. */
1023 if (retval)
1024 return NULL;
1025
1026 /* This is the first non-loop incoming edge we have found. Record
1027 it. */
1028 retval = e;
1029 }
1030
1031 return retval;
1032 }
1033
1034 /* Record any equivalences created by the incoming edge to BB. If BB
1035 has more than one incoming edge, then no equivalence is created. */
1036
1037 static void
1038 record_equivalences_from_incoming_edge (basic_block bb)
1039 {
1040 edge e;
1041 basic_block parent;
1042 struct edge_info *edge_info;
1043
1044 /* If our parent block ended with a control statement, then we may be
1045 able to record some equivalences based on which outgoing edge from
1046 the parent was followed. */
1047 parent = get_immediate_dominator (CDI_DOMINATORS, bb);
1048
1049 e = single_incoming_edge_ignoring_loop_edges (bb);
1050
1051 /* If we had a single incoming edge from our parent block, then enter
1052 any data associated with the edge into our tables. */
1053 if (e && e->src == parent)
1054 {
1055 unsigned int i;
1056
1057 edge_info = (struct edge_info *) e->aux;
1058
1059 if (edge_info)
1060 {
1061 tree lhs = edge_info->lhs;
1062 tree rhs = edge_info->rhs;
1063 cond_equivalence *eq;
1064
1065 if (lhs)
1066 record_equality (lhs, rhs);
1067
1068 for (i = 0; VEC_iterate (cond_equivalence,
1069 edge_info->cond_equivalences, i, eq); ++i)
1070 record_cond (eq);
1071 }
1072 }
1073 }
1074
1075 /* Dump SSA statistics on FILE. */
1076
1077 void
1078 dump_dominator_optimization_stats (FILE *file)
1079 {
1080 fprintf (file, "Total number of statements: %6ld\n\n",
1081 opt_stats.num_stmts);
1082 fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
1083 opt_stats.num_exprs_considered);
1084
1085 fprintf (file, "\nHash table statistics:\n");
1086
1087 fprintf (file, " avail_exprs: ");
1088 htab_statistics (file, avail_exprs);
1089 }
1090
1091
1092 /* Dump SSA statistics on stderr. */
1093
1094 DEBUG_FUNCTION void
1095 debug_dominator_optimization_stats (void)
1096 {
1097 dump_dominator_optimization_stats (stderr);
1098 }
1099
1100
1101 /* Dump statistics for the hash table HTAB. */
1102
1103 static void
1104 htab_statistics (FILE *file, htab_t htab)
1105 {
1106 fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
1107 (long) htab_size (htab),
1108 (long) htab_elements (htab),
1109 htab_collisions (htab));
1110 }
1111
1112
1113 /* Enter condition equivalence into the expression hash table.
1114 This indicates that a conditional expression has a known
1115 boolean value. */
1116
1117 static void
1118 record_cond (cond_equivalence *p)
1119 {
1120 struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
1121 void **slot;
1122
1123 initialize_hash_element_from_expr (&p->cond, p->value, element);
1124
1125 slot = htab_find_slot_with_hash (avail_exprs, (void *)element,
1126 element->hash, INSERT);
1127 if (*slot == NULL)
1128 {
1129 *slot = (void *) element;
1130
1131 if (dump_file && (dump_flags & TDF_DETAILS))
1132 {
1133 fprintf (dump_file, "1>>> ");
1134 print_expr_hash_elt (dump_file, element);
1135 }
1136
1137 VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, element);
1138 }
1139 else
1140 free (element);
1141 }
1142
1143 /* Build a cond_equivalence record indicating that the comparison
1144 CODE holds between operands OP0 and OP1 and push it onto the vector *P. */
1145
1146 static void
1147 build_and_record_new_cond (enum tree_code code,
1148 tree op0, tree op1,
1149 VEC(cond_equivalence, heap) **p)
1150 {
1151 cond_equivalence c;
1152 struct hashable_expr *cond = &c.cond;
1153
1154 gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);
1155
1156 cond->type = boolean_type_node;
1157 cond->kind = EXPR_BINARY;
1158 cond->ops.binary.op = code;
1159 cond->ops.binary.opnd0 = op0;
1160 cond->ops.binary.opnd1 = op1;
1161
1162 c.value = boolean_true_node;
1163 VEC_safe_push (cond_equivalence, heap, *p, &c);
1164 }
1165
1166 /* Record that COND is true and INVERTED is false into the edge information
1167 structure. Also record that any conditions dominated by COND are true
1168 as well.
1169
1170 For example, if a < b is true, then a <= b must also be true. */
1171
1172 static void
1173 record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
1174 {
1175 tree op0, op1;
1176 cond_equivalence c;
1177
1178 if (!COMPARISON_CLASS_P (cond))
1179 return;
1180
1181 op0 = TREE_OPERAND (cond, 0);
1182 op1 = TREE_OPERAND (cond, 1);
1183
1184 switch (TREE_CODE (cond))
1185 {
1186 case LT_EXPR:
1187 case GT_EXPR:
1188 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
1189 {
1190 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1191 &edge_info->cond_equivalences);
1192 build_and_record_new_cond (LTGT_EXPR, op0, op1,
1193 &edge_info->cond_equivalences);
1194 }
1195
1196 build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
1197 ? LE_EXPR : GE_EXPR),
1198 op0, op1, &edge_info->cond_equivalences);
1199 build_and_record_new_cond (NE_EXPR, op0, op1,
1200 &edge_info->cond_equivalences);
1201 break;
1202
1203 case GE_EXPR:
1204 case LE_EXPR:
1205 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
1206 {
1207 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1208 &edge_info->cond_equivalences);
1209 }
1210 break;
1211
1212 case EQ_EXPR:
1213 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
1214 {
1215 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1216 &edge_info->cond_equivalences);
1217 }
1218 build_and_record_new_cond (LE_EXPR, op0, op1,
1219 &edge_info->cond_equivalences);
1220 build_and_record_new_cond (GE_EXPR, op0, op1,
1221 &edge_info->cond_equivalences);
1222 break;
1223
1224 case UNORDERED_EXPR:
1225 build_and_record_new_cond (NE_EXPR, op0, op1,
1226 &edge_info->cond_equivalences);
1227 build_and_record_new_cond (UNLE_EXPR, op0, op1,
1228 &edge_info->cond_equivalences);
1229 build_and_record_new_cond (UNGE_EXPR, op0, op1,
1230 &edge_info->cond_equivalences);
1231 build_and_record_new_cond (UNEQ_EXPR, op0, op1,
1232 &edge_info->cond_equivalences);
1233 build_and_record_new_cond (UNLT_EXPR, op0, op1,
1234 &edge_info->cond_equivalences);
1235 build_and_record_new_cond (UNGT_EXPR, op0, op1,
1236 &edge_info->cond_equivalences);
1237 break;
1238
1239 case UNLT_EXPR:
1240 case UNGT_EXPR:
1241 build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
1242 ? UNLE_EXPR : UNGE_EXPR),
1243 op0, op1, &edge_info->cond_equivalences);
1244 build_and_record_new_cond (NE_EXPR, op0, op1,
1245 &edge_info->cond_equivalences);
1246 break;
1247
1248 case UNEQ_EXPR:
1249 build_and_record_new_cond (UNLE_EXPR, op0, op1,
1250 &edge_info->cond_equivalences);
1251 build_and_record_new_cond (UNGE_EXPR, op0, op1,
1252 &edge_info->cond_equivalences);
1253 break;
1254
1255 case LTGT_EXPR:
1256 build_and_record_new_cond (NE_EXPR, op0, op1,
1257 &edge_info->cond_equivalences);
1258 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1259 &edge_info->cond_equivalences);
1260 break;
1261
1262 default:
1263 break;
1264 }
1265
1266 /* Now store the original true and false conditions into the first
1267 two slots. */
1268 initialize_expr_from_cond (cond, &c.cond);
1269 c.value = boolean_true_node;
1270 VEC_safe_push (cond_equivalence, heap, edge_info->cond_equivalences, &c);
1271
1272 /* It is possible for INVERTED to be the negation of a comparison,
1273 and not a valid RHS or GIMPLE_COND condition. This happens because
1274 invert_truthvalue may return such an expression when asked to invert
1275 a floating-point comparison. These comparisons are not assumed to
1276 obey the trichotomy law. */
1277 initialize_expr_from_cond (inverted, &c.cond);
1278 c.value = boolean_false_node;
1279 VEC_safe_push (cond_equivalence, heap, edge_info->cond_equivalences, &c);
1280 }
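
/* For example (illustrative names), when a_1 < b_2 is known to be true on
   an edge, the derived conditions a_1 <= b_2 and a_1 != b_2 are also
   recorded as true, plus ORDERED_EXPR and LTGT_EXPR forms when the
   operands have floating-point type.  */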
1281
1282 /* A helper function for record_const_or_copy and record_equality.
1283 Do the work of recording the value and undo info. */
1284
1285 static void
1286 record_const_or_copy_1 (tree x, tree y, tree prev_x)
1287 {
1288 set_ssa_name_value (x, y);
1289
1290 if (dump_file && (dump_flags & TDF_DETAILS))
1291 {
1292 fprintf (dump_file, "0>>> COPY ");
1293 print_generic_expr (dump_file, x, 0);
1294 fprintf (dump_file, " = ");
1295 print_generic_expr (dump_file, y, 0);
1296 fprintf (dump_file, "\n");
1297 }
1298
1299 VEC_reserve (tree, heap, const_and_copies_stack, 2);
1300 VEC_quick_push (tree, const_and_copies_stack, prev_x);
1301 VEC_quick_push (tree, const_and_copies_stack, x);
1302 }
1303
1304 /* Return the loop depth of the basic block of the defining statement of X.
1305 This number should not be treated as absolutely correct because the loop
1306 information may not be completely up-to-date when dom runs. However, it
1307 will be relatively correct, and as more passes are taught to keep loop info
1308 up to date, the result will become more and more accurate. */
1309
1310 int
1311 loop_depth_of_name (tree x)
1312 {
1313 gimple defstmt;
1314 basic_block defbb;
1315
1316 /* If it's not an SSA_NAME, we have no clue where the definition is. */
1317 if (TREE_CODE (x) != SSA_NAME)
1318 return 0;
1319
1320 /* Otherwise return the loop depth of the defining statement's bb.
1321 Note that there may not actually be a bb for this statement, if the
1322 ssa_name is live on entry. */
1323 defstmt = SSA_NAME_DEF_STMT (x);
1324 defbb = gimple_bb (defstmt);
1325 if (!defbb)
1326 return 0;
1327
1328 return defbb->loop_depth;
1329 }
1330
1331 /* Record that X is equal to Y in const_and_copies. Record undo
1332 information in the block-local vector. */
1333
1334 static void
1335 record_const_or_copy (tree x, tree y)
1336 {
1337 tree prev_x = SSA_NAME_VALUE (x);
1338
1339 gcc_assert (TREE_CODE (x) == SSA_NAME);
1340
1341 if (TREE_CODE (y) == SSA_NAME)
1342 {
1343 tree tmp = SSA_NAME_VALUE (y);
1344 if (tmp)
1345 y = tmp;
1346 }
1347
1348 record_const_or_copy_1 (x, y, prev_x);
1349 }
1350
1351 /* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
1352 This constrains the cases in which we may treat this as an assignment. */
1353
1354 static void
1355 record_equality (tree x, tree y)
1356 {
1357 tree prev_x = NULL, prev_y = NULL;
1358
1359 if (TREE_CODE (x) == SSA_NAME)
1360 prev_x = SSA_NAME_VALUE (x);
1361 if (TREE_CODE (y) == SSA_NAME)
1362 prev_y = SSA_NAME_VALUE (y);
1363
1364 /* If one of the previous values is invariant, or invariant in more loops
1365 (by depth), then use that.
1366 Otherwise it doesn't matter which value we choose, just so
1367 long as we canonicalize on one value. */
1368 if (is_gimple_min_invariant (y))
1369 ;
1370 else if (is_gimple_min_invariant (x)
1371 || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
1372 prev_x = x, x = y, y = prev_x, prev_x = prev_y;
1373 else if (prev_x && is_gimple_min_invariant (prev_x))
1374 x = y, y = prev_x, prev_x = prev_y;
1375 else if (prev_y)
1376 y = prev_y;
1377
1378 /* After the swapping, we must have one SSA_NAME. */
1379 if (TREE_CODE (x) != SSA_NAME)
1380 return;
1381
1382 /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
1383 variable compared against zero. If we're honoring signed zeros,
1384 then we cannot record this value unless we know that the value is
1385 nonzero. */
1386 if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
1387 && (TREE_CODE (y) != REAL_CST
1388 || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
1389 return;
1390
1391 record_const_or_copy_1 (x, y, prev_x);
1392 }
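
/* For instance (illustrative names), when the true edge of if (x_1 == 7)
   is taken, record_equality receives the two operands and records the
   constant 7 as the value of x_1, since invariants are preferred as the
   canonical value.  */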
1393
1394 /* Returns true when STMT is a simple iv increment. It detects the
1395 following situation:
1396
1397 i_1 = phi (..., i_2)
1398 i_2 = i_1 +/- ... */
1399
1400 static bool
1401 simple_iv_increment_p (gimple stmt)
1402 {
1403 tree lhs, preinc;
1404 gimple phi;
1405 size_t i;
1406
1407 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1408 return false;
1409
1410 lhs = gimple_assign_lhs (stmt);
1411 if (TREE_CODE (lhs) != SSA_NAME)
1412 return false;
1413
1414 if (gimple_assign_rhs_code (stmt) != PLUS_EXPR
1415 && gimple_assign_rhs_code (stmt) != MINUS_EXPR)
1416 return false;
1417
1418 preinc = gimple_assign_rhs1 (stmt);
1419
1420 if (TREE_CODE (preinc) != SSA_NAME)
1421 return false;
1422
1423 phi = SSA_NAME_DEF_STMT (preinc);
1424 if (gimple_code (phi) != GIMPLE_PHI)
1425 return false;
1426
1427 for (i = 0; i < gimple_phi_num_args (phi); i++)
1428 if (gimple_phi_arg_def (phi, i) == lhs)
1429 return true;
1430
1431 return false;
1432 }
1433
1434 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
1435 known value for that SSA_NAME (or NULL if no value is known).
1436
1437 Propagate values from CONST_AND_COPIES into the PHI nodes of the
1438 successors of BB. */
1439
1440 static void
1441 cprop_into_successor_phis (basic_block bb)
1442 {
1443 edge e;
1444 edge_iterator ei;
1445
1446 FOR_EACH_EDGE (e, ei, bb->succs)
1447 {
1448 int indx;
1449 gimple_stmt_iterator gsi;
1450
1451 /* If this is an abnormal edge, then we do not want to copy propagate
1452 into the PHI alternative associated with this edge. */
1453 if (e->flags & EDGE_ABNORMAL)
1454 continue;
1455
1456 gsi = gsi_start_phis (e->dest);
1457 if (gsi_end_p (gsi))
1458 continue;
1459
1460 indx = e->dest_idx;
1461 for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
1462 {
1463 tree new_val;
1464 use_operand_p orig_p;
1465 tree orig_val;
1466 gimple phi = gsi_stmt (gsi);
1467
1468 /* The alternative may be associated with a constant, so verify
1469 it is an SSA_NAME before doing anything with it. */
1470 orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
1471 orig_val = get_use_from_ptr (orig_p);
1472 if (TREE_CODE (orig_val) != SSA_NAME)
1473 continue;
1474
1475 /* If we have *ORIG_P in our constant/copy table, then replace
1476 ORIG_P with its value in our constant/copy table. */
1477 new_val = SSA_NAME_VALUE (orig_val);
1478 if (new_val
1479 && new_val != orig_val
1480 && (TREE_CODE (new_val) == SSA_NAME
1481 || is_gimple_min_invariant (new_val))
1482 && may_propagate_copy (orig_val, new_val))
1483 propagate_value (orig_p, new_val);
1484 }
1485 }
1486 }
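
/* For instance (illustrative names), if SSA_NAME_VALUE (x_5) is the
   constant 3, a successor PHI argument x_5 associated with this edge is
   replaced by 3, provided the copy may be propagated.  */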
1487
1488 /* We have finished optimizing BB, record any information implied by
1489 taking a specific outgoing edge from BB. */
1490
1491 static void
1492 record_edge_info (basic_block bb)
1493 {
1494 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1495 struct edge_info *edge_info;
1496
1497 if (! gsi_end_p (gsi))
1498 {
1499 gimple stmt = gsi_stmt (gsi);
1500 location_t loc = gimple_location (stmt);
1501
1502 if (gimple_code (stmt) == GIMPLE_SWITCH)
1503 {
1504 tree index = gimple_switch_index (stmt);
1505
1506 if (TREE_CODE (index) == SSA_NAME)
1507 {
1508 int i;
1509 int n_labels = gimple_switch_num_labels (stmt);
1510 tree *info = XCNEWVEC (tree, last_basic_block);
1511 edge e;
1512 edge_iterator ei;
1513
1514 for (i = 0; i < n_labels; i++)
1515 {
1516 tree label = gimple_switch_label (stmt, i);
1517 basic_block target_bb = label_to_block (CASE_LABEL (label));
1518 if (CASE_HIGH (label)
1519 || !CASE_LOW (label)
1520 || info[target_bb->index])
1521 info[target_bb->index] = error_mark_node;
1522 else
1523 info[target_bb->index] = label;
1524 }
1525
1526 FOR_EACH_EDGE (e, ei, bb->succs)
1527 {
1528 basic_block target_bb = e->dest;
1529 tree label = info[target_bb->index];
1530
1531 if (label != NULL && label != error_mark_node)
1532 {
1533 tree x = fold_convert_loc (loc, TREE_TYPE (index),
1534 CASE_LOW (label));
1535 edge_info = allocate_edge_info (e);
1536 edge_info->lhs = index;
1537 edge_info->rhs = x;
1538 }
1539 }
1540 free (info);
1541 }
1542 }
1543
1544 /* A COND_EXPR may create equivalences too. */
1545 if (gimple_code (stmt) == GIMPLE_COND)
1546 {
1547 edge true_edge;
1548 edge false_edge;
1549
1550 tree op0 = gimple_cond_lhs (stmt);
1551 tree op1 = gimple_cond_rhs (stmt);
1552 enum tree_code code = gimple_cond_code (stmt);
1553
1554 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1555
1556 /* Special case comparing booleans against a constant as we
1557 know the value of OP0 on both arms of the branch. i.e., we
1558 can record an equivalence for OP0 rather than COND. */
1559 if ((code == EQ_EXPR || code == NE_EXPR)
1560 && TREE_CODE (op0) == SSA_NAME
1561 && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
1562 && is_gimple_min_invariant (op1))
1563 {
1564 if (code == EQ_EXPR)
1565 {
1566 edge_info = allocate_edge_info (true_edge);
1567 edge_info->lhs = op0;
1568 edge_info->rhs = (integer_zerop (op1)
1569 ? boolean_false_node
1570 : boolean_true_node);
1571
1572 edge_info = allocate_edge_info (false_edge);
1573 edge_info->lhs = op0;
1574 edge_info->rhs = (integer_zerop (op1)
1575 ? boolean_true_node
1576 : boolean_false_node);
1577 }
1578 else
1579 {
1580 edge_info = allocate_edge_info (true_edge);
1581 edge_info->lhs = op0;
1582 edge_info->rhs = (integer_zerop (op1)
1583 ? boolean_true_node
1584 : boolean_false_node);
1585
1586 edge_info = allocate_edge_info (false_edge);
1587 edge_info->lhs = op0;
1588 edge_info->rhs = (integer_zerop (op1)
1589 ? boolean_false_node
1590 : boolean_true_node);
1591 }
1592 }
1593 else if (is_gimple_min_invariant (op0)
1594 && (TREE_CODE (op1) == SSA_NAME
1595 || is_gimple_min_invariant (op1)))
1596 {
1597 tree cond = build2 (code, boolean_type_node, op0, op1);
1598 tree inverted = invert_truthvalue_loc (loc, cond);
1599 struct edge_info *edge_info;
1600
1601 edge_info = allocate_edge_info (true_edge);
1602 record_conditions (edge_info, cond, inverted);
1603
1604 if (code == EQ_EXPR)
1605 {
1606 edge_info->lhs = op1;
1607 edge_info->rhs = op0;
1608 }
1609
1610 edge_info = allocate_edge_info (false_edge);
1611 record_conditions (edge_info, inverted, cond);
1612
1613 if (TREE_CODE (inverted) == EQ_EXPR)
1614 {
1615 edge_info->lhs = op1;
1616 edge_info->rhs = op0;
1617 }
1618 }
1619
1620 else if (TREE_CODE (op0) == SSA_NAME
1621 && (is_gimple_min_invariant (op1)
1622 || TREE_CODE (op1) == SSA_NAME))
1623 {
1624 tree cond = build2 (code, boolean_type_node, op0, op1);
1625 tree inverted = invert_truthvalue_loc (loc, cond);
1626 struct edge_info *edge_info;
1627
1628 edge_info = allocate_edge_info (true_edge);
1629 record_conditions (edge_info, cond, inverted);
1630
1631 if (code == EQ_EXPR)
1632 {
1633 edge_info->lhs = op0;
1634 edge_info->rhs = op1;
1635 }
1636
1637 edge_info = allocate_edge_info (false_edge);
1638 record_conditions (edge_info, inverted, cond);
1639
1640 if (TREE_CODE (inverted) == EQ_EXPR)
1641 {
1642 edge_info->lhs = op0;
1643 edge_info->rhs = op1;
1644 }
1645 }
1646 }
1647
1648 /* ??? TRUTH_NOT_EXPR can create an equivalence too. */
1649 }
1650 }
1651
1652 static void
1653 dom_opt_enter_block (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED,
1654 basic_block bb)
1655 {
1656 gimple_stmt_iterator gsi;
1657
1658 if (dump_file && (dump_flags & TDF_DETAILS))
1659 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1660
1661 /* Push a marker on the stacks of local information so that we know how
1662 far to unwind when we finalize this block. */
1663 VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, NULL);
1664 VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);
1665
1666 record_equivalences_from_incoming_edge (bb);
1667
1668 /* PHI nodes can create equivalences too. */
1669 record_equivalences_from_phis (bb);
1670
1671 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1672 optimize_stmt (bb, gsi);
1673
1674 /* Now prepare to process dominated blocks. */
1675 record_edge_info (bb);
1676 cprop_into_successor_phis (bb);
1677 }
1678
1679 /* We have finished processing the dominator children of BB, perform
1680 any finalization actions in preparation for leaving this node in
1681 the dominator tree. */
1682
1683 static void
1684 dom_opt_leave_block (struct dom_walk_data *walk_data, basic_block bb)
1685 {
1686 gimple last;
1687
1688 /* If we have an outgoing edge to a block with multiple incoming and
1689 outgoing edges, then we may be able to thread the edge, i.e., we
1690 may be able to statically determine which of the outgoing edges
1691 will be traversed when the incoming edge from BB is traversed. */
1692 if (single_succ_p (bb)
1693 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
1694 && potentially_threadable_block (single_succ (bb)))
1695 {
1696 dom_thread_across_edge (walk_data, single_succ_edge (bb));
1697 }
1698 else if ((last = last_stmt (bb))
1699 && gimple_code (last) == GIMPLE_COND
1700 && EDGE_COUNT (bb->succs) == 2
1701 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
1702 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
1703 {
1704 edge true_edge, false_edge;
1705
1706 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1707
1708 /* Only try to thread the edge if it reaches a target block with
1709 more than one predecessor and more than one successor. */
1710 if (potentially_threadable_block (true_edge->dest))
1711 {
1712 struct edge_info *edge_info;
1713 unsigned int i;
1714
1715 /* Push a marker onto the available expression stack so that we
1716 unwind any expressions related to the TRUE arm before processing
1717 the false arm below. */
1718 VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, NULL);
1719 VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);
1720
1721 edge_info = (struct edge_info *) true_edge->aux;
1722
1723 /* If we have info associated with this edge, record it into
1724 our equivalence tables. */
1725 if (edge_info)
1726 {
1727 cond_equivalence *eq;
1728 tree lhs = edge_info->lhs;
1729 tree rhs = edge_info->rhs;
1730
1731 /* If we have a simple NAME = VALUE equivalence, record it. */
1732 if (lhs && TREE_CODE (lhs) == SSA_NAME)
1733 record_const_or_copy (lhs, rhs);
1734
1735 /* If we have 0 = COND or 1 = COND equivalences, record them
1736 into our expression hash tables. */
1737 for (i = 0; VEC_iterate (cond_equivalence,
1738 edge_info->cond_equivalences, i, eq); ++i)
1739 record_cond (eq);
1740 }
1741
1742 dom_thread_across_edge (walk_data, true_edge);
1743
1744 /* And restore the various tables to their state before
1745 we threaded this edge. */
1746 remove_local_expressions_from_table ();
1747 }
1748
1749 /* Similarly for the ELSE arm. */
1750 if (potentially_threadable_block (false_edge->dest))
1751 {
1752 struct edge_info *edge_info;
1753 unsigned int i;
1754
1755 VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);
1756 edge_info = (struct edge_info *) false_edge->aux;
1757
1758 /* If we have info associated with this edge, record it into
1759 our equivalence tables. */
1760 if (edge_info)
1761 {
1762 cond_equivalence *eq;
1763 tree lhs = edge_info->lhs;
1764 tree rhs = edge_info->rhs;
1765
1766 /* If we have a simple NAME = VALUE equivalence, record it. */
1767 if (lhs && TREE_CODE (lhs) == SSA_NAME)
1768 record_const_or_copy (lhs, rhs);
1769
1770 /* If we have 0 = COND or 1 = COND equivalences, record them
1771 into our expression hash tables. */
1772 for (i = 0; VEC_iterate (cond_equivalence,
1773 edge_info->cond_equivalences, i, eq); ++i)
1774 record_cond (eq);
1775 }
1776
1777 /* Now thread the edge. */
1778 dom_thread_across_edge (walk_data, false_edge);
1779
1780 /* No need to remove local expressions from our tables
1781 or restore vars to their original value as that will
1782 be done immediately below. */
1783 }
1784 }
1785
1786 remove_local_expressions_from_table ();
1787 restore_vars_to_original_value ();
1788 }
1789
1790 /* Search for redundant computations in STMT. If any are found, then
1791 replace them with the variable holding the result of the computation.
1792
1793 If safe, record this expression into the available expression hash
1794 table. */
1795
1796 static void
1797 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
1798 {
1799 tree expr_type;
1800 tree cached_lhs;
1801 bool insert = true;
1802 bool assigns_var_p = false;
1803
1804 gimple stmt = gsi_stmt (*gsi);
1805
1806 tree def = gimple_get_lhs (stmt);
1807
1808 /* Certain expressions on the RHS can be optimized away, but cannot
1809 themselves be entered into the hash tables. */
1810 if (! def
1811 || TREE_CODE (def) != SSA_NAME
1812 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
1813 || gimple_vdef (stmt)
1814 /* Do not record equivalences for increments of ivs. This would create
1815 overlapping live ranges for a very questionable gain. */
1816 || simple_iv_increment_p (stmt))
1817 insert = false;
1818
1819 /* Check if the expression has been computed before. */
1820 cached_lhs = lookup_avail_expr (stmt, insert);
1821
1822 opt_stats.num_exprs_considered++;
1823
1824 /* Get the type of the expression we are trying to optimize. */
1825 if (is_gimple_assign (stmt))
1826 {
1827 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
1828 assigns_var_p = true;
1829 }
1830 else if (gimple_code (stmt) == GIMPLE_COND)
1831 expr_type = boolean_type_node;
1832 else if (is_gimple_call (stmt))
1833 {
1834 gcc_assert (gimple_call_lhs (stmt));
1835 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
1836 assigns_var_p = true;
1837 }
1838 else if (gimple_code (stmt) == GIMPLE_SWITCH)
1839 expr_type = TREE_TYPE (gimple_switch_index (stmt));
1840 else
1841 gcc_unreachable ();
1842
1843 if (!cached_lhs)
1844 return;
1845
1846 /* It is safe to ignore types here since we have already done
1847 type checking in the hashing and equality routines. In fact
1848 type checking here merely gets in the way of constant
1849 propagation. Also, make sure that it is safe to propagate
1850 CACHED_LHS into the expression in STMT. */
1851 if ((TREE_CODE (cached_lhs) != SSA_NAME
1852 && (assigns_var_p
1853 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
1854 || may_propagate_copy_into_stmt (stmt, cached_lhs))
1855 {
1856 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
1857 || is_gimple_min_invariant (cached_lhs));
1858
1859 if (dump_file && (dump_flags & TDF_DETAILS))
1860 {
1861 fprintf (dump_file, " Replaced redundant expr '");
1862 print_gimple_expr (dump_file, stmt, 0, dump_flags);
1863 fprintf (dump_file, "' with '");
1864 print_generic_expr (dump_file, cached_lhs, dump_flags);
1865 fprintf (dump_file, "'\n");
1866 }
1867
1868 opt_stats.num_re++;
1869
1870 if (assigns_var_p
1871 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
1872 cached_lhs = fold_convert (expr_type, cached_lhs);
1873
1874 propagate_tree_value_into_stmt (gsi, cached_lhs);
1875
1876 /* Since it is always necessary to mark the result as modified,
1877 perhaps we should move this into propagate_tree_value_into_stmt
1878 itself. */
1879 gimple_set_modified (gsi_stmt (*gsi), true);
1880 }
1881 }
1882
1883 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
1884 the available expressions table or the const_and_copies table.
1885 Detect and record those equivalences. */
1886 /* We handle only very simple copy equivalences here. The heavy
1887 lifting is done by eliminate_redundant_computations. */
1888
1889 static void
1890 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
1891 {
1892 tree lhs;
1893 enum tree_code lhs_code;
1894
1895 gcc_assert (is_gimple_assign (stmt));
1896
1897 lhs = gimple_assign_lhs (stmt);
1898 lhs_code = TREE_CODE (lhs);
1899
1900 if (lhs_code == SSA_NAME
1901 && gimple_assign_single_p (stmt))
1902 {
1903 tree rhs = gimple_assign_rhs1 (stmt);
1904
1905 /* If the RHS of the assignment is a constant or another variable that
1906 may be propagated, register it in the CONST_AND_COPIES table. We
1907 do not need to record unwind data for this, since this is a true
1908 assignment and not an equivalence inferred from a comparison. All
1909 uses of this ssa name are dominated by this assignment, so unwinding
1910 just costs time and space. */
1911 if (may_optimize_p
1912 && (TREE_CODE (rhs) == SSA_NAME
1913 || is_gimple_min_invariant (rhs)))
1914 {
1915 if (dump_file && (dump_flags & TDF_DETAILS))
1916 {
1917 fprintf (dump_file, "==== ASGN ");
1918 print_generic_expr (dump_file, lhs, 0);
1919 fprintf (dump_file, " = ");
1920 print_generic_expr (dump_file, rhs, 0);
1921 fprintf (dump_file, "\n");
1922 }
1923
1924 set_ssa_name_value (lhs, rhs);
1925 }
1926 }
1927
1928 /* A memory store, even an aliased store, creates a useful
1929 equivalence. By exchanging the LHS and RHS, creating suitable
1930 vops and recording the result in the available expression table,
1931 we may be able to expose more redundant loads. */
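  /* A hedged, hypothetical sketch of the idea: after the store

         *p_1 = x_2;

     we record the exchanged statement  x_2 = *p_1  (using the store's
     VDEF as its VUSE), so that a later load  y_3 = *p_1  can be replaced
     by  y_3 = x_2  in eliminate_redundant_computations.  */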
1932 if (!gimple_has_volatile_ops (stmt)
1933 && gimple_references_memory_p (stmt)
1934 && gimple_assign_single_p (stmt)
1935 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
1936 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
1937 && !is_gimple_reg (lhs))
1938 {
1939 tree rhs = gimple_assign_rhs1 (stmt);
1940 gimple new_stmt;
1941
1942 /* Build a new statement with the RHS and LHS exchanged. */
1943 if (TREE_CODE (rhs) == SSA_NAME)
1944 {
1945 /* NOTE tuples. The call to gimple_build_assign below replaced
1946 a call to build_gimple_modify_stmt, which did not set the
1947 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
1948 may cause an SSA validation failure, as the LHS may be a
1949 default-initialized name and should have no definition. I'm
1950 a bit dubious of this, as the artificial statement that we
1951 generate here may in fact be ill-formed, but it is simply
1952 used as an internal device in this pass, and never becomes
1953 part of the CFG. */
1954 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
1955 new_stmt = gimple_build_assign (rhs, lhs);
1956 SSA_NAME_DEF_STMT (rhs) = defstmt;
1957 }
1958 else
1959 new_stmt = gimple_build_assign (rhs, lhs);
1960
1961 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
1962
1963 /* Finally enter the statement into the available expression
1964 table. */
1965 lookup_avail_expr (new_stmt, true);
1966 }
1967 }
1968
1969 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
1970 CONST_AND_COPIES. */
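
/* Hypothetical example: if CONST_AND_COPIES records x_1 -> 7, a use of
   x_1 in  y_2 = x_1 + z_3;  is rewritten to  y_2 = 7 + z_3;  (subject to
   the safety checks below).  */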
1971
1972 static void
1973 cprop_operand (gimple stmt, use_operand_p op_p)
1974 {
1975 tree val;
1976 tree op = USE_FROM_PTR (op_p);
1977
1978 /* If the operand has a known constant value or it is known to be a
1979 copy of some other variable, use the value or copy stored in
1980 CONST_AND_COPIES. */
1981 val = SSA_NAME_VALUE (op);
1982 if (val && val != op)
1983 {
1984 /* Do not change the base variable in the virtual operand
1985 tables. That would make it impossible to reconstruct
1986 the renamed virtual operand if we later modify this
1987 statement. Also only allow the new value to be an SSA_NAME
1988 for propagation into virtual operands. */
1989 if (!is_gimple_reg (op)
1990 && (TREE_CODE (val) != SSA_NAME
1991 || is_gimple_reg (val)
1992 || get_virtual_var (val) != get_virtual_var (op)))
1993 return;
1994
1995 /* Do not replace hard register operands in asm statements. */
1996 if (gimple_code (stmt) == GIMPLE_ASM
1997 && !may_propagate_copy_into_asm (op))
1998 return;
1999
2000 /* Certain operands are not allowed to be copy propagated due
2001 to their interaction with exception handling and some GCC
2002 extensions. */
2003 if (!may_propagate_copy (op, val))
2004 return;
2005
2006 /* Do not propagate addresses that point to volatiles into memory
2007 stmts without volatile operands. */
2008 if (POINTER_TYPE_P (TREE_TYPE (val))
2009 && TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (val)))
2010 && gimple_has_mem_ops (stmt)
2011 && !gimple_has_volatile_ops (stmt))
2012 return;
2013
2014 /* Do not propagate copies if the propagated value is at a deeper loop
2015 depth than the propagatee. Otherwise, this may move loop variant
2016 variables outside of their loops and prevent coalescing
2017 opportunities. If the value was loop invariant, it will be hoisted
2018 by LICM and exposed for copy propagation. */
2019 if (loop_depth_of_name (val) > loop_depth_of_name (op))
2020 return;
2021
2022 /* Do not propagate copies into simple IV increment statements.
2023 See PR23821 for how this can disturb IV analysis. */
2024 if (TREE_CODE (val) != INTEGER_CST
2025 && simple_iv_increment_p (stmt))
2026 return;
2027
2028 /* Dump details. */
2029 if (dump_file && (dump_flags & TDF_DETAILS))
2030 {
2031 fprintf (dump_file, " Replaced '");
2032 print_generic_expr (dump_file, op, dump_flags);
2033 fprintf (dump_file, "' with %s '",
2034 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2035 print_generic_expr (dump_file, val, dump_flags);
2036 fprintf (dump_file, "'\n");
2037 }
2038
2039 if (TREE_CODE (val) != SSA_NAME)
2040 opt_stats.num_const_prop++;
2041 else
2042 opt_stats.num_copy_prop++;
2043
2044 propagate_value (op_p, val);
2045
2046 /* And note that we modified this statement. This is now
2047 safe, even if we changed virtual operands since we will
2048 rescan the statement and rewrite its operands again. */
2049 gimple_set_modified (stmt, true);
2050 }
2051 }
2052
2053 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2054 known value for that SSA_NAME (or NULL if no value is known).
2055
2056 Propagate values from CONST_AND_COPIES into the uses, vuses and
2057 vdef_ops of STMT. */
2058
2059 static void
2060 cprop_into_stmt (gimple stmt)
2061 {
2062 use_operand_p op_p;
2063 ssa_op_iter iter;
2064
2065 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_ALL_USES)
2066 {
2067 if (TREE_CODE (USE_FROM_PTR (op_p)) == SSA_NAME)
2068 cprop_operand (stmt, op_p);
2069 }
2070 }
2071
2072 /* Optimize the statement pointed to by iterator SI.
2073
2074 We try to perform some simplistic global redundancy elimination and
2075 constant propagation:
2076
2077 1- To detect global redundancy, we keep track of expressions that have
2078 been computed in this block and its dominators. If we find that the
2079 same expression is computed more than once, we eliminate repeated
2080 computations by using the target of the first one.
2081
2082 2- Constant values and copy assignments. This is used to do very
2083 simplistic constant and copy propagation. When a constant or copy
2084 assignment is found, we map the value on the RHS of the assignment to
2085 the variable in the LHS in the CONST_AND_COPIES table. */
2086
2087 static void
2088 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2089 {
2090 gimple stmt, old_stmt;
2091 bool may_optimize_p;
2092 bool modified_p = false;
2093
2094 old_stmt = stmt = gsi_stmt (si);
2095
2096 if (gimple_code (stmt) == GIMPLE_COND)
2097 canonicalize_comparison (stmt);
2098
2099 update_stmt_if_modified (stmt);
2100 opt_stats.num_stmts++;
2101
2102 if (dump_file && (dump_flags & TDF_DETAILS))
2103 {
2104 fprintf (dump_file, "Optimizing statement ");
2105 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2106 }
2107
2108 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2109 cprop_into_stmt (stmt);
2110
2111 /* If the statement has been modified with constant replacements,
2112 fold its RHS before checking for redundant computations. */
2113 if (gimple_modified_p (stmt))
2114 {
2115 tree rhs = NULL;
2116
2117 /* Try to fold the statement making sure that STMT is kept
2118 up to date. */
2119 if (fold_stmt (&si))
2120 {
2121 stmt = gsi_stmt (si);
2122 gimple_set_modified (stmt, true);
2123
2124 if (dump_file && (dump_flags & TDF_DETAILS))
2125 {
2126 fprintf (dump_file, " Folded to: ");
2127 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2128 }
2129 }
2130
2131 /* We only need to consider cases that can yield a gimple operand. */
2132 if (gimple_assign_single_p (stmt))
2133 rhs = gimple_assign_rhs1 (stmt);
2134 else if (gimple_code (stmt) == GIMPLE_GOTO)
2135 rhs = gimple_goto_dest (stmt);
2136 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2137 /* This should never be an ADDR_EXPR. */
2138 rhs = gimple_switch_index (stmt);
2139
2140 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2141 recompute_tree_invariant_for_addr_expr (rhs);
2142
2143 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2144 even if fold_stmt updated the stmt already and thus cleared
2145 the gimple_modified_p flag on it. */
2146 modified_p = true;
2147 }
2148
2149 /* Check for redundant computations. Do this optimization only for
2150 assignments and calls that have no volatile ops or side effects, and for conditionals and switches. */
2151 may_optimize_p = (!gimple_has_volatile_ops (stmt)
2152 && ((is_gimple_assign (stmt)
2153 && !gimple_rhs_has_side_effects (stmt))
2154 || (is_gimple_call (stmt)
2155 && gimple_call_lhs (stmt) != NULL_TREE
2156 && !gimple_rhs_has_side_effects (stmt))
2157 || gimple_code (stmt) == GIMPLE_COND
2158 || gimple_code (stmt) == GIMPLE_SWITCH));
2159
2160 if (may_optimize_p)
2161 {
2162 if (gimple_code (stmt) == GIMPLE_CALL)
2163 {
2164 /* Resolve __builtin_constant_p. If it hasn't been
2165 folded to integer_one_node by now, it's fairly
2166 certain that the value simply isn't constant. */
2167 tree callee = gimple_call_fndecl (stmt);
2168 if (callee
2169 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2170 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2171 {
2172 propagate_tree_value_into_stmt (&si, integer_zero_node);
2173 stmt = gsi_stmt (si);
2174 }
2175 }
2176
2177 update_stmt_if_modified (stmt);
2178 eliminate_redundant_computations (&si);
2179 stmt = gsi_stmt (si);
2180
2181 /* Perform simple redundant store elimination. */
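      /* A hypothetical sketch of what this catches:

             x_1 = *p_2;
             ...
             *p_2 = x_1;

         The store writes back the value just loaded, so (as long as the
         VUSE still matches, i.e. memory was not clobbered in between) it
         can be removed.  */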
2182 if (gimple_assign_single_p (stmt)
2183 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2184 {
2185 tree lhs = gimple_assign_lhs (stmt);
2186 tree rhs = gimple_assign_rhs1 (stmt);
2187 tree cached_lhs;
2188 gimple new_stmt;
2189 if (TREE_CODE (rhs) == SSA_NAME)
2190 {
2191 tree tem = SSA_NAME_VALUE (rhs);
2192 if (tem)
2193 rhs = tem;
2194 }
2195 /* Build a new statement with the RHS and LHS exchanged. */
2196 if (TREE_CODE (rhs) == SSA_NAME)
2197 {
2198 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2199 new_stmt = gimple_build_assign (rhs, lhs);
2200 SSA_NAME_DEF_STMT (rhs) = defstmt;
2201 }
2202 else
2203 new_stmt = gimple_build_assign (rhs, lhs);
2204 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2205 cached_lhs = lookup_avail_expr (new_stmt, false);
2206 if (cached_lhs
2207 && rhs == cached_lhs)
2208 {
2209 basic_block bb = gimple_bb (stmt);
2210 int lp_nr = lookup_stmt_eh_lp (stmt);
2211 unlink_stmt_vdef (stmt);
2212 gsi_remove (&si, true);
2213 if (lp_nr != 0)
2214 {
2215 bitmap_set_bit (need_eh_cleanup, bb->index);
2216 if (dump_file && (dump_flags & TDF_DETAILS))
2217 fprintf (dump_file, " Flagged to clear EH edges.\n");
2218 }
2219 return;
2220 }
2221 }
2222 }
2223
2224 /* Record any additional equivalences created by this statement. */
2225 if (is_gimple_assign (stmt))
2226 record_equivalences_from_stmt (stmt, may_optimize_p);
2227
2228 /* If STMT is a COND_EXPR and it was modified, then we may know
2229 where it goes. If that is the case, then mark the CFG as altered.
2230
2231 This will cause us to later call remove_unreachable_blocks and
2232 cleanup_tree_cfg when it is safe to do so. It is not safe to
2233 clean things up here since removal of edges and such can trigger
2234 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2235 the manager.
2236
2237 That's all fine and good, except that once SSA_NAMEs are released
2238 to the manager, we must not call create_ssa_name until all references
2239 to released SSA_NAMEs have been eliminated.
2240
2241 All references to the deleted SSA_NAMEs can not be eliminated until
2242 we remove unreachable blocks.
2243
2244 We can not remove unreachable blocks until after we have completed
2245 any queued jump threading.
2246
2247 We can not complete any queued jump threads until we have taken
2248 appropriate variables out of SSA form. Taking variables out of
2249 SSA form can call create_ssa_name and thus we lose.
2250
2251 Ultimately I suspect we're going to need to change the interface
2252 into the SSA_NAME manager. */
2253 if (gimple_modified_p (stmt) || modified_p)
2254 {
2255 tree val = NULL;
2256
2257 update_stmt_if_modified (stmt);
2258
2259 if (gimple_code (stmt) == GIMPLE_COND)
2260 val = fold_binary_loc (gimple_location (stmt),
2261 gimple_cond_code (stmt), boolean_type_node,
2262 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2263 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2264 val = gimple_switch_index (stmt);
2265
2266 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2267 cfg_altered = true;
2268
2269 /* If we simplified a statement in such a way that it can be shown
2270 not to trap, update the EH information and the CFG to match. */
2271 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2272 {
2273 bitmap_set_bit (need_eh_cleanup, bb->index);
2274 if (dump_file && (dump_flags & TDF_DETAILS))
2275 fprintf (dump_file, " Flagged to clear EH edges.\n");
2276 }
2277 }
2278 }
2279
2280 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2281 If found, return its LHS. Otherwise insert STMT in the table (when
2282 INSERT is true) and return NULL_TREE.
2283
2284 Also, when an expression is first inserted in the table, it is
2285 also added to AVAIL_EXPRS_STACK, so that it can be removed when
2286 we finish processing this block and its children. */
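
/* For illustration (hypothetical statement): looking up  t_5 = a_1 * b_2
   with INSERT true either returns the LHS of an earlier occurrence of
   a_1 * b_2 recorded in this block or a dominator, or records this
   statement and returns NULL_TREE.  */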
2287
2288 static tree
2289 lookup_avail_expr (gimple stmt, bool insert)
2290 {
2291 void **slot;
2292 tree lhs;
2293 tree temp;
2294 struct expr_hash_elt element;
2295
2296 /* Get LHS of assignment or call, else NULL_TREE. */
2297 lhs = gimple_get_lhs (stmt);
2298
2299 initialize_hash_element (stmt, lhs, &element);
2300
2301 if (dump_file && (dump_flags & TDF_DETAILS))
2302 {
2303 fprintf (dump_file, "LKUP ");
2304 print_expr_hash_elt (dump_file, &element);
2305 }
2306
2307 /* Don't bother remembering constant assignments and copy operations.
2308 Constants and copy operations are handled by the constant/copy propagator
2309 in optimize_stmt. */
2310 if (element.expr.kind == EXPR_SINGLE
2311 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2312 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2313 return NULL_TREE;
2314
2315 /* Finally try to find the expression in the main expression hash table. */
2316 slot = htab_find_slot_with_hash (avail_exprs, &element, element.hash,
2317 (insert ? INSERT : NO_INSERT));
2318 if (slot == NULL)
2319 return NULL_TREE;
2320
2321 if (*slot == NULL)
2322 {
2323 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2324 *element2 = element;
2325 element2->stamp = element2;
2326 *slot = (void *) element2;
2327
2328 if (dump_file && (dump_flags & TDF_DETAILS))
2329 {
2330 fprintf (dump_file, "2>>> ");
2331 print_expr_hash_elt (dump_file, element2);
2332 }
2333
2334 VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, element2);
2335 return NULL_TREE;
2336 }
2337
2338 /* Extract the LHS of the assignment so that it can be used as the current
2339 definition of another variable. */
2340 lhs = ((struct expr_hash_elt *)*slot)->lhs;
2341
2342 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2343 use the value from the const_and_copies table. */
2344 if (TREE_CODE (lhs) == SSA_NAME)
2345 {
2346 temp = SSA_NAME_VALUE (lhs);
2347 if (temp)
2348 lhs = temp;
2349 }
2350
2351 if (dump_file && (dump_flags & TDF_DETAILS))
2352 {
2353 fprintf (dump_file, "FIND: ");
2354 print_generic_expr (dump_file, lhs, 0);
2355 fprintf (dump_file, "\n");
2356 }
2357
2358 return lhs;
2359 }
2360
2361 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2362 for expressions using the code of the expression and the SSA numbers of
2363 its operands. */
2364
2365 static hashval_t
2366 avail_expr_hash (const void *p)
2367 {
2368 gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
2369 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2370 tree vuse;
2371 hashval_t val = 0;
2372
2373 val = iterative_hash_hashable_expr (expr, val);
2374
2375 /* If the hash table entry is not associated with a statement, then we
2376 can just hash the expression and not worry about virtual operands
2377 and such. */
2378 if (!stmt)
2379 return val;
2380
2381 /* Add the SSA version number of the vuse operand. This is important
2382 because compound variables like arrays are not renamed in the
2383 operands. Rather, the rename is done on the virtual variable
2384 representing all the elements of the array. */
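  /* Hypothetical illustration: two loads of a[i_1] separated by a store
     into the array have different VUSEs, so they hash (and compare)
     differently and the second load is not considered redundant.  */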
2385 if ((vuse = gimple_vuse (stmt)))
2386 val = iterative_hash_expr (vuse, val);
2387
2388 return val;
2389 }
2390
2391 static hashval_t
2392 real_avail_expr_hash (const void *p)
2393 {
2394 return ((const struct expr_hash_elt *)p)->hash;
2395 }
2396
2397 static int
2398 avail_expr_eq (const void *p1, const void *p2)
2399 {
2400 gimple stmt1 = ((const struct expr_hash_elt *)p1)->stmt;
2401 const struct hashable_expr *expr1 = &((const struct expr_hash_elt *)p1)->expr;
2402 const struct expr_hash_elt *stamp1 = ((const struct expr_hash_elt *)p1)->stamp;
2403 gimple stmt2 = ((const struct expr_hash_elt *)p2)->stmt;
2404 const struct hashable_expr *expr2 = &((const struct expr_hash_elt *)p2)->expr;
2405 const struct expr_hash_elt *stamp2 = ((const struct expr_hash_elt *)p2)->stamp;
2406
2407 /* This case should apply only when removing entries from the table. */
2408 if (stamp1 == stamp2)
2409 return true;
2410
2411 /* FIXME tuples:
2412 We add stmts to a hash table and then modify them. To detect the case
2413 that we modify a stmt and then search for it, we assume that the hash
2414 is always modified by that change.
2415 We have to fully check why this doesn't happen on trunk or rewrite
2416 this in a more reliable (and easier to understand) way. */
2417 if (((const struct expr_hash_elt *)p1)->hash
2418 != ((const struct expr_hash_elt *)p2)->hash)
2419 return false;
2420
2421 /* In case of a collision, both RHS have to be identical and have the
2422 same VUSE operands. */
2423 if (hashable_expr_equal_p (expr1, expr2)
2424 && types_compatible_p (expr1->type, expr2->type))
2425 {
2426 /* Note that STMT1 and/or STMT2 may be NULL. */
2427 return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
2428 == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
2429 }
2430
2431 return false;
2432 }
2433
2434 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2435 up degenerate PHIs created or exposed by jump threading. */
2436
2437 /* Given PHI, return its RHS if the PHI is a degenerate, otherwise return
2438 NULL. */
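
/* Hypothetical example of a degenerate PHI:

       x_4 = PHI <y_2(3), y_2(4), x_4(5)>

   Ignoring the self-reference x_4, every remaining argument is y_2, so
   the PHI is equivalent to the copy  x_4 = y_2  and this function
   returns y_2.  */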
2439
2440 tree
2441 degenerate_phi_result (gimple phi)
2442 {
2443 tree lhs = gimple_phi_result (phi);
2444 tree val = NULL;
2445 size_t i;
2446
2447 /* Ignoring arguments which are the same as LHS, if all the remaining
2448 arguments are the same, then the PHI is a degenerate and has the
2449 value of that common argument. */
2450 for (i = 0; i < gimple_phi_num_args (phi); i++)
2451 {
2452 tree arg = gimple_phi_arg_def (phi, i);
2453
2454 if (arg == lhs)
2455 continue;
2456 else if (!arg)
2457 break;
2458 else if (!val)
2459 val = arg;
2460 else if (arg == val)
2461 continue;
2462 /* We bring in some of operand_equal_p not only to speed things
2463 up, but also to avoid crashing when dereferencing the type of
2464 a released SSA name. */
2465 else if (TREE_CODE (val) != TREE_CODE (arg)
2466 || TREE_CODE (val) == SSA_NAME
2467 || !operand_equal_p (arg, val, 0))
2468 break;
2469 }
2470 return (i == gimple_phi_num_args (phi) ? val : NULL);
2471 }
2472
2473 /* Given a statement STMT, which is either a PHI node or an assignment,
2474 remove it from the IL. */
2475
2476 static void
2477 remove_stmt_or_phi (gimple stmt)
2478 {
2479 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2480
2481 if (gimple_code (stmt) == GIMPLE_PHI)
2482 remove_phi_node (&gsi, true);
2483 else
2484 {
2485 gsi_remove (&gsi, true);
2486 release_defs (stmt);
2487 }
2488 }
2489
2490 /* Given a statement STMT, which is either a PHI node or an assignment,
2491 return the "rhs" of the node. In the case of a non-degenerate
2492 PHI, NULL is returned. */
2493
2494 static tree
2495 get_rhs_or_phi_arg (gimple stmt)
2496 {
2497 if (gimple_code (stmt) == GIMPLE_PHI)
2498 return degenerate_phi_result (stmt);
2499 else if (gimple_assign_single_p (stmt))
2500 return gimple_assign_rhs1 (stmt);
2501 else
2502 gcc_unreachable ();
2503 }
2504
2505
2506 /* Given a statement STMT, which is either a PHI node or an assignment,
2507 return the "lhs" of the node. */
2508
2509 static tree
2510 get_lhs_or_phi_result (gimple stmt)
2511 {
2512 if (gimple_code (stmt) == GIMPLE_PHI)
2513 return gimple_phi_result (stmt);
2514 else if (is_gimple_assign (stmt))
2515 return gimple_assign_lhs (stmt);
2516 else
2517 gcc_unreachable ();
2518 }
2519
2520 /* Propagate RHS into all uses of LHS (when possible).
2521
2522 RHS and LHS are derived from STMT, which is passed in solely so
2523 that we can remove it if propagation is successful.
2524
2525 When propagating into a PHI node or into a statement which turns
2526 into a trivial copy or constant initialization, set the
2527 appropriate bit in INTERESTING_NAMEs so that we will visit those
2528 nodes as well in an effort to pick up secondary optimization
2529 opportunities. */
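
/* Hypothetical illustration: for STMT  x_2 = y_1;  (LHS x_2, RHS y_1),
   every immediate use of x_2 is rewritten to use y_1.  If a use is a PHI,
   or the rewritten statement becomes a trivial copy or constant
   initialization, its result is marked in INTERESTING_NAMES so it is
   revisited later.  */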
2530
2531 static void
2532 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2533 {
2534 /* First verify that propagation is valid and isn't going to move a
2535 loop variant variable outside its loop. */
2536 if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
2537 && (TREE_CODE (rhs) != SSA_NAME
2538 || ! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs))
2539 && may_propagate_copy (lhs, rhs)
2540 && loop_depth_of_name (lhs) >= loop_depth_of_name (rhs))
2541 {
2542 use_operand_p use_p;
2543 imm_use_iterator iter;
2544 gimple use_stmt;
2545 bool all = true;
2546
2547 /* Dump details. */
2548 if (dump_file && (dump_flags & TDF_DETAILS))
2549 {
2550 fprintf (dump_file, " Replacing '");
2551 print_generic_expr (dump_file, lhs, dump_flags);
2552 fprintf (dump_file, "' with %s '",
2553 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2554 print_generic_expr (dump_file, rhs, dump_flags);
2555 fprintf (dump_file, "'\n");
2556 }
2557
2558 /* Walk over every use of LHS and try to replace the use with RHS.
2559 At this point the only reason why such a propagation would not
2560 be successful would be if the use occurs in an ASM_EXPR. */
2561 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2562 {
2563 /* Leave debug stmts alone. If we succeed in propagating
2564 all non-debug uses, we'll drop the DEF, and propagation
2565 into debug stmts will occur then. */
2566 if (gimple_debug_bind_p (use_stmt))
2567 continue;
2568
2569 /* It's not always safe to propagate into an ASM_EXPR. */
2570 if (gimple_code (use_stmt) == GIMPLE_ASM
2571 && ! may_propagate_copy_into_asm (lhs))
2572 {
2573 all = false;
2574 continue;
2575 }
2576
2577 /* It's not ok to propagate into the definition stmt of RHS.
2578 <bb 9>:
2579 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2580 g_67.1_6 = prephitmp.12_36;
2581 goto <bb 9>;
2582 While this is strictly all dead code, we do not want to
2583 deal with it here. */
2584 if (TREE_CODE (rhs) == SSA_NAME
2585 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2586 {
2587 all = false;
2588 continue;
2589 }
2590
2591 /* Dump details. */
2592 if (dump_file && (dump_flags & TDF_DETAILS))
2593 {
2594 fprintf (dump_file, " Original statement:");
2595 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2596 }
2597
2598 /* Propagate the RHS into this use of the LHS. */
2599 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2600 propagate_value (use_p, rhs);
2601
2602 /* Special cases to avoid useless calls into the folding
2603 routines, operand scanning, etc.
2604
2605 First, propagation into a PHI may cause the PHI to become
2606 a degenerate, so mark the PHI as interesting. No other
2607 actions are necessary.
2608
2609 Second, if we're propagating a virtual operand and the
2610 propagation does not change the underlying _DECL node for
2611 the virtual operand, then no further actions are necessary. */
2612 if (gimple_code (use_stmt) == GIMPLE_PHI
2613 || (! is_gimple_reg (lhs)
2614 && TREE_CODE (rhs) == SSA_NAME
2615 && SSA_NAME_VAR (lhs) == SSA_NAME_VAR (rhs)))
2616 {
2617 /* Dump details. */
2618 if (dump_file && (dump_flags & TDF_DETAILS))
2619 {
2620 fprintf (dump_file, " Updated statement:");
2621 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2622 }
2623
2624 /* Propagation into a PHI may expose new degenerate PHIs,
2625 so mark the result of the PHI as interesting. */
2626 if (gimple_code (use_stmt) == GIMPLE_PHI)
2627 {
2628 tree result = get_lhs_or_phi_result (use_stmt);
2629 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2630 }
2631
2632 continue;
2633 }
2634
2635 /* From this point onward we are propagating into a
2636 real statement. Folding may (or may not) be possible,
2637 we may expose new operands, expose dead EH edges,
2638 etc. */
2639 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2640 cannot fold a call that simplifies to a constant,
2641 because the GIMPLE_CALL must be replaced by a
2642 GIMPLE_ASSIGN, and there is no way to effect such a
2643 transformation in-place. We might want to consider
2644 using the more general fold_stmt here. */
2645 fold_stmt_inplace (use_stmt);
2646
2647 /* Sometimes propagation can expose new operands to the
2648 renamer. */
2649 update_stmt (use_stmt);
2650
2651 /* Dump details. */
2652 if (dump_file && (dump_flags & TDF_DETAILS))
2653 {
2654 fprintf (dump_file, " Updated statement:");
2655 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2656 }
2657
2658 /* If we replaced a variable index with a constant, then
2659 we would need to update the invariant flag for ADDR_EXPRs. */
2660 if (gimple_assign_single_p (use_stmt)
2661 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2662 recompute_tree_invariant_for_addr_expr
2663 (gimple_assign_rhs1 (use_stmt));
2664
2665 /* If we cleaned up EH information from the statement,
2666 mark its containing block as needing EH cleanups. */
2667 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2668 {
2669 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2670 if (dump_file && (dump_flags & TDF_DETAILS))
2671 fprintf (dump_file, " Flagged to clear EH edges.\n");
2672 }
2673
2674 /* Propagation may expose new trivial copy/constant propagation
2675 opportunities. */
2676 if (gimple_assign_single_p (use_stmt)
2677 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2678 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2679 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2680 {
2681 tree result = get_lhs_or_phi_result (use_stmt);
2682 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2683 }
2684
2685 /* Propagation into these nodes may make certain edges in
2686 the CFG unexecutable. We want to identify them, as PHI nodes
2687 at the destinations of those unexecutable edges may become
2688 degenerate. */
2689 else if (gimple_code (use_stmt) == GIMPLE_COND
2690 || gimple_code (use_stmt) == GIMPLE_SWITCH
2691 || gimple_code (use_stmt) == GIMPLE_GOTO)
2692 {
2693 tree val;
2694
2695 if (gimple_code (use_stmt) == GIMPLE_COND)
2696 val = fold_binary_loc (gimple_location (use_stmt),
2697 gimple_cond_code (use_stmt),
2698 boolean_type_node,
2699 gimple_cond_lhs (use_stmt),
2700 gimple_cond_rhs (use_stmt));
2701 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2702 val = gimple_switch_index (use_stmt);
2703 else
2704 val = gimple_goto_dest (use_stmt);
2705
2706 if (val && is_gimple_min_invariant (val))
2707 {
2708 basic_block bb = gimple_bb (use_stmt);
2709 edge te = find_taken_edge (bb, val);
2710 edge_iterator ei;
2711 edge e;
2712 gimple_stmt_iterator gsi, psi;
2713
2714 /* Remove all outgoing edges except TE. */
2715 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2716 {
2717 if (e != te)
2718 {
2719 /* Mark all the PHI nodes at the destination of
2720 the unexecutable edge as interesting. */
2721 for (psi = gsi_start_phis (e->dest);
2722 !gsi_end_p (psi);
2723 gsi_next (&psi))
2724 {
2725 gimple phi = gsi_stmt (psi);
2726
2727 tree result = gimple_phi_result (phi);
2728 int version = SSA_NAME_VERSION (result);
2729
2730 bitmap_set_bit (interesting_names, version);
2731 }
2732
2733 te->probability += e->probability;
2734
2735 te->count += e->count;
2736 remove_edge (e);
2737 cfg_altered = true;
2738 }
2739 else
2740 ei_next (&ei);
2741 }
2742
2743 gsi = gsi_last_bb (gimple_bb (use_stmt));
2744 gsi_remove (&gsi, true);
2745
2746 /* And fixup the flags on the single remaining edge. */
2747 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2748 te->flags &= ~EDGE_ABNORMAL;
2749 te->flags |= EDGE_FALLTHRU;
2750 if (te->probability > REG_BR_PROB_BASE)
2751 te->probability = REG_BR_PROB_BASE;
2752 }
2753 }
2754 }
2755
2756 /* Ensure there is nothing else to do. */
2757 gcc_assert (!all || has_zero_uses (lhs));
2758
2759 /* If we were able to propagate away all uses of LHS, then
2760 we can remove STMT. */
2761 if (all)
2762 remove_stmt_or_phi (stmt);
2763 }
2764 }
2765
2766 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2767 a statement that is a trivial copy or constant initialization.
2768
2769 Attempt to eliminate STMT by propagating its RHS into all uses of
2770 its LHS. This may in turn set new bits in INTERESTING_NAMES
2771 for nodes we want to revisit later.
2772
2773 All exit paths should clear INTERESTING_NAMES for the result
2774 of STMT. */
2775
2776 static void
2777 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2778 {
2779 tree lhs = get_lhs_or_phi_result (stmt);
2780 tree rhs;
2781 int version = SSA_NAME_VERSION (lhs);
2782
2783 /* If the LHS of this statement or PHI has no uses, then we can
2784 just eliminate it. This can occur if, for example, the PHI
2785 was created by block duplication due to threading and its only
2786 use was in the conditional at the end of the block which was
2787 deleted. */
2788 if (has_zero_uses (lhs))
2789 {
2790 bitmap_clear_bit (interesting_names, version);
2791 remove_stmt_or_phi (stmt);
2792 return;
2793 }
2794
2795 /* Get the RHS of the assignment or PHI node if the PHI is a
2796 degenerate. */
2797 rhs = get_rhs_or_phi_arg (stmt);
2798 if (!rhs)
2799 {
2800 bitmap_clear_bit (interesting_names, version);
2801 return;
2802 }
2803
2804 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
2805
2806 /* Note that STMT may well have been deleted by now, so do
2807 not access it; instead use the saved version # to clear
2808 STMT's entry in the worklist. */
2809 bitmap_clear_bit (interesting_names, version);
2810 }
2811
2812 /* The first phase in degenerate PHI elimination.
2813
2814 Eliminate the degenerate PHIs in BB, then recurse on the
2815 dominator children of BB. */
2816
2817 static void
2818 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
2819 {
2820 gimple_stmt_iterator gsi;
2821 basic_block son;
2822
2823 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2824 {
2825 gimple phi = gsi_stmt (gsi);
2826
2827 eliminate_const_or_copy (phi, interesting_names);
2828 }
2829
2830 /* Recurse into the dominator children of BB. */
2831 for (son = first_dom_son (CDI_DOMINATORS, bb);
2832 son;
2833 son = next_dom_son (CDI_DOMINATORS, son))
2834 eliminate_degenerate_phis_1 (son, interesting_names);
2835 }
2836
2837
2838 /* A very simple pass to eliminate degenerate PHI nodes from the
2839 IL. This is meant to be fast enough to be able to be run several
2840 times in the optimization pipeline.
2841
2842 Certain optimizations, particularly those which duplicate blocks
2843 or remove edges from the CFG, can create or expose PHIs which are
2844 trivial copies or constant initializations.
2845
2846 While we could pick up these optimizations in DOM or with the
2847 combination of copy-prop and CCP, those solutions are far too
2848 heavy-weight for our needs.
2849
2850 This implementation has two phases so that we can efficiently
2851 eliminate the first order degenerate PHIs and second order
2852 degenerate PHIs.
2853
2854 The first phase performs a dominator walk to identify and eliminate
2855 the vast majority of the degenerate PHIs. When a degenerate PHI
2856 is identified and eliminated any affected statements or PHIs
2857 are put on a worklist.
2858
2859 The second phase eliminates degenerate PHIs and trivial copies
2860 or constant initializations using the worklist. This is how we
2861 pick up the secondary optimization opportunities with minimal
2862 cost. */
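
/* A hypothetical illustration of a second order degenerate PHI:
   eliminating  x_2 = PHI <y_1(3), y_1(4)>  replaces x_2 with y_1 in

       z_5 = PHI <x_2(6), y_1(7)>

   which then becomes degenerate itself and is picked up by the worklist
   phase.  */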
2863
2864 static unsigned int
2865 eliminate_degenerate_phis (void)
2866 {
2867 bitmap interesting_names;
2868 bitmap interesting_names1;
2869
2870 /* Bitmap of blocks which need EH information updated. We can not
2871 update it on-the-fly as doing so invalidates the dominator tree. */
2872 need_eh_cleanup = BITMAP_ALLOC (NULL);
2873
2874 /* INTERESTING_NAMES is effectively our worklist, indexed by
2875 SSA_NAME_VERSION.
2876
2877 A set bit indicates that the statement or PHI node which
2878 defines the SSA_NAME should be (re)examined to determine if
2879 it has become a degenerate PHI or trivial const/copy propagation
2880 opportunity.
2881
2882 Experiments have shown we generally get better compilation
2883 time behavior with bitmaps rather than sbitmaps. */
2884 interesting_names = BITMAP_ALLOC (NULL);
2885 interesting_names1 = BITMAP_ALLOC (NULL);
2886
2887 calculate_dominance_info (CDI_DOMINATORS);
2888 cfg_altered = false;
2889
2890 /* First phase. Eliminate degenerate PHIs via a dominator
2891 walk of the CFG.
2892
2893 Experiments have indicated that we generally get better
2894 compile-time behavior by visiting blocks in the first
2895 phase in dominator order. Presumably this is because walking
2896 in dominator order leaves fewer PHIs for later examination
2897 by the worklist phase. */
2898 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR, interesting_names);
2899
2900 /* Second phase. Eliminate second order degenerate PHIs as well
2901 as trivial copies or constant initializations identified by
2902 the first phase or this phase. Basically we keep iterating
2903 until our set of INTERESTING_NAMEs is empty. */
2904 while (!bitmap_empty_p (interesting_names))
2905 {
2906 unsigned int i;
2907 bitmap_iterator bi;
2908
2909 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
2910 changed during the loop. Copy it to another bitmap and
2911 use that. */
2912 bitmap_copy (interesting_names1, interesting_names);
2913
2914 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
2915 {
2916 tree name = ssa_name (i);
2917
2918 /* Ignore SSA_NAMEs that have been released because
2919 their defining statement was deleted (unreachable). */
2920 if (name)
2921 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
2922 interesting_names);
2923 }
2924 }
2925
2926 if (cfg_altered)
2927 free_dominance_info (CDI_DOMINATORS);
2928
2929 /* Propagation of const and copies may make some EH edges dead. Purge
2930 such edges from the CFG as needed. */
2931 if (!bitmap_empty_p (need_eh_cleanup))
2932 {
2933 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
2934 BITMAP_FREE (need_eh_cleanup);
2935 }
2936
2937 BITMAP_FREE (interesting_names);
2938 BITMAP_FREE (interesting_names1);
2939 return 0;
2940 }
2941
2942 struct gimple_opt_pass pass_phi_only_cprop =
2943 {
2944 {
2945 GIMPLE_PASS,
2946 "phicprop", /* name */
2947 gate_dominator, /* gate */
2948 eliminate_degenerate_phis, /* execute */
2949 NULL, /* sub */
2950 NULL, /* next */
2951 0, /* static_pass_number */
2952 TV_TREE_PHI_CPROP, /* tv_id */
2953 PROP_cfg | PROP_ssa, /* properties_required */
2954 0, /* properties_provided */
2955 0, /* properties_destroyed */
2956 0, /* todo_flags_start */
2957 TODO_cleanup_cfg
2958 | TODO_dump_func
2959 | TODO_ggc_collect
2960 | TODO_verify_ssa
2961 | TODO_verify_stmts
2962 | TODO_update_ssa /* todo_flags_finish */
2963 }
2964 };