1 /* SSA Dominator optimizations for trees
2 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "tree.h"
27 #include "flags.h"
28 #include "tm_p.h"
29 #include "basic-block.h"
30 #include "cfgloop.h"
31 #include "output.h"
32 #include "function.h"
33 #include "tree-pretty-print.h"
34 #include "gimple-pretty-print.h"
35 #include "timevar.h"
36 #include "tree-dump.h"
37 #include "tree-flow.h"
38 #include "domwalk.h"
39 #include "tree-pass.h"
40 #include "tree-ssa-propagate.h"
41 #include "langhooks.h"
42 #include "params.h"
43
44 /* This file implements optimizations on the dominator tree. */
45
46 /* Representation of a "naked" right-hand-side expression, to be used
47 in recording available expressions in the expression hash table. */
48
49 enum expr_kind
50 {
51 EXPR_SINGLE,
52 EXPR_UNARY,
53 EXPR_BINARY,
54 EXPR_TERNARY,
55 EXPR_CALL
56 };
57
58 struct hashable_expr
59 {
60 tree type;
61 enum expr_kind kind;
62 union {
63 struct { tree rhs; } single;
64 struct { enum tree_code op; tree opnd; } unary;
65 struct { enum tree_code op; tree opnd0, opnd1; } binary;
66 struct { enum tree_code op; tree opnd0, opnd1, opnd2; } ternary;
67 struct { tree fn; bool pure; size_t nargs; tree *args; } call;
68 } ops;
69 };
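
/* Illustrative example (editor's sketch, not part of the original
   sources): the assignment "x_1 = a_2 + b_3" would be described as

     expr.kind             = EXPR_BINARY;
     expr.type             = TREE_TYPE (x_1);
     expr.ops.binary.op    = PLUS_EXPR;
     expr.ops.binary.opnd0 = a_2;
     expr.ops.binary.opnd1 = b_3;

   with the LHS x_1 recorded separately in struct expr_hash_elt.  */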
70
71 /* Structure for recording known values of a conditional expression
72 at the exits from its block. */
73
74 typedef struct cond_equivalence_s
75 {
76 struct hashable_expr cond;
77 tree value;
78 } cond_equivalence;
79
80 DEF_VEC_O(cond_equivalence);
81 DEF_VEC_ALLOC_O(cond_equivalence,heap);
82
83 /* Structure for recording edge equivalences as well as any pending
84 edge redirections during the dominator optimizer.
85
86 Computing and storing the edge equivalences instead of creating
87 them on-demand can save significant amounts of time, particularly
88 for pathological cases involving switch statements.
89
90 These structures live for a single iteration of the dominator
91 optimizer in the edge's AUX field. At the end of an iteration we
92 free each of these structures and update the AUX field to point
93 to any requested redirection target (the code for updating the
94 CFG and SSA graph for edge redirection expects redirection edge
 95    targets to be in the AUX field for each edge).  */
96
97 struct edge_info
98 {
99 /* If this edge creates a simple equivalence, the LHS and RHS of
100 the equivalence will be stored here. */
101 tree lhs;
102 tree rhs;
103
104 /* Traversing an edge may also indicate one or more particular conditions
105 are true or false. */
106 VEC(cond_equivalence, heap) *cond_equivalences;
107 };
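
/* Illustrative example (editor's sketch): for a block ending in
   "if (a_1 == 5)", the true edge would carry lhs = a_1, rhs = 5,
   while both outgoing edges would also carry cond_equivalences such
   as "a_1 == 5 is true" / "a_1 == 5 is false", recorded by
   record_conditions below.  */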
108
109 /* Hash table with expressions made available during the renaming process.
110 When an assignment of the form X_i = EXPR is found, the statement is
111 stored in this table. If the same expression EXPR is later found on the
112 RHS of another statement, it is replaced with X_i (thus performing
 113    global redundancy elimination).  Similarly, as we pass through conditionals
114 we record the conditional itself as having either a true or false value
115 in this table. */
116 static htab_t avail_exprs;
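
/* Illustrative example (editor's sketch): given

     x_1 = a_2 + b_3;
     ...
     y_4 = a_2 + b_3;

   the first statement enters "a_2 + b_3 -> x_1" into AVAIL_EXPRS,
   and the second is rewritten to "y_4 = x_1" when lookup_avail_expr
   finds the cached value.  */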
117
 118 /* Stack of available expressions in AVAIL_EXPRS.  Each block pushes any
119 expressions it enters into the hash table along with a marker entry
120 (null). When we finish processing the block, we pop off entries and
121 remove the expressions from the global hash table until we hit the
122 marker. */
123 typedef struct expr_hash_elt * expr_hash_elt_t;
124 DEF_VEC_P(expr_hash_elt_t);
125 DEF_VEC_ALLOC_P(expr_hash_elt_t,heap);
126
127 static VEC(expr_hash_elt_t,heap) *avail_exprs_stack;
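
/* Sketch of the unwinding discipline (editor's illustration):

     enter block:  push a NULL marker onto AVAIL_EXPRS_STACK
     record expr:  insert into AVAIL_EXPRS, push the element
     leave block:  pop elements and remove them from AVAIL_EXPRS
                   until the NULL marker is reached

   See dom_opt_enter_block and remove_local_expressions_from_table.  */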
128
129 /* Structure for entries in the expression hash table. */
130
131 struct expr_hash_elt
132 {
133 /* The value (lhs) of this expression. */
134 tree lhs;
135
136 /* The expression (rhs) we want to record. */
137 struct hashable_expr expr;
138
139 /* The stmt pointer if this element corresponds to a statement. */
140 gimple stmt;
141
142 /* The hash value for RHS. */
143 hashval_t hash;
144
145 /* A unique stamp, typically the address of the hash
146 element itself, used in removing entries from the table. */
147 struct expr_hash_elt *stamp;
148 };
149
150 /* Stack of dest,src pairs that need to be restored during finalization.
151
152 A NULL entry is used to mark the end of pairs which need to be
153 restored during finalization of this block. */
154 static VEC(tree,heap) *const_and_copies_stack;
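
/* Illustrative layout (editor's sketch): recording "x_3 = 7" when
   x_3 previously had value V pushes the pair [V, x_3], previous
   value first, so restore_vars_to_original_value can pop x_3 and
   then reinstate its previous value V.  */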
155
156 /* Track whether or not we have changed the control flow graph. */
157 static bool cfg_altered;
158
159 /* Bitmap of blocks that have had EH statements cleaned. We should
160 remove their dead edges eventually. */
161 static bitmap need_eh_cleanup;
162
163 /* Statistics for dominator optimizations. */
164 struct opt_stats_d
165 {
166 long num_stmts;
167 long num_exprs_considered;
168 long num_re;
169 long num_const_prop;
170 long num_copy_prop;
171 };
172
173 static struct opt_stats_d opt_stats;
174
175 /* Local functions. */
176 static void optimize_stmt (basic_block, gimple_stmt_iterator);
177 static tree lookup_avail_expr (gimple, bool);
178 static hashval_t avail_expr_hash (const void *);
179 static hashval_t real_avail_expr_hash (const void *);
180 static int avail_expr_eq (const void *, const void *);
181 static void htab_statistics (FILE *, htab_t);
182 static void record_cond (cond_equivalence *);
183 static void record_const_or_copy (tree, tree);
184 static void record_equality (tree, tree);
185 static void record_equivalences_from_phis (basic_block);
186 static void record_equivalences_from_incoming_edge (basic_block);
187 static void eliminate_redundant_computations (gimple_stmt_iterator *);
188 static void record_equivalences_from_stmt (gimple, int);
189 static void dom_thread_across_edge (struct dom_walk_data *, edge);
190 static void dom_opt_leave_block (struct dom_walk_data *, basic_block);
191 static void dom_opt_enter_block (struct dom_walk_data *, basic_block);
192 static void remove_local_expressions_from_table (void);
193 static void restore_vars_to_original_value (void);
194 static edge single_incoming_edge_ignoring_loop_edges (basic_block);
195
196
197 /* Given a statement STMT, initialize the hash table element pointed to
198 by ELEMENT. */
199
200 static void
201 initialize_hash_element (gimple stmt, tree lhs,
202 struct expr_hash_elt *element)
203 {
204 enum gimple_code code = gimple_code (stmt);
205 struct hashable_expr *expr = &element->expr;
206
207 if (code == GIMPLE_ASSIGN)
208 {
209 enum tree_code subcode = gimple_assign_rhs_code (stmt);
210
211 expr->type = NULL_TREE;
212
213 switch (get_gimple_rhs_class (subcode))
214 {
215 case GIMPLE_SINGLE_RHS:
216 expr->kind = EXPR_SINGLE;
217 expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
218 break;
219 case GIMPLE_UNARY_RHS:
220 expr->kind = EXPR_UNARY;
221 expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
222 expr->ops.unary.op = subcode;
223 expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
224 break;
225 case GIMPLE_BINARY_RHS:
226 expr->kind = EXPR_BINARY;
227 expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
228 expr->ops.binary.op = subcode;
229 expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
230 expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
231 break;
232 case GIMPLE_TERNARY_RHS:
233 expr->kind = EXPR_TERNARY;
234 expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
235 expr->ops.ternary.op = subcode;
236 expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
237 expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
238 expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
239 break;
240 default:
241 gcc_unreachable ();
242 }
243 }
244 else if (code == GIMPLE_COND)
245 {
246 expr->type = boolean_type_node;
247 expr->kind = EXPR_BINARY;
248 expr->ops.binary.op = gimple_cond_code (stmt);
249 expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
250 expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
251 }
252 else if (code == GIMPLE_CALL)
253 {
254 size_t nargs = gimple_call_num_args (stmt);
255 size_t i;
256
257 gcc_assert (gimple_call_lhs (stmt));
258
259 expr->type = TREE_TYPE (gimple_call_lhs (stmt));
260 expr->kind = EXPR_CALL;
261 expr->ops.call.fn = gimple_call_fn (stmt);
262
263 if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
264 expr->ops.call.pure = true;
265 else
266 expr->ops.call.pure = false;
267
268 expr->ops.call.nargs = nargs;
269 expr->ops.call.args = (tree *) xcalloc (nargs, sizeof (tree));
270 for (i = 0; i < nargs; i++)
271 expr->ops.call.args[i] = gimple_call_arg (stmt, i);
272 }
273 else if (code == GIMPLE_SWITCH)
274 {
275 expr->type = TREE_TYPE (gimple_switch_index (stmt));
276 expr->kind = EXPR_SINGLE;
277 expr->ops.single.rhs = gimple_switch_index (stmt);
278 }
279 else if (code == GIMPLE_GOTO)
280 {
281 expr->type = TREE_TYPE (gimple_goto_dest (stmt));
282 expr->kind = EXPR_SINGLE;
283 expr->ops.single.rhs = gimple_goto_dest (stmt);
284 }
285 else
286 gcc_unreachable ();
287
288 element->lhs = lhs;
289 element->stmt = stmt;
290 element->hash = avail_expr_hash (element);
291 element->stamp = element;
292 }
293
294 /* Given a conditional expression COND as a tree, initialize
295 a hashable_expr expression EXPR. The conditional must be a
296 comparison or logical negation. A constant or a variable is
297 not permitted. */
298
299 static void
300 initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
301 {
302 expr->type = boolean_type_node;
303
304 if (COMPARISON_CLASS_P (cond))
305 {
306 expr->kind = EXPR_BINARY;
307 expr->ops.binary.op = TREE_CODE (cond);
308 expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
309 expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
310 }
311 else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
312 {
313 expr->kind = EXPR_UNARY;
314 expr->ops.unary.op = TRUTH_NOT_EXPR;
315 expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
316 }
317 else
318 gcc_unreachable ();
319 }
320
321 /* Given a hashable_expr expression EXPR and an LHS,
322 initialize the hash table element pointed to by ELEMENT. */
323
324 static void
325 initialize_hash_element_from_expr (struct hashable_expr *expr,
326 tree lhs,
327 struct expr_hash_elt *element)
328 {
329 element->expr = *expr;
330 element->lhs = lhs;
331 element->stmt = NULL;
332 element->hash = avail_expr_hash (element);
333 element->stamp = element;
334 }
335
336 /* Compare two hashable_expr structures for equivalence.
 337    They are considered equivalent when the expressions
 338    they denote must necessarily be equal.  The logic is intended
 339    to follow that of operand_equal_p in fold-const.c.  */
340
341 static bool
342 hashable_expr_equal_p (const struct hashable_expr *expr0,
343 const struct hashable_expr *expr1)
344 {
345 tree type0 = expr0->type;
346 tree type1 = expr1->type;
347
 348   /* If exactly one type is NULL, the expressions cannot be equal.  */
349 if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
350 return false;
351
 352   /* If the types differ in signedness, precision, or mode, then we
 353      can't consider the expressions equal.  */
354 if (type0 != type1
355 && (TREE_CODE (type0) == ERROR_MARK
356 || TREE_CODE (type1) == ERROR_MARK
357 || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
358 || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
359 || TYPE_MODE (type0) != TYPE_MODE (type1)))
360 return false;
361
362 if (expr0->kind != expr1->kind)
363 return false;
364
365 switch (expr0->kind)
366 {
367 case EXPR_SINGLE:
368 return operand_equal_p (expr0->ops.single.rhs,
369 expr1->ops.single.rhs, 0);
370
371 case EXPR_UNARY:
372 if (expr0->ops.unary.op != expr1->ops.unary.op)
373 return false;
374
375 if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
376 || expr0->ops.unary.op == NON_LVALUE_EXPR)
377 && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
378 return false;
379
380 return operand_equal_p (expr0->ops.unary.opnd,
381 expr1->ops.unary.opnd, 0);
382
383 case EXPR_BINARY:
384 if (expr0->ops.binary.op != expr1->ops.binary.op)
385 return false;
386
387 if (operand_equal_p (expr0->ops.binary.opnd0,
388 expr1->ops.binary.opnd0, 0)
389 && operand_equal_p (expr0->ops.binary.opnd1,
390 expr1->ops.binary.opnd1, 0))
391 return true;
392
393 /* For commutative ops, allow the other order. */
394 return (commutative_tree_code (expr0->ops.binary.op)
395 && operand_equal_p (expr0->ops.binary.opnd0,
396 expr1->ops.binary.opnd1, 0)
397 && operand_equal_p (expr0->ops.binary.opnd1,
398 expr1->ops.binary.opnd0, 0));
399
400 case EXPR_TERNARY:
401 if (expr0->ops.ternary.op != expr1->ops.ternary.op
402 || !operand_equal_p (expr0->ops.ternary.opnd2,
403 expr1->ops.ternary.opnd2, 0))
404 return false;
405
406 if (operand_equal_p (expr0->ops.ternary.opnd0,
407 expr1->ops.ternary.opnd0, 0)
408 && operand_equal_p (expr0->ops.ternary.opnd1,
409 expr1->ops.ternary.opnd1, 0))
410 return true;
411
412 /* For commutative ops, allow the other order. */
413 return (commutative_ternary_tree_code (expr0->ops.ternary.op)
414 && operand_equal_p (expr0->ops.ternary.opnd0,
415 expr1->ops.ternary.opnd1, 0)
416 && operand_equal_p (expr0->ops.ternary.opnd1,
417 expr1->ops.ternary.opnd0, 0));
418
419 case EXPR_CALL:
420 {
421 size_t i;
422
423 /* If the calls are to different functions, then they
424 clearly cannot be equal. */
425 if (! operand_equal_p (expr0->ops.call.fn,
426 expr1->ops.call.fn, 0))
427 return false;
428
429 if (! expr0->ops.call.pure)
430 return false;
431
432 if (expr0->ops.call.nargs != expr1->ops.call.nargs)
433 return false;
434
435 for (i = 0; i < expr0->ops.call.nargs; i++)
436 if (! operand_equal_p (expr0->ops.call.args[i],
437 expr1->ops.call.args[i], 0))
438 return false;
439
440 return true;
441 }
442
443 default:
444 gcc_unreachable ();
445 }
446 }
447
448 /* Compute a hash value for a hashable_expr value EXPR and a
449 previously accumulated hash value VAL. If two hashable_expr
450 values compare equal with hashable_expr_equal_p, they must
451 hash to the same value, given an identical value of VAL.
452 The logic is intended to follow iterative_hash_expr in tree.c. */
453
454 static hashval_t
455 iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
456 {
457 switch (expr->kind)
458 {
459 case EXPR_SINGLE:
460 val = iterative_hash_expr (expr->ops.single.rhs, val);
461 break;
462
463 case EXPR_UNARY:
464 val = iterative_hash_object (expr->ops.unary.op, val);
465
466 /* Make sure to include signedness in the hash computation.
467 Don't hash the type, that can lead to having nodes which
468 compare equal according to operand_equal_p, but which
469 have different hash codes. */
470 if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
471 || expr->ops.unary.op == NON_LVALUE_EXPR)
472 val += TYPE_UNSIGNED (expr->type);
473
474 val = iterative_hash_expr (expr->ops.unary.opnd, val);
475 break;
476
477 case EXPR_BINARY:
478 val = iterative_hash_object (expr->ops.binary.op, val);
479 if (commutative_tree_code (expr->ops.binary.op))
480 val = iterative_hash_exprs_commutative (expr->ops.binary.opnd0,
481 expr->ops.binary.opnd1, val);
482 else
483 {
484 val = iterative_hash_expr (expr->ops.binary.opnd0, val);
485 val = iterative_hash_expr (expr->ops.binary.opnd1, val);
486 }
487 break;
488
489 case EXPR_TERNARY:
490 val = iterative_hash_object (expr->ops.ternary.op, val);
491 if (commutative_ternary_tree_code (expr->ops.ternary.op))
492 val = iterative_hash_exprs_commutative (expr->ops.ternary.opnd0,
493 expr->ops.ternary.opnd1, val);
494 else
495 {
496 val = iterative_hash_expr (expr->ops.ternary.opnd0, val);
497 val = iterative_hash_expr (expr->ops.ternary.opnd1, val);
498 }
499 val = iterative_hash_expr (expr->ops.ternary.opnd2, val);
500 break;
501
502 case EXPR_CALL:
503 {
504 size_t i;
505 enum tree_code code = CALL_EXPR;
506
507 val = iterative_hash_object (code, val);
508 val = iterative_hash_expr (expr->ops.call.fn, val);
509 for (i = 0; i < expr->ops.call.nargs; i++)
510 val = iterative_hash_expr (expr->ops.call.args[i], val);
511 }
512 break;
513
514 default:
515 gcc_unreachable ();
516 }
517
518 return val;
519 }
520
521 /* Print a diagnostic dump of an expression hash table entry. */
522
523 static void
524 print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
525 {
526 if (element->stmt)
527 fprintf (stream, "STMT ");
528 else
529 fprintf (stream, "COND ");
530
531 if (element->lhs)
532 {
533 print_generic_expr (stream, element->lhs, 0);
534 fprintf (stream, " = ");
535 }
536
537 switch (element->expr.kind)
538 {
539 case EXPR_SINGLE:
540 print_generic_expr (stream, element->expr.ops.single.rhs, 0);
541 break;
542
543 case EXPR_UNARY:
544 fprintf (stream, "%s ", tree_code_name[element->expr.ops.unary.op]);
545 print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
546 break;
547
548 case EXPR_BINARY:
549 print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
550 fprintf (stream, " %s ", tree_code_name[element->expr.ops.binary.op]);
551 print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
552 break;
553
554 case EXPR_TERNARY:
555 fprintf (stream, " %s <", tree_code_name[element->expr.ops.ternary.op]);
556 print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
557 fputs (", ", stream);
558 print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
559 fputs (", ", stream);
560 print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
561 fputs (">", stream);
562 break;
563
564 case EXPR_CALL:
565 {
566 size_t i;
567 size_t nargs = element->expr.ops.call.nargs;
568
569 print_generic_expr (stream, element->expr.ops.call.fn, 0);
570 fprintf (stream, " (");
571 for (i = 0; i < nargs; i++)
572 {
573 print_generic_expr (stream, element->expr.ops.call.args[i], 0);
574 if (i + 1 < nargs)
575 fprintf (stream, ", ");
576 }
577 fprintf (stream, ")");
578 }
579 break;
580 }
581 fprintf (stream, "\n");
582
583 if (element->stmt)
584 {
585 fprintf (stream, " ");
586 print_gimple_stmt (stream, element->stmt, 0, 0);
587 }
588 }
589
590 /* Delete an expr_hash_elt and reclaim its storage. */
591
592 static void
593 free_expr_hash_elt (void *elt)
594 {
595 struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
596
597 if (element->expr.kind == EXPR_CALL)
598 free (element->expr.ops.call.args);
599
600 free (element);
601 }
602
603 /* Allocate an EDGE_INFO for edge E and attach it to E.
604 Return the new EDGE_INFO structure. */
605
606 static struct edge_info *
607 allocate_edge_info (edge e)
608 {
609 struct edge_info *edge_info;
610
611 edge_info = XCNEW (struct edge_info);
612
613 e->aux = edge_info;
614 return edge_info;
615 }
616
617 /* Free all EDGE_INFO structures associated with edges in the CFG.
618 If a particular edge can be threaded, copy the redirection
619 target from the EDGE_INFO structure into the edge's AUX field
620 as required by code to update the CFG and SSA graph for
621 jump threading. */
622
623 static void
624 free_all_edge_infos (void)
625 {
626 basic_block bb;
627 edge_iterator ei;
628 edge e;
629
630 FOR_EACH_BB (bb)
631 {
632 FOR_EACH_EDGE (e, ei, bb->preds)
633 {
634 struct edge_info *edge_info = (struct edge_info *) e->aux;
635
636 if (edge_info)
637 {
638 if (edge_info->cond_equivalences)
639 VEC_free (cond_equivalence, heap, edge_info->cond_equivalences);
640 free (edge_info);
641 e->aux = NULL;
642 }
643 }
644 }
645 }
646
647 /* Jump threading, redundancy elimination and const/copy propagation.
648
 649    This pass may expose new symbols that need to be renamed into SSA.  Any
 650    such symbols are rewritten into SSA form by update_ssa before jump
 651    threading is performed.  */
652
653 static unsigned int
654 tree_ssa_dominator_optimize (void)
655 {
656 struct dom_walk_data walk_data;
657
658 memset (&opt_stats, 0, sizeof (opt_stats));
659
660 /* Create our hash tables. */
661 avail_exprs = htab_create (1024, real_avail_expr_hash, avail_expr_eq, free_expr_hash_elt);
662 avail_exprs_stack = VEC_alloc (expr_hash_elt_t, heap, 20);
663 const_and_copies_stack = VEC_alloc (tree, heap, 20);
664 need_eh_cleanup = BITMAP_ALLOC (NULL);
665
666 /* Setup callbacks for the generic dominator tree walker. */
667 walk_data.dom_direction = CDI_DOMINATORS;
668 walk_data.initialize_block_local_data = NULL;
669 walk_data.before_dom_children = dom_opt_enter_block;
670 walk_data.after_dom_children = dom_opt_leave_block;
 671   /* Right now we only attach a dummy GIMPLE_COND to the global data pointer.
672 When we attach more stuff we'll need to fill this out with a real
673 structure. */
674 walk_data.global_data = NULL;
675 walk_data.block_local_data_size = 0;
676
677 /* Now initialize the dominator walker. */
678 init_walk_dominator_tree (&walk_data);
679
680 calculate_dominance_info (CDI_DOMINATORS);
681 cfg_altered = false;
682
683 /* We need to know loop structures in order to avoid destroying them
 684      in jump threading.  Note that we can still, e.g., thread through loop
 685      headers to an exit edge, or through a loop header to the loop body, assuming
686 that we update the loop info. */
687 loop_optimizer_init (LOOPS_HAVE_SIMPLE_LATCHES);
688
689 /* Initialize the value-handle array. */
690 threadedge_initialize_values ();
691
692 /* We need accurate information regarding back edges in the CFG
693 for jump threading; this may include back edges that are not part of
694 a single loop. */
695 mark_dfs_back_edges ();
696
697 /* Recursively walk the dominator tree optimizing statements. */
698 walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);
699
700 {
701 gimple_stmt_iterator gsi;
702 basic_block bb;
703 FOR_EACH_BB (bb)
 704       {
 	for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
705 update_stmt_if_modified (gsi_stmt (gsi));
706 }
707 }
708
709 /* If we exposed any new variables, go ahead and put them into
710 SSA form now, before we handle jump threading. This simplifies
711 interactions between rewriting of _DECL nodes into SSA form
712 and rewriting SSA_NAME nodes into SSA form after block
713 duplication and CFG manipulation. */
714 update_ssa (TODO_update_ssa);
715
716 free_all_edge_infos ();
717
718 /* Thread jumps, creating duplicate blocks as needed. */
719 cfg_altered |= thread_through_all_blocks (first_pass_instance);
720
721 if (cfg_altered)
722 free_dominance_info (CDI_DOMINATORS);
723
724 /* Removal of statements may make some EH edges dead. Purge
725 such edges from the CFG as needed. */
726 if (!bitmap_empty_p (need_eh_cleanup))
727 {
728 unsigned i;
729 bitmap_iterator bi;
730
731 /* Jump threading may have created forwarder blocks from blocks
732 needing EH cleanup; the new successor of these blocks, which
 733 	 has inherited statements from the original block, needs the cleanup.  */
734 EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
735 {
736 basic_block bb = BASIC_BLOCK (i);
 737 	  if (single_succ_p (bb)
738 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
739 {
740 bitmap_clear_bit (need_eh_cleanup, i);
741 bitmap_set_bit (need_eh_cleanup, single_succ (bb)->index);
742 }
743 }
744
745 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
746 bitmap_zero (need_eh_cleanup);
747 }
748
749 statistics_counter_event (cfun, "Redundant expressions eliminated",
750 opt_stats.num_re);
751 statistics_counter_event (cfun, "Constants propagated",
752 opt_stats.num_const_prop);
753 statistics_counter_event (cfun, "Copies propagated",
754 opt_stats.num_copy_prop);
755
756 /* Debugging dumps. */
757 if (dump_file && (dump_flags & TDF_STATS))
758 dump_dominator_optimization_stats (dump_file);
759
760 loop_optimizer_finalize ();
761
762 /* Delete our main hashtable. */
763 htab_delete (avail_exprs);
764
765 /* And finalize the dominator walker. */
766 fini_walk_dominator_tree (&walk_data);
767
 768   /* Free allocated bitmaps and stacks.  */
769 BITMAP_FREE (need_eh_cleanup);
770
771 VEC_free (expr_hash_elt_t, heap, avail_exprs_stack);
772 VEC_free (tree, heap, const_and_copies_stack);
773
774 /* Free the value-handle array. */
775 threadedge_finalize_values ();
776 ssa_name_values = NULL;
777
778 return 0;
779 }
780
781 static bool
782 gate_dominator (void)
783 {
784 return flag_tree_dom != 0;
785 }
786
787 struct gimple_opt_pass pass_dominator =
788 {
789 {
790 GIMPLE_PASS,
791 "dom", /* name */
792 gate_dominator, /* gate */
793 tree_ssa_dominator_optimize, /* execute */
794 NULL, /* sub */
795 NULL, /* next */
796 0, /* static_pass_number */
797 TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
798 PROP_cfg | PROP_ssa, /* properties_required */
799 0, /* properties_provided */
800 0, /* properties_destroyed */
801 0, /* todo_flags_start */
802 TODO_dump_func
803 | TODO_update_ssa
804 | TODO_cleanup_cfg
805 | TODO_verify_ssa /* todo_flags_finish */
806 }
807 };
808
809
810 /* Given a conditional statement CONDSTMT, convert the
811 condition to a canonical form. */
812
813 static void
814 canonicalize_comparison (gimple condstmt)
815 {
816 tree op0;
817 tree op1;
818 enum tree_code code;
819
820 gcc_assert (gimple_code (condstmt) == GIMPLE_COND);
821
822 op0 = gimple_cond_lhs (condstmt);
823 op1 = gimple_cond_rhs (condstmt);
824
825 code = gimple_cond_code (condstmt);
826
827 /* If it would be profitable to swap the operands, then do so to
828 canonicalize the statement, enabling better optimization.
829
830 By placing canonicalization of such expressions here we
831 transparently keep statements in canonical form, even
832 when the statement is modified. */
833 if (tree_swap_operands_p (op0, op1, false))
834 {
835 /* For relationals we need to swap the operands
836 and change the code. */
837 if (code == LT_EXPR
838 || code == GT_EXPR
839 || code == LE_EXPR
840 || code == GE_EXPR)
841 {
842 code = swap_tree_comparison (code);
843
844 gimple_cond_set_code (condstmt, code);
845 gimple_cond_set_lhs (condstmt, op1);
846 gimple_cond_set_rhs (condstmt, op0);
847
848 update_stmt (condstmt);
849 }
850 }
851 }
852
 853 /* Pop entries off AVAIL_EXPRS_STACK and remove the corresponding
 854    expressions from the global hash table AVAIL_EXPRS, stopping
 855    when we hit the marker (a NULL entry) that delimits the current
 856    block.  This restores AVAIL_EXPRS to its state when we entered
 857    the block.  */
 858
859
860 static void
861 remove_local_expressions_from_table (void)
862 {
863 /* Remove all the expressions made available in this block. */
864 while (VEC_length (expr_hash_elt_t, avail_exprs_stack) > 0)
865 {
866 expr_hash_elt_t victim = VEC_pop (expr_hash_elt_t, avail_exprs_stack);
867 void **slot;
868
869 if (victim == NULL)
870 break;
871
872 /* This must precede the actual removal from the hash table,
873 as ELEMENT and the table entry may share a call argument
874 vector which will be freed during removal. */
875 if (dump_file && (dump_flags & TDF_DETAILS))
876 {
877 fprintf (dump_file, "<<<< ");
878 print_expr_hash_elt (dump_file, victim);
879 }
880
881 slot = htab_find_slot_with_hash (avail_exprs,
882 victim, victim->hash, NO_INSERT);
883 gcc_assert (slot && *slot == (void *) victim);
884 htab_clear_slot (avail_exprs, slot);
885 }
886 }
887
888 /* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
889 CONST_AND_COPIES to its original state, stopping when we hit a
890 NULL marker. */
891
892 static void
893 restore_vars_to_original_value (void)
894 {
895 while (VEC_length (tree, const_and_copies_stack) > 0)
896 {
897 tree prev_value, dest;
898
899 dest = VEC_pop (tree, const_and_copies_stack);
900
901 if (dest == NULL)
902 break;
903
904 if (dump_file && (dump_flags & TDF_DETAILS))
905 {
906 fprintf (dump_file, "<<<< COPY ");
907 print_generic_expr (dump_file, dest, 0);
908 fprintf (dump_file, " = ");
909 print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
910 fprintf (dump_file, "\n");
911 }
912
913 prev_value = VEC_pop (tree, const_and_copies_stack);
914 set_ssa_name_value (dest, prev_value);
915 }
916 }
917
918 /* A trivial wrapper so that we can present the generic jump
919 threading code with a simple API for simplifying statements. */
920 static tree
921 simplify_stmt_for_jump_threading (gimple stmt,
922 gimple within_stmt ATTRIBUTE_UNUSED)
923 {
924 return lookup_avail_expr (stmt, false);
925 }
926
927 /* Wrapper for common code to attempt to thread an edge. For example,
928 it handles lazily building the dummy condition and the bookkeeping
929 when jump threading is successful. */
930
931 static void
932 dom_thread_across_edge (struct dom_walk_data *walk_data, edge e)
933 {
934 if (! walk_data->global_data)
935 {
936 gimple dummy_cond =
937 gimple_build_cond (NE_EXPR,
938 integer_zero_node, integer_zero_node,
939 NULL, NULL);
940 walk_data->global_data = dummy_cond;
941 }
942
943 thread_across_edge ((gimple) walk_data->global_data, e, false,
944 &const_and_copies_stack,
945 simplify_stmt_for_jump_threading);
946 }
947
948 /* PHI nodes can create equivalences too.
949
950 Ignoring any alternatives which are the same as the result, if
951 all the alternatives are equal, then the PHI node creates an
952 equivalence. */
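
/* Illustrative example (editor's sketch):

     x_3 = PHI <y_2, y_2, y_2>

   makes x_3 equivalent to y_2 (assuming may_propagate_copy allows
   it), whereas

     x_3 = PHI <y_2, z_5>

   creates no equivalence.  */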
953
954 static void
955 record_equivalences_from_phis (basic_block bb)
956 {
957 gimple_stmt_iterator gsi;
958
959 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
960 {
961 gimple phi = gsi_stmt (gsi);
962
963 tree lhs = gimple_phi_result (phi);
964 tree rhs = NULL;
965 size_t i;
966
967 for (i = 0; i < gimple_phi_num_args (phi); i++)
968 {
969 tree t = gimple_phi_arg_def (phi, i);
970
971 /* Ignore alternatives which are the same as our LHS. Since
972 LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
973 can simply compare pointers. */
974 if (lhs == t)
975 continue;
976
977 /* If we have not processed an alternative yet, then set
978 RHS to this alternative. */
979 if (rhs == NULL)
980 rhs = t;
981 /* If we have processed an alternative (stored in RHS), then
982 see if it is equal to this one. If it isn't, then stop
983 the search. */
984 else if (! operand_equal_for_phi_arg_p (rhs, t))
985 break;
986 }
987
988 /* If we had no interesting alternatives, then all the RHS alternatives
989 must have been the same as LHS. */
990 if (!rhs)
991 rhs = lhs;
992
993 /* If we managed to iterate through each PHI alternative without
994 breaking out of the loop, then we have a PHI which may create
995 a useful equivalence. We do not need to record unwind data for
996 this, since this is a true assignment and not an equivalence
997 inferred from a comparison. All uses of this ssa name are dominated
998 by this assignment, so unwinding just costs time and space. */
999 if (i == gimple_phi_num_args (phi) && may_propagate_copy (lhs, rhs))
1000 set_ssa_name_value (lhs, rhs);
1001 }
1002 }
1003
1004 /* Ignoring loop backedges, if BB has precisely one incoming edge then
1005 return that edge. Otherwise return NULL. */
1006 static edge
1007 single_incoming_edge_ignoring_loop_edges (basic_block bb)
1008 {
1009 edge retval = NULL;
1010 edge e;
1011 edge_iterator ei;
1012
1013 FOR_EACH_EDGE (e, ei, bb->preds)
1014 {
1015 /* A loop back edge can be identified by the destination of
1016 the edge dominating the source of the edge. */
1017 if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
1018 continue;
1019
1020 /* If we have already seen a non-loop edge, then we must have
1021 multiple incoming non-loop edges and thus we return NULL. */
1022 if (retval)
1023 return NULL;
1024
1025 /* This is the first non-loop incoming edge we have found. Record
1026 it. */
1027 retval = e;
1028 }
1029
1030 return retval;
1031 }
1032
1033 /* Record any equivalences created by the incoming edge to BB. If BB
1034 has more than one incoming edge, then no equivalence is created. */
1035
1036 static void
1037 record_equivalences_from_incoming_edge (basic_block bb)
1038 {
1039 edge e;
1040 basic_block parent;
1041 struct edge_info *edge_info;
1042
1043 /* If our parent block ended with a control statement, then we may be
1044 able to record some equivalences based on which outgoing edge from
1045 the parent was followed. */
1046 parent = get_immediate_dominator (CDI_DOMINATORS, bb);
1047
1048 e = single_incoming_edge_ignoring_loop_edges (bb);
1049
1050 /* If we had a single incoming edge from our parent block, then enter
1051 any data associated with the edge into our tables. */
1052 if (e && e->src == parent)
1053 {
1054 unsigned int i;
1055
1056 edge_info = (struct edge_info *) e->aux;
1057
1058 if (edge_info)
1059 {
1060 tree lhs = edge_info->lhs;
1061 tree rhs = edge_info->rhs;
1062 cond_equivalence *eq;
1063
1064 if (lhs)
1065 record_equality (lhs, rhs);
1066
1067 for (i = 0; VEC_iterate (cond_equivalence,
1068 edge_info->cond_equivalences, i, eq); ++i)
1069 record_cond (eq);
1070 }
1071 }
1072 }
1073
1074 /* Dump SSA statistics on FILE. */
1075
1076 void
1077 dump_dominator_optimization_stats (FILE *file)
1078 {
1079 fprintf (file, "Total number of statements: %6ld\n\n",
1080 opt_stats.num_stmts);
1081 fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
1082 opt_stats.num_exprs_considered);
1083
1084 fprintf (file, "\nHash table statistics:\n");
1085
1086 fprintf (file, " avail_exprs: ");
1087 htab_statistics (file, avail_exprs);
1088 }
1089
1090
1091 /* Dump SSA statistics on stderr. */
1092
1093 DEBUG_FUNCTION void
1094 debug_dominator_optimization_stats (void)
1095 {
1096 dump_dominator_optimization_stats (stderr);
1097 }
1098
1099
1100 /* Dump statistics for the hash table HTAB. */
1101
1102 static void
1103 htab_statistics (FILE *file, htab_t htab)
1104 {
1105 fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
1106 (long) htab_size (htab),
1107 (long) htab_elements (htab),
1108 htab_collisions (htab));
1109 }
1110
1111
1112 /* Enter condition equivalence into the expression hash table.
1113 This indicates that a conditional expression has a known
1114 boolean value. */
1115
1116 static void
1117 record_cond (cond_equivalence *p)
1118 {
1119 struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
1120 void **slot;
1121
1122 initialize_hash_element_from_expr (&p->cond, p->value, element);
1123
1124 slot = htab_find_slot_with_hash (avail_exprs, (void *)element,
1125 element->hash, INSERT);
1126 if (*slot == NULL)
1127 {
1128 *slot = (void *) element;
1129
1130 if (dump_file && (dump_flags & TDF_DETAILS))
1131 {
1132 fprintf (dump_file, "1>>> ");
1133 print_expr_hash_elt (dump_file, element);
1134 }
1135
1136 VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, element);
1137 }
1138 else
1139 free (element);
1140 }
1141
1142 /* Build a cond_equivalence record indicating that the comparison
 1143    CODE holds between operands OP0 and OP1 and push it onto the vector *P.  */
1144
1145 static void
1146 build_and_record_new_cond (enum tree_code code,
1147 tree op0, tree op1,
1148 VEC(cond_equivalence, heap) **p)
1149 {
1150 cond_equivalence c;
1151 struct hashable_expr *cond = &c.cond;
1152
1153 gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);
1154
1155 cond->type = boolean_type_node;
1156 cond->kind = EXPR_BINARY;
1157 cond->ops.binary.op = code;
1158 cond->ops.binary.opnd0 = op0;
1159 cond->ops.binary.opnd1 = op1;
1160
1161 c.value = boolean_true_node;
1162 VEC_safe_push (cond_equivalence, heap, *p, &c);
1163 }
1164
1165 /* Record that COND is true and INVERTED is false into the edge information
1166 structure. Also record that any conditions dominated by COND are true
1167 as well.
1168
1169 For example, if a < b is true, then a <= b must also be true. */
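
/* Editor's illustration of the derived conditions recorded below:
   from "a < b" being true we also record "a <= b" and "a != b", and,
   for floating-point operands, "ORDERED (a, b)" and "LTGT (a, b)".  */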
1170
1171 static void
1172 record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
1173 {
1174 tree op0, op1;
1175 cond_equivalence c;
1176
1177 if (!COMPARISON_CLASS_P (cond))
1178 return;
1179
1180 op0 = TREE_OPERAND (cond, 0);
1181 op1 = TREE_OPERAND (cond, 1);
1182
1183 switch (TREE_CODE (cond))
1184 {
1185 case LT_EXPR:
1186 case GT_EXPR:
1187 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
1188 {
1189 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1190 &edge_info->cond_equivalences);
1191 build_and_record_new_cond (LTGT_EXPR, op0, op1,
1192 &edge_info->cond_equivalences);
1193 }
1194
1195 build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
1196 ? LE_EXPR : GE_EXPR),
1197 op0, op1, &edge_info->cond_equivalences);
1198 build_and_record_new_cond (NE_EXPR, op0, op1,
1199 &edge_info->cond_equivalences);
1200 break;
1201
1202 case GE_EXPR:
1203 case LE_EXPR:
1204 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
1205 {
1206 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1207 &edge_info->cond_equivalences);
1208 }
1209 break;
1210
1211 case EQ_EXPR:
1212 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
1213 {
1214 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1215 &edge_info->cond_equivalences);
1216 }
1217 build_and_record_new_cond (LE_EXPR, op0, op1,
1218 &edge_info->cond_equivalences);
1219 build_and_record_new_cond (GE_EXPR, op0, op1,
1220 &edge_info->cond_equivalences);
1221 break;
1222
1223 case UNORDERED_EXPR:
1224 build_and_record_new_cond (NE_EXPR, op0, op1,
1225 &edge_info->cond_equivalences);
1226 build_and_record_new_cond (UNLE_EXPR, op0, op1,
1227 &edge_info->cond_equivalences);
1228 build_and_record_new_cond (UNGE_EXPR, op0, op1,
1229 &edge_info->cond_equivalences);
1230 build_and_record_new_cond (UNEQ_EXPR, op0, op1,
1231 &edge_info->cond_equivalences);
1232 build_and_record_new_cond (UNLT_EXPR, op0, op1,
1233 &edge_info->cond_equivalences);
1234 build_and_record_new_cond (UNGT_EXPR, op0, op1,
1235 &edge_info->cond_equivalences);
1236 break;
1237
1238 case UNLT_EXPR:
1239 case UNGT_EXPR:
1240 build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
1241 ? UNLE_EXPR : UNGE_EXPR),
1242 op0, op1, &edge_info->cond_equivalences);
1243 build_and_record_new_cond (NE_EXPR, op0, op1,
1244 &edge_info->cond_equivalences);
1245 break;
1246
1247 case UNEQ_EXPR:
1248 build_and_record_new_cond (UNLE_EXPR, op0, op1,
1249 &edge_info->cond_equivalences);
1250 build_and_record_new_cond (UNGE_EXPR, op0, op1,
1251 &edge_info->cond_equivalences);
1252 break;
1253
1254 case LTGT_EXPR:
1255 build_and_record_new_cond (NE_EXPR, op0, op1,
1256 &edge_info->cond_equivalences);
1257 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1258 &edge_info->cond_equivalences);
1259 break;
1260
1261 default:
1262 break;
1263 }
1264
1265 /* Now store the original true and false conditions into the first
1266 two slots. */
1267 initialize_expr_from_cond (cond, &c.cond);
1268 c.value = boolean_true_node;
1269 VEC_safe_push (cond_equivalence, heap, edge_info->cond_equivalences, &c);
1270
1271 /* It is possible for INVERTED to be the negation of a comparison,
1272 and not a valid RHS or GIMPLE_COND condition. This happens because
1273 invert_truthvalue may return such an expression when asked to invert
1274 a floating-point comparison. These comparisons are not assumed to
1275 obey the trichotomy law. */
1276 initialize_expr_from_cond (inverted, &c.cond);
1277 c.value = boolean_false_node;
1278 VEC_safe_push (cond_equivalence, heap, edge_info->cond_equivalences, &c);
1279 }
1280
1281 /* A helper function for record_const_or_copy and record_equality.
1282 Do the work of recording the value and undo info. */
1283
1284 static void
1285 record_const_or_copy_1 (tree x, tree y, tree prev_x)
1286 {
1287 set_ssa_name_value (x, y);
1288
1289 if (dump_file && (dump_flags & TDF_DETAILS))
1290 {
1291 fprintf (dump_file, "0>>> COPY ");
1292 print_generic_expr (dump_file, x, 0);
1293 fprintf (dump_file, " = ");
1294 print_generic_expr (dump_file, y, 0);
1295 fprintf (dump_file, "\n");
1296 }
1297
1298 VEC_reserve (tree, heap, const_and_copies_stack, 2);
1299 VEC_quick_push (tree, const_and_copies_stack, prev_x);
1300 VEC_quick_push (tree, const_and_copies_stack, x);
1301 }
1302
1303 /* Return the loop depth of the basic block of the defining statement of X.
1304 This number should not be treated as absolutely correct because the loop
1305 information may not be completely up-to-date when dom runs. However, it
1306 will be relatively correct, and as more passes are taught to keep loop info
1307 up to date, the result will become more and more accurate. */
1308
1309 int
1310 loop_depth_of_name (tree x)
1311 {
1312 gimple defstmt;
1313 basic_block defbb;
1314
1315 /* If it's not an SSA_NAME, we have no clue where the definition is. */
1316 if (TREE_CODE (x) != SSA_NAME)
1317 return 0;
1318
1319 /* Otherwise return the loop depth of the defining statement's bb.
1320 Note that there may not actually be a bb for this statement, if the
1321 ssa_name is live on entry. */
1322 defstmt = SSA_NAME_DEF_STMT (x);
1323 defbb = gimple_bb (defstmt);
1324 if (!defbb)
1325 return 0;
1326
1327 return defbb->loop_depth;
1328 }
1329
1330 /* Record that X is equal to Y in const_and_copies. Record undo
1331 information in the block-local vector. */
1332
1333 static void
1334 record_const_or_copy (tree x, tree y)
1335 {
1336 tree prev_x = SSA_NAME_VALUE (x);
1337
1338 gcc_assert (TREE_CODE (x) == SSA_NAME);
1339
1340 if (TREE_CODE (y) == SSA_NAME)
1341 {
1342 tree tmp = SSA_NAME_VALUE (y);
1343 if (tmp)
1344 y = tmp;
1345 }
1346
1347 record_const_or_copy_1 (x, y, prev_x);
1348 }
1349
1350 /* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
 1351    This constrains the cases in which we may treat this as an assignment.  */
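
/* Editor's illustration: from the true arm of "if (x_3 == y_4)" this
   records x_3 = y_4, after canonicalization picks the more invariant
   (or shallower-loop) operand as the value, subject to the
   signed-zero check below.  */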
1352
1353 static void
1354 record_equality (tree x, tree y)
1355 {
1356 tree prev_x = NULL, prev_y = NULL;
1357
1358 if (TREE_CODE (x) == SSA_NAME)
1359 prev_x = SSA_NAME_VALUE (x);
1360 if (TREE_CODE (y) == SSA_NAME)
1361 prev_y = SSA_NAME_VALUE (y);
1362
1363 /* If one of the previous values is invariant, or invariant in more loops
1364 (by depth), then use that.
1365 Otherwise it doesn't matter which value we choose, just so
1366 long as we canonicalize on one value. */
1367 if (is_gimple_min_invariant (y))
1368 ;
1369 else if (is_gimple_min_invariant (x)
1370 || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
1371 prev_x = x, x = y, y = prev_x, prev_x = prev_y;
1372 else if (prev_x && is_gimple_min_invariant (prev_x))
1373 x = y, y = prev_x, prev_x = prev_y;
1374 else if (prev_y)
1375 y = prev_y;
1376
1377 /* After the swapping, we must have one SSA_NAME. */
1378 if (TREE_CODE (x) != SSA_NAME)
1379 return;
1380
1381 /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
1382 variable compared against zero. If we're honoring signed zeros,
1383 then we cannot record this value unless we know that the value is
1384 nonzero. */
1385 if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
1386 && (TREE_CODE (y) != REAL_CST
1387 || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
1388 return;
1389
1390 record_const_or_copy_1 (x, y, prev_x);
1391 }
1392
1393 /* Returns true when STMT is a simple iv increment. It detects the
1394 following situation:
1395
1396 i_1 = phi (..., i_2)
1397 i_2 = i_1 +/- ... */
1398
1399 static bool
1400 simple_iv_increment_p (gimple stmt)
1401 {
1402 tree lhs, preinc;
1403 gimple phi;
1404 size_t i;
1405
1406 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1407 return false;
1408
1409 lhs = gimple_assign_lhs (stmt);
1410 if (TREE_CODE (lhs) != SSA_NAME)
1411 return false;
1412
1413 if (gimple_assign_rhs_code (stmt) != PLUS_EXPR
1414 && gimple_assign_rhs_code (stmt) != MINUS_EXPR)
1415 return false;
1416
1417 preinc = gimple_assign_rhs1 (stmt);
1418
1419 if (TREE_CODE (preinc) != SSA_NAME)
1420 return false;
1421
1422 phi = SSA_NAME_DEF_STMT (preinc);
1423 if (gimple_code (phi) != GIMPLE_PHI)
1424 return false;
1425
1426 for (i = 0; i < gimple_phi_num_args (phi); i++)
1427 if (gimple_phi_arg_def (phi, i) == lhs)
1428 return true;
1429
1430 return false;
1431 }
1432
1433 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
1434 known value for that SSA_NAME (or NULL if no value is known).
1435
1436 Propagate values from CONST_AND_COPIES into the PHI nodes of the
1437 successors of BB. */
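
/* Editor's illustration: if CONST_AND_COPIES records x_5 = 3, then a
   PHI argument x_5 associated with an edge out of BB is replaced
   with 3 in the successor's PHI node.  */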
1438
1439 static void
1440 cprop_into_successor_phis (basic_block bb)
1441 {
1442 edge e;
1443 edge_iterator ei;
1444
1445 FOR_EACH_EDGE (e, ei, bb->succs)
1446 {
1447 int indx;
1448 gimple_stmt_iterator gsi;
1449
1450 /* If this is an abnormal edge, then we do not want to copy propagate
1451 into the PHI alternative associated with this edge. */
1452 if (e->flags & EDGE_ABNORMAL)
1453 continue;
1454
1455 gsi = gsi_start_phis (e->dest);
1456 if (gsi_end_p (gsi))
1457 continue;
1458
1459 indx = e->dest_idx;
1460 for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
1461 {
1462 tree new_val;
1463 use_operand_p orig_p;
1464 tree orig_val;
1465 gimple phi = gsi_stmt (gsi);
1466
1467 /* The alternative may be associated with a constant, so verify
1468 it is an SSA_NAME before doing anything with it. */
1469 orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
1470 orig_val = get_use_from_ptr (orig_p);
1471 if (TREE_CODE (orig_val) != SSA_NAME)
1472 continue;
1473
1474 /* If we have *ORIG_P in our constant/copy table, then replace
1475 ORIG_P with its value in our constant/copy table. */
1476 new_val = SSA_NAME_VALUE (orig_val);
1477 if (new_val
1478 && new_val != orig_val
1479 && (TREE_CODE (new_val) == SSA_NAME
1480 || is_gimple_min_invariant (new_val))
1481 && may_propagate_copy (orig_val, new_val))
1482 propagate_value (orig_p, new_val);
1483 }
1484 }
1485 }
1486
1487 /* We have finished optimizing BB, record any information implied by
1488 taking a specific outgoing edge from BB. */
1489
1490 static void
1491 record_edge_info (basic_block bb)
1492 {
1493 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1494 struct edge_info *edge_info;
1495
1496 if (! gsi_end_p (gsi))
1497 {
1498 gimple stmt = gsi_stmt (gsi);
1499 location_t loc = gimple_location (stmt);
1500
1501 if (gimple_code (stmt) == GIMPLE_SWITCH)
1502 {
1503 tree index = gimple_switch_index (stmt);
1504
1505 if (TREE_CODE (index) == SSA_NAME)
1506 {
1507 int i;
1508 int n_labels = gimple_switch_num_labels (stmt);
1509 tree *info = XCNEWVEC (tree, last_basic_block);
1510 edge e;
1511 edge_iterator ei;
1512
1513 for (i = 0; i < n_labels; i++)
1514 {
1515 tree label = gimple_switch_label (stmt, i);
1516 basic_block target_bb = label_to_block (CASE_LABEL (label));
1517 if (CASE_HIGH (label)
1518 || !CASE_LOW (label)
1519 || info[target_bb->index])
1520 info[target_bb->index] = error_mark_node;
1521 else
1522 info[target_bb->index] = label;
1523 }
1524
1525 FOR_EACH_EDGE (e, ei, bb->succs)
1526 {
1527 basic_block target_bb = e->dest;
1528 tree label = info[target_bb->index];
1529
1530 if (label != NULL && label != error_mark_node)
1531 {
1532 tree x = fold_convert_loc (loc, TREE_TYPE (index),
1533 CASE_LOW (label));
1534 edge_info = allocate_edge_info (e);
1535 edge_info->lhs = index;
1536 edge_info->rhs = x;
1537 }
1538 }
1539 free (info);
1540 }
1541 }
1542
1543 /* A COND_EXPR may create equivalences too. */
1544 if (gimple_code (stmt) == GIMPLE_COND)
1545 {
1546 edge true_edge;
1547 edge false_edge;
1548
1549 tree op0 = gimple_cond_lhs (stmt);
1550 tree op1 = gimple_cond_rhs (stmt);
1551 enum tree_code code = gimple_cond_code (stmt);
1552
1553 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1554
1555 /* Special case comparing booleans against a constant as we
 1556 	     know the value of OP0 on both arms of the branch, i.e., we
1557 can record an equivalence for OP0 rather than COND. */
1558 if ((code == EQ_EXPR || code == NE_EXPR)
1559 && TREE_CODE (op0) == SSA_NAME
1560 && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
1561 && is_gimple_min_invariant (op1))
1562 {
1563 if (code == EQ_EXPR)
1564 {
1565 edge_info = allocate_edge_info (true_edge);
1566 edge_info->lhs = op0;
1567 edge_info->rhs = (integer_zerop (op1)
1568 ? boolean_false_node
1569 : boolean_true_node);
1570
1571 edge_info = allocate_edge_info (false_edge);
1572 edge_info->lhs = op0;
1573 edge_info->rhs = (integer_zerop (op1)
1574 ? boolean_true_node
1575 : boolean_false_node);
1576 }
1577 else
1578 {
1579 edge_info = allocate_edge_info (true_edge);
1580 edge_info->lhs = op0;
1581 edge_info->rhs = (integer_zerop (op1)
1582 ? boolean_true_node
1583 : boolean_false_node);
1584
1585 edge_info = allocate_edge_info (false_edge);
1586 edge_info->lhs = op0;
1587 edge_info->rhs = (integer_zerop (op1)
1588 ? boolean_false_node
1589 : boolean_true_node);
1590 }
1591 }
1592 else if (is_gimple_min_invariant (op0)
1593 && (TREE_CODE (op1) == SSA_NAME
1594 || is_gimple_min_invariant (op1)))
1595 {
1596 tree cond = build2 (code, boolean_type_node, op0, op1);
1597 tree inverted = invert_truthvalue_loc (loc, cond);
1598 struct edge_info *edge_info;
1599
1600 edge_info = allocate_edge_info (true_edge);
1601 record_conditions (edge_info, cond, inverted);
1602
1603 if (code == EQ_EXPR)
1604 {
1605 edge_info->lhs = op1;
1606 edge_info->rhs = op0;
1607 }
1608
1609 edge_info = allocate_edge_info (false_edge);
1610 record_conditions (edge_info, inverted, cond);
1611
1612 if (TREE_CODE (inverted) == EQ_EXPR)
1613 {
1614 edge_info->lhs = op1;
1615 edge_info->rhs = op0;
1616 }
1617 }
1618
1619 else if (TREE_CODE (op0) == SSA_NAME
1620 && (is_gimple_min_invariant (op1)
1621 || TREE_CODE (op1) == SSA_NAME))
1622 {
1623 tree cond = build2 (code, boolean_type_node, op0, op1);
1624 tree inverted = invert_truthvalue_loc (loc, cond);
1625 struct edge_info *edge_info;
1626
1627 edge_info = allocate_edge_info (true_edge);
1628 record_conditions (edge_info, cond, inverted);
1629
1630 if (code == EQ_EXPR)
1631 {
1632 edge_info->lhs = op0;
1633 edge_info->rhs = op1;
1634 }
1635
1636 edge_info = allocate_edge_info (false_edge);
1637 record_conditions (edge_info, inverted, cond);
1638
1639 if (TREE_CODE (inverted) == EQ_EXPR)
1640 {
1641 edge_info->lhs = op0;
1642 edge_info->rhs = op1;
1643 }
1644 }
1645 }
1646
1647 /* ??? TRUTH_NOT_EXPR can create an equivalence too. */
1648 }
1649 }
1650
/* Initialize local stacks for this optimizer and record equivalences
   upon entry to BB.  Equivalences can come from the edge traversed to
   reach BB or they may come from PHI nodes at the start of BB.  */

 1651 static void
1652 dom_opt_enter_block (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED,
1653 basic_block bb)
1654 {
1655 gimple_stmt_iterator gsi;
1656
1657 if (dump_file && (dump_flags & TDF_DETAILS))
1658 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1659
1660 /* Push a marker on the stacks of local information so that we know how
1661 far to unwind when we finalize this block. */
1662 VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, NULL);
1663 VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);
1664
1665 record_equivalences_from_incoming_edge (bb);
1666
1667 /* PHI nodes can create equivalences too. */
1668 record_equivalences_from_phis (bb);
1669
1670 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1671 optimize_stmt (bb, gsi);
1672
1673 /* Now prepare to process dominated blocks. */
1674 record_edge_info (bb);
1675 cprop_into_successor_phis (bb);
1676 }
1677
1678 /* We have finished processing the dominator children of BB, perform
1679 any finalization actions in preparation for leaving this node in
1680 the dominator tree. */
1681
1682 static void
1683 dom_opt_leave_block (struct dom_walk_data *walk_data, basic_block bb)
1684 {
1685 gimple last;
1686
1687 /* If we have an outgoing edge to a block with multiple incoming and
1688 outgoing edges, then we may be able to thread the edge, i.e., we
1689 may be able to statically determine which of the outgoing edges
1690 will be traversed when the incoming edge from BB is traversed. */
1691 if (single_succ_p (bb)
1692 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
1693 && potentially_threadable_block (single_succ (bb)))
1694 {
1695 dom_thread_across_edge (walk_data, single_succ_edge (bb));
1696 }
1697 else if ((last = last_stmt (bb))
1698 && gimple_code (last) == GIMPLE_COND
1699 && EDGE_COUNT (bb->succs) == 2
1700 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
1701 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
1702 {
1703 edge true_edge, false_edge;
1704
1705 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1706
1707 /* Only try to thread the edge if it reaches a target block with
1708 more than one predecessor and more than one successor. */
1709 if (potentially_threadable_block (true_edge->dest))
1710 {
1711 struct edge_info *edge_info;
1712 unsigned int i;
1713
1714 /* Push a marker onto the available expression stack so that we
1715 unwind any expressions related to the TRUE arm before processing
1716 the false arm below. */
1717 VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, NULL);
1718 VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);
1719
1720 edge_info = (struct edge_info *) true_edge->aux;
1721
1722 /* If we have info associated with this edge, record it into
1723 our equivalence tables. */
1724 if (edge_info)
1725 {
1726 cond_equivalence *eq;
1727 tree lhs = edge_info->lhs;
1728 tree rhs = edge_info->rhs;
1729
1730 /* If we have a simple NAME = VALUE equivalence, record it. */
1731 if (lhs && TREE_CODE (lhs) == SSA_NAME)
1732 record_const_or_copy (lhs, rhs);
1733
1734 /* If we have 0 = COND or 1 = COND equivalences, record them
1735 into our expression hash tables. */
1736 for (i = 0; VEC_iterate (cond_equivalence,
1737 edge_info->cond_equivalences, i, eq); ++i)
1738 record_cond (eq);
1739 }
1740
1741 dom_thread_across_edge (walk_data, true_edge);
1742
1743 /* And restore the various tables to their state before
1744 we threaded this edge. */
 1745 	  remove_local_expressions_from_table ();
 	  restore_vars_to_original_value ();
1746 }
1747
1748 /* Similarly for the ELSE arm. */
1749 if (potentially_threadable_block (false_edge->dest))
1750 {
1751 struct edge_info *edge_info;
1752 unsigned int i;
1753
1754 VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);
1755 edge_info = (struct edge_info *) false_edge->aux;
1756
1757 /* If we have info associated with this edge, record it into
1758 our equivalence tables. */
1759 if (edge_info)
1760 {
1761 cond_equivalence *eq;
1762 tree lhs = edge_info->lhs;
1763 tree rhs = edge_info->rhs;
1764
1765 /* If we have a simple NAME = VALUE equivalence, record it. */
1766 if (lhs && TREE_CODE (lhs) == SSA_NAME)
1767 record_const_or_copy (lhs, rhs);
1768
1769 /* If we have 0 = COND or 1 = COND equivalences, record them
1770 into our expression hash tables. */
1771 for (i = 0; VEC_iterate (cond_equivalence,
1772 edge_info->cond_equivalences, i, eq); ++i)
1773 record_cond (eq);
1774 }
1775
1776 /* Now thread the edge. */
1777 dom_thread_across_edge (walk_data, false_edge);
1778
1779 /* No need to remove local expressions from our tables
1780 or restore vars to their original value as that will
1781 be done immediately below. */
1782 }
1783 }
1784
1785 remove_local_expressions_from_table ();
1786 restore_vars_to_original_value ();
1787 }
1788
1789 /* Search for redundant computations in STMT. If any are found, then
1790 replace them with the variable holding the result of the computation.
1791
1792 If safe, record this expression into the available expression hash
1793 table. */
1794
1795 static void
1796 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
1797 {
1798 tree expr_type;
1799 tree cached_lhs;
1800 bool insert = true;
1801 bool assigns_var_p = false;
1802
1803 gimple stmt = gsi_stmt (*gsi);
1804
1805 tree def = gimple_get_lhs (stmt);
1806
 1807   /* Certain expressions on the RHS can be optimized away, but cannot
1808 themselves be entered into the hash tables. */
1809 if (! def
1810 || TREE_CODE (def) != SSA_NAME
1811 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
1812 || gimple_vdef (stmt)
1813 /* Do not record equivalences for increments of ivs. This would create
1814 overlapping live ranges for a very questionable gain. */
1815 || simple_iv_increment_p (stmt))
1816 insert = false;
1817
1818 /* Check if the expression has been computed before. */
1819 cached_lhs = lookup_avail_expr (stmt, insert);
1820
1821 opt_stats.num_exprs_considered++;
1822
1823 /* Get the type of the expression we are trying to optimize. */
1824 if (is_gimple_assign (stmt))
1825 {
1826 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
1827 assigns_var_p = true;
1828 }
1829 else if (gimple_code (stmt) == GIMPLE_COND)
1830 expr_type = boolean_type_node;
1831 else if (is_gimple_call (stmt))
1832 {
1833 gcc_assert (gimple_call_lhs (stmt));
1834 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
1835 assigns_var_p = true;
1836 }
1837 else if (gimple_code (stmt) == GIMPLE_SWITCH)
1838 expr_type = TREE_TYPE (gimple_switch_index (stmt));
1839 else
1840 gcc_unreachable ();
1841
1842 if (!cached_lhs)
1843 return;
1844
1845 /* It is safe to ignore types here since we have already done
1846 type checking in the hashing and equality routines. In fact
1847 type checking here merely gets in the way of constant
1848 propagation. Also, make sure that it is safe to propagate
1849 CACHED_LHS into the expression in STMT. */
1850 if ((TREE_CODE (cached_lhs) != SSA_NAME
1851 && (assigns_var_p
1852 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
1853 || may_propagate_copy_into_stmt (stmt, cached_lhs))
1854 {
1855 #if defined ENABLE_CHECKING
1856 gcc_assert (TREE_CODE (cached_lhs) == SSA_NAME
1857 || is_gimple_min_invariant (cached_lhs));
1858 #endif
1859
1860 if (dump_file && (dump_flags & TDF_DETAILS))
1861 {
1862 fprintf (dump_file, " Replaced redundant expr '");
1863 print_gimple_expr (dump_file, stmt, 0, dump_flags);
1864 fprintf (dump_file, "' with '");
1865 print_generic_expr (dump_file, cached_lhs, dump_flags);
1866 fprintf (dump_file, "'\n");
1867 }
1868
1869 opt_stats.num_re++;
1870
1871 if (assigns_var_p
1872 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
1873 cached_lhs = fold_convert (expr_type, cached_lhs);
1874
1875 propagate_tree_value_into_stmt (gsi, cached_lhs);
1876
1877 /* Since it is always necessary to mark the result as modified,
1878 perhaps we should move this into propagate_tree_value_into_stmt
1879 itself. */
1880 gimple_set_modified (gsi_stmt (*gsi), true);
1881 }
1882 }
1883
1884 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
1885 the available expressions table or the const_and_copies table.
1886 Detect and record those equivalences. */
1887 /* We handle only very simple copy equivalences here. The heavy
1888 lifting is done by eliminate_redundant_computations. */
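/* E.g., for a hypothetical assignment x_1 = y_2 (or x_1 = 42),
   SSA_NAME_VALUE (x_1) is simply set to the RHS below; no unwind
   record is needed since the assignment dominates all uses of x_1.  */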
1889
1890 static void
1891 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
1892 {
1893 tree lhs;
1894 enum tree_code lhs_code;
1895
1896 gcc_assert (is_gimple_assign (stmt));
1897
1898 lhs = gimple_assign_lhs (stmt);
1899 lhs_code = TREE_CODE (lhs);
1900
1901 if (lhs_code == SSA_NAME
1902 && gimple_assign_single_p (stmt))
1903 {
1904 tree rhs = gimple_assign_rhs1 (stmt);
1905
1906 /* If the RHS of the assignment is a constant or another variable that
1907 may be propagated, register it in the CONST_AND_COPIES table. We
1908 do not need to record unwind data for this, since this is a true
1909 assignment and not an equivalence inferred from a comparison. All
1910 uses of this ssa name are dominated by this assignment, so unwinding
1911 just costs time and space. */
1912 if (may_optimize_p
1913 && (TREE_CODE (rhs) == SSA_NAME
1914 || is_gimple_min_invariant (rhs)))
1915 {
1916 if (dump_file && (dump_flags & TDF_DETAILS))
1917 {
1918 fprintf (dump_file, "==== ASGN ");
1919 print_generic_expr (dump_file, lhs, 0);
1920 fprintf (dump_file, " = ");
1921 print_generic_expr (dump_file, rhs, 0);
1922 fprintf (dump_file, "\n");
1923 }
1924
1925 set_ssa_name_value (lhs, rhs);
1926 }
1927 }
1928
1929 /* A memory store, even an aliased store, creates a useful
1930 equivalence. By exchanging the LHS and RHS, creating suitable
1931 vops and recording the result in the available expression table,
1932 we may be able to expose more redundant loads. */
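/* A hypothetical sketch of the idea: after the store

       *p_5 = x_3;

   we enter the artificial statement x_3 = *p_5 into the table, so
   that a dominated load such as y_6 = *p_5 (with a matching VUSE)
   can later be replaced by y_6 = x_3.  */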
1933 if (!gimple_has_volatile_ops (stmt)
1934 && gimple_references_memory_p (stmt)
1935 && gimple_assign_single_p (stmt)
1936 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
1937 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
1938 && !is_gimple_reg (lhs))
1939 {
1940 tree rhs = gimple_assign_rhs1 (stmt);
1941 gimple new_stmt;
1942
1943 /* Build a new statement with the RHS and LHS exchanged. */
1944 if (TREE_CODE (rhs) == SSA_NAME)
1945 {
1946 /* NOTE tuples. The call to gimple_build_assign below replaced
1947 a call to build_gimple_modify_stmt, which did not set the
1948 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
1949 may cause an SSA validation failure, as the LHS may be a
1950 default-initialized name and should have no definition. I'm
1951 a bit dubious of this, as the artificial statement that we
1952 generate here may in fact be ill-formed, but it is simply
1953 used as an internal device in this pass, and never becomes
1954 part of the CFG. */
1955 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
1956 new_stmt = gimple_build_assign (rhs, lhs);
1957 SSA_NAME_DEF_STMT (rhs) = defstmt;
1958 }
1959 else
1960 new_stmt = gimple_build_assign (rhs, lhs);
1961
1962 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
1963
1964 /* Finally enter the statement into the available expression
1965 table. */
1966 lookup_avail_expr (new_stmt, true);
1967 }
1968 }
1969
1970 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
1971 CONST_AND_COPIES. */
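/* For instance (hypothetical names), if CONST_AND_COPIES records
   x_1 = 5 and STMT is

       y_2 = x_1 + 1;

   then the use of x_1 is replaced so STMT becomes y_2 = 5 + 1,
   provided none of the safety checks below reject the replacement.  */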
1972
1973 static void
1974 cprop_operand (gimple stmt, use_operand_p op_p)
1975 {
1976 tree val;
1977 tree op = USE_FROM_PTR (op_p);
1978
1979 /* If the operand has a known constant value or it is known to be a
1980 copy of some other variable, use the value or copy stored in
1981 CONST_AND_COPIES. */
1982 val = SSA_NAME_VALUE (op);
1983 if (val && val != op)
1984 {
1985 /* Do not change the base variable in the virtual operand
1986 tables. That would make it impossible to reconstruct
1987 the renamed virtual operand if we later modify this
1988 statement. Also only allow the new value to be an SSA_NAME
1989 for propagation into virtual operands. */
1990 if (!is_gimple_reg (op)
1991 && (TREE_CODE (val) != SSA_NAME
1992 || is_gimple_reg (val)
1993 || get_virtual_var (val) != get_virtual_var (op)))
1994 return;
1995
1996 /* Do not replace hard register operands in asm statements. */
1997 if (gimple_code (stmt) == GIMPLE_ASM
1998 && !may_propagate_copy_into_asm (op))
1999 return;
2000
2001 /* Certain operands are not allowed to be copy propagated due
2002 to their interaction with exception handling and some GCC
2003 extensions. */
2004 if (!may_propagate_copy (op, val))
2005 return;
2006
2007 /* Do not propagate addresses that point to volatiles into memory
2008 stmts without volatile operands. */
2009 if (POINTER_TYPE_P (TREE_TYPE (val))
2010 && TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (val)))
2011 && gimple_has_mem_ops (stmt)
2012 && !gimple_has_volatile_ops (stmt))
2013 return;
2014
2015 /* Do not propagate copies if the propagated value is at a deeper loop
2016 depth than the propagatee. Otherwise, this may move loop variant
2017 variables outside of their loops and prevent coalescing
2018 opportunities. If the value was loop invariant, it will be hoisted
2019 by LICM and exposed for copy propagation. */
2020 if (loop_depth_of_name (val) > loop_depth_of_name (op))
2021 return;
2022
2023 /* Do not propagate copies into simple IV increment statements.
2024 See PR23821 for how this can disturb IV analysis. */
2025 if (TREE_CODE (val) != INTEGER_CST
2026 && simple_iv_increment_p (stmt))
2027 return;
2028
2029 /* Dump details. */
2030 if (dump_file && (dump_flags & TDF_DETAILS))
2031 {
2032 fprintf (dump_file, " Replaced '");
2033 print_generic_expr (dump_file, op, dump_flags);
2034 fprintf (dump_file, "' with %s '",
2035 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2036 print_generic_expr (dump_file, val, dump_flags);
2037 fprintf (dump_file, "'\n");
2038 }
2039
2040 if (TREE_CODE (val) != SSA_NAME)
2041 opt_stats.num_const_prop++;
2042 else
2043 opt_stats.num_copy_prop++;
2044
2045 propagate_value (op_p, val);
2046
2047 /* And note that we modified this statement. This is now
2048 safe, even if we changed virtual operands since we will
2049 rescan the statement and rewrite its operands again. */
2050 gimple_set_modified (stmt, true);
2051 }
2052 }
2053
2054 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2055 known value for that SSA_NAME (or NULL if no value is known).
2056
2057 Propagate values from CONST_AND_COPIES into the uses, vuses and
2058 vdef_ops of STMT. */
2059
2060 static void
2061 cprop_into_stmt (gimple stmt)
2062 {
2063 use_operand_p op_p;
2064 ssa_op_iter iter;
2065
2066 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_ALL_USES)
2067 {
2068 if (TREE_CODE (USE_FROM_PTR (op_p)) == SSA_NAME)
2069 cprop_operand (stmt, op_p);
2070 }
2071 }
2072
2073 /* Optimize the statement pointed to by iterator SI.
2074
2075 We try to perform some simplistic global redundancy elimination and
2076 constant propagation:
2077
2078 1- To detect global redundancy, we keep track of expressions that have
2079 been computed in this block and its dominators. If we find that the
2080 same expression is computed more than once, we eliminate repeated
2081 computations by using the target of the first one.
2082
2083 2- Constant values and copy assignments. This is used to do very
2084 simplistic constant and copy propagation. When a constant or copy
2085 assignment is found, we map the value on the RHS of the assignment to
2086 the variable in the LHS in the CONST_AND_COPIES table. */
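/* Hypothetical fragments illustrating both cases:

     For 1:  x_1 = a_2 + b_3;  ...  y_4 = a_2 + b_3;
             the second statement is rewritten as y_4 = x_1.

     For 2:  x_1 = 4;  ...  y_2 = x_1 + 1;
             the use of x_1 is replaced by 4, and folding then
             yields y_2 = 5.  */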
2087
2088 static void
2089 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2090 {
2091 gimple stmt, old_stmt;
2092 bool may_optimize_p;
2093 bool modified_p = false;
2094
2095 old_stmt = stmt = gsi_stmt (si);
2096
2097 if (gimple_code (stmt) == GIMPLE_COND)
2098 canonicalize_comparison (stmt);
2099
2100 update_stmt_if_modified (stmt);
2101 opt_stats.num_stmts++;
2102
2103 if (dump_file && (dump_flags & TDF_DETAILS))
2104 {
2105 fprintf (dump_file, "Optimizing statement ");
2106 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2107 }
2108
2109 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2110 cprop_into_stmt (stmt);
2111
2112 /* If the statement has been modified with constant replacements,
2113 fold its RHS before checking for redundant computations. */
2114 if (gimple_modified_p (stmt))
2115 {
2116 tree rhs = NULL;
2117
2118 /* Try to fold the statement making sure that STMT is kept
2119 up to date. */
2120 if (fold_stmt (&si))
2121 {
2122 stmt = gsi_stmt (si);
2123 gimple_set_modified (stmt, true);
2124
2125 if (dump_file && (dump_flags & TDF_DETAILS))
2126 {
2127 fprintf (dump_file, " Folded to: ");
2128 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2129 }
2130 }
2131
2132 /* We only need to consider cases that can yield a gimple operand. */
2133 if (gimple_assign_single_p (stmt))
2134 rhs = gimple_assign_rhs1 (stmt);
2135 else if (gimple_code (stmt) == GIMPLE_GOTO)
2136 rhs = gimple_goto_dest (stmt);
2137 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2138 /* This should never be an ADDR_EXPR. */
2139 rhs = gimple_switch_index (stmt);
2140
2141 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2142 recompute_tree_invariant_for_addr_expr (rhs);
2143
2144 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2145 even if fold_stmt updated the stmt already and thus cleared
2146 gimple_modified_p flag on it. */
2147 modified_p = true;
2148 }
2149
2150 /* Check for redundant computations. Do this optimization only for
2151 assignments, calls, conditionals and switches with no volatile ops. */
2152 may_optimize_p = (!gimple_has_volatile_ops (stmt)
2153 && ((is_gimple_assign (stmt)
2154 && !gimple_rhs_has_side_effects (stmt))
2155 || (is_gimple_call (stmt)
2156 && gimple_call_lhs (stmt) != NULL_TREE
2157 && !gimple_rhs_has_side_effects (stmt))
2158 || gimple_code (stmt) == GIMPLE_COND
2159 || gimple_code (stmt) == GIMPLE_SWITCH));
2160
2161 if (may_optimize_p)
2162 {
2163 if (gimple_code (stmt) == GIMPLE_CALL)
2164 {
2165 /* Resolve __builtin_constant_p. If it hasn't been
2166 folded to integer_one_node by now, it's fairly
2167 certain that the value simply isn't constant. */
2168 tree callee = gimple_call_fndecl (stmt);
2169 if (callee
2170 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2171 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2172 {
2173 propagate_tree_value_into_stmt (&si, integer_zero_node);
2174 stmt = gsi_stmt (si);
2175 }
2176 }
2177
2178 update_stmt_if_modified (stmt);
2179 eliminate_redundant_computations (&si);
2180 stmt = gsi_stmt (si);
2181 }
2182
2183 /* Record any additional equivalences created by this statement. */
2184 if (is_gimple_assign (stmt))
2185 record_equivalences_from_stmt (stmt, may_optimize_p);
2186
2187 /* If STMT is a COND_EXPR and it was modified, then we may know
2188 where it goes. If that is the case, then mark the CFG as altered.
2189
2190 This will cause us to later call remove_unreachable_blocks and
2191 cleanup_tree_cfg when it is safe to do so. It is not safe to
2192 clean things up here since removal of edges and such can trigger
2193 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2194 the manager.
2195
2196 That's all fine and good, except that once SSA_NAMEs are released
2197 to the manager, we must not call create_ssa_name until all references
2198 to released SSA_NAMEs have been eliminated.
2199
2200 All references to the deleted SSA_NAMEs cannot be eliminated until
2201 we remove unreachable blocks.
2202
2203 We cannot remove unreachable blocks until after we have completed
2204 any queued jump threading.
2205
2206 We cannot complete any queued jump threads until we have taken
2207 appropriate variables out of SSA form. Taking variables out of
2208 SSA form can call create_ssa_name and thus we lose.
2209
2210 Ultimately I suspect we're going to need to change the interface
2211 into the SSA_NAME manager. */
2212 if (gimple_modified_p (stmt) || modified_p)
2213 {
2214 tree val = NULL;
2215
2216 update_stmt_if_modified (stmt);
2217
2218 if (gimple_code (stmt) == GIMPLE_COND)
2219 val = fold_binary_loc (gimple_location (stmt),
2220 gimple_cond_code (stmt), boolean_type_node,
2221 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2222 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2223 val = gimple_switch_index (stmt);
2224
2225 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2226 cfg_altered = true;
2227
2228 /* If we simplified a statement in such a way as to show that it
2229 cannot trap, update the EH information and the CFG to match. */
2230 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2231 {
2232 bitmap_set_bit (need_eh_cleanup, bb->index);
2233 if (dump_file && (dump_flags & TDF_DETAILS))
2234 fprintf (dump_file, " Flagged to clear EH edges.\n");
2235 }
2236 }
2237 }
2238
2239 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2240 If found, return its LHS. Otherwise insert STMT in the table and
2241 return NULL_TREE.
2242
2243 Also, when an expression is first inserted in the table, it is
2244 also added to AVAIL_EXPRS_STACK, so that it can be removed when
2245 we finish processing this block and its children. */
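/* A hypothetical sketch of the resulting behavior:

     x_1 = a_2 + b_3;   lookup_avail_expr (stmt, true) -> NULL_TREE
                        (expression inserted, pushed on the stack)
     y_4 = a_2 + b_3;   lookup_avail_expr (stmt, true) -> x_1  */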
2246
2247 static tree
2248 lookup_avail_expr (gimple stmt, bool insert)
2249 {
2250 void **slot;
2251 tree lhs;
2252 tree temp;
2253 struct expr_hash_elt element;
2254
2255 /* Get LHS of assignment or call, else NULL_TREE. */
2256 lhs = gimple_get_lhs (stmt);
2257
2258 initialize_hash_element (stmt, lhs, &element);
2259
2260 if (dump_file && (dump_flags & TDF_DETAILS))
2261 {
2262 fprintf (dump_file, "LKUP ");
2263 print_expr_hash_elt (dump_file, &element);
2264 }
2265
2266 /* Don't bother remembering constant assignments and copy operations.
2267 Constants and copy operations are handled by the constant/copy propagator
2268 in optimize_stmt. */
2269 if (element.expr.kind == EXPR_SINGLE
2270 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2271 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2272 return NULL_TREE;
2273
2274 /* Finally try to find the expression in the main expression hash table. */
2275 slot = htab_find_slot_with_hash (avail_exprs, &element, element.hash,
2276 (insert ? INSERT : NO_INSERT));
2277 if (slot == NULL)
2278 return NULL_TREE;
2279
2280 if (*slot == NULL)
2281 {
2282 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2283 *element2 = element;
2284 element2->stamp = element2;
2285 *slot = (void *) element2;
2286
2287 if (dump_file && (dump_flags & TDF_DETAILS))
2288 {
2289 fprintf (dump_file, "2>>> ");
2290 print_expr_hash_elt (dump_file, element2);
2291 }
2292
2293 VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, element2);
2294 return NULL_TREE;
2295 }
2296
2297 /* Extract the LHS of the assignment so that it can be used as the current
2298 definition of another variable. */
2299 lhs = ((struct expr_hash_elt *)*slot)->lhs;
2300
2301 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2302 use the value from the const_and_copies table. */
2303 if (TREE_CODE (lhs) == SSA_NAME)
2304 {
2305 temp = SSA_NAME_VALUE (lhs);
2306 if (temp)
2307 lhs = temp;
2308 }
2309
2310 if (dump_file && (dump_flags & TDF_DETAILS))
2311 {
2312 fprintf (dump_file, "FIND: ");
2313 print_generic_expr (dump_file, lhs, 0);
2314 fprintf (dump_file, "\n");
2315 }
2316
2317 return lhs;
2318 }
2319
2320 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2321 for expressions using the code of the expression and the SSA numbers of
2322 its operands. */
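/* Including the VUSE (below) is what keeps apart two loads that are
   separated by a clobbering store; hypothetically:

       x_2 = *p_1;    # VUSE <.MEM_4>
       *q_3 = 0;
       y_5 = *p_1;    # VUSE <.MEM_6>

   the differing VUSEs give different hashes, so the second load is
   not treated as redundant with the first.  */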
2323
2324 static hashval_t
2325 avail_expr_hash (const void *p)
2326 {
2327 gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
2328 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2329 tree vuse;
2330 hashval_t val = 0;
2331
2332 val = iterative_hash_hashable_expr (expr, val);
2333
2334 /* If the hash table entry is not associated with a statement, then we
2335 can just hash the expression and not worry about virtual operands
2336 and such. */
2337 if (!stmt)
2338 return val;
2339
2340 /* Add the SSA version number of the vuse operand. This is important
2341 because compound variables like arrays are not renamed in the
2342 operands. Rather, the rename is done on the virtual variable
2343 representing all the elements of the array. */
2344 if ((vuse = gimple_vuse (stmt)))
2345 val = iterative_hash_expr (vuse, val);
2346
2347 return val;
2348 }
2349
2350 static hashval_t
2351 real_avail_expr_hash (const void *p)
2352 {
2353 return ((const struct expr_hash_elt *)p)->hash;
2354 }
2355
2356 static int
2357 avail_expr_eq (const void *p1, const void *p2)
2358 {
2359 gimple stmt1 = ((const struct expr_hash_elt *)p1)->stmt;
2360 const struct hashable_expr *expr1 = &((const struct expr_hash_elt *)p1)->expr;
2361 const struct expr_hash_elt *stamp1 = ((const struct expr_hash_elt *)p1)->stamp;
2362 gimple stmt2 = ((const struct expr_hash_elt *)p2)->stmt;
2363 const struct hashable_expr *expr2 = &((const struct expr_hash_elt *)p2)->expr;
2364 const struct expr_hash_elt *stamp2 = ((const struct expr_hash_elt *)p2)->stamp;
2365
2366 /* This case should apply only when removing entries from the table. */
2367 if (stamp1 == stamp2)
2368 return true;
2369
2370 /* FIXME tuples:
2371 We add stmts to a hash table and then modify them. To detect the case
2372 that we modify a stmt and then search for it, we assume that the hash
2373 is always modified by that change.
2374 We have to fully check why this doesn't happen on trunk or rewrite
2375 this in a more reliable (and easier to understand) way. */
2376 if (((const struct expr_hash_elt *)p1)->hash
2377 != ((const struct expr_hash_elt *)p2)->hash)
2378 return false;
2379
2380 /* In case of a collision, both RHS have to be identical and have the
2381 same VUSE operands. */
2382 if (hashable_expr_equal_p (expr1, expr2)
2383 && types_compatible_p (expr1->type, expr2->type))
2384 {
2385 /* Note that STMT1 and/or STMT2 may be NULL. */
2386 return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
2387 == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
2388 }
2389
2390 return false;
2391 }
2392
2393 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2394 up degenerate PHIs created by or exposed by jump threading. */
2395
2396 /* Given PHI, return its RHS if the PHI is a degenerate; otherwise return
2397 NULL. */
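/* For example, the hypothetical PHI

       x_1 = PHI <y_2(3), x_1(4), y_2(5)>

   is a degenerate: ignoring the self-referencing argument, every
   remaining argument is y_2, so its result is y_2.  */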
2398
2399 tree
2400 degenerate_phi_result (gimple phi)
2401 {
2402 tree lhs = gimple_phi_result (phi);
2403 tree val = NULL;
2404 size_t i;
2405
2406 /* Ignoring arguments which are the same as LHS, if all the remaining
2407 arguments are the same, then the PHI is a degenerate and has the
2408 value of that common argument. */
2409 for (i = 0; i < gimple_phi_num_args (phi); i++)
2410 {
2411 tree arg = gimple_phi_arg_def (phi, i);
2412
2413 if (arg == lhs)
2414 continue;
2415 else if (!arg)
2416 break;
2417 else if (!val)
2418 val = arg;
2419 else if (arg == val)
2420 continue;
2421 /* We open-code some of operand_equal_p's checks here, not only to
2422 speed things up, but also to avoid crashing when dereferencing
2423 the type of a released SSA name. */
2424 else if (TREE_CODE (val) != TREE_CODE (arg)
2425 || TREE_CODE (val) == SSA_NAME
2426 || !operand_equal_p (arg, val, 0))
2427 break;
2428 }
2429 return (i == gimple_phi_num_args (phi) ? val : NULL);
2430 }
2431
2432 /* Given a statement STMT, which is either a PHI node or an assignment,
2433 remove it from the IL. */
2434
2435 static void
2436 remove_stmt_or_phi (gimple stmt)
2437 {
2438 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2439
2440 if (gimple_code (stmt) == GIMPLE_PHI)
2441 remove_phi_node (&gsi, true);
2442 else
2443 {
2444 gsi_remove (&gsi, true);
2445 release_defs (stmt);
2446 }
2447 }
2448
2449 /* Given a statement STMT, which is either a PHI node or an assignment,
2450 return the "rhs" of the node. In the case of a non-degenerate
2451 PHI, NULL is returned. */
2452
2453 static tree
2454 get_rhs_or_phi_arg (gimple stmt)
2455 {
2456 if (gimple_code (stmt) == GIMPLE_PHI)
2457 return degenerate_phi_result (stmt);
2458 else if (gimple_assign_single_p (stmt))
2459 return gimple_assign_rhs1 (stmt);
2460 else
2461 gcc_unreachable ();
2462 }
2463
2464
2465 /* Given a statement STMT, which is either a PHI node or an assignment,
2466 return the "lhs" of the node. */
2467
2468 static tree
2469 get_lhs_or_phi_result (gimple stmt)
2470 {
2471 if (gimple_code (stmt) == GIMPLE_PHI)
2472 return gimple_phi_result (stmt);
2473 else if (is_gimple_assign (stmt))
2474 return gimple_assign_lhs (stmt);
2475 else
2476 gcc_unreachable ();
2477 }
2478
2479 /* Propagate RHS into all uses of LHS (when possible).
2480
2481 RHS and LHS are derived from STMT, which is passed in solely so
2482 that we can remove it if propagation is successful.
2483
2484 When propagating into a PHI node or into a statement which turns
2485 into a trivial copy or constant initialization, set the
2486 appropriate bit in INTERESTING_NAMEs so that we will visit those
2487 nodes as well in an effort to pick up secondary optimization
2488 opportunities. */
2489
2490 static void
2491 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2492 {
2493 /* First verify that propagation is valid and isn't going to move a
2494 loop variant variable outside its loop. */
2495 if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
2496 && (TREE_CODE (rhs) != SSA_NAME
2497 || ! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs))
2498 && may_propagate_copy (lhs, rhs)
2499 && loop_depth_of_name (lhs) >= loop_depth_of_name (rhs))
2500 {
2501 use_operand_p use_p;
2502 imm_use_iterator iter;
2503 gimple use_stmt;
2504 bool all = true;
2505
2506 /* Dump details. */
2507 if (dump_file && (dump_flags & TDF_DETAILS))
2508 {
2509 fprintf (dump_file, " Replacing '");
2510 print_generic_expr (dump_file, lhs, dump_flags);
2511 fprintf (dump_file, "' with %s '",
2512 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2513 print_generic_expr (dump_file, rhs, dump_flags);
2514 fprintf (dump_file, "'\n");
2515 }
2516
2517 /* Walk over every use of LHS and try to replace the use with RHS.
2518 At this point the only reason such a propagation would fail is
2519 if the use occurs in an ASM_EXPR. */
2520 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2521 {
2522 /* Leave debug stmts alone. If we succeed in propagating
2523 all non-debug uses, we'll drop the DEF, and propagation
2524 into debug stmts will occur then. */
2525 if (gimple_debug_bind_p (use_stmt))
2526 continue;
2527
2528 /* It's not always safe to propagate into an ASM_EXPR. */
2529 if (gimple_code (use_stmt) == GIMPLE_ASM
2530 && ! may_propagate_copy_into_asm (lhs))
2531 {
2532 all = false;
2533 continue;
2534 }
2535
2536 /* It's not ok to propagate into the definition stmt of RHS.
2537 <bb 9>:
2538 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2539 g_67.1_6 = prephitmp.12_36;
2540 goto <bb 9>;
2541 While this is strictly all dead code, we do not want to
2542 deal with it here. */
2543 if (TREE_CODE (rhs) == SSA_NAME
2544 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2545 {
2546 all = false;
2547 continue;
2548 }
2549
2550 /* Dump details. */
2551 if (dump_file && (dump_flags & TDF_DETAILS))
2552 {
2553 fprintf (dump_file, " Original statement:");
2554 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2555 }
2556
2557 /* Propagate the RHS into this use of the LHS. */
2558 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2559 propagate_value (use_p, rhs);
2560
2561 /* Special cases to avoid useless calls into the folding
2562 routines, operand scanning, etc.
2563
2564 First, propagation into a PHI may cause the PHI to become
2565 a degenerate, so mark the PHI as interesting. No other
2566 actions are necessary.
2567
2568 Second, if we're propagating a virtual operand and the
2569 propagation does not change the underlying _DECL node for
2570 the virtual operand, then no further actions are necessary. */
2571 if (gimple_code (use_stmt) == GIMPLE_PHI
2572 || (! is_gimple_reg (lhs)
2573 && TREE_CODE (rhs) == SSA_NAME
2574 && SSA_NAME_VAR (lhs) == SSA_NAME_VAR (rhs)))
2575 {
2576 /* Dump details. */
2577 if (dump_file && (dump_flags & TDF_DETAILS))
2578 {
2579 fprintf (dump_file, " Updated statement:");
2580 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2581 }
2582
2583 /* Propagation into a PHI may expose new degenerate PHIs,
2584 so mark the result of the PHI as interesting. */
2585 if (gimple_code (use_stmt) == GIMPLE_PHI)
2586 {
2587 tree result = get_lhs_or_phi_result (use_stmt);
2588 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2589 }
2590
2591 continue;
2592 }
2593
2594 /* From this point onward we are propagating into a
2595 real statement. Folding may (or may not) be possible,
2596 we may expose new operands, expose dead EH edges,
2597 etc. */
2598 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2599 cannot fold a call that simplifies to a constant,
2600 because the GIMPLE_CALL must be replaced by a
2601 GIMPLE_ASSIGN, and there is no way to effect such a
2602 transformation in-place. We might want to consider
2603 using the more general fold_stmt here. */
2604 fold_stmt_inplace (use_stmt);
2605
2606 /* Sometimes propagation can expose new operands to the
2607 renamer. */
2608 update_stmt (use_stmt);
2609
2610 /* Dump details. */
2611 if (dump_file && (dump_flags & TDF_DETAILS))
2612 {
2613 fprintf (dump_file, " Updated statement:");
2614 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2615 }
2616
2617 /* If we replaced a variable index with a constant, then
2618 we would need to update the invariant flag for ADDR_EXPRs. */
2619 if (gimple_assign_single_p (use_stmt)
2620 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2621 recompute_tree_invariant_for_addr_expr
2622 (gimple_assign_rhs1 (use_stmt));
2623
2624 /* If we cleaned up EH information from the statement,
2625 mark its containing block as needing EH cleanups. */
2626 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2627 {
2628 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2629 if (dump_file && (dump_flags & TDF_DETAILS))
2630 fprintf (dump_file, " Flagged to clear EH edges.\n");
2631 }
2632
2633 /* Propagation may expose new trivial copy/constant propagation
2634 opportunities. */
2635 if (gimple_assign_single_p (use_stmt)
2636 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2637 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2638 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2639 {
2640 tree result = get_lhs_or_phi_result (use_stmt);
2641 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2642 }
2643
2644 /* Propagation into these nodes may make certain edges in
2645 the CFG unexecutable. We want to identify them, as PHI nodes
2646 at the destination of those unexecutable edges may become
2647 degenerates. */
2648 else if (gimple_code (use_stmt) == GIMPLE_COND
2649 || gimple_code (use_stmt) == GIMPLE_SWITCH
2650 || gimple_code (use_stmt) == GIMPLE_GOTO)
2651 {
2652 tree val;
2653
2654 if (gimple_code (use_stmt) == GIMPLE_COND)
2655 val = fold_binary_loc (gimple_location (use_stmt),
2656 gimple_cond_code (use_stmt),
2657 boolean_type_node,
2658 gimple_cond_lhs (use_stmt),
2659 gimple_cond_rhs (use_stmt));
2660 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2661 val = gimple_switch_index (use_stmt);
2662 else
2663 val = gimple_goto_dest (use_stmt);
2664
2665 if (val && is_gimple_min_invariant (val))
2666 {
2667 basic_block bb = gimple_bb (use_stmt);
2668 edge te = find_taken_edge (bb, val);
2669 edge_iterator ei;
2670 edge e;
2671 gimple_stmt_iterator gsi, psi;
2672
2673 /* Remove all outgoing edges except TE. */
2674 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2675 {
2676 if (e != te)
2677 {
2678 /* Mark all the PHI nodes at the destination of
2679 the unexecutable edge as interesting. */
2680 for (psi = gsi_start_phis (e->dest);
2681 !gsi_end_p (psi);
2682 gsi_next (&psi))
2683 {
2684 gimple phi = gsi_stmt (psi);
2685
2686 tree result = gimple_phi_result (phi);
2687 int version = SSA_NAME_VERSION (result);
2688
2689 bitmap_set_bit (interesting_names, version);
2690 }
2691
2692 te->probability += e->probability;
2693
2694 te->count += e->count;
2695 remove_edge (e);
2696 cfg_altered = true;
2697 }
2698 else
2699 ei_next (&ei);
2700 }
2701
2702 gsi = gsi_last_bb (gimple_bb (use_stmt));
2703 gsi_remove (&gsi, true);
2704
2705 /* And fixup the flags on the single remaining edge. */
2706 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2707 te->flags &= ~EDGE_ABNORMAL;
2708 te->flags |= EDGE_FALLTHRU;
2709 if (te->probability > REG_BR_PROB_BASE)
2710 te->probability = REG_BR_PROB_BASE;
2711 }
2712 }
2713 }
2714
2715 /* Ensure there is nothing else to do. */
2716 gcc_assert (!all || has_zero_uses (lhs));
2717
2718 /* If we were able to propagate away all uses of LHS, then
2719 we can remove STMT. */
2720 if (all)
2721 remove_stmt_or_phi (stmt);
2722 }
2723 }
2724
2725 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2726 a statement that is a trivial copy or constant initialization.
2727
2728 Attempt to eliminate STMT by propagating its RHS into all uses of
2729 its LHS. This may in turn set new bits in INTERESTING_NAMES
2730 for nodes we want to revisit later.
2731
2732 All exit paths should clear INTERESTING_NAMES for the result
2733 of STMT. */
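/* E.g. (hypothetical), if STMT is x_1 = PHI <y_2(3), y_2(4)>, then
   LHS is x_1 and RHS is y_2; propagate_rhs_into_lhs tries to replace
   every use of x_1 with y_2 and, if all uses are replaced, removes
   the PHI itself.  */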
2734
2735 static void
2736 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2737 {
2738 tree lhs = get_lhs_or_phi_result (stmt);
2739 tree rhs;
2740 int version = SSA_NAME_VERSION (lhs);
2741
2742 /* If the LHS of this statement or PHI has no uses, then we can
2743 just eliminate it. This can occur if, for example, the PHI
2744 was created by block duplication due to threading and its only
2745 use was in the conditional at the end of the block which was
2746 deleted. */
2747 if (has_zero_uses (lhs))
2748 {
2749 bitmap_clear_bit (interesting_names, version);
2750 remove_stmt_or_phi (stmt);
2751 return;
2752 }
2753
2754 /* Get the RHS of the assignment or PHI node if the PHI is a
2755 degenerate. */
2756 rhs = get_rhs_or_phi_arg (stmt);
2757 if (!rhs)
2758 {
2759 bitmap_clear_bit (interesting_names, version);
2760 return;
2761 }
2762
2763 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
2764
2765 /* Note that STMT may well have been deleted by now, so do
2766 not access it; instead use the saved version number to clear
2767 the entry for STMT's result in the worklist. */
2768 bitmap_clear_bit (interesting_names, version);
2769 }
2770
2771 /* The first phase in degenerate PHI elimination.
2772
2773 Eliminate the degenerate PHIs in BB, then recurse on the
2774 dominator children of BB. */
2775
2776 static void
2777 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
2778 {
2779 gimple_stmt_iterator gsi;
2780 basic_block son;
2781
2782 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2783 {
2784 gimple phi = gsi_stmt (gsi);
2785
2786 eliminate_const_or_copy (phi, interesting_names);
2787 }
2788
2789 /* Recurse into the dominator children of BB. */
2790 for (son = first_dom_son (CDI_DOMINATORS, bb);
2791 son;
2792 son = next_dom_son (CDI_DOMINATORS, son))
2793 eliminate_degenerate_phis_1 (son, interesting_names);
2794 }
2795
2796
2797 /* A very simple pass to eliminate degenerate PHI nodes from the
2798 IL. This is meant to be fast enough to be run several
2799 times in the optimization pipeline.
2800
2801 Certain optimizations, particularly those which duplicate blocks
2802 or remove edges from the CFG can create or expose PHIs which are
2803 trivial copies or constant initializations.
2804
2805 While we could pick up these optimizations in DOM or with the
2806 combination of copy-prop and CCP, those solutions are far too
2807 heavy-weight for our needs.
2808
2809 This implementation has two phases so that we can efficiently
2810 eliminate the first order degenerate PHIs and second order
2811 degenerate PHIs.
2812
2813 The first phase performs a dominator walk to identify and eliminate
2814 the vast majority of the degenerate PHIs. When a degenerate PHI
2815 is identified and eliminated any affected statements or PHIs
2816 are put on a worklist.
2817
2818 The second phase eliminates degenerate PHIs and trivial copies
2819 or constant initializations using the worklist. This is how we
2820 pick up the secondary optimization opportunities with minimal
2821 cost. */
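/* A hypothetical second order case:

       x_1 = PHI <y_2(3), y_2(4)>     <- first order degenerate
       z_5 = PHI <x_1(6), y_2(7)>     <- degenerate only once x_1
                                         has been replaced by y_2

   Eliminating x_1 in the first phase sets z_5's bit in
   INTERESTING_NAMES, and the worklist phase then picks it up.  */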
2822
2823 static unsigned int
2824 eliminate_degenerate_phis (void)
2825 {
2826 bitmap interesting_names;
2827 bitmap interesting_names1;
2828
2829 /* Bitmap of blocks which need EH information updated. We cannot
2830 update it on-the-fly as doing so invalidates the dominator tree. */
2831 need_eh_cleanup = BITMAP_ALLOC (NULL);
2832
2833 /* INTERESTING_NAMES is effectively our worklist, indexed by
2834 SSA_NAME_VERSION.
2835
2836 A set bit indicates that the statement or PHI node which
2837 defines the SSA_NAME should be (re)examined to determine if
2838 it has become a degenerate PHI or trivial const/copy propagation
2839 opportunity.
2840
2841 Experiments have shown we generally get better compile-time
2842 behavior with bitmaps rather than sbitmaps. */
2843 interesting_names = BITMAP_ALLOC (NULL);
2844 interesting_names1 = BITMAP_ALLOC (NULL);
2845
2846 calculate_dominance_info (CDI_DOMINATORS);
2847 cfg_altered = false;
2848
2849 /* First phase. Eliminate degenerate PHIs via a dominator
2850 walk of the CFG.
2851
2852 Experiments have indicated that we generally get better
2853 compile-time behavior by visiting blocks in the first
2854 phase in dominator order. Presumably this is because walking
2855 in dominator order leaves fewer PHIs for later examination
2856 by the worklist phase. */
2857 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR, interesting_names);
2858
2859 /* Second phase. Eliminate second order degenerate PHIs as well
2860 as trivial copies or constant initializations identified by
2861 the first phase or this phase. Basically we keep iterating
2862 until our set of INTERESTING_NAMEs is empty. */
2863 while (!bitmap_empty_p (interesting_names))
2864 {
2865 unsigned int i;
2866 bitmap_iterator bi;
2867
2868 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
2869 changed during the loop. Copy it to another bitmap and
2870 use that. */
2871 bitmap_copy (interesting_names1, interesting_names);
2872
2873 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
2874 {
2875 tree name = ssa_name (i);
2876
2877 /* Ignore SSA_NAMEs that have been released because
2878 their defining statement was deleted (unreachable). */
2879 if (name)
2880 eliminate_const_or_copy (SSA_NAME_DEF_STMT (name),
2881 interesting_names);
2882 }
2883 }
2884
2885 if (cfg_altered)
2886 free_dominance_info (CDI_DOMINATORS);
2887
2888 /* Propagation of const and copies may make some EH edges dead. Purge
2889 such edges from the CFG as needed. */
2890 if (!bitmap_empty_p (need_eh_cleanup))
2891 {
2892 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
2893 BITMAP_FREE (need_eh_cleanup);
2894 }
2895
2896 BITMAP_FREE (interesting_names);
2897 BITMAP_FREE (interesting_names1);
2898 return 0;
2899 }
2900
2901 struct gimple_opt_pass pass_phi_only_cprop =
2902 {
2903 {
2904 GIMPLE_PASS,
2905 "phicprop", /* name */
2906 gate_dominator, /* gate */
2907 eliminate_degenerate_phis, /* execute */
2908 NULL, /* sub */
2909 NULL, /* next */
2910 0, /* static_pass_number */
2911 TV_TREE_PHI_CPROP, /* tv_id */
2912 PROP_cfg | PROP_ssa, /* properties_required */
2913 0, /* properties_provided */
2914 0, /* properties_destroyed */
2915 0, /* todo_flags_start */
2916 TODO_cleanup_cfg
2917 | TODO_dump_func
2918 | TODO_ggc_collect
2919 | TODO_verify_ssa
2920 | TODO_verify_stmts
2921 | TODO_update_ssa /* todo_flags_finish */
2922 }
2923 };