1 /* SSA Dominator optimizations for trees
2 Copyright (C) 2001-2015 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "cfghooks.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "hard-reg-set.h"
29 #include "ssa.h"
30 #include "alias.h"
31 #include "fold-const.h"
32 #include "stor-layout.h"
33 #include "flags.h"
34 #include "tm_p.h"
35 #include "cfganal.h"
36 #include "cfgloop.h"
37 #include "gimple-pretty-print.h"
38 #include "internal-fn.h"
39 #include "gimple-fold.h"
40 #include "tree-eh.h"
41 #include "gimple-iterator.h"
42 #include "tree-cfg.h"
43 #include "tree-into-ssa.h"
44 #include "domwalk.h"
45 #include "tree-pass.h"
46 #include "tree-ssa-propagate.h"
47 #include "tree-ssa-threadupdate.h"
48 #include "langhooks.h"
49 #include "params.h"
50 #include "tree-ssa-scopedtables.h"
51 #include "tree-ssa-threadedge.h"
52 #include "tree-ssa-dom.h"
53 #include "gimplify.h"
54 #include "tree-cfgcleanup.h"
55
56 /* This file implements optimizations on the dominator tree. */
57
58 /* Structure for recording known values of a conditional expression
59 at the exits from its block. */
60
61 struct cond_equivalence
62 {
63 struct hashable_expr cond;
64 tree value;
65 };
66
67 /* Structure for recording edge equivalences.
68
69 Computing and storing the edge equivalences instead of creating
70 them on-demand can save significant amounts of time, particularly
71 for pathological cases involving switch statements.
72
73 These structures live for a single iteration of the dominator
74 optimizer in the edge's AUX field. At the end of an iteration we
75 free each of these structures. */
76
77 struct edge_info
78 {
79 /* If this edge creates a simple equivalence, the LHS and RHS of
80 the equivalence will be stored here. */
81 tree lhs;
82 tree rhs;
83
84 /* Traversing an edge may also indicate one or more particular conditions
85 are true or false. */
86 vec<cond_equivalence> cond_equivalences;
87 };
88
89 /* Hash table with expressions made available during the renaming process.
90 When an assignment of the form X_i = EXPR is found, the statement is
91 stored in this table. If the same expression EXPR is later found on the
92 RHS of another statement, it is replaced with X_i (thus performing
93 global redundancy elimination). Similarly, as we pass through conditionals,
94 we record the conditional itself as having either a true or false value
95 in this table. */
96 static hash_table<expr_elt_hasher> *avail_exprs;
97
98 /* Unwindable equivalences, both const/copy and expression varieties. */
99 static const_and_copies *const_and_copies;
100 static avail_exprs_stack *avail_exprs_stack;
101
102 /* Track whether or not we have changed the control flow graph. */
103 static bool cfg_altered;
104
105 /* Bitmap of blocks that have had EH statements cleaned. We should
106 remove their dead edges eventually. */
107 static bitmap need_eh_cleanup;
108 static vec<gimple> need_noreturn_fixup;
109
110 /* Statistics for dominator optimizations. */
111 struct opt_stats_d
112 {
113 long num_stmts;
114 long num_exprs_considered;
115 long num_re;
116 long num_const_prop;
117 long num_copy_prop;
118 };
119
120 static struct opt_stats_d opt_stats;
121
122 /* Local functions. */
123 static void optimize_stmt (basic_block, gimple_stmt_iterator);
124 static tree lookup_avail_expr (gimple, bool);
125 static void htab_statistics (FILE *,
126 const hash_table<expr_elt_hasher> &);
127 static void record_cond (cond_equivalence *);
128 static void record_equality (tree, tree);
129 static void record_equivalences_from_phis (basic_block);
130 static void record_equivalences_from_incoming_edge (basic_block);
131 static void eliminate_redundant_computations (gimple_stmt_iterator *);
132 static void record_equivalences_from_stmt (gimple, int);
133 static edge single_incoming_edge_ignoring_loop_edges (basic_block);
134
135 /* Allocate an EDGE_INFO for edge E and attach it to E.
136 Return the new EDGE_INFO structure. */
137
138 static struct edge_info *
139 allocate_edge_info (edge e)
140 {
141 struct edge_info *edge_info;
142
143 edge_info = XCNEW (struct edge_info);
144
145 e->aux = edge_info;
146 return edge_info;
147 }
148
149 /* Free all EDGE_INFO structures associated with edges in the CFG.
150 If a particular edge can be threaded, copy the redirection
151 target from the EDGE_INFO structure into the edge's AUX field
152 as required by code to update the CFG and SSA graph for
153 jump threading. */
154
155 static void
156 free_all_edge_infos (void)
157 {
158 basic_block bb;
159 edge_iterator ei;
160 edge e;
161
162 FOR_EACH_BB_FN (bb, cfun)
163 {
164 FOR_EACH_EDGE (e, ei, bb->preds)
165 {
166 struct edge_info *edge_info = (struct edge_info *) e->aux;
167
168 if (edge_info)
169 {
170 edge_info->cond_equivalences.release ();
171 free (edge_info);
172 e->aux = NULL;
173 }
174 }
175 }
176 }
177
178 /* Build a cond_equivalence record indicating that the comparison
179 CODE holds between operands OP0 and OP1 and push it to *P. */
180
181 static void
182 build_and_record_new_cond (enum tree_code code,
183 tree op0, tree op1,
184 vec<cond_equivalence> *p,
185 bool val = true)
186 {
187 cond_equivalence c;
188 struct hashable_expr *cond = &c.cond;
189
190 gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);
191
192 cond->type = boolean_type_node;
193 cond->kind = EXPR_BINARY;
194 cond->ops.binary.op = code;
195 cond->ops.binary.opnd0 = op0;
196 cond->ops.binary.opnd1 = op1;
197
198 c.value = val ? boolean_true_node : boolean_false_node;
199 p->safe_push (c);
200 }
201
202 /* Record that COND is true and INVERTED is false into the edge information
203 structure. Also record that any conditions dominated by COND are true
204 as well.
205
206 For example, if a < b is true, then a <= b must also be true. */
207
208 static void
209 record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
210 {
211 tree op0, op1;
212 cond_equivalence c;
213
214 if (!COMPARISON_CLASS_P (cond))
215 return;
216
217 op0 = TREE_OPERAND (cond, 0);
218 op1 = TREE_OPERAND (cond, 1);
219
220 switch (TREE_CODE (cond))
221 {
222 case LT_EXPR:
223 case GT_EXPR:
224 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
225 {
226 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
227 &edge_info->cond_equivalences);
228 build_and_record_new_cond (LTGT_EXPR, op0, op1,
229 &edge_info->cond_equivalences);
230 }
231
232 build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
233 ? LE_EXPR : GE_EXPR),
234 op0, op1, &edge_info->cond_equivalences);
235 build_and_record_new_cond (NE_EXPR, op0, op1,
236 &edge_info->cond_equivalences);
237 build_and_record_new_cond (EQ_EXPR, op0, op1,
238 &edge_info->cond_equivalences, false);
239 break;
240
241 case GE_EXPR:
242 case LE_EXPR:
243 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
244 {
245 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
246 &edge_info->cond_equivalences);
247 }
248 break;
249
250 case EQ_EXPR:
251 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
252 {
253 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
254 &edge_info->cond_equivalences);
255 }
256 build_and_record_new_cond (LE_EXPR, op0, op1,
257 &edge_info->cond_equivalences);
258 build_and_record_new_cond (GE_EXPR, op0, op1,
259 &edge_info->cond_equivalences);
260 break;
261
262 case UNORDERED_EXPR:
263 build_and_record_new_cond (NE_EXPR, op0, op1,
264 &edge_info->cond_equivalences);
265 build_and_record_new_cond (UNLE_EXPR, op0, op1,
266 &edge_info->cond_equivalences);
267 build_and_record_new_cond (UNGE_EXPR, op0, op1,
268 &edge_info->cond_equivalences);
269 build_and_record_new_cond (UNEQ_EXPR, op0, op1,
270 &edge_info->cond_equivalences);
271 build_and_record_new_cond (UNLT_EXPR, op0, op1,
272 &edge_info->cond_equivalences);
273 build_and_record_new_cond (UNGT_EXPR, op0, op1,
274 &edge_info->cond_equivalences);
275 break;
276
277 case UNLT_EXPR:
278 case UNGT_EXPR:
279 build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
280 ? UNLE_EXPR : UNGE_EXPR),
281 op0, op1, &edge_info->cond_equivalences);
282 build_and_record_new_cond (NE_EXPR, op0, op1,
283 &edge_info->cond_equivalences);
284 break;
285
286 case UNEQ_EXPR:
287 build_and_record_new_cond (UNLE_EXPR, op0, op1,
288 &edge_info->cond_equivalences);
289 build_and_record_new_cond (UNGE_EXPR, op0, op1,
290 &edge_info->cond_equivalences);
291 break;
292
293 case LTGT_EXPR:
294 build_and_record_new_cond (NE_EXPR, op0, op1,
295 &edge_info->cond_equivalences);
296 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
297 &edge_info->cond_equivalences);
298 break;
299
300 default:
301 break;
302 }
303
304 /* Now store the original true and false conditions into the first
305 two slots. */
306 initialize_expr_from_cond (cond, &c.cond);
307 c.value = boolean_true_node;
308 edge_info->cond_equivalences.safe_push (c);
309
310 /* It is possible for INVERTED to be the negation of a comparison,
311 and not a valid RHS or GIMPLE_COND condition. This happens because
312 invert_truthvalue may return such an expression when asked to invert
313 a floating-point comparison. These comparisons are not assumed to
314 obey the trichotomy law. */
315 initialize_expr_from_cond (inverted, &c.cond);
316 c.value = boolean_false_node;
317 edge_info->cond_equivalences.safe_push (c);
318 }
319
320 /* We have finished optimizing BB, record any information implied by
321 taking a specific outgoing edge from BB. */
322
323 static void
324 record_edge_info (basic_block bb)
325 {
326 gimple_stmt_iterator gsi = gsi_last_bb (bb);
327 struct edge_info *edge_info;
328
329 if (! gsi_end_p (gsi))
330 {
331 gimple stmt = gsi_stmt (gsi);
332 location_t loc = gimple_location (stmt);
333
334 if (gimple_code (stmt) == GIMPLE_SWITCH)
335 {
336 gswitch *switch_stmt = as_a <gswitch *> (stmt);
337 tree index = gimple_switch_index (switch_stmt);
338
339 if (TREE_CODE (index) == SSA_NAME)
340 {
341 int i;
342 int n_labels = gimple_switch_num_labels (switch_stmt);
343 tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
344 edge e;
345 edge_iterator ei;
346
347 for (i = 0; i < n_labels; i++)
348 {
349 tree label = gimple_switch_label (switch_stmt, i);
350 basic_block target_bb = label_to_block (CASE_LABEL (label));
351 if (CASE_HIGH (label)
352 || !CASE_LOW (label)
353 || info[target_bb->index])
354 info[target_bb->index] = error_mark_node;
355 else
356 info[target_bb->index] = label;
357 }
358
359 FOR_EACH_EDGE (e, ei, bb->succs)
360 {
361 basic_block target_bb = e->dest;
362 tree label = info[target_bb->index];
363
364 if (label != NULL && label != error_mark_node)
365 {
366 tree x = fold_convert_loc (loc, TREE_TYPE (index),
367 CASE_LOW (label));
368 edge_info = allocate_edge_info (e);
369 edge_info->lhs = index;
370 edge_info->rhs = x;
371 }
372 }
373 free (info);
374 }
375 }
376
377 /* A COND_EXPR may create equivalences too. */
378 if (gimple_code (stmt) == GIMPLE_COND)
379 {
380 edge true_edge;
381 edge false_edge;
382
383 tree op0 = gimple_cond_lhs (stmt);
384 tree op1 = gimple_cond_rhs (stmt);
385 enum tree_code code = gimple_cond_code (stmt);
386
387 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
388
389 /* Special case comparing booleans against a constant as we
390 know the value of OP0 on both arms of the branch. i.e., we
391 can record an equivalence for OP0 rather than COND. */
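/* For instance (with an illustrative boolean SSA name x_1), given
   if (x_1 == 0) we can record x_1 = false on the true edge and
   x_1 = true on the false edge. */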
392 if ((code == EQ_EXPR || code == NE_EXPR)
393 && TREE_CODE (op0) == SSA_NAME
394 && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
395 && is_gimple_min_invariant (op1))
396 {
397 if (code == EQ_EXPR)
398 {
399 edge_info = allocate_edge_info (true_edge);
400 edge_info->lhs = op0;
401 edge_info->rhs = (integer_zerop (op1)
402 ? boolean_false_node
403 : boolean_true_node);
404
405 edge_info = allocate_edge_info (false_edge);
406 edge_info->lhs = op0;
407 edge_info->rhs = (integer_zerop (op1)
408 ? boolean_true_node
409 : boolean_false_node);
410 }
411 else
412 {
413 edge_info = allocate_edge_info (true_edge);
414 edge_info->lhs = op0;
415 edge_info->rhs = (integer_zerop (op1)
416 ? boolean_true_node
417 : boolean_false_node);
418
419 edge_info = allocate_edge_info (false_edge);
420 edge_info->lhs = op0;
421 edge_info->rhs = (integer_zerop (op1)
422 ? boolean_false_node
423 : boolean_true_node);
424 }
425 }
426 else if (is_gimple_min_invariant (op0)
427 && (TREE_CODE (op1) == SSA_NAME
428 || is_gimple_min_invariant (op1)))
429 {
430 tree cond = build2 (code, boolean_type_node, op0, op1);
431 tree inverted = invert_truthvalue_loc (loc, cond);
432 bool can_infer_simple_equiv
433 = !(HONOR_SIGNED_ZEROS (op0)
434 && real_zerop (op0));
435 struct edge_info *edge_info;
436
437 edge_info = allocate_edge_info (true_edge);
438 record_conditions (edge_info, cond, inverted);
439
440 if (can_infer_simple_equiv && code == EQ_EXPR)
441 {
442 edge_info->lhs = op1;
443 edge_info->rhs = op0;
444 }
445
446 edge_info = allocate_edge_info (false_edge);
447 record_conditions (edge_info, inverted, cond);
448
449 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
450 {
451 edge_info->lhs = op1;
452 edge_info->rhs = op0;
453 }
454 }
455
456 else if (TREE_CODE (op0) == SSA_NAME
457 && (TREE_CODE (op1) == SSA_NAME
458 || is_gimple_min_invariant (op1)))
459 {
460 tree cond = build2 (code, boolean_type_node, op0, op1);
461 tree inverted = invert_truthvalue_loc (loc, cond);
462 bool can_infer_simple_equiv
463 = !(HONOR_SIGNED_ZEROS (op1)
464 && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
465 struct edge_info *edge_info;
466
467 edge_info = allocate_edge_info (true_edge);
468 record_conditions (edge_info, cond, inverted);
469
470 if (can_infer_simple_equiv && code == EQ_EXPR)
471 {
472 edge_info->lhs = op0;
473 edge_info->rhs = op1;
474 }
475
476 edge_info = allocate_edge_info (false_edge);
477 record_conditions (edge_info, inverted, cond);
478
479 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
480 {
481 edge_info->lhs = op0;
482 edge_info->rhs = op1;
483 }
484 }
485 }
486
487 /* ??? TRUTH_NOT_EXPR can create an equivalence too. */
488 }
489 }
490
491
492 class dom_opt_dom_walker : public dom_walker
493 {
494 public:
495 dom_opt_dom_walker (cdi_direction direction)
496 : dom_walker (direction), m_dummy_cond (NULL) {}
497
498 virtual void before_dom_children (basic_block);
499 virtual void after_dom_children (basic_block);
500
501 private:
502 void thread_across_edge (edge);
503
504 gcond *m_dummy_cond;
505 };
506
507 /* Jump threading, redundancy elimination and const/copy propagation.
508
509 This pass may expose new symbols that need to be renamed into SSA. For
510 every new symbol exposed, its corresponding bit will be set in
511 VARS_TO_RENAME. */
512
513 namespace {
514
515 const pass_data pass_data_dominator =
516 {
517 GIMPLE_PASS, /* type */
518 "dom", /* name */
519 OPTGROUP_NONE, /* optinfo_flags */
520 TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
521 ( PROP_cfg | PROP_ssa ), /* properties_required */
522 0, /* properties_provided */
523 0, /* properties_destroyed */
524 0, /* todo_flags_start */
525 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
526 };
527
528 class pass_dominator : public gimple_opt_pass
529 {
530 public:
531 pass_dominator (gcc::context *ctxt)
532 : gimple_opt_pass (pass_data_dominator, ctxt)
533 {}
534
535 /* opt_pass methods: */
536 opt_pass * clone () { return new pass_dominator (m_ctxt); }
537 virtual bool gate (function *) { return flag_tree_dom != 0; }
538 virtual unsigned int execute (function *);
539
540 }; // class pass_dominator
541
542 unsigned int
543 pass_dominator::execute (function *fun)
544 {
545 memset (&opt_stats, 0, sizeof (opt_stats));
546
547 /* Create our hash tables. */
548 avail_exprs = new hash_table<expr_elt_hasher> (1024);
549 avail_exprs_stack = new class avail_exprs_stack (avail_exprs);
550 const_and_copies = new class const_and_copies ();
551 need_eh_cleanup = BITMAP_ALLOC (NULL);
552 need_noreturn_fixup.create (0);
553
554 calculate_dominance_info (CDI_DOMINATORS);
555 cfg_altered = false;
556
557 /* We need to know loop structures in order to avoid destroying them
558 in jump threading. Note that we still can e.g. thread through loop
559 headers to an exit edge, or through loop header to the loop body, assuming
560 that we update the loop info.
561
562 TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
563 to several overly conservative bail-outs in jump threading, case
564 gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
565 missing. We should improve jump threading in the future; then
566 LOOPS_HAVE_PREHEADERS won't be needed here. */
567 loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);
568
569 /* Initialize the value-handle array. */
570 threadedge_initialize_values ();
571
572 /* We need accurate information regarding back edges in the CFG
573 for jump threading; this may include back edges that are not part of
574 a single loop. */
575 mark_dfs_back_edges ();
576
577 /* Recursively walk the dominator tree optimizing statements. */
578 dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);
579
580 {
581 gimple_stmt_iterator gsi;
582 basic_block bb;
583 FOR_EACH_BB_FN (bb, fun)
584 {
585 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
586 update_stmt_if_modified (gsi_stmt (gsi));
587 }
588 }
589
590 /* If we exposed any new variables, go ahead and put them into
591 SSA form now, before we handle jump threading. This simplifies
592 interactions between rewriting of _DECL nodes into SSA form
593 and rewriting SSA_NAME nodes into SSA form after block
594 duplication and CFG manipulation. */
595 update_ssa (TODO_update_ssa);
596
597 free_all_edge_infos ();
598
599 /* Thread jumps, creating duplicate blocks as needed. */
600 cfg_altered |= thread_through_all_blocks (first_pass_instance);
601
602 if (cfg_altered)
603 free_dominance_info (CDI_DOMINATORS);
604
605 /* Removal of statements may make some EH edges dead. Purge
606 such edges from the CFG as needed. */
607 if (!bitmap_empty_p (need_eh_cleanup))
608 {
609 unsigned i;
610 bitmap_iterator bi;
611
612 /* Jump threading may have created forwarder blocks from blocks
613 needing EH cleanup; the new successor of these blocks, which
614 has inherited from the original block, needs the cleanup.
615 Don't clear bits in the bitmap, as that can break the bitmap
616 iterator. */
617 EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
618 {
619 basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
620 if (bb == NULL)
621 continue;
622 while (single_succ_p (bb)
623 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
624 bb = single_succ (bb);
625 if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
626 continue;
627 if ((unsigned) bb->index != i)
628 bitmap_set_bit (need_eh_cleanup, bb->index);
629 }
630
631 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
632 bitmap_clear (need_eh_cleanup);
633 }
634
635 /* Fixup stmts that became noreturn calls. This may require splitting
636 blocks and thus isn't possible during the dominator walk or before
637 jump threading finished. Do this in reverse order so we don't
638 inadvertently remove a stmt we want to fix up by visiting a dominating
639 now noreturn call first. */
640 while (!need_noreturn_fixup.is_empty ())
641 {
642 gimple stmt = need_noreturn_fixup.pop ();
643 if (dump_file && dump_flags & TDF_DETAILS)
644 {
645 fprintf (dump_file, "Fixing up noreturn call ");
646 print_gimple_stmt (dump_file, stmt, 0, 0);
647 fprintf (dump_file, "\n");
648 }
649 fixup_noreturn_call (stmt);
650 }
651
652 statistics_counter_event (fun, "Redundant expressions eliminated",
653 opt_stats.num_re);
654 statistics_counter_event (fun, "Constants propagated",
655 opt_stats.num_const_prop);
656 statistics_counter_event (fun, "Copies propagated",
657 opt_stats.num_copy_prop);
658
659 /* Debugging dumps. */
660 if (dump_file && (dump_flags & TDF_STATS))
661 dump_dominator_optimization_stats (dump_file);
662
663 loop_optimizer_finalize ();
664
665 /* Delete our main hashtable. */
666 delete avail_exprs;
667 avail_exprs = NULL;
668
669 /* Free asserted bitmaps and stacks. */
670 BITMAP_FREE (need_eh_cleanup);
671 need_noreturn_fixup.release ();
672 delete avail_exprs_stack;
673 delete const_and_copies;
674
675 /* Free the value-handle array. */
676 threadedge_finalize_values ();
677
678 return 0;
679 }
680
681 } // anon namespace
682
683 gimple_opt_pass *
684 make_pass_dominator (gcc::context *ctxt)
685 {
686 return new pass_dominator (ctxt);
687 }
688
689
690 /* Given a conditional statement CONDSTMT, convert the
691 condition to a canonical form. */
692
693 static void
694 canonicalize_comparison (gcond *condstmt)
695 {
696 tree op0;
697 tree op1;
698 enum tree_code code;
699
700 gcc_assert (gimple_code (condstmt) == GIMPLE_COND);
701
702 op0 = gimple_cond_lhs (condstmt);
703 op1 = gimple_cond_rhs (condstmt);
704
705 code = gimple_cond_code (condstmt);
706
707 /* If it would be profitable to swap the operands, then do so to
708 canonicalize the statement, enabling better optimization.
709
710 By placing canonicalization of such expressions here we
711 transparently keep statements in canonical form, even
712 when the statement is modified. */
713 if (tree_swap_operands_p (op0, op1, false))
714 {
715 /* For relationals we need to swap the operands
716 and change the code. */
717 if (code == LT_EXPR
718 || code == GT_EXPR
719 || code == LE_EXPR
720 || code == GE_EXPR)
721 {
722 code = swap_tree_comparison (code);
723
724 gimple_cond_set_code (condstmt, code);
725 gimple_cond_set_lhs (condstmt, op1);
726 gimple_cond_set_rhs (condstmt, op0);
727
728 update_stmt (condstmt);
729 }
730 }
731 }
732
733 /* A trivial wrapper so that we can present the generic jump
734 threading code with a simple API for simplifying statements. */
735 static tree
736 simplify_stmt_for_jump_threading (gimple stmt,
737 gimple within_stmt ATTRIBUTE_UNUSED)
738 {
739 return lookup_avail_expr (stmt, false);
740 }
741
742 /* Valueize hook for gimple_fold_stmt_to_constant_1. */
743
744 static tree
745 dom_valueize (tree t)
746 {
747 if (TREE_CODE (t) == SSA_NAME)
748 {
749 tree tem = SSA_NAME_VALUE (t);
750 if (tem)
751 return tem;
752 }
753 return t;
754 }
755
756 /* Record into the equivalence tables any equivalences implied by
757 traversing edge E (which are cached in E->aux).
758
759 Callers are responsible for managing the unwinding markers. */
760 static void
761 record_temporary_equivalences (edge e)
762 {
763 int i;
764 struct edge_info *edge_info = (struct edge_info *) e->aux;
765
766 /* If we have info associated with this edge, record it into
767 our equivalence tables. */
768 if (edge_info)
769 {
770 cond_equivalence *eq;
771 tree lhs = edge_info->lhs;
772 tree rhs = edge_info->rhs;
773
774 /* If we have a simple NAME = VALUE equivalence, record it. */
775 if (lhs)
776 record_equality (lhs, rhs);
777
778 /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
779 set via a widening type conversion, then we may be able to record
780 additional equivalences. */
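/* For instance (illustrative names), if x_1 = (int) y_2 widens y_2
   and traversing E tells us x_1 == 5, then as long as 5 also fits in
   the type of y_2 we can record y_2 == 5 as well. */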
781 if (lhs
782 && TREE_CODE (lhs) == SSA_NAME
783 && TREE_CODE (rhs) == INTEGER_CST)
784 {
785 gimple defstmt = SSA_NAME_DEF_STMT (lhs);
786
787 if (defstmt
788 && is_gimple_assign (defstmt)
789 && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
790 {
791 tree old_rhs = gimple_assign_rhs1 (defstmt);
792
793 /* If the conversion widens the original value and
794 the constant is in the range of the type of OLD_RHS,
795 then convert the constant and record the equivalence.
796
797 Note that int_fits_type_p does not check the precision
798 if the upper and lower bounds are OK. */
799 if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
800 && (TYPE_PRECISION (TREE_TYPE (lhs))
801 > TYPE_PRECISION (TREE_TYPE (old_rhs)))
802 && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
803 {
804 tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
805 record_equality (old_rhs, newval);
806 }
807 }
808 }
809
810 /* If LHS is an SSA_NAME with a new equivalence, then see whether
811 stmts that use that LHS and dominate the edge destination
812 simplify, allowing further equivalences to be recorded. */
813 if (lhs && TREE_CODE (lhs) == SSA_NAME)
814 {
815 use_operand_p use_p;
816 imm_use_iterator iter;
817 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
818 {
819 gimple use_stmt = USE_STMT (use_p);
820
821 /* Only bother to record more equivalences for lhs that
822 can be directly used by e->dest.
823 ??? If the code gets re-organized to a worklist to
824 catch more indirect opportunities and it is made to
825 handle PHIs then this should only consider use_stmts
826 in basic-blocks we have already visited. */
827 if (e->dest == gimple_bb (use_stmt)
828 || !dominated_by_p (CDI_DOMINATORS,
829 e->dest, gimple_bb (use_stmt)))
830 continue;
831 tree lhs2 = gimple_get_lhs (use_stmt);
832 if (lhs2 && TREE_CODE (lhs2) == SSA_NAME)
833 {
834 tree res
835 = gimple_fold_stmt_to_constant_1 (use_stmt, dom_valueize,
836 no_follow_ssa_edges);
837 if (res
838 && (TREE_CODE (res) == SSA_NAME
839 || is_gimple_min_invariant (res)))
840 record_equality (lhs2, res);
841 }
842 }
843 }
844
845 /* If we have 0 = COND or 1 = COND equivalences, record them
846 into our expression hash tables. */
847 for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
848 record_cond (eq);
849 }
850 }
851
852 /* Wrapper for common code to attempt to thread an edge. For example,
853 it handles lazily building the dummy condition and the bookkeeping
854 when jump threading is successful. */
855
856 void
857 dom_opt_dom_walker::thread_across_edge (edge e)
858 {
859 if (! m_dummy_cond)
860 m_dummy_cond =
861 gimple_build_cond (NE_EXPR,
862 integer_zero_node, integer_zero_node,
863 NULL, NULL);
864
865 /* Push a marker on both stacks so we can unwind the tables back to their
866 current state. */
867 avail_exprs_stack->push_marker ();
868 const_and_copies->push_marker ();
869
870 /* Traversing E may result in equivalences we can utilize. */
871 record_temporary_equivalences (e);
872
873 /* With all the edge equivalences in the tables, go ahead and attempt
874 to thread through E->dest. */
875 ::thread_across_edge (m_dummy_cond, e, false,
876 const_and_copies,
877 simplify_stmt_for_jump_threading);
878
879 /* And restore the various tables to their state before
880 we threaded this edge.
881
882 XXX The code in tree-ssa-threadedge.c will restore the state of
883 the const_and_copies table. We just have to restore the expression
884 table. */
885 avail_exprs_stack->pop_to_marker ();
886 }
887
888 /* PHI nodes can create equivalences too.
889
890 Ignoring any alternatives which are the same as the result, if
891 all the alternatives are equal, then the PHI node creates an
892 equivalence. */
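/* For instance, given x_3 = PHI <y_7, y_7> (illustrative SSA names),
   every alternative is y_7, so x_3 can be treated as a copy of y_7. */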
893
894 static void
895 record_equivalences_from_phis (basic_block bb)
896 {
897 gphi_iterator gsi;
898
899 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
900 {
901 gphi *phi = gsi.phi ();
902
903 tree lhs = gimple_phi_result (phi);
904 tree rhs = NULL;
905 size_t i;
906
907 for (i = 0; i < gimple_phi_num_args (phi); i++)
908 {
909 tree t = gimple_phi_arg_def (phi, i);
910
911 /* Ignore alternatives which are the same as our LHS. Since
912 LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
913 can simply compare pointers. */
914 if (lhs == t)
915 continue;
916
917 t = dom_valueize (t);
918
919 /* If we have not processed an alternative yet, then set
920 RHS to this alternative. */
921 if (rhs == NULL)
922 rhs = t;
923 /* If we have processed an alternative (stored in RHS), then
924 see if it is equal to this one. If it isn't, then stop
925 the search. */
926 else if (! operand_equal_for_phi_arg_p (rhs, t))
927 break;
928 }
929
930 /* If we had no interesting alternatives, then all the RHS alternatives
931 must have been the same as LHS. */
932 if (!rhs)
933 rhs = lhs;
934
935 /* If we managed to iterate through each PHI alternative without
936 breaking out of the loop, then we have a PHI which may create
937 a useful equivalence. We do not need to record unwind data for
938 this, since this is a true assignment and not an equivalence
939 inferred from a comparison. All uses of this ssa name are dominated
940 by this assignment, so unwinding just costs time and space. */
941 if (i == gimple_phi_num_args (phi)
942 && may_propagate_copy (lhs, rhs))
943 set_ssa_name_value (lhs, rhs);
944 }
945 }
946
947 /* Ignoring loop backedges, if BB has precisely one incoming edge then
948 return that edge. Otherwise return NULL. */
949 static edge
950 single_incoming_edge_ignoring_loop_edges (basic_block bb)
951 {
952 edge retval = NULL;
953 edge e;
954 edge_iterator ei;
955
956 FOR_EACH_EDGE (e, ei, bb->preds)
957 {
958 /* A loop back edge can be identified by the destination of
959 the edge dominating the source of the edge. */
960 if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
961 continue;
962
963 /* If we have already seen a non-loop edge, then we must have
964 multiple incoming non-loop edges and thus we return NULL. */
965 if (retval)
966 return NULL;
967
968 /* This is the first non-loop incoming edge we have found. Record
969 it. */
970 retval = e;
971 }
972
973 return retval;
974 }
975
976 /* Record any equivalences created by the incoming edge to BB. If BB
977 has more than one incoming edge, then no equivalence is created. */
978
979 static void
980 record_equivalences_from_incoming_edge (basic_block bb)
981 {
982 edge e;
983 basic_block parent;
984
985 /* If our parent block ended with a control statement, then we may be
986 able to record some equivalences based on which outgoing edge from
987 the parent was followed. */
988 parent = get_immediate_dominator (CDI_DOMINATORS, bb);
989
990 e = single_incoming_edge_ignoring_loop_edges (bb);
991
992 /* If we had a single incoming edge from our parent block, then enter
993 any data associated with the edge into our tables. */
994 if (e && e->src == parent)
995 record_temporary_equivalences (e);
996 }
997
998 /* Dump SSA statistics on FILE. */
999
1000 void
1001 dump_dominator_optimization_stats (FILE *file)
1002 {
1003 fprintf (file, "Total number of statements: %6ld\n\n",
1004 opt_stats.num_stmts);
1005 fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
1006 opt_stats.num_exprs_considered);
1007
1008 fprintf (file, "\nHash table statistics:\n");
1009
1010 fprintf (file, " avail_exprs: ");
1011 htab_statistics (file, *avail_exprs);
1012 }
1013
1014
1015 /* Dump SSA statistics on stderr. */
1016
1017 DEBUG_FUNCTION void
1018 debug_dominator_optimization_stats (void)
1019 {
1020 dump_dominator_optimization_stats (stderr);
1021 }
1022
1023
1024 /* Dump statistics for the hash table HTAB. */
1025
1026 static void
1027 htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
1028 {
1029 fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
1030 (long) htab.size (),
1031 (long) htab.elements (),
1032 htab.collisions ());
1033 }
1034
1035
1036 /* Enter condition equivalence into the expression hash table.
1037 This indicates that a conditional expression has a known
1038 boolean value. */
1039
1040 static void
1041 record_cond (cond_equivalence *p)
1042 {
1043 class expr_hash_elt *element = new expr_hash_elt (&p->cond, p->value);
1044 expr_hash_elt **slot;
1045
1046 slot = avail_exprs->find_slot_with_hash (element, element->hash (), INSERT);
1047 if (*slot == NULL)
1048 {
1049 *slot = element;
1050
1051 avail_exprs_stack->record_expr (element, NULL, '1');
1052 }
1053 else
1054 delete element;
1055 }
1056
1057 /* Return the loop depth of the basic block of the defining statement of X.
1058 This number should not be treated as absolutely correct because the loop
1059 information may not be completely up-to-date when dom runs. However, it
1060 will be relatively correct, and as more passes are taught to keep loop info
1061 up to date, the result will become more and more accurate. */
1062
1063 static int
1064 loop_depth_of_name (tree x)
1065 {
1066 gimple defstmt;
1067 basic_block defbb;
1068
1069 /* If it's not an SSA_NAME, we have no clue where the definition is. */
1070 if (TREE_CODE (x) != SSA_NAME)
1071 return 0;
1072
1073 /* Otherwise return the loop depth of the defining statement's bb.
1074 Note that there may not actually be a bb for this statement, if the
1075 ssa_name is live on entry. */
1076 defstmt = SSA_NAME_DEF_STMT (x);
1077 defbb = gimple_bb (defstmt);
1078 if (!defbb)
1079 return 0;
1080
1081 return bb_loop_depth (defbb);
1082 }
1083
1084 /* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
1085 This constrains the cases in which we may treat this as an assignment. */
1086
1087 static void
1088 record_equality (tree x, tree y)
1089 {
1090 tree prev_x = NULL, prev_y = NULL;
1091
1092 if (tree_swap_operands_p (x, y, false))
1093 std::swap (x, y);
1094
1095 /* Most of the time tree_swap_operands_p does what we want. But there
1096 are cases where we know one operand is better for copy propagation than
1097 the other. Given no other code cares about ordering of equality
1098 comparison operators for that purpose, we just handle the special cases
1099 here. */
1100 if (TREE_CODE (x) == SSA_NAME && TREE_CODE (y) == SSA_NAME)
1101 {
1102 /* If one operand is a single use operand, then make it
1103 X. This will preserve its single use property, and if this
1104 conditional is eliminated, the computation of X can be
1105 eliminated as well. */
1106 if (has_single_use (y) && ! has_single_use (x))
1107 std::swap (x, y);
1108 }
1109 if (TREE_CODE (x) == SSA_NAME)
1110 prev_x = SSA_NAME_VALUE (x);
1111 if (TREE_CODE (y) == SSA_NAME)
1112 prev_y = SSA_NAME_VALUE (y);
1113
1114 /* If one of the previous values is invariant, or invariant in more loops
1115 (by depth), then use that.
1116 Otherwise it doesn't matter which value we choose, just so
1117 long as we canonicalize on one value. */
1118 if (is_gimple_min_invariant (y))
1119 ;
1120 else if (is_gimple_min_invariant (x)
1121 /* ??? When threading over backedges the following is important
1122 for correctness. See PR61757. */
1123 || (loop_depth_of_name (x) < loop_depth_of_name (y)))
1124 prev_x = x, x = y, y = prev_x, prev_x = prev_y;
1125 else if (prev_x && is_gimple_min_invariant (prev_x))
1126 x = y, y = prev_x, prev_x = prev_y;
1127 else if (prev_y)
1128 y = prev_y;
1129
1130 /* After the swapping, we must have one SSA_NAME. */
1131 if (TREE_CODE (x) != SSA_NAME)
1132 return;
1133
1134 /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
1135 variable compared against zero. If we're honoring signed zeros,
1136 then we cannot record this value unless we know that the value is
1137 nonzero. */
1138 if (HONOR_SIGNED_ZEROS (x)
1139 && (TREE_CODE (y) != REAL_CST
1140 || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
1141 return;
1142
1143 const_and_copies->record_const_or_copy (x, y, prev_x);
1144 }
1145
1146 /* Returns true when STMT is a simple iv increment. It detects the
1147 following situation:
1148
1149 i_1 = phi (..., i_2)
1150 i_2 = i_1 +/- ... */
1151
1152 bool
1153 simple_iv_increment_p (gimple stmt)
1154 {
1155 enum tree_code code;
1156 tree lhs, preinc;
1157 gimple phi;
1158 size_t i;
1159
1160 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1161 return false;
1162
1163 lhs = gimple_assign_lhs (stmt);
1164 if (TREE_CODE (lhs) != SSA_NAME)
1165 return false;
1166
1167 code = gimple_assign_rhs_code (stmt);
1168 if (code != PLUS_EXPR
1169 && code != MINUS_EXPR
1170 && code != POINTER_PLUS_EXPR)
1171 return false;
1172
1173 preinc = gimple_assign_rhs1 (stmt);
1174 if (TREE_CODE (preinc) != SSA_NAME)
1175 return false;
1176
1177 phi = SSA_NAME_DEF_STMT (preinc);
1178 if (gimple_code (phi) != GIMPLE_PHI)
1179 return false;
1180
1181 for (i = 0; i < gimple_phi_num_args (phi); i++)
1182 if (gimple_phi_arg_def (phi, i) == lhs)
1183 return true;
1184
1185 return false;
1186 }
1187
1188 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
1189 known value for that SSA_NAME (or NULL if no value is known).
1190
1191 Propagate values from CONST_AND_COPIES into the PHI nodes of the
1192 successors of BB. */
1193
1194 static void
1195 cprop_into_successor_phis (basic_block bb)
1196 {
1197 edge e;
1198 edge_iterator ei;
1199
1200 FOR_EACH_EDGE (e, ei, bb->succs)
1201 {
1202 int indx;
1203 gphi_iterator gsi;
1204
1205 /* If this is an abnormal edge, then we do not want to copy propagate
1206 into the PHI alternative associated with this edge. */
1207 if (e->flags & EDGE_ABNORMAL)
1208 continue;
1209
1210 gsi = gsi_start_phis (e->dest);
1211 if (gsi_end_p (gsi))
1212 continue;
1213
1214 /* We may have an equivalence associated with this edge. While
1215 we cannot propagate it into non-dominated blocks, we can
1216 propagate it into PHIs in non-dominated blocks. */
1217
1218 /* Push the unwind marker so we can reset the const and copies
1219 table back to its original state after processing this edge. */
1220 const_and_copies->push_marker ();
1221
1222 /* Extract and record any simple NAME = VALUE equivalences.
1223
1224 Don't bother with [01] = COND equivalences, they're not useful
1225 here. */
1226 struct edge_info *edge_info = (struct edge_info *) e->aux;
1227 if (edge_info)
1228 {
1229 tree lhs = edge_info->lhs;
1230 tree rhs = edge_info->rhs;
1231
1232 if (lhs && TREE_CODE (lhs) == SSA_NAME)
1233 const_and_copies->record_const_or_copy (lhs, rhs);
1234 }
1235
1236 indx = e->dest_idx;
1237 for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
1238 {
1239 tree new_val;
1240 use_operand_p orig_p;
1241 tree orig_val;
1242 gphi *phi = gsi.phi ();
1243
1244 /* The alternative may be associated with a constant, so verify
1245 it is an SSA_NAME before doing anything with it. */
1246 orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
1247 orig_val = get_use_from_ptr (orig_p);
1248 if (TREE_CODE (orig_val) != SSA_NAME)
1249 continue;
1250
1251 /* If we have *ORIG_P in our constant/copy table, then replace
1252 ORIG_P with its value in our constant/copy table. */
1253 new_val = SSA_NAME_VALUE (orig_val);
1254 if (new_val
1255 && new_val != orig_val
1256 && (TREE_CODE (new_val) == SSA_NAME
1257 || is_gimple_min_invariant (new_val))
1258 && may_propagate_copy (orig_val, new_val))
1259 propagate_value (orig_p, new_val);
1260 }
1261
1262 const_and_copies->pop_to_marker ();
1263 }
1264 }
1265
1266 void
1267 dom_opt_dom_walker::before_dom_children (basic_block bb)
1268 {
1269 gimple_stmt_iterator gsi;
1270
1271 if (dump_file && (dump_flags & TDF_DETAILS))
1272 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1273
1274 /* Push a marker on the stacks of local information so that we know how
1275 far to unwind when we finalize this block. */
1276 avail_exprs_stack->push_marker ();
1277 const_and_copies->push_marker ();
1278
1279 record_equivalences_from_incoming_edge (bb);
1280
1281 /* PHI nodes can create equivalences too. */
1282 record_equivalences_from_phis (bb);
1283
1284 /* Create equivalences from redundant PHIs. PHIs are only truly
1285 redundant when they exist in the same block, so push another
1286 marker and unwind right afterwards. */
1287 avail_exprs_stack->push_marker ();
1288 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1289 eliminate_redundant_computations (&gsi);
1290 avail_exprs_stack->pop_to_marker ();
1291
1292 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1293 optimize_stmt (bb, gsi);
1294
1295 /* Now prepare to process dominated blocks. */
1296 record_edge_info (bb);
1297 cprop_into_successor_phis (bb);
1298 }
1299
1300 /* We have finished processing the dominator children of BB, perform
1301 any finalization actions in preparation for leaving this node in
1302 the dominator tree. */
1303
1304 void
1305 dom_opt_dom_walker::after_dom_children (basic_block bb)
1306 {
1307 gimple last;
1308
1309 /* If we have an outgoing edge to a block with multiple incoming and
1310 outgoing edges, then we may be able to thread the edge, i.e., we
1311 may be able to statically determine which of the outgoing edges
1312 will be traversed when the incoming edge from BB is traversed. */
1313 if (single_succ_p (bb)
1314 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
1315 && potentially_threadable_block (single_succ (bb)))
1316 {
1317 thread_across_edge (single_succ_edge (bb));
1318 }
1319 else if ((last = last_stmt (bb))
1320 && gimple_code (last) == GIMPLE_COND
1321 && EDGE_COUNT (bb->succs) == 2
1322 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
1323 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
1324 {
1325 edge true_edge, false_edge;
1326
1327 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1328
1329 /* Only try to thread the edge if it reaches a target block with
1330 more than one predecessor and more than one successor. */
1331 if (potentially_threadable_block (true_edge->dest))
1332 thread_across_edge (true_edge);
1333
1334 /* Similarly for the ELSE arm. */
1335 if (potentially_threadable_block (false_edge->dest))
1336 thread_across_edge (false_edge);
1337
1338 }
1339
1340 /* These remove expressions local to BB from the tables. */
1341 avail_exprs_stack->pop_to_marker ();
1342 const_and_copies->pop_to_marker ();
1343 }
1344
1345 /* Search for redundant computations in STMT. If any are found, then
1346 replace them with the variable holding the result of the computation.
1347
1348 If safe, record this expression into the available expression hash
1349 table. */
1350
1351 static void
1352 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
1353 {
1354 tree expr_type;
1355 tree cached_lhs;
1356 tree def;
1357 bool insert = true;
1358 bool assigns_var_p = false;
1359
1360 gimple stmt = gsi_stmt (*gsi);
1361
1362 if (gimple_code (stmt) == GIMPLE_PHI)
1363 def = gimple_phi_result (stmt);
1364 else
1365 def = gimple_get_lhs (stmt);
1366
1367 /* Certain expressions on the RHS can be optimized away, but can not
1368 themselves be entered into the hash tables. */
1369 if (! def
1370 || TREE_CODE (def) != SSA_NAME
1371 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
1372 || gimple_vdef (stmt)
1373 /* Do not record equivalences for increments of ivs. This would create
1374 overlapping live ranges for a very questionable gain. */
1375 || simple_iv_increment_p (stmt))
1376 insert = false;
1377
1378 /* Check if the expression has been computed before. */
1379 cached_lhs = lookup_avail_expr (stmt, insert);
1380
1381 opt_stats.num_exprs_considered++;
1382
1383 /* Get the type of the expression we are trying to optimize. */
1384 if (is_gimple_assign (stmt))
1385 {
1386 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
1387 assigns_var_p = true;
1388 }
1389 else if (gimple_code (stmt) == GIMPLE_COND)
1390 expr_type = boolean_type_node;
1391 else if (is_gimple_call (stmt))
1392 {
1393 gcc_assert (gimple_call_lhs (stmt));
1394 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
1395 assigns_var_p = true;
1396 }
1397 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
1398 expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
1399 else if (gimple_code (stmt) == GIMPLE_PHI)
1400 /* We can't propagate into a phi, so the logic below doesn't apply.
1401 Instead record an equivalence between the cached LHS and the
1402 PHI result of this statement, provided they are in the same block.
1403 This should be sufficient to kill the redundant phi. */
1404 {
1405 if (def && cached_lhs)
1406 const_and_copies->record_const_or_copy (def, cached_lhs);
1407 return;
1408 }
1409 else
1410 gcc_unreachable ();
1411
1412 if (!cached_lhs)
1413 return;
1414
1415 /* It is safe to ignore types here since we have already done
1416 type checking in the hashing and equality routines. In fact
1417 type checking here merely gets in the way of constant
1418 propagation. Also, make sure that it is safe to propagate
1419 CACHED_LHS into the expression in STMT. */
1420 if ((TREE_CODE (cached_lhs) != SSA_NAME
1421 && (assigns_var_p
1422 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
1423 || may_propagate_copy_into_stmt (stmt, cached_lhs))
1424 {
1425 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
1426 || is_gimple_min_invariant (cached_lhs));
1427
1428 if (dump_file && (dump_flags & TDF_DETAILS))
1429 {
1430 fprintf (dump_file, " Replaced redundant expr '");
1431 print_gimple_expr (dump_file, stmt, 0, dump_flags);
1432 fprintf (dump_file, "' with '");
1433 print_generic_expr (dump_file, cached_lhs, dump_flags);
1434 fprintf (dump_file, "'\n");
1435 }
1436
1437 opt_stats.num_re++;
1438
1439 if (assigns_var_p
1440 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
1441 cached_lhs = fold_convert (expr_type, cached_lhs);
1442
1443 propagate_tree_value_into_stmt (gsi, cached_lhs);
1444
1445 /* Since it is always necessary to mark the result as modified,
1446 perhaps we should move this into propagate_tree_value_into_stmt
1447 itself. */
1448 gimple_set_modified (gsi_stmt (*gsi), true);
1449 }
1450 }
1451
1452 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
1453 the available expressions table or the const_and_copies table.
1454 Detect and record those equivalences. */
1455 /* We handle only very simple copy equivalences here. The heavy
1456 lifting is done by eliminate_redundant_computations. */
1457
1458 static void
1459 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
1460 {
1461 tree lhs;
1462 enum tree_code lhs_code;
1463
1464 gcc_assert (is_gimple_assign (stmt));
1465
1466 lhs = gimple_assign_lhs (stmt);
1467 lhs_code = TREE_CODE (lhs);
1468
1469 if (lhs_code == SSA_NAME
1470 && gimple_assign_single_p (stmt))
1471 {
1472 tree rhs = gimple_assign_rhs1 (stmt);
1473
1474 /* If the RHS of the assignment is a constant or another variable that
1475 may be propagated, register it in the CONST_AND_COPIES table. We
1476 do not need to record unwind data for this, since this is a true
1477 assignment and not an equivalence inferred from a comparison. All
1478 uses of this ssa name are dominated by this assignment, so unwinding
1479 just costs time and space. */
1480 if (may_optimize_p
1481 && (TREE_CODE (rhs) == SSA_NAME
1482 || is_gimple_min_invariant (rhs)))
1483 {
1484 rhs = dom_valueize (rhs);
1485
1486 if (dump_file && (dump_flags & TDF_DETAILS))
1487 {
1488 fprintf (dump_file, "==== ASGN ");
1489 print_generic_expr (dump_file, lhs, 0);
1490 fprintf (dump_file, " = ");
1491 print_generic_expr (dump_file, rhs, 0);
1492 fprintf (dump_file, "\n");
1493 }
1494
1495 set_ssa_name_value (lhs, rhs);
1496 }
1497 }
1498
1499 /* Make sure we can propagate &x + CST. */
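/* For instance (illustrative names), for ptr_1 = &x + 4 we record the
   equivalent folded address, roughly &MEM[&x + 4], as the value of
   ptr_1 so the whole address can be propagated as an invariant. */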
1500 if (lhs_code == SSA_NAME
1501 && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
1502 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
1503 && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
1504 {
1505 tree op0 = gimple_assign_rhs1 (stmt);
1506 tree op1 = gimple_assign_rhs2 (stmt);
1507 tree new_rhs
1508 = build_fold_addr_expr (fold_build2 (MEM_REF,
1509 TREE_TYPE (TREE_TYPE (op0)),
1510 unshare_expr (op0),
1511 fold_convert (ptr_type_node,
1512 op1)));
1513 if (dump_file && (dump_flags & TDF_DETAILS))
1514 {
1515 fprintf (dump_file, "==== ASGN ");
1516 print_generic_expr (dump_file, lhs, 0);
1517 fprintf (dump_file, " = ");
1518 print_generic_expr (dump_file, new_rhs, 0);
1519 fprintf (dump_file, "\n");
1520 }
1521
1522 set_ssa_name_value (lhs, new_rhs);
1523 }
1524
1525 /* A memory store, even an aliased store, creates a useful
1526 equivalence. By exchanging the LHS and RHS, creating suitable
1527 vops and recording the result in the available expression table,
1528 we may be able to expose more redundant loads. */
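/* For instance (illustrative names), after the store *p_1 = x_2 we
   enter the artificial statement x_2 = *p_1 (using the store's VDEF
   as its VUSE) into the table, so a later load y_3 = *p_1 can be
   replaced by y_3 = x_2. */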
1529 if (!gimple_has_volatile_ops (stmt)
1530 && gimple_references_memory_p (stmt)
1531 && gimple_assign_single_p (stmt)
1532 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
1533 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
1534 && !is_gimple_reg (lhs))
1535 {
1536 tree rhs = gimple_assign_rhs1 (stmt);
1537 gassign *new_stmt;
1538
1539 /* Build a new statement with the RHS and LHS exchanged. */
1540 if (TREE_CODE (rhs) == SSA_NAME)
1541 {
1542 /* NOTE tuples. The call to gimple_build_assign below replaced
1543 a call to build_gimple_modify_stmt, which did not set the
1544 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
1545 may cause an SSA validation failure, as the LHS may be a
1546 default-initialized name and should have no definition. I'm
1547 a bit dubious of this, as the artificial statement that we
1548 generate here may in fact be ill-formed, but it is simply
1549 used as an internal device in this pass, and never becomes
1550 part of the CFG. */
1551 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
1552 new_stmt = gimple_build_assign (rhs, lhs);
1553 SSA_NAME_DEF_STMT (rhs) = defstmt;
1554 }
1555 else
1556 new_stmt = gimple_build_assign (rhs, lhs);
1557
1558 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
1559
1560 /* Finally enter the statement into the available expression
1561 table. */
1562 lookup_avail_expr (new_stmt, true);
1563 }
1564 }
1565
1566 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
1567 CONST_AND_COPIES. */
1568
1569 static void
1570 cprop_operand (gimple stmt, use_operand_p op_p)
1571 {
1572 tree val;
1573 tree op = USE_FROM_PTR (op_p);
1574
1575 /* If the operand has a known constant value or it is known to be a
1576 copy of some other variable, use the value or copy stored in
1577 CONST_AND_COPIES. */
1578 val = SSA_NAME_VALUE (op);
1579 if (val && val != op)
1580 {
1581 /* Do not replace hard register operands in asm statements. */
1582 if (gimple_code (stmt) == GIMPLE_ASM
1583 && !may_propagate_copy_into_asm (op))
1584 return;
1585
1586 /* Certain operands are not allowed to be copy propagated due
1587 to their interaction with exception handling and some GCC
1588 extensions. */
1589 if (!may_propagate_copy (op, val))
1590 return;
1591
1592 /* Do not propagate copies into BIVs.
1593 See PR23821 and PR62217 for how this can disturb IV and
1594 number of iteration analysis. */
1595 if (TREE_CODE (val) != INTEGER_CST)
1596 {
1597 gimple def = SSA_NAME_DEF_STMT (op);
1598 if (gimple_code (def) == GIMPLE_PHI
1599 && gimple_bb (def)->loop_father->header == gimple_bb (def))
1600 return;
1601 }
1602
1603 /* Dump details. */
1604 if (dump_file && (dump_flags & TDF_DETAILS))
1605 {
1606 fprintf (dump_file, " Replaced '");
1607 print_generic_expr (dump_file, op, dump_flags);
1608 fprintf (dump_file, "' with %s '",
1609 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
1610 print_generic_expr (dump_file, val, dump_flags);
1611 fprintf (dump_file, "'\n");
1612 }
1613
1614 if (TREE_CODE (val) != SSA_NAME)
1615 opt_stats.num_const_prop++;
1616 else
1617 opt_stats.num_copy_prop++;
1618
1619 propagate_value (op_p, val);
1620
1621 /* And note that we modified this statement. This is now
1622 safe, even if we changed virtual operands since we will
1623 rescan the statement and rewrite its operands again. */
1624 gimple_set_modified (stmt, true);
1625 }
1626 }
1627
1628 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
1629 known value for that SSA_NAME (or NULL if no value is known).
1630
1631 Propagate values from CONST_AND_COPIES into the uses, vuses and
1632 vdef_ops of STMT. */
1633
1634 static void
1635 cprop_into_stmt (gimple stmt)
1636 {
1637 use_operand_p op_p;
1638 ssa_op_iter iter;
1639
1640 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
1641 cprop_operand (stmt, op_p);
1642 }
1643
1644 /* Optimize the statement pointed to by iterator SI.
1645
1646 We try to perform some simplistic global redundancy elimination and
1647 constant propagation:
1648
1649 1- To detect global redundancy, we keep track of expressions that have
1650 been computed in this block and its dominators. If we find that the
1651 same expression is computed more than once, we eliminate repeated
1652 computations by using the target of the first one.
1653
1654 2- Constant values and copy assignments. This is used to do very
1655 simplistic constant and copy propagation. When a constant or copy
1656 assignment is found, we map the value on the RHS of the assignment to
1657 the variable in the LHS in the CONST_AND_COPIES table. */
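/* For instance (illustrative SSA names), given
     a_1 = b_2 + c_3;
     ...
     d_4 = b_2 + c_3;
   the second computation is redundant and is replaced by d_4 = a_1;
   and after e_5 = a_1, later uses of e_5 may simply use a_1. */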
1658
1659 static void
1660 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
1661 {
1662 gimple stmt, old_stmt;
1663 bool may_optimize_p;
1664 bool modified_p = false;
1665 bool was_noreturn;
1666
1667 old_stmt = stmt = gsi_stmt (si);
1668 was_noreturn = is_gimple_call (stmt) && gimple_call_noreturn_p (stmt);
1669
1670 if (dump_file && (dump_flags & TDF_DETAILS))
1671 {
1672 fprintf (dump_file, "Optimizing statement ");
1673 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1674 }
1675
1676 if (gimple_code (stmt) == GIMPLE_COND)
1677 canonicalize_comparison (as_a <gcond *> (stmt));
1678
1679 update_stmt_if_modified (stmt);
1680 opt_stats.num_stmts++;
1681
1682 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
1683 cprop_into_stmt (stmt);
1684
1685 /* If the statement has been modified with constant replacements,
1686 fold its RHS before checking for redundant computations. */
1687 if (gimple_modified_p (stmt))
1688 {
1689 tree rhs = NULL;
1690
1691 /* Try to fold the statement making sure that STMT is kept
1692 up to date. */
1693 if (fold_stmt (&si))
1694 {
1695 stmt = gsi_stmt (si);
1696 gimple_set_modified (stmt, true);
1697
1698 if (dump_file && (dump_flags & TDF_DETAILS))
1699 {
1700 fprintf (dump_file, " Folded to: ");
1701 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1702 }
1703 }
1704
1705 /* We only need to consider cases that can yield a gimple operand. */
1706 if (gimple_assign_single_p (stmt))
1707 rhs = gimple_assign_rhs1 (stmt);
1708 else if (gimple_code (stmt) == GIMPLE_GOTO)
1709 rhs = gimple_goto_dest (stmt);
1710 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
1711 /* This should never be an ADDR_EXPR. */
1712 rhs = gimple_switch_index (swtch_stmt);
1713
1714 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
1715 recompute_tree_invariant_for_addr_expr (rhs);
1716
1717 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
1718 even if fold_stmt updated the stmt already and thus cleared
1719 gimple_modified_p flag on it. */
1720 modified_p = true;
1721 }
1722
1723 /* Check for redundant computations. Do this optimization only
1724 for assignments that have no volatile ops and conditionals. */
1725 may_optimize_p = (!gimple_has_side_effects (stmt)
1726 && (is_gimple_assign (stmt)
1727 || (is_gimple_call (stmt)
1728 && gimple_call_lhs (stmt) != NULL_TREE)
1729 || gimple_code (stmt) == GIMPLE_COND
1730 || gimple_code (stmt) == GIMPLE_SWITCH));
1731
1732 if (may_optimize_p)
1733 {
1734 if (gimple_code (stmt) == GIMPLE_CALL)
1735 {
1736 /* Resolve __builtin_constant_p. If it hasn't been
1737 folded to integer_one_node by now, it's fairly
1738 certain that the value simply isn't constant. */
1739 tree callee = gimple_call_fndecl (stmt);
1740 if (callee
1741 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
1742 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
1743 {
1744 propagate_tree_value_into_stmt (&si, integer_zero_node);
1745 stmt = gsi_stmt (si);
1746 }
1747 }
1748
1749 update_stmt_if_modified (stmt);
1750 eliminate_redundant_computations (&si);
1751 stmt = gsi_stmt (si);
1752
1753 /* Perform simple redundant store elimination. */
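/* That is, if the available expression table (keyed on the store's
   VUSE) already says the destination holds the value being stored,
   e.g. *p_1 = x_2 when *p_1 is known to be x_2, the store changes
   nothing and can be removed. */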
1754 if (gimple_assign_single_p (stmt)
1755 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1756 {
1757 tree lhs = gimple_assign_lhs (stmt);
1758 tree rhs = gimple_assign_rhs1 (stmt);
1759 tree cached_lhs;
1760 gassign *new_stmt;
1761 rhs = dom_valueize (rhs);
1762 /* Build a new statement with the RHS and LHS exchanged. */
1763 if (TREE_CODE (rhs) == SSA_NAME)
1764 {
1765 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
1766 new_stmt = gimple_build_assign (rhs, lhs);
1767 SSA_NAME_DEF_STMT (rhs) = defstmt;
1768 }
1769 else
1770 new_stmt = gimple_build_assign (rhs, lhs);
1771 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
1772 cached_lhs = lookup_avail_expr (new_stmt, false);
1773 if (cached_lhs
1774 && rhs == cached_lhs)
1775 {
1776 basic_block bb = gimple_bb (stmt);
1777 unlink_stmt_vdef (stmt);
1778 if (gsi_remove (&si, true))
1779 {
1780 bitmap_set_bit (need_eh_cleanup, bb->index);
1781 if (dump_file && (dump_flags & TDF_DETAILS))
1782 fprintf (dump_file, " Flagged to clear EH edges.\n");
1783 }
1784 release_defs (stmt);
1785 return;
1786 }
1787 }
1788 }
1789
1790 /* Record any additional equivalences created by this statement. */
1791 if (is_gimple_assign (stmt))
1792 record_equivalences_from_stmt (stmt, may_optimize_p);
1793
1794 /* If STMT is a COND_EXPR and it was modified, then we may know
1795 where it goes. If that is the case, then mark the CFG as altered.
1796
1797 This will cause us to later call remove_unreachable_blocks and
1798 cleanup_tree_cfg when it is safe to do so. It is not safe to
1799 clean things up here since removal of edges and such can trigger
1800 the removal of PHI nodes, which in turn can release SSA_NAMEs to
1801 the manager.
1802
1803 That's all fine and good, except that once SSA_NAMEs are released
1804 to the manager, we must not call create_ssa_name until all references
1805 to released SSA_NAMEs have been eliminated.
1806
1807      All references to the deleted SSA_NAMEs cannot be eliminated until
1808      we remove unreachable blocks.
1809
1810      We cannot remove unreachable blocks until after we have completed
1811      any queued jump threading.
1812
1813      We cannot complete any queued jump threads until we have taken
1814      appropriate variables out of SSA form.  Taking variables out of
1815      SSA form can call create_ssa_name and thus we lose.
1816
1817 Ultimately I suspect we're going to need to change the interface
1818 into the SSA_NAME manager. */
1819 if (gimple_modified_p (stmt) || modified_p)
1820 {
1821 tree val = NULL;
1822
1823 update_stmt_if_modified (stmt);
1824
1825 if (gimple_code (stmt) == GIMPLE_COND)
1826 val = fold_binary_loc (gimple_location (stmt),
1827 gimple_cond_code (stmt), boolean_type_node,
1828 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
1829 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
1830 val = gimple_switch_index (swtch_stmt);
1831
1832 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
1833 cfg_altered = true;
1834
1835	  /* If we simplified a statement in such a way that it can be shown
1836	     not to trap, update the EH information and the CFG to match.  */
1837 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
1838 {
1839 bitmap_set_bit (need_eh_cleanup, bb->index);
1840 if (dump_file && (dump_flags & TDF_DETAILS))
1841 fprintf (dump_file, " Flagged to clear EH edges.\n");
1842 }
1843
1844 if (!was_noreturn
1845 && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
1846 need_noreturn_fixup.safe_push (stmt);
1847 }
1848 }
1849
1850 /* Helper for walk_non_aliased_vuses. Determine if we arrived at
1851 the desired memory state. */
1852
1853 static void *
1854 vuse_eq (ao_ref *, tree vuse1, unsigned int cnt, void *data)
1855 {
1856 tree vuse2 = (tree) data;
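  /* Returning DATA (non-NULL) stops the walk successfully, returning NULL
     continues to the next dominating virtual definition, and returning
     (void *) -1 makes walk_non_aliased_vuses abort the walk.  */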
1857 if (vuse1 == vuse2)
1858 return data;
1859
1860 /* This bounds the stmt walks we perform on reference lookups
1861 to O(1) instead of O(N) where N is the number of dominating
1862 stores leading to a candidate. We re-use the SCCVN param
1863 for this as it is basically the same complexity. */
1864 if (cnt > (unsigned) PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS))
1865 return (void *)-1;
1866
1867 return NULL;
1868 }
1869
1870 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
1871    If found, return its LHS.  Otherwise, if INSERT is true, insert
1872    STMT in the table and return NULL_TREE.
1873
1874    Also, when an expression is first inserted in the table, it is
1875    added to AVAIL_EXPRS_STACK, so that it can be removed when
1876    we finish processing this block and its children.  */
1877
1878 static tree
1879 lookup_avail_expr (gimple stmt, bool insert)
1880 {
1881 expr_hash_elt **slot;
1882 tree lhs;
1883
1884 /* Get LHS of phi, assignment, or call; else NULL_TREE. */
1885 if (gimple_code (stmt) == GIMPLE_PHI)
1886 lhs = gimple_phi_result (stmt);
1887 else
1888 lhs = gimple_get_lhs (stmt);
1889
1890 class expr_hash_elt element (stmt, lhs);
1891
1892 if (dump_file && (dump_flags & TDF_DETAILS))
1893 {
1894 fprintf (dump_file, "LKUP ");
1895 element.print (dump_file);
1896 }
1897
1898 /* Don't bother remembering constant assignments and copy operations.
1899 Constants and copy operations are handled by the constant/copy propagator
1900 in optimize_stmt. */
1901 if (element.expr()->kind == EXPR_SINGLE
1902 && (TREE_CODE (element.expr()->ops.single.rhs) == SSA_NAME
1903 || is_gimple_min_invariant (element.expr()->ops.single.rhs)))
1904 return NULL_TREE;
1905
1906 /* Finally try to find the expression in the main expression hash table. */
1907 slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
1908 if (slot == NULL)
1909 {
1910 return NULL_TREE;
1911 }
1912 else if (*slot == NULL)
1913 {
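      /* The expression was not in the table.  Enter a heap-allocated copy
	 of ELEMENT and record it on AVAIL_EXPRS_STACK so that it is
	 removed again when we finish processing this block and its
	 children.  */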
1914 class expr_hash_elt *element2 = new expr_hash_elt (element);
1915 *slot = element2;
1916
1917 avail_exprs_stack->record_expr (element2, NULL, '2');
1918 return NULL_TREE;
1919 }
1920
1921 /* If we found a redundant memory operation do an alias walk to
1922 check if we can re-use it. */
1923 if (gimple_vuse (stmt) != (*slot)->vop ())
1924 {
1925 tree vuse1 = (*slot)->vop ();
1926 tree vuse2 = gimple_vuse (stmt);
1927 /* If we have a load of a register and a candidate in the
1928 hash with vuse1 then try to reach its stmt by walking
1929 up the virtual use-def chain using walk_non_aliased_vuses.
1930 But don't do this when removing expressions from the hash. */
1931 ao_ref ref;
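      /* The walk succeeds (returns non-NULL) only if VUSE1 is reachable
	 from VUSE2 without crossing a store that may alias the load, in
	 which case the cached expression is still valid at STMT.  */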
1932 if (!(vuse1 && vuse2
1933 && gimple_assign_single_p (stmt)
1934 && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
1935 && (ao_ref_init (&ref, gimple_assign_rhs1 (stmt)), true)
1936 && walk_non_aliased_vuses (&ref, vuse2,
1937 vuse_eq, NULL, NULL, vuse1) != NULL))
1938 {
1939 if (insert)
1940 {
1941 class expr_hash_elt *element2 = new expr_hash_elt (element);
1942
1943 /* Insert the expr into the hash by replacing the current
1944 entry and recording the value to restore in the
1945 avail_exprs_stack. */
1946 avail_exprs_stack->record_expr (element2, *slot, '2');
1947 *slot = element2;
1948 }
1949 return NULL_TREE;
1950 }
1951 }
1952
1953 /* Extract the LHS of the assignment so that it can be used as the current
1954 definition of another variable. */
1955 lhs = (*slot)->lhs ();
1956
1957 lhs = dom_valueize (lhs);
1958
1959 if (dump_file && (dump_flags & TDF_DETAILS))
1960 {
1961 fprintf (dump_file, "FIND: ");
1962 print_generic_expr (dump_file, lhs, 0);
1963 fprintf (dump_file, "\n");
1964 }
1965
1966 return lhs;
1967 }
1968
1969 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
1970 up degenerate PHIs created by or exposed by jump threading. */
1971
1972 /* Given a statement STMT, which is either a PHI node or an assignment,
1973 remove it from the IL. */
1974
1975 static void
1976 remove_stmt_or_phi (gimple stmt)
1977 {
1978 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
1979
1980 if (gimple_code (stmt) == GIMPLE_PHI)
1981 remove_phi_node (&gsi, true);
1982 else
1983 {
1984 gsi_remove (&gsi, true);
1985 release_defs (stmt);
1986 }
1987 }
1988
1989 /* Given a statement STMT, which is either a PHI node or an assignment,
1990    return the "rhs" of the node.  In the case of a non-degenerate
1991    PHI, NULL is returned.  */
1992
1993 static tree
1994 get_rhs_or_phi_arg (gimple stmt)
1995 {
1996 if (gimple_code (stmt) == GIMPLE_PHI)
1997 return degenerate_phi_result (as_a <gphi *> (stmt));
1998 else if (gimple_assign_single_p (stmt))
1999 return gimple_assign_rhs1 (stmt);
2000 else
2001 gcc_unreachable ();
2002 }
2003
2004
2005 /* Given a statement STMT, which is either a PHI node or an assignment,
2006 return the "lhs" of the node. */
2007
2008 static tree
2009 get_lhs_or_phi_result (gimple stmt)
2010 {
2011 if (gimple_code (stmt) == GIMPLE_PHI)
2012 return gimple_phi_result (stmt);
2013 else if (is_gimple_assign (stmt))
2014 return gimple_assign_lhs (stmt);
2015 else
2016 gcc_unreachable ();
2017 }
2018
2019 /* Propagate RHS into all uses of LHS (when possible).
2020
2021 RHS and LHS are derived from STMT, which is passed in solely so
2022 that we can remove it if propagation is successful.
2023
2024 When propagating into a PHI node or into a statement which turns
2025 into a trivial copy or constant initialization, set the
2026 appropriate bit in INTERESTING_NAMEs so that we will visit those
2027 nodes as well in an effort to pick up secondary optimization
2028 opportunities. */
2029
2030 static void
2031 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2032 {
2033 /* First verify that propagation is valid. */
2034 if (may_propagate_copy (lhs, rhs))
2035 {
2036 use_operand_p use_p;
2037 imm_use_iterator iter;
2038 gimple use_stmt;
2039 bool all = true;
2040
2041 /* Dump details. */
2042 if (dump_file && (dump_flags & TDF_DETAILS))
2043 {
2044 fprintf (dump_file, " Replacing '");
2045 print_generic_expr (dump_file, lhs, dump_flags);
2046 fprintf (dump_file, "' with %s '",
2047 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2048 print_generic_expr (dump_file, rhs, dump_flags);
2049 fprintf (dump_file, "'\n");
2050 }
2051
2052      /* Walk over every use of LHS and try to replace the use with RHS.
2053	 At this point the only reasons such a propagation would fail are
2054	 a use in an ASM_EXPR or a use in the defining statement of RHS.  */
2055 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2056 {
2057 /* Leave debug stmts alone. If we succeed in propagating
2058 all non-debug uses, we'll drop the DEF, and propagation
2059 into debug stmts will occur then. */
2060 if (gimple_debug_bind_p (use_stmt))
2061 continue;
2062
2063 /* It's not always safe to propagate into an ASM_EXPR. */
2064 if (gimple_code (use_stmt) == GIMPLE_ASM
2065 && ! may_propagate_copy_into_asm (lhs))
2066 {
2067 all = false;
2068 continue;
2069 }
2070
2071 /* It's not ok to propagate into the definition stmt of RHS.
2072 <bb 9>:
2073 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2074 g_67.1_6 = prephitmp.12_36;
2075 goto <bb 9>;
2076 While this is strictly all dead code we do not want to
2077 deal with this here. */
2078 if (TREE_CODE (rhs) == SSA_NAME
2079 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2080 {
2081 all = false;
2082 continue;
2083 }
2084
2085 /* Dump details. */
2086 if (dump_file && (dump_flags & TDF_DETAILS))
2087 {
2088 fprintf (dump_file, " Original statement:");
2089 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2090 }
2091
2092 /* Propagate the RHS into this use of the LHS. */
2093 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2094 propagate_value (use_p, rhs);
2095
2096 /* Special cases to avoid useless calls into the folding
2097 routines, operand scanning, etc.
2098
2099 Propagation into a PHI may cause the PHI to become
2100 a degenerate, so mark the PHI as interesting. No other
2101 actions are necessary. */
2102 if (gimple_code (use_stmt) == GIMPLE_PHI)
2103 {
2104 tree result;
2105
2106 /* Dump details. */
2107 if (dump_file && (dump_flags & TDF_DETAILS))
2108 {
2109 fprintf (dump_file, " Updated statement:");
2110 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2111 }
2112
2113 result = get_lhs_or_phi_result (use_stmt);
2114 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2115 continue;
2116 }
2117
2118 /* From this point onward we are propagating into a
2119 real statement. Folding may (or may not) be possible,
2120 we may expose new operands, expose dead EH edges,
2121 etc. */
2122 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2123 cannot fold a call that simplifies to a constant,
2124 because the GIMPLE_CALL must be replaced by a
2125 GIMPLE_ASSIGN, and there is no way to effect such a
2126 transformation in-place. We might want to consider
2127 using the more general fold_stmt here. */
2128 {
2129 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2130 fold_stmt_inplace (&gsi);
2131 }
2132
2133 /* Sometimes propagation can expose new operands to the
2134 renamer. */
2135 update_stmt (use_stmt);
2136
2137 /* Dump details. */
2138 if (dump_file && (dump_flags & TDF_DETAILS))
2139 {
2140 fprintf (dump_file, " Updated statement:");
2141 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2142 }
2143
2144 /* If we replaced a variable index with a constant, then
2145 we would need to update the invariant flag for ADDR_EXPRs. */
2146 if (gimple_assign_single_p (use_stmt)
2147 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2148 recompute_tree_invariant_for_addr_expr
2149 (gimple_assign_rhs1 (use_stmt));
2150
2151 /* If we cleaned up EH information from the statement,
2152 mark its containing block as needing EH cleanups. */
2153 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2154 {
2155 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2156 if (dump_file && (dump_flags & TDF_DETAILS))
2157 fprintf (dump_file, " Flagged to clear EH edges.\n");
2158 }
2159
2160 /* Propagation may expose new trivial copy/constant propagation
2161 opportunities. */
2162 if (gimple_assign_single_p (use_stmt)
2163 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2164 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2165 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2166 {
2167 tree result = get_lhs_or_phi_result (use_stmt);
2168 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2169 }
2170
2171	  /* Propagation into these nodes may make certain edges in
2172	     the CFG unexecutable.  We want to identify them, as PHI nodes
2173	     at the destinations of those unexecutable edges may become
2174	     degenerate.  */
2175 else if (gimple_code (use_stmt) == GIMPLE_COND
2176 || gimple_code (use_stmt) == GIMPLE_SWITCH
2177 || gimple_code (use_stmt) == GIMPLE_GOTO)
2178 {
2179 tree val;
2180
2181 if (gimple_code (use_stmt) == GIMPLE_COND)
2182 val = fold_binary_loc (gimple_location (use_stmt),
2183 gimple_cond_code (use_stmt),
2184 boolean_type_node,
2185 gimple_cond_lhs (use_stmt),
2186 gimple_cond_rhs (use_stmt));
2187 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2188 val = gimple_switch_index (as_a <gswitch *> (use_stmt));
2189 else
2190 val = gimple_goto_dest (use_stmt);
2191
2192 if (val && is_gimple_min_invariant (val))
2193 {
2194 basic_block bb = gimple_bb (use_stmt);
2195 edge te = find_taken_edge (bb, val);
2196 if (!te)
2197 continue;
2198
2199 edge_iterator ei;
2200 edge e;
2201 gimple_stmt_iterator gsi;
2202 gphi_iterator psi;
2203
2204 /* Remove all outgoing edges except TE. */
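		  /* remove_edge compacts BB->succs, so the edge iterator is
		     only advanced explicitly in the branch that keeps TE.  */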
2205 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2206 {
2207 if (e != te)
2208 {
2209 /* Mark all the PHI nodes at the destination of
2210 the unexecutable edge as interesting. */
2211 for (psi = gsi_start_phis (e->dest);
2212 !gsi_end_p (psi);
2213 gsi_next (&psi))
2214 {
2215 gphi *phi = psi.phi ();
2216
2217 tree result = gimple_phi_result (phi);
2218 int version = SSA_NAME_VERSION (result);
2219
2220 bitmap_set_bit (interesting_names, version);
2221 }
2222
2223 te->probability += e->probability;
2224
2225 te->count += e->count;
2226 remove_edge (e);
2227 cfg_altered = true;
2228 }
2229 else
2230 ei_next (&ei);
2231 }
2232
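		  /* Only TE remains as a successor, so the control statement
		     at the end of the block is no longer needed.  */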
2233 gsi = gsi_last_bb (gimple_bb (use_stmt));
2234 gsi_remove (&gsi, true);
2235
2236 /* And fixup the flags on the single remaining edge. */
2237 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2238 te->flags &= ~EDGE_ABNORMAL;
2239 te->flags |= EDGE_FALLTHRU;
2240 if (te->probability > REG_BR_PROB_BASE)
2241 te->probability = REG_BR_PROB_BASE;
2242 }
2243 }
2244 }
2245
2246 /* Ensure there is nothing else to do. */
2247 gcc_assert (!all || has_zero_uses (lhs));
2248
2249 /* If we were able to propagate away all uses of LHS, then
2250 we can remove STMT. */
2251 if (all)
2252 remove_stmt_or_phi (stmt);
2253 }
2254 }
2255
2256 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2257 a statement that is a trivial copy or constant initialization.
2258
2259    Attempt to eliminate STMT by propagating its RHS into all uses of
2260 its LHS. This may in turn set new bits in INTERESTING_NAMES
2261 for nodes we want to revisit later.
2262
2263 All exit paths should clear INTERESTING_NAMES for the result
2264 of STMT. */
2265
2266 static void
2267 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2268 {
2269 tree lhs = get_lhs_or_phi_result (stmt);
2270 tree rhs;
2271 int version = SSA_NAME_VERSION (lhs);
2272
2273 /* If the LHS of this statement or PHI has no uses, then we can
2274 just eliminate it. This can occur if, for example, the PHI
2275 was created by block duplication due to threading and its only
2276 use was in the conditional at the end of the block which was
2277 deleted. */
2278 if (has_zero_uses (lhs))
2279 {
2280 bitmap_clear_bit (interesting_names, version);
2281 remove_stmt_or_phi (stmt);
2282 return;
2283 }
2284
2285 /* Get the RHS of the assignment or PHI node if the PHI is a
2286 degenerate. */
2287 rhs = get_rhs_or_phi_arg (stmt);
2288 if (!rhs)
2289 {
2290 bitmap_clear_bit (interesting_names, version);
2291 return;
2292 }
2293
2294 if (!virtual_operand_p (lhs))
2295 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
2296 else
2297 {
2298 gimple use_stmt;
2299 imm_use_iterator iter;
2300 use_operand_p use_p;
2301 /* For virtual operands we have to propagate into all uses as
2302 otherwise we will create overlapping life-ranges. */
2303 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2304 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2305 SET_USE (use_p, rhs);
2306 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
2307 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
2308 remove_stmt_or_phi (stmt);
2309 }
2310
2311   /* Note that STMT may well have been deleted by now, so do
2312      not access it; instead use the saved version number to clear
2313      STMT's entry in the worklist.  */
2314 bitmap_clear_bit (interesting_names, version);
2315 }
2316
2317 /* The first phase in degenerate PHI elimination.
2318
2319 Eliminate the degenerate PHIs in BB, then recurse on the
2320 dominator children of BB. */
2321
2322 static void
2323 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
2324 {
2325 gphi_iterator gsi;
2326 basic_block son;
2327
2328 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2329 {
2330 gphi *phi = gsi.phi ();
2331
2332 eliminate_const_or_copy (phi, interesting_names);
2333 }
2334
2335 /* Recurse into the dominator children of BB. */
2336 for (son = first_dom_son (CDI_DOMINATORS, bb);
2337 son;
2338 son = next_dom_son (CDI_DOMINATORS, son))
2339 eliminate_degenerate_phis_1 (son, interesting_names);
2340 }
2341
2342
2343 /* A very simple pass to eliminate degenerate PHI nodes from the
2344    IL.  This is meant to be fast enough to be run several
2345    times in the optimization pipeline.
2346
2347    Certain optimizations, particularly those which duplicate blocks
2348    or remove edges from the CFG, can create or expose PHIs which are
2349    trivial copies or constant initializations.
2350
2351 While we could pick up these optimizations in DOM or with the
2352 combination of copy-prop and CCP, those solutions are far too
2353 heavy-weight for our needs.
2354
2355 This implementation has two phases so that we can efficiently
2356 eliminate the first order degenerate PHIs and second order
2357 degenerate PHIs.
2358
2359 The first phase performs a dominator walk to identify and eliminate
2360 the vast majority of the degenerate PHIs. When a degenerate PHI
2361 is identified and eliminated any affected statements or PHIs
2362 are put on a worklist.
2363
2364 The second phase eliminates degenerate PHIs and trivial copies
2365 or constant initializations using the worklist. This is how we
2366 pick up the secondary optimization opportunities with minimal
2367 cost. */
2368
2369 namespace {
2370
2371 const pass_data pass_data_phi_only_cprop =
2372 {
2373 GIMPLE_PASS, /* type */
2374 "phicprop", /* name */
2375 OPTGROUP_NONE, /* optinfo_flags */
2376 TV_TREE_PHI_CPROP, /* tv_id */
2377 ( PROP_cfg | PROP_ssa ), /* properties_required */
2378 0, /* properties_provided */
2379 0, /* properties_destroyed */
2380 0, /* todo_flags_start */
2381 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
2382 };
2383
2384 class pass_phi_only_cprop : public gimple_opt_pass
2385 {
2386 public:
2387 pass_phi_only_cprop (gcc::context *ctxt)
2388 : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
2389 {}
2390
2391 /* opt_pass methods: */
2392 opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
2393 virtual bool gate (function *) { return flag_tree_dom != 0; }
2394 virtual unsigned int execute (function *);
2395
2396 }; // class pass_phi_only_cprop
2397
2398 unsigned int
2399 pass_phi_only_cprop::execute (function *fun)
2400 {
2401 bitmap interesting_names;
2402 bitmap interesting_names1;
2403
2404   /* Bitmap of blocks which need EH information updated.  We cannot
2405      update it on-the-fly as doing so invalidates the dominator tree.  */
2406 need_eh_cleanup = BITMAP_ALLOC (NULL);
2407
2408 /* INTERESTING_NAMES is effectively our worklist, indexed by
2409 SSA_NAME_VERSION.
2410
2411 A set bit indicates that the statement or PHI node which
2412 defines the SSA_NAME should be (re)examined to determine if
2413 it has become a degenerate PHI or trivial const/copy propagation
2414 opportunity.
2415
2416      Experiments have shown we generally get better compilation-time
2417      behavior with bitmaps rather than sbitmaps.  */
2418 interesting_names = BITMAP_ALLOC (NULL);
2419 interesting_names1 = BITMAP_ALLOC (NULL);
2420
2421 calculate_dominance_info (CDI_DOMINATORS);
2422 cfg_altered = false;
2423
2424 /* First phase. Eliminate degenerate PHIs via a dominator
2425 walk of the CFG.
2426
2427 Experiments have indicated that we generally get better
2428 compile-time behavior by visiting blocks in the first
2429 phase in dominator order. Presumably this is because walking
2430 in dominator order leaves fewer PHIs for later examination
2431 by the worklist phase. */
2432 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (fun),
2433 interesting_names);
2434
2435 /* Second phase. Eliminate second order degenerate PHIs as well
2436 as trivial copies or constant initializations identified by
2437 the first phase or this phase. Basically we keep iterating
2438 until our set of INTERESTING_NAMEs is empty. */
2439 while (!bitmap_empty_p (interesting_names))
2440 {
2441 unsigned int i;
2442 bitmap_iterator bi;
2443
2444 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
2445 changed during the loop. Copy it to another bitmap and
2446 use that. */
2447 bitmap_copy (interesting_names1, interesting_names);
2448
2449 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
2450 {
2451 tree name = ssa_name (i);
2452
2453 /* Ignore SSA_NAMEs that have been released because
2454 their defining statement was deleted (unreachable). */
2455 if (name)
2456 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
2457 interesting_names);
2458 }
2459 }
2460
2461 if (cfg_altered)
2462 {
2463 free_dominance_info (CDI_DOMINATORS);
2464 /* If we changed the CFG schedule loops for fixup by cfgcleanup. */
2465 loops_state_set (LOOPS_NEED_FIXUP);
2466 }
2467
2468 /* Propagation of const and copies may make some EH edges dead. Purge
2469 such edges from the CFG as needed. */
2470 if (!bitmap_empty_p (need_eh_cleanup))
2471 {
2472 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
2473 BITMAP_FREE (need_eh_cleanup);
2474 }
2475
2476 BITMAP_FREE (interesting_names);
2477 BITMAP_FREE (interesting_names1);
2478 return 0;
2479 }
2480
2481 } // anon namespace
2482
2483 gimple_opt_pass *
2484 make_pass_phi_only_cprop (gcc::context *ctxt)
2485 {
2486 return new pass_phi_only_cprop (ctxt);
2487 }