/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2019 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "cfganal.h"
#include "cfgloop.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "tree-inline.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "params.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
#include "gimplify.h"
#include "tree-cfgcleanup.h"
#include "dbgcnt.h"
#include "alloc-pool.h"
#include "tree-vrp.h"
#include "vr-values.h"
#include "gimple-ssa-evrp-analyze.h"

/* This file implements optimizations on the dominator tree. */

/* Structure for recording edge equivalences.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field. At the end of an iteration we
   free each of these structures. */
class edge_info
{
 public:
  typedef std::pair <tree, tree> equiv_pair;
  edge_info (edge);
  ~edge_info ();

  /* Record a simple LHS = RHS equivalence. This may trigger
     calls to derive_equivalences. */
  void record_simple_equiv (tree, tree);

  /* If traversing this edge creates simple equivalences, we store
     them as LHS/RHS pairs within this vector. */
  vec<equiv_pair> simple_equivalences;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false. */
  vec<cond_equivalence> cond_equivalences;

 private:
  /* Derive equivalences by walking the use-def chains. */
  void derive_equivalences (tree, tree, int);
};

/* Track whether or not we have changed the control flow graph. */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned. We should
   remove their dead edges eventually. */
static bitmap need_eh_cleanup;
static vec<gimple *> need_noreturn_fixup;

/* Statistics for dominator optimizations. */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;

/* Local functions. */
static void record_equality (tree, tree, class const_and_copies *);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block,
                                                    class const_and_copies *,
                                                    class avail_exprs_stack *);
static void eliminate_redundant_computations (gimple_stmt_iterator *,
                                              class const_and_copies *,
                                              class avail_exprs_stack *);
static void record_equivalences_from_stmt (gimple *, int,
                                           class avail_exprs_stack *);
static void dump_dominator_optimization_stats (FILE *file,
                                               hash_table<expr_elt_hasher> *);

/* Constructor for EDGE_INFO. An EDGE_INFO instance is always
   associated with an edge E. */

edge_info::edge_info (edge e)
{
  /* Free the old one associated with E, if it exists, and
     associate our new object with E. */
  free_dom_edge_info (e);
  e->aux = this;

  /* And initialize the embedded vectors. */
  simple_equivalences = vNULL;
  cond_equivalences = vNULL;
}

/* Destructor just needs to release the vectors. */

edge_info::~edge_info (void)
{
  this->cond_equivalences.release ();
  this->simple_equivalences.release ();
}

/* NAME is known to have the value VALUE, which must be a constant.

   Walk through its use-def chain to see if there are other equivalences
   we might be able to derive.

   RECURSION_LIMIT controls how far back we recurse through the use-def
   chains. */
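
/* For instance (a purely illustrative IL fragment, not taken from any
   particular testcase): if traversing an edge tells us that x_3 == 0
   and the defining statement of x_3 is

     x_3 = a_5 | b_6;

   then, because a BIT_IOR_EXPR result can only be zero when both of its
   operands are zero, we can also record a_5 == 0 and b_6 == 0 and keep
   walking from there. */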

void
edge_info::derive_equivalences (tree name, tree value, int recursion_limit)
{
  if (TREE_CODE (name) != SSA_NAME || TREE_CODE (value) != INTEGER_CST)
    return;

  /* This records the equivalence for the toplevel object. Do
     this before checking the recursion limit. */
  simple_equivalences.safe_push (equiv_pair (name, value));

  /* Limit how far up the use-def chains we are willing to walk. */
  if (recursion_limit == 0)
    return;

  /* We can walk up the use-def chains to potentially find more
     equivalences. */
  gimple *def_stmt = SSA_NAME_DEF_STMT (name);
  if (is_gimple_assign (def_stmt))
    {
      enum tree_code code = gimple_assign_rhs_code (def_stmt);
      switch (code)
        {
        /* If the result of an OR is zero, then its operands are, too. */
        case BIT_IOR_EXPR:
          if (integer_zerop (value))
            {
              tree rhs1 = gimple_assign_rhs1 (def_stmt);
              tree rhs2 = gimple_assign_rhs2 (def_stmt);

              value = build_zero_cst (TREE_TYPE (rhs1));
              derive_equivalences (rhs1, value, recursion_limit - 1);
              value = build_zero_cst (TREE_TYPE (rhs2));
              derive_equivalences (rhs2, value, recursion_limit - 1);
            }
          break;

        /* If the result of an AND is nonzero, then its operands are, too. */
        case BIT_AND_EXPR:
          if (!integer_zerop (value))
            {
              tree rhs1 = gimple_assign_rhs1 (def_stmt);
              tree rhs2 = gimple_assign_rhs2 (def_stmt);

              /* If either operand has a boolean range, then we
                 know its value must be one; otherwise we just know it
                 is nonzero. The former is clearly useful; I haven't
                 yet seen a case where the latter is helpful. */
              if (TREE_CODE (rhs1) == SSA_NAME)
                {
                  if (ssa_name_has_boolean_range (rhs1))
                    {
                      value = build_one_cst (TREE_TYPE (rhs1));
                      derive_equivalences (rhs1, value, recursion_limit - 1);
                    }
                }
              if (TREE_CODE (rhs2) == SSA_NAME)
                {
                  if (ssa_name_has_boolean_range (rhs2))
                    {
                      value = build_one_cst (TREE_TYPE (rhs2));
                      derive_equivalences (rhs2, value, recursion_limit - 1);
                    }
                }
            }
          break;

        /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
           set via a widening type conversion, then we may be able to record
           additional equivalences. */
        case NOP_EXPR:
        case CONVERT_EXPR:
          {
            tree rhs = gimple_assign_rhs1 (def_stmt);
            tree rhs_type = TREE_TYPE (rhs);
            if (INTEGRAL_TYPE_P (rhs_type)
                && (TYPE_PRECISION (TREE_TYPE (name))
                    >= TYPE_PRECISION (rhs_type))
                && int_fits_type_p (value, rhs_type))
              derive_equivalences (rhs,
                                   fold_convert (rhs_type, value),
                                   recursion_limit - 1);
            break;
          }

        /* We can invert the operation of these codes trivially if
           one of the RHS operands is a constant to produce a known
           value for the other RHS operand. */
        case POINTER_PLUS_EXPR:
        case PLUS_EXPR:
          {
            tree rhs1 = gimple_assign_rhs1 (def_stmt);
            tree rhs2 = gimple_assign_rhs2 (def_stmt);

            /* If either argument is a constant, then we can compute
               a constant value for the nonconstant argument. */
            if (TREE_CODE (rhs1) == INTEGER_CST
                && TREE_CODE (rhs2) == SSA_NAME)
              derive_equivalences (rhs2,
                                   fold_binary (MINUS_EXPR, TREE_TYPE (rhs1),
                                                value, rhs1),
                                   recursion_limit - 1);
            else if (TREE_CODE (rhs2) == INTEGER_CST
                     && TREE_CODE (rhs1) == SSA_NAME)
              derive_equivalences (rhs1,
                                   fold_binary (MINUS_EXPR, TREE_TYPE (rhs1),
                                                value, rhs2),
                                   recursion_limit - 1);
            break;
          }

        /* If one of the operands is a constant, then we can compute
           the value of the other operand. If both operands are
           SSA_NAMEs, then they must be equal if the result is zero. */
        case MINUS_EXPR:
          {
            tree rhs1 = gimple_assign_rhs1 (def_stmt);
            tree rhs2 = gimple_assign_rhs2 (def_stmt);

            /* If either argument is a constant, then we can compute
               a constant value for the nonconstant argument. */
            if (TREE_CODE (rhs1) == INTEGER_CST
                && TREE_CODE (rhs2) == SSA_NAME)
              derive_equivalences (rhs2,
                                   fold_binary (MINUS_EXPR, TREE_TYPE (rhs1),
                                                rhs1, value),
                                   recursion_limit - 1);
            else if (TREE_CODE (rhs2) == INTEGER_CST
                     && TREE_CODE (rhs1) == SSA_NAME)
              derive_equivalences (rhs1,
                                   fold_binary (PLUS_EXPR, TREE_TYPE (rhs1),
                                                value, rhs2),
                                   recursion_limit - 1);
            else if (integer_zerop (value))
              {
                tree cond = build2 (EQ_EXPR, boolean_type_node,
                                    gimple_assign_rhs1 (def_stmt),
                                    gimple_assign_rhs2 (def_stmt));
                tree inverted = invert_truthvalue (cond);
                record_conditions (&this->cond_equivalences, cond, inverted);
              }
            break;
          }

        case EQ_EXPR:
        case NE_EXPR:
          {
            if ((code == EQ_EXPR && integer_onep (value))
                || (code == NE_EXPR && integer_zerop (value)))
              {
                tree rhs1 = gimple_assign_rhs1 (def_stmt);
                tree rhs2 = gimple_assign_rhs2 (def_stmt);

                /* If either argument is a constant, then record the
                   other argument as being the same as that constant.

                   If neither operand is a constant, then we have a
                   conditional name == name equivalence. */
                if (TREE_CODE (rhs1) == INTEGER_CST)
                  derive_equivalences (rhs2, rhs1, recursion_limit - 1);
                else if (TREE_CODE (rhs2) == INTEGER_CST)
                  derive_equivalences (rhs1, rhs2, recursion_limit - 1);
              }
            else
              {
                tree cond = build2 (code, boolean_type_node,
                                    gimple_assign_rhs1 (def_stmt),
                                    gimple_assign_rhs2 (def_stmt));
                tree inverted = invert_truthvalue (cond);
                if (integer_zerop (value))
                  std::swap (cond, inverted);
                record_conditions (&this->cond_equivalences, cond, inverted);
              }
            break;
          }

        /* For BIT_NOT and NEGATE, we can just apply the operation to the
           VALUE to get the new equivalence. It will always be a constant
           so we can recurse. */
        case BIT_NOT_EXPR:
        case NEGATE_EXPR:
          {
            tree rhs = gimple_assign_rhs1 (def_stmt);
            tree res;
            /* If this is a NOT and the operand has a boolean range, then we
               know its value must be zero or one. We are not supposed to
               have a BIT_NOT_EXPR for boolean types with precision > 1 in
               the general case, see e.g. the handling of TRUTH_NOT_EXPR in
               the gimplifier, but it can be generated by match.pd out of
               a BIT_XOR_EXPR wrapped in a BIT_AND_EXPR. Now the handling
               of BIT_AND_EXPR above already forces a specific semantics for
               boolean types with precision > 1 so we must do the same here,
               otherwise we could change the semantics of TRUTH_NOT_EXPR for
               boolean types with precision > 1. */
            if (code == BIT_NOT_EXPR
                && TREE_CODE (rhs) == SSA_NAME
                && ssa_name_has_boolean_range (rhs))
              {
                if ((TREE_INT_CST_LOW (value) & 1) == 0)
                  res = build_one_cst (TREE_TYPE (rhs));
                else
                  res = build_zero_cst (TREE_TYPE (rhs));
              }
            else
              res = fold_build1 (code, TREE_TYPE (rhs), value);
            derive_equivalences (rhs, res, recursion_limit - 1);
            break;
          }

        default:
          {
            if (TREE_CODE_CLASS (code) == tcc_comparison)
              {
                tree cond = build2 (code, boolean_type_node,
                                    gimple_assign_rhs1 (def_stmt),
                                    gimple_assign_rhs2 (def_stmt));
                tree inverted = invert_truthvalue (cond);
                if (integer_zerop (value))
                  std::swap (cond, inverted);
                record_conditions (&this->cond_equivalences, cond, inverted);
                break;
              }
            break;
          }
        }
    }
}

void
edge_info::record_simple_equiv (tree lhs, tree rhs)
{
  /* If the RHS is a constant, then we may be able to derive
     further equivalences. Else just record the name = name
     equivalence. */
  if (TREE_CODE (rhs) == INTEGER_CST)
    derive_equivalences (lhs, rhs, 4);
  else
    simple_equivalences.safe_push (equiv_pair (lhs, rhs));
}

/* Free the edge_info data attached to E, if it exists. */

void
free_dom_edge_info (edge e)
{
  class edge_info *edge_info = (class edge_info *) e->aux;

  if (edge_info)
    delete edge_info;
}

/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading. */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          free_dom_edge_info (e);
          e->aux = NULL;
        }
    }
}

/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB. */
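
/* For example (illustrative only): if BB ends with

     if (x_1 == 42)

   then traversing the true edge implies x_1 == 42, and for a
   GIMPLE_SWITCH each outgoing edge reached by exactly one single-value
   case label implies that the switch index equals that label's
   CASE_LOW. */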

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  class edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          gswitch *switch_stmt = as_a <gswitch *> (stmt);
          tree index = gimple_switch_index (switch_stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (switch_stmt);
              tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (switch_stmt, i);
                  basic_block target_bb
                    = label_to_block (cfun, CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = new class edge_info (e);
                      edge_info->record_simple_equiv (index, x);
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too. */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant, as we
             know the value of OP0 on both arms of the branch; i.e., we
             can record an equivalence for OP0 rather than COND.

             However, don't do this if the constant isn't zero or one.
             Such conditionals will get optimized more thoroughly during
             the domwalk. */
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && ssa_name_has_boolean_range (op0)
              && is_gimple_min_invariant (op1)
              && (integer_zerop (op1) || integer_onep (op1)))
            {
              tree true_val = constant_boolean_node (true, TREE_TYPE (op0));
              tree false_val = constant_boolean_node (false, TREE_TYPE (op0));

              if (code == EQ_EXPR)
                {
                  edge_info = new class edge_info (true_edge);
                  edge_info->record_simple_equiv (op0,
                                                  (integer_zerop (op1)
                                                   ? false_val : true_val));
                  edge_info = new class edge_info (false_edge);
                  edge_info->record_simple_equiv (op0,
                                                  (integer_zerop (op1)
                                                   ? true_val : false_val));
                }
              else
                {
                  edge_info = new class edge_info (true_edge);
                  edge_info->record_simple_equiv (op0,
                                                  (integer_zerop (op1)
                                                   ? true_val : false_val));
                  edge_info = new class edge_info (false_edge);
                  edge_info->record_simple_equiv (op0,
                                                  (integer_zerop (op1)
                                                   ? false_val : true_val));
                }
            }
          /* This can show up in the IL as a result of copy propagation;
             it will eventually be canonicalized, but we have to cope
             with this case within the pass. */
          else if (is_gimple_min_invariant (op0)
                   && TREE_CODE (op1) == SSA_NAME)
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (op0)
                    && real_zerop (op0));
              struct edge_info *edge_info;

              edge_info = new class edge_info (true_edge);
              record_conditions (&edge_info->cond_equivalences, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                edge_info->record_simple_equiv (op1, op0);

              edge_info = new class edge_info (false_edge);
              record_conditions (&edge_info->cond_equivalences, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                edge_info->record_simple_equiv (op1, op0);
            }

          else if (TREE_CODE (op0) == SSA_NAME
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (op1)
                    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
              struct edge_info *edge_info;

              edge_info = new class edge_info (true_edge);
              record_conditions (&edge_info->cond_equivalences, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                edge_info->record_simple_equiv (op0, op1);

              edge_info = new class edge_info (false_edge);
              record_conditions (&edge_info->cond_equivalences, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                edge_info->record_simple_equiv (op0, op1);
            }
        }
    }
}


class dom_opt_dom_walker : public dom_walker
{
 public:
  dom_opt_dom_walker (cdi_direction direction,
                      class const_and_copies *const_and_copies,
                      class avail_exprs_stack *avail_exprs_stack,
                      gcond *dummy_cond)
    : dom_walker (direction, REACHABLE_BLOCKS),
      m_const_and_copies (const_and_copies),
      m_avail_exprs_stack (avail_exprs_stack),
      evrp_range_analyzer (true),
      m_dummy_cond (dummy_cond) { }

  virtual edge before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

 private:

  /* Unwindable equivalences, both const/copy and expression varieties. */
  class const_and_copies *m_const_and_copies;
  class avail_exprs_stack *m_avail_exprs_stack;

  /* VRP data. */
  class evrp_range_analyzer evrp_range_analyzer;

  /* Dummy condition to avoid creating lots of throw away statements. */
  gcond *m_dummy_cond;

  /* Optimize a single statement within a basic block using the
     various tables maintained by DOM. Returns the taken edge if
     the statement is a conditional with a statically determined
     value. */
  edge optimize_stmt (basic_block, gimple_stmt_iterator *, bool *);
};

/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA. For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME. */

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
 public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt),
      may_peel_loop_headers_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      may_peel_loop_headers_p = param;
    }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

 private:
  /* This flag is used to prevent loops from being peeled repeatedly in jump
     threading; it will be removed once we preserve loop structures throughout
     the compilation -- we will be able to mark the affected loops directly in
     jump threading, and avoid peeling them next time. */
  bool may_peel_loop_headers_p;
}; // class pass_dominator

unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables. */
  hash_table<expr_elt_hasher> *avail_exprs
    = new hash_table<expr_elt_hasher> (1024);
  class avail_exprs_stack *avail_exprs_stack
    = new class avail_exprs_stack (avail_exprs);
  class const_and_copies *const_and_copies = new class const_and_copies ();
  need_eh_cleanup = BITMAP_ALLOC (NULL);
  need_noreturn_fixup.create (0);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading. Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if the loop preheader is
     missing. Once jump threading is improved, LOOPS_HAVE_PREHEADERS
     won't be needed here. */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array. */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop. */
  mark_dfs_back_edges ();

  /* We want to create the edge info structures before the dominator walk
     so that they'll be in place for the jump threader, particularly when
     threading through a join block.

     The conditions will be lazily updated with global equivalences as
     we reach them during the dominator walk. */
  basic_block bb;
  FOR_EACH_BB_FN (bb, fun)
    record_edge_info (bb);

  gcond *dummy_cond = gimple_build_cond (NE_EXPR, integer_zero_node,
                                         integer_zero_node, NULL, NULL);

  /* Recursively walk the dominator tree optimizing statements. */
  dom_opt_dom_walker walker (CDI_DOMINATORS, const_and_copies,
                             avail_exprs_stack, dummy_cond);
  walker.walk (fun->cfg->x_entry_block_ptr);

  /* Look for blocks where we cleared EDGE_EXECUTABLE on an outgoing
     edge. When found, remove jump threads which contain any outgoing
     edge from the affected block. */
  if (cfg_altered)
    {
      FOR_EACH_BB_FN (bb, fun)
        {
          edge_iterator ei;
          edge e;

          /* First see if there are any edges without EDGE_EXECUTABLE
             set. */
          bool found = false;
          FOR_EACH_EDGE (e, ei, bb->succs)
            {
              if ((e->flags & EDGE_EXECUTABLE) == 0)
                {
                  found = true;
                  break;
                }
            }

          /* If there were any such edges found, then remove jump threads
             containing any edge leaving BB. */
          if (found)
            FOR_EACH_EDGE (e, ei, bb->succs)
              remove_jump_threads_including (e);
        }
    }

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading. This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation. */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed. */
  cfg_altered |= thread_through_all_blocks (may_peel_loop_headers_p);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead. Purge
     such edges from the CFG as needed. */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.
         Don't clear bits in the bitmap, as that can break the bitmap
         iterator. */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
          if (bb == NULL)
            continue;
          while (single_succ_p (bb)
                 && (single_succ_edge (bb)->flags
                     & (EDGE_EH|EDGE_DFS_BACK)) == 0)
            bb = single_succ (bb);
          if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
            continue;
          if ((unsigned) bb->index != i)
            bitmap_set_bit (need_eh_cleanup, bb->index);
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }
  /* Fixup stmts that became noreturn calls. This may require splitting
     blocks and thus isn't possible during the dominator walk or before
     jump threading has finished. Do this in reverse order so we don't
     inadvertently remove a stmt we want to fixup by visiting a dominating
     now-noreturn call first. */
  while (!need_noreturn_fixup.is_empty ())
    {
      gimple *stmt = need_noreturn_fixup.pop ();
      if (dump_file && dump_flags & TDF_DETAILS)
        {
          fprintf (dump_file, "Fixing up noreturn call ");
          print_gimple_stmt (dump_file, stmt, 0);
          fprintf (dump_file, "\n");
        }
      fixup_noreturn_call (stmt);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps. */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file, avail_exprs);

  loop_optimizer_finalize ();

  /* Delete our main hashtable. */
  delete avail_exprs;
  avail_exprs = NULL;

  /* Free asserted bitmaps and stacks. */
  BITMAP_FREE (need_eh_cleanup);
  need_noreturn_fixup.release ();
  delete avail_exprs_stack;
  delete const_and_copies;

  /* Free the value-handle array. */
  threadedge_finalize_values ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}

/* A hack until we remove threading from tree-vrp.c and bring the
   simplification routine into the dom_opt_dom_walker class. */
static class vr_values *x_vr_values;

/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements. */
static tree
simplify_stmt_for_jump_threading (gimple *stmt,
                                  gimple *within_stmt ATTRIBUTE_UNUSED,
                                  class avail_exprs_stack *avail_exprs_stack,
                                  basic_block bb ATTRIBUTE_UNUSED)
{
  /* First query our hash table to see if the expression is available
     there. A non-NULL return value will be either a constant or another
     SSA_NAME. */
  tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
  if (cached_lhs)
    return cached_lhs;

  /* If the hash table query failed, query VRP information. This is
     essentially the same as tree-vrp's simplification routine. The
     copy in tree-vrp is scheduled for removal in gcc-9. */
  if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    {
      cached_lhs
        = x_vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
                                                 gimple_cond_lhs (cond_stmt),
                                                 gimple_cond_rhs (cond_stmt),
                                                 within_stmt);
      return cached_lhs;
    }

  if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
    {
      tree op = gimple_switch_index (switch_stmt);
      if (TREE_CODE (op) != SSA_NAME)
        return NULL_TREE;

      value_range *vr = x_vr_values->get_value_range (op);
      if (vr->undefined_p ()
          || vr->varying_p ()
          || vr->symbolic_p ())
        return NULL_TREE;

      if (vr->kind () == VR_RANGE)
        {
          size_t i, j;

          find_case_label_range (switch_stmt, vr->min (), vr->max (), &i, &j);

          if (i == j)
            {
              tree label = gimple_switch_label (switch_stmt, i);
              tree singleton;

              if (CASE_HIGH (label) != NULL_TREE
                  ? (tree_int_cst_compare (CASE_LOW (label), vr->min ()) <= 0
                     && tree_int_cst_compare (CASE_HIGH (label), vr->max ()) >= 0)
                  : (vr->singleton_p (&singleton)
                     && tree_int_cst_equal (CASE_LOW (label), singleton)))
                return label;

              if (i > j)
                return gimple_switch_label (switch_stmt, 0);
            }
        }

      if (vr->kind () == VR_ANTI_RANGE)
        {
          unsigned n = gimple_switch_num_labels (switch_stmt);
          tree min_label = gimple_switch_label (switch_stmt, 1);
          tree max_label = gimple_switch_label (switch_stmt, n - 1);

          /* The default label will be taken only if the anti-range of the
             operand is entirely outside the bounds of all the (non-default)
             case labels. */
          if (tree_int_cst_compare (vr->min (), CASE_LOW (min_label)) <= 0
              && (CASE_HIGH (max_label) != NULL_TREE
                  ? tree_int_cst_compare (vr->max (), CASE_HIGH (max_label)) >= 0
                  : tree_int_cst_compare (vr->max (), CASE_LOW (max_label)) >= 0))
            return gimple_switch_label (switch_stmt, 0);
        }
      return NULL_TREE;
    }

  if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
    {
      tree lhs = gimple_assign_lhs (assign_stmt);
      if (TREE_CODE (lhs) == SSA_NAME
          && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
              || POINTER_TYPE_P (TREE_TYPE (lhs)))
          && stmt_interesting_for_vrp (stmt))
        {
          edge dummy_e;
          tree dummy_tree;
          value_range new_vr;
          x_vr_values->extract_range_from_stmt (stmt, &dummy_e,
                                                &dummy_tree, &new_vr);
          tree singleton;
          if (new_vr.singleton_p (&singleton))
            return singleton;
        }
    }
  return NULL;
}

/* Valueize hook for gimple_fold_stmt_to_constant_1. */

static tree
dom_valueize (tree t)
{
  if (TREE_CODE (t) == SSA_NAME)
    {
      tree tem = SSA_NAME_VALUE (t);
      if (tem)
        return tem;
    }
  return t;
}

/* We have just found an equivalence for LHS on an edge E.
   Look backwards to other uses of LHS and see if we can derive
   additional equivalences that are valid on edge E. */
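
/* For example (illustrative only): suppose we just learned x_1 == 5 on
   edge E, and a statement

     y_2 = x_1 + 3;

   appears in a block dominating E->dest. Folding that statement with
   x_1 valueized to 5 lets us also record y_2 == 8 as valid on E. */
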
static void
back_propagate_equivalences (tree lhs, edge e,
                             class const_and_copies *const_and_copies)
{
  use_operand_p use_p;
  imm_use_iterator iter;
  bitmap domby = NULL;
  basic_block dest = e->dest;

  /* Iterate over the uses of LHS to see if any dominate E->dest.
     If so, they may create useful equivalences too.

     ??? If the code gets reorganized into a worklist to catch more
     indirect opportunities and it is made to handle PHIs, then this
     should only consider use_stmts in basic blocks we have already visited. */
  FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
    {
      gimple *use_stmt = USE_STMT (use_p);

      /* Often the use is in DEST, which we trivially know we can't use.
         This is cheaper than the dominator set tests below. */
      if (dest == gimple_bb (use_stmt))
        continue;

      /* Filter out statements that can never produce a useful
         equivalence. */
      tree lhs2 = gimple_get_lhs (use_stmt);
      if (!lhs2 || TREE_CODE (lhs2) != SSA_NAME)
        continue;

      /* Profiling has shown the domination tests here can be fairly
         expensive. We get significant improvements by building the
         set of blocks that dominate BB. We can then just test
         for set membership below.

         We also initialize the set lazily since often the only uses
         are going to be in the same block as DEST. */
      if (!domby)
        {
          domby = BITMAP_ALLOC (NULL);
          basic_block bb = get_immediate_dominator (CDI_DOMINATORS, dest);
          while (bb)
            {
              bitmap_set_bit (domby, bb->index);
              bb = get_immediate_dominator (CDI_DOMINATORS, bb);
            }
        }

      /* This tests if USE_STMT does not dominate DEST. */
      if (!bitmap_bit_p (domby, gimple_bb (use_stmt)->index))
        continue;

      /* At this point USE_STMT dominates DEST and may result in a
         useful equivalence. Try to simplify its RHS to a constant
         or SSA_NAME. */
      tree res = gimple_fold_stmt_to_constant_1 (use_stmt, dom_valueize,
                                                 no_follow_ssa_edges);
      if (res && (TREE_CODE (res) == SSA_NAME || is_gimple_min_invariant (res)))
        record_equality (lhs2, res, const_and_copies);
    }

  if (domby)
    BITMAP_FREE (domby);
}

/* Record into CONST_AND_COPIES and AVAIL_EXPRS_STACK any equivalences implied
   by traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers. */
void
record_temporary_equivalences (edge e,
                               class const_and_copies *const_and_copies,
                               class avail_exprs_stack *avail_exprs_stack)
{
  int i;
  class edge_info *edge_info = (class edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables. */
  if (edge_info)
    {
      cond_equivalence *eq;
      /* If we have 0 = COND or 1 = COND equivalences, record them
         into our expression hash tables. */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
        avail_exprs_stack->record_cond (eq);

      edge_info::equiv_pair *seq;
      for (i = 0; edge_info->simple_equivalences.iterate (i, &seq); ++i)
        {
          tree lhs = seq->first;
          if (!lhs || TREE_CODE (lhs) != SSA_NAME)
            continue;

          /* Record the simple NAME = VALUE equivalence. */
          tree rhs = seq->second;

          /* If this is an SSA_NAME = SSA_NAME equivalence and one operand is
             cheaper to compute than the other, then set up the equivalence
             such that we replace the expensive one with the cheap one.

             If they are the same cost to compute, then do not record
             anything. */
          if (TREE_CODE (lhs) == SSA_NAME && TREE_CODE (rhs) == SSA_NAME)
            {
              gimple *rhs_def = SSA_NAME_DEF_STMT (rhs);
              int rhs_cost = estimate_num_insns (rhs_def, &eni_size_weights);

              gimple *lhs_def = SSA_NAME_DEF_STMT (lhs);
              int lhs_cost = estimate_num_insns (lhs_def, &eni_size_weights);

              if (rhs_cost > lhs_cost)
                record_equality (rhs, lhs, const_and_copies);
              else if (rhs_cost < lhs_cost)
                record_equality (lhs, rhs, const_and_copies);
            }
          else
            record_equality (lhs, rhs, const_and_copies);


          /* Any equivalence found for LHS may result in additional
             equivalences for other uses of LHS that we have already
             processed. */
          back_propagate_equivalences (lhs, e, const_and_copies);
        }
    }
}

/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence. */
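
/* For example (illustrative only): given

     x_4 = PHI <y_2, y_2, x_4>

   the alternative identical to the result is ignored, every remaining
   executable argument is y_2, and so we can record x_4 == y_2. */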

static void
record_equivalences_from_phis (basic_block bb)
{
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
    {
      gphi *phi = gsi.phi ();

      /* We might eliminate the PHI, so advance GSI now. */
      gsi_next (&gsi);

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS. Since
             LHS is a PHI_RESULT, it is known to be an SSA_NAME, so we
             can simply compare pointers. */
          if (lhs == t)
            continue;

          /* If the associated edge is not marked as executable, then it
             can be ignored. */
          if ((gimple_phi_arg_edge (phi, i)->flags & EDGE_EXECUTABLE) == 0)
            continue;

          t = dom_valueize (t);

          /* If T is an SSA_NAME and its associated edge is a backedge,
             then quit as we cannot utilize this equivalence. */
          if (TREE_CODE (t) == SSA_NAME
              && (gimple_phi_arg_edge (phi, i)->flags & EDGE_DFS_BACK))
            break;

          /* If we have not processed an alternative yet, then set
             RHS to this alternative. */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one. If it isn't, then stop
             the search. */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS. */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence. We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison. All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space. */
      if (i == gimple_phi_num_args (phi))
        {
          if (may_propagate_copy (lhs, rhs))
            set_ssa_name_value (lhs, rhs);
          else if (virtual_operand_p (lhs))
            {
              gimple *use_stmt;
              imm_use_iterator iter;
              use_operand_p use_p;
              /* For virtual operands we have to propagate into all uses as
                 otherwise we will create overlapping life-ranges. */
              FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
                FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
                  SET_USE (use_p, rhs);
              if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
                SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
              gimple_stmt_iterator tmp_gsi = gsi_for_stmt (phi);
              remove_phi_node (&tmp_gsi, true);
            }
        }
    }
}

/* Record any equivalences created by the incoming edge to BB into
   CONST_AND_COPIES and AVAIL_EXPRS_STACK. If BB has more than one
   incoming edge, then no equivalence is created. */

static void
record_equivalences_from_incoming_edge (basic_block bb,
                                        class const_and_copies *const_and_copies,
                                        class avail_exprs_stack *avail_exprs_stack)
{
  edge e;
  basic_block parent;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed. */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_pred_edge_ignoring_loop_edges (bb, true);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables. */
  if (e && e->src == parent)
    record_temporary_equivalences (e, const_and_copies, avail_exprs_stack);
}

/* Dump statistics for the hash table HTAB. */

static void
htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab.size (),
           (long) htab.elements (),
           htab.collisions ());
}

/* Dump SSA statistics on FILE. */

static void
dump_dominator_optimization_stats (FILE *file,
                                   hash_table<expr_elt_hasher> *avail_exprs)
{
  fprintf (file, "Total number of statements: %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, " avail_exprs: ");
  htab_statistics (file, *avail_exprs);
}


/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment. */
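
/* For example (illustrative only): when recording the equality implied
   by x_1 == y_2 on the true edge of a conditional, this routine picks
   which side becomes the canonical value, preferring an invariant (or a
   previously recorded invariant value) over an SSA_NAME so that copy
   propagation can replace names with constants where possible. */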

static void
record_equality (tree x, tree y, class const_and_copies *const_and_copies)
{
  tree prev_x = NULL, prev_y = NULL;

  if (tree_swap_operands_p (x, y))
    std::swap (x, y);

  /* Most of the time tree_swap_operands_p does what we want. But there
     are cases where we know one operand is better for copy propagation than
     the other. Given no other code cares about ordering of equality
     comparison operators for that purpose, we just handle the special cases
     here. */
  if (TREE_CODE (x) == SSA_NAME && TREE_CODE (y) == SSA_NAME)
    {
      /* If one operand is a single use operand, then make it
         X. This will preserve its single use properly and if this
         conditional is eliminated, the computation of X can be
         eliminated as well. */
      if (has_single_use (y) && ! has_single_use (x))
        std::swap (x, y);
    }
  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value. */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME. */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero. If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero. */
  if (HONOR_SIGNED_ZEROS (x)
      && (TREE_CODE (y) != REAL_CST
          || real_equal (&dconst0, &TREE_REAL_CST (y))))
    return;

  const_and_copies->record_const_or_copy (x, y, prev_x);
}

/* Returns true when STMT is a simple iv increment. It detects the
   following situation:

   i_1 = phi (..., i_k)
   [...]
   i_j = i_{j-1} for each j : 2 <= j <= k-1
   [...]
   i_k = i_{k-1} +/- ... */

bool
simple_iv_increment_p (gimple *stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple *phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  while (gimple_code (phi) != GIMPLE_PHI)
    {
      /* Follow trivial copies, but not the DEF used in a back edge,
         so that we don't prevent coalescing. */
      if (!gimple_assign_ssa_name_copy_p (phi))
        return false;
      preinc = gimple_assign_rhs1 (phi);
      phi = SSA_NAME_DEF_STMT (preinc);
    }

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}

/* Propagate known values from SSA_NAME_VALUE into the PHI nodes of the
   successors of BB. */
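
/* For example (illustrative only): if edge E out of BB carries the
   equivalence x_1 == 0, then a PHI in E->dest such as

     z_5 = PHI <x_1 (E), ...>

   can have its argument on E replaced by 0, even though E->dest itself
   need not be dominated by BB. */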

static void
cprop_into_successor_phis (basic_block bb,
                           class const_and_copies *const_and_copies)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gphi_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge. */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      /* We may have an equivalence associated with this edge. While
         we cannot propagate it into non-dominated blocks, we can
         propagate them into PHIs in non-dominated blocks. */

      /* Push the unwind marker so we can reset the const and copies
         table back to its original state after processing this edge. */
      const_and_copies->push_marker ();

      /* Extract and record any simple NAME = VALUE equivalences.

         Don't bother with [01] = COND equivalences, they're not useful
         here. */
      class edge_info *edge_info = (class edge_info *) e->aux;

      if (edge_info)
        {
          edge_info::equiv_pair *seq;
          for (int i = 0; edge_info->simple_equivalences.iterate (i, &seq); ++i)
            {
              tree lhs = seq->first;
              tree rhs = seq->second;

              if (lhs && TREE_CODE (lhs) == SSA_NAME)
                const_and_copies->record_const_or_copy (lhs, rhs);
            }

        }

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gphi *phi = gsi.phi ();

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it. */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table. */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }

      const_and_copies->pop_to_marker ();
    }
}

edge
dom_opt_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);

  evrp_range_analyzer.enter (bb);

  /* Push a marker on the stacks of local information so that we know how
     far to unwind when we finalize this block. */
  m_avail_exprs_stack->push_marker ();
  m_const_and_copies->push_marker ();

  record_equivalences_from_incoming_edge (bb, m_const_and_copies,
                                          m_avail_exprs_stack);

  /* PHI nodes can create equivalences too. */
  record_equivalences_from_phis (bb);

  /* Create equivalences from redundant PHIs. PHIs are only truly
     redundant when they exist in the same block, so push another
     marker and unwind right afterwards. */
  m_avail_exprs_stack->push_marker ();
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    eliminate_redundant_computations (&gsi, m_const_and_copies,
                                      m_avail_exprs_stack);
  m_avail_exprs_stack->pop_to_marker ();

  edge taken_edge = NULL;
  /* Initialize visited flag ahead of us, it has undefined state on
     pass entry. */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    gimple_set_visited (gsi_stmt (gsi), false);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
    {
      /* Do not optimize a stmt twice; substitution might end up with
         _3 = _3, which is not valid. */
      if (gimple_visited_p (gsi_stmt (gsi)))
        {
          gsi_next (&gsi);
          continue;
        }

      /* Compute range information and optimize the stmt. */
      evrp_range_analyzer.record_ranges_from_stmt (gsi_stmt (gsi), false);
      bool removed_p = false;
      taken_edge = this->optimize_stmt (bb, &gsi, &removed_p);
      if (!removed_p)
        gimple_set_visited (gsi_stmt (gsi), true);

      /* Go back and visit stmts inserted by folding after substituting
         into the stmt at gsi. */
      if (gsi_end_p (gsi))
        {
          gcc_checking_assert (removed_p);
          gsi = gsi_last_bb (bb);
          while (!gsi_end_p (gsi) && !gimple_visited_p (gsi_stmt (gsi)))
            gsi_prev (&gsi);
        }
      else
        {
          do
            {
              gsi_prev (&gsi);
            }
          while (!gsi_end_p (gsi) && !gimple_visited_p (gsi_stmt (gsi)));
        }
      if (gsi_end_p (gsi))
        gsi = gsi_start_bb (bb);
      else
        gsi_next (&gsi);
    }

  /* Now prepare to process dominated blocks. */
  record_edge_info (bb);
  cprop_into_successor_phis (bb, m_const_and_copies);
  if (taken_edge && !dbg_cnt (dom_unreachable_edges))
    return NULL;

  return taken_edge;
}

/* We have finished processing the dominator children of BB, perform
   any finalization actions in preparation for leaving this node in
   the dominator tree. */

void
dom_opt_dom_walker::after_dom_children (basic_block bb)
{
  x_vr_values = evrp_range_analyzer.get_vr_values ();
  thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
                         m_avail_exprs_stack,
                         &evrp_range_analyzer,
                         simplify_stmt_for_jump_threading);
  x_vr_values = NULL;

  /* These remove expressions local to BB from the tables. */
  m_avail_exprs_stack->pop_to_marker ();
  m_const_and_copies->pop_to_marker ();
  evrp_range_analyzer.leave (bb);
}

/* Search for redundant computations in STMT. If any are found, then
   replace them with the variable holding the result of the computation.

   If safe, record this expression into AVAIL_EXPRS_STACK and
   CONST_AND_COPIES. */
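
/* For example (illustrative only): given

     t_3 = a_1 + b_2;
     ...
     t_5 = a_1 + b_2;

   the second computation hashes to the same available expression as the
   first, so it can be replaced by the cheaper copy t_5 = t_3, which may
   itself then be propagated. */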

static void
eliminate_redundant_computations (gimple_stmt_iterator* gsi,
                                  class const_and_copies *const_and_copies,
                                  class avail_exprs_stack *avail_exprs_stack)
{
  tree expr_type;
  tree cached_lhs;
  tree def;
  bool insert = true;
  bool assigns_var_p = false;

  gimple *stmt = gsi_stmt (*gsi);

  if (gimple_code (stmt) == GIMPLE_PHI)
    def = gimple_phi_result (stmt);
  else
    def = gimple_get_lhs (stmt);

  /* Certain expressions on the RHS can be optimized away, but cannot
     themselves be entered into the hash tables. */
  if (! def
      || TREE_CODE (def) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
      || gimple_vdef (stmt)
      /* Do not record equivalences for increments of ivs. This would create
         overlapping live ranges for a very questionable gain. */
      || simple_iv_increment_p (stmt))
    insert = false;

  /* Check if the expression has been computed before. */
  cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, insert, true);

  opt_stats.num_exprs_considered++;

  /* Get the type of the expression we are trying to optimize. */
  if (is_gimple_assign (stmt))
    {
      expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    expr_type = boolean_type_node;
  else if (is_gimple_call (stmt))
    {
      gcc_assert (gimple_call_lhs (stmt));
      expr_type = TREE_TYPE (gimple_call_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
  else if (gimple_code (stmt) == GIMPLE_PHI)
    /* We can't propagate into a phi, so the logic below doesn't apply.
       Instead record an equivalence between the cached LHS and the
       PHI result of this statement, provided they are in the same block.
       This should be sufficient to kill the redundant phi. */
    {
      if (def && cached_lhs)
        const_and_copies->record_const_or_copy (def, cached_lhs);
      return;
    }
  else
    gcc_unreachable ();

  if (!cached_lhs)
    return;

  /* It is safe to ignore types here since we have already done
     type checking in the hashing and equality routines. In fact
     type checking here merely gets in the way of constant
     propagation. Also, make sure that it is safe to propagate
     CACHED_LHS into the expression in STMT. */
  if ((TREE_CODE (cached_lhs) != SSA_NAME
       && (assigns_var_p
           || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
      || may_propagate_copy_into_stmt (stmt, cached_lhs))
    {
      gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
                           || is_gimple_min_invariant (cached_lhs));

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, " Replaced redundant expr '");
          print_gimple_expr (dump_file, stmt, 0, dump_flags);
          fprintf (dump_file, "' with '");
          print_generic_expr (dump_file, cached_lhs, dump_flags);
          fprintf (dump_file, "'\n");
        }

      opt_stats.num_re++;

      if (assigns_var_p
          && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
        cached_lhs = fold_convert (expr_type, cached_lhs);

      propagate_tree_value_into_stmt (gsi, cached_lhs);

      /* Since it is always necessary to mark the result as modified,
         perhaps we should move this into propagate_tree_value_into_stmt
         itself. */
      gimple_set_modified (gsi_stmt (*gsi), true);
    }
}

/* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
   the available expressions table or the const_and_copies table.
   Detect and record those equivalences into AVAIL_EXPRS_STACK.

   We handle only very simple copy equivalences here. The heavy
   lifting is done by eliminate_redundant_computations. */
1671
1672 static void
1673 record_equivalences_from_stmt (gimple *stmt, int may_optimize_p,
1674 class avail_exprs_stack *avail_exprs_stack)
1675 {
1676 tree lhs;
1677 enum tree_code lhs_code;
1678
1679 gcc_assert (is_gimple_assign (stmt));
1680
1681 lhs = gimple_assign_lhs (stmt);
1682 lhs_code = TREE_CODE (lhs);
1683
1684 if (lhs_code == SSA_NAME
1685 && gimple_assign_single_p (stmt))
1686 {
1687 tree rhs = gimple_assign_rhs1 (stmt);
1688
1689 /* If the RHS of the assignment is a constant or another variable that
1690 may be propagated, register it in the CONST_AND_COPIES table. We
1691 do not need to record unwind data for this, since this is a true
1692 assignment and not an equivalence inferred from a comparison. All
1693 uses of this ssa name are dominated by this assignment, so unwinding
1694 just costs time and space. */
1695 if (may_optimize_p
1696 && (TREE_CODE (rhs) == SSA_NAME
1697 || is_gimple_min_invariant (rhs)))
1698 {
1699 rhs = dom_valueize (rhs);
1700
1701 if (dump_file && (dump_flags & TDF_DETAILS))
1702 {
1703 fprintf (dump_file, "==== ASGN ");
1704 print_generic_expr (dump_file, lhs);
1705 fprintf (dump_file, " = ");
1706 print_generic_expr (dump_file, rhs);
1707 fprintf (dump_file, "\n");
1708 }
1709
1710 set_ssa_name_value (lhs, rhs);
1711 }
1712 }
1713
1714 /* Make sure we can propagate &x + CST. */
1715 if (lhs_code == SSA_NAME
1716 && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
1717 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
1718 && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
1719 {
1720 tree op0 = gimple_assign_rhs1 (stmt);
1721 tree op1 = gimple_assign_rhs2 (stmt);
1722 tree new_rhs
1723 = build_fold_addr_expr (fold_build2 (MEM_REF,
1724 TREE_TYPE (TREE_TYPE (op0)),
1725 unshare_expr (op0),
1726 fold_convert (ptr_type_node,
1727 op1)));
1728 if (dump_file && (dump_flags & TDF_DETAILS))
1729 {
1730 fprintf (dump_file, "==== ASGN ");
1731 print_generic_expr (dump_file, lhs);
1732 fprintf (dump_file, " = ");
1733 print_generic_expr (dump_file, new_rhs);
1734 fprintf (dump_file, "\n");
1735 }
1736
1737 set_ssa_name_value (lhs, new_rhs);
1738 }
1739
1740 /* A memory store, even an aliased store, creates a useful
1741 equivalence. By exchanging the LHS and RHS, creating suitable
1742 vops and recording the result in the available expression table,
1743 we may be able to expose more redundant loads. */
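 /* For example (illustrative): after the store *p_1 = x_2 we enter
    the artificial statement x_2 = *p_1 into the table, so a
    dominated load y_3 = *p_1 can later be rewritten to
    y_3 = x_2.  */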
1744 if (!gimple_has_volatile_ops (stmt)
1745 && gimple_references_memory_p (stmt)
1746 && gimple_assign_single_p (stmt)
1747 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
1748 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
1749 && !is_gimple_reg (lhs))
1750 {
1751 tree rhs = gimple_assign_rhs1 (stmt);
1752 gassign *new_stmt;
1753
1754 /* Build a new statement with the RHS and LHS exchanged. */
1755 if (TREE_CODE (rhs) == SSA_NAME)
1756 {
1757 /* NOTE tuples. The call to gimple_build_assign below replaced
1758 a call to build_gimple_modify_stmt, which did not set the
1759 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
1760 may cause an SSA validation failure, as the LHS may be a
1761 default-initialized name and should have no definition. I'm
1762 a bit dubious of this, as the artificial statement that we
1763 generate here may in fact be ill-formed, but it is simply
1764 used as an internal device in this pass, and never becomes
1765 part of the CFG. */
1766 gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
1767 new_stmt = gimple_build_assign (rhs, lhs);
1768 SSA_NAME_DEF_STMT (rhs) = defstmt;
1769 }
1770 else
1771 new_stmt = gimple_build_assign (rhs, lhs);
1772
1773 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
1774
1775 /* Finally enter the statement into the available expression
1776 table. */
1777 avail_exprs_stack->lookup_avail_expr (new_stmt, true, true);
1778 }
1779 }
1780
1781 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
1782 CONST_AND_COPIES. */
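 /* For example (illustrative): if SSA_NAME_VALUE (a_1) is the
    constant 5, a use of a_1 in STMT is replaced by 5 and the
    statement is marked modified.  */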
1783
1784 static void
1785 cprop_operand (gimple *stmt, use_operand_p op_p, vr_values *vr_values)
1786 {
1787 tree val;
1788 tree op = USE_FROM_PTR (op_p);
1789
1790 /* If the operand has a known constant value or it is known to be a
1791 copy of some other variable, use the value or copy stored in
1792 CONST_AND_COPIES. */
1793 val = SSA_NAME_VALUE (op);
1794 if (!val)
1795 val = vr_values->op_with_constant_singleton_value_range (op);
1796
1797 if (val && val != op)
1798 {
1799 /* Do not replace hard register operands in asm statements. */
1800 if (gimple_code (stmt) == GIMPLE_ASM
1801 && !may_propagate_copy_into_asm (op))
1802 return;
1803
1804 /* Certain operands are not allowed to be copy propagated due
1805 to their interaction with exception handling and some GCC
1806 extensions. */
1807 if (!may_propagate_copy (op, val))
1808 return;
1809
1810 /* Do not propagate copies into BIVs.
1811 See PR23821 and PR62217 for how this can disturb IV and
1812 number of iteration analysis. */
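 /* Illustrative: if OP is i_1 defined by
    i_1 = PHI <0(preheader), i_5(latch)> in a loop header,
    replacing its uses with another SSA name would hide the
    induction variable from SCEV and niter analysis.  */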
1813 if (TREE_CODE (val) != INTEGER_CST)
1814 {
1815 gimple *def = SSA_NAME_DEF_STMT (op);
1816 if (gimple_code (def) == GIMPLE_PHI
1817 && gimple_bb (def)->loop_father->header == gimple_bb (def))
1818 return;
1819 }
1820
1821 /* Dump details. */
1822 if (dump_file && (dump_flags & TDF_DETAILS))
1823 {
1824 fprintf (dump_file, " Replaced '");
1825 print_generic_expr (dump_file, op, dump_flags);
1826 fprintf (dump_file, "' with %s '",
1827 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
1828 print_generic_expr (dump_file, val, dump_flags);
1829 fprintf (dump_file, "'\n");
1830 }
1831
1832 if (TREE_CODE (val) != SSA_NAME)
1833 opt_stats.num_const_prop++;
1834 else
1835 opt_stats.num_copy_prop++;
1836
1837 propagate_value (op_p, val);
1838
1839 /* And note that we modified this statement. This is now
1840 safe, even if we changed virtual operands since we will
1841 rescan the statement and rewrite its operands again. */
1842 gimple_set_modified (stmt, true);
1843 }
1844 }
1845
1846 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
1847 known value for that SSA_NAME (or NULL if no value is known).
1848
1849 Propagate values from CONST_AND_COPIES into the uses, vuses and
1850 vdef_ops of STMT. */
1851
1852 static void
1853 cprop_into_stmt (gimple *stmt, vr_values *vr_values)
1854 {
1855 use_operand_p op_p;
1856 ssa_op_iter iter;
1857 tree last_copy_propagated_op = NULL;
1858
1859 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
1860 {
1861 tree old_op = USE_FROM_PTR (op_p);
1862
1863 /* If we have A = B and B = A in the copy propagation tables
1864 (due to an equality comparison), avoid substituting B for A
1865 then A for B in the trivially discovered cases. This allows
1866 optimization of statements where A and B appear as input
1867 operands. */
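 /* E.g. (illustrative) with a_1 -> b_2 and b_2 -> a_1 both
    recorded, c_3 = a_1 + b_2 first becomes c_3 = b_2 + b_2; the
    guard then leaves the second b_2 alone instead of turning it
    back into a_1.  */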
1868 if (old_op != last_copy_propagated_op)
1869 {
1870 cprop_operand (stmt, op_p, vr_values);
1871
1872 tree new_op = USE_FROM_PTR (op_p);
1873 if (new_op != old_op && TREE_CODE (new_op) == SSA_NAME)
1874 last_copy_propagated_op = new_op;
1875 }
1876 }
1877 }
1878
1879 /* If STMT contains a relational test, try to convert it into an
1880 equality test if there is only a single value which can ever
1881 make the test true.
1882
1883 For example, if the expression hash table contains:
1884
1885 TRUE = (i <= 1)
1886
1887 and we have a test within a statement of i >= 1, then we can safely
1888 rewrite the test as i == 1, since there is only a single value where
1889 the test is true.
1890
1891 This is similar to code in VRP. */
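 /* A C-level sketch of the rewrite (hypothetical input):

      if (i <= 1)       // true edge records TRUE = (i <= 1)
        {
          if (i >= 1)   // rewritten to if (i == 1)
            ...
        }
 */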
1892
1893 static void
1894 test_for_singularity (gimple *stmt, gcond *dummy_cond,
1895 avail_exprs_stack *avail_exprs_stack)
1896 {
1897 /* We want to support gimple conditionals as well as assignments
1898 where the RHS contains a conditional. */
1899 if (is_gimple_assign (stmt) || gimple_code (stmt) == GIMPLE_COND)
1900 {
1901 enum tree_code code = ERROR_MARK;
1902 tree lhs, rhs;
1903
1904 /* Extract the condition of interest from both forms we support. */
1905 if (is_gimple_assign (stmt))
1906 {
1907 code = gimple_assign_rhs_code (stmt);
1908 lhs = gimple_assign_rhs1 (stmt);
1909 rhs = gimple_assign_rhs2 (stmt);
1910 }
1911 else if (gimple_code (stmt) == GIMPLE_COND)
1912 {
1913 code = gimple_cond_code (as_a <gcond *> (stmt));
1914 lhs = gimple_cond_lhs (as_a <gcond *> (stmt));
1915 rhs = gimple_cond_rhs (as_a <gcond *> (stmt));
1916 }
1917
1918 /* We're looking for a relational test using LE/GE. Also note we can
1919 canonicalize LT/GT tests against constants into LE/GE tests. */
1920 if (code == LE_EXPR || code == GE_EXPR
1921 || ((code == LT_EXPR || code == GT_EXPR)
1922 && TREE_CODE (rhs) == INTEGER_CST))
1923 {
1924 /* For LT_EXPR and GT_EXPR, canonicalize to LE_EXPR and GE_EXPR. */
1925 if (code == LT_EXPR)
1926 rhs = fold_build2 (MINUS_EXPR, TREE_TYPE (rhs),
1927 rhs, build_int_cst (TREE_TYPE (rhs), 1));
1928
1929 if (code == GT_EXPR)
1930 rhs = fold_build2 (PLUS_EXPR, TREE_TYPE (rhs),
1931 rhs, build_int_cst (TREE_TYPE (rhs), 1));
1932
1933 /* Determine the code we want to check for in the hash table. */
1934 enum tree_code test_code;
1935 if (code == GE_EXPR || code == GT_EXPR)
1936 test_code = LE_EXPR;
1937 else
1938 test_code = GE_EXPR;
1939
1940 /* Update the dummy statement so we can query the hash tables. */
1941 gimple_cond_set_code (dummy_cond, test_code);
1942 gimple_cond_set_lhs (dummy_cond, lhs);
1943 gimple_cond_set_rhs (dummy_cond, rhs);
1944 tree cached_lhs
1945 = avail_exprs_stack->lookup_avail_expr (dummy_cond, false, false);
1946
1947 /* If the lookup returned integer one (true), then the condition
1948 we queried is known to be true on this path. As a result there
1949 is only one value that makes the original conditional true. Update
1950 STMT accordingly. */
1951 if (cached_lhs && integer_onep (cached_lhs))
1952 {
1953 if (is_gimple_assign (stmt))
1954 {
1955 gimple_assign_set_rhs_code (stmt, EQ_EXPR);
1956 gimple_assign_set_rhs2 (stmt, rhs);
1957 gimple_set_modified (stmt, true);
1958 }
1959 else
1960 {
1962 gimple_cond_set_code (as_a <gcond *> (stmt), EQ_EXPR);
1963 gimple_cond_set_rhs (as_a <gcond *> (stmt), rhs);
1964 gimple_set_modified (stmt, true);
1965 }
1966 }
1967 }
1968 }
1969 }
1970
1971 /* Optimize the statement in block BB pointed to by iterator SI.
1972
1973 We try to perform some simplistic global redundancy elimination and
1974 constant propagation:
1975
1976 1- To detect global redundancy, we keep track of expressions that have
1977 been computed in this block and its dominators. If we find that the
1978 same expression is computed more than once, we eliminate repeated
1979 computations by using the target of the first one.
1980
1981 2- Constant values and copy assignments. This is used to do very
1982 simplistic constant and copy propagation. When a constant or copy
1983 assignment is found, we map the value on the RHS of the assignment to
1984 the variable in the LHS in the CONST_AND_COPIES table.
1985
1986 3- Very simple redundant store elimination is performed.
1987
1988 4- We can simplify a condition to a constant or from a relational
1989 condition to an equality condition. */
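 /* An illustrative (hypothetical) fragment showing points 1-3 above:

      a_1 = b_2 + c_3;
      x_4 = b_2 + c_3;    // 1: redundant, becomes x_4 = a_1
      y_5 = x_4;          // 2: records the copy y_5 -> a_1
      *p_6 = a_1;
      *p_6 = y_5;         // 3: stores the value already there; removed
 */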
1990
1991 edge
1992 dom_opt_dom_walker::optimize_stmt (basic_block bb, gimple_stmt_iterator *si,
1993 bool *removed_p)
1994 {
1995 gimple *stmt, *old_stmt;
1996 bool may_optimize_p;
1997 bool modified_p = false;
1998 bool was_noreturn;
1999 edge retval = NULL;
2000
2001 old_stmt = stmt = gsi_stmt (*si);
2002 was_noreturn = is_gimple_call (stmt) && gimple_call_noreturn_p (stmt);
2003
2004 if (dump_file && (dump_flags & TDF_DETAILS))
2005 {
2006 fprintf (dump_file, "Optimizing statement ");
2007 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2008 }
2009
2010 update_stmt_if_modified (stmt);
2011 opt_stats.num_stmts++;
2012
2013 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2014 cprop_into_stmt (stmt, evrp_range_analyzer.get_vr_values ());
2015
2016 /* If the statement has been modified with constant replacements,
2017 fold its RHS before checking for redundant computations. */
2018 if (gimple_modified_p (stmt))
2019 {
2020 tree rhs = NULL;
2021
2022 /* Try to fold the statement making sure that STMT is kept
2023 up to date. */
2024 if (fold_stmt (si))
2025 {
2026 stmt = gsi_stmt (*si);
2027 gimple_set_modified (stmt, true);
2028
2029 if (dump_file && (dump_flags & TDF_DETAILS))
2030 {
2031 fprintf (dump_file, " Folded to: ");
2032 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2033 }
2034 }
2035
2036 /* We only need to consider cases that can yield a gimple operand. */
2037 if (gimple_assign_single_p (stmt))
2038 rhs = gimple_assign_rhs1 (stmt);
2039 else if (gimple_code (stmt) == GIMPLE_GOTO)
2040 rhs = gimple_goto_dest (stmt);
2041 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2042 /* This should never be an ADDR_EXPR. */
2043 rhs = gimple_switch_index (swtch_stmt);
2044
2045 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2046 recompute_tree_invariant_for_addr_expr (rhs);
2047
2048 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2049 even if fold_stmt updated the stmt already and thus cleared the
2050 gimple_modified_p flag on it. */
2051 modified_p = true;
2052 }
2053
2054 /* Check for redundant computations. Do this optimization only
2055 for assignments and conditionals that have no side effects. */
2056 may_optimize_p = (!gimple_has_side_effects (stmt)
2057 && (is_gimple_assign (stmt)
2058 || (is_gimple_call (stmt)
2059 && gimple_call_lhs (stmt) != NULL_TREE)
2060 || gimple_code (stmt) == GIMPLE_COND
2061 || gimple_code (stmt) == GIMPLE_SWITCH));
2062
2063 if (may_optimize_p)
2064 {
2065 if (gimple_code (stmt) == GIMPLE_CALL)
2066 {
2067 /* Resolve __builtin_constant_p. If it hasn't been
2068 folded to integer_one_node by now, it's fairly
2069 certain that the value simply isn't constant. */
2070 tree callee = gimple_call_fndecl (stmt);
2071 if (callee
2072 && fndecl_built_in_p (callee, BUILT_IN_CONSTANT_P))
2073 {
2074 propagate_tree_value_into_stmt (si, integer_zero_node);
2075 stmt = gsi_stmt (*si);
2076 }
2077 }
2078
2079 if (gimple_code (stmt) == GIMPLE_COND)
2080 {
2081 tree lhs = gimple_cond_lhs (stmt);
2082 tree rhs = gimple_cond_rhs (stmt);
2083
2084 /* If the LHS has a range [0..1] and the RHS has a range ~[0..1],
2085 then this conditional is computable at compile time. We can just
2086 shove either 0 or 1 into the LHS, mark the statement as modified
2087 and all the right things will just happen below.
2088
2089 Note this would apply to any case where LHS has a range
2090 narrower than its type implies and RHS is outside that
2091 narrower range. Future work. */
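	      /* Illustrative: for if (b_1 == 7) where b_1 has a [0, 1]
	         range, substituting 0 for b_1 gives if (0 == 7), which
	         the code further below folds and uses to find the
	         taken edge.  */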
2092 if (TREE_CODE (lhs) == SSA_NAME
2093 && ssa_name_has_boolean_range (lhs)
2094 && TREE_CODE (rhs) == INTEGER_CST
2095 && ! (integer_zerop (rhs) || integer_onep (rhs)))
2096 {
2097 gimple_cond_set_lhs (as_a <gcond *> (stmt),
2098 fold_convert (TREE_TYPE (lhs),
2099 integer_zero_node));
2100 gimple_set_modified (stmt, true);
2101 }
2102 else if (TREE_CODE (lhs) == SSA_NAME)
2103 {
2104 /* Exploiting EVRP data is not yet fully integrated into DOM,
2105 but we need to do something for this case to avoid regressing
2106 udr4.f90 and new1.C, which have unexecutable blocks with
2107 undefined behavior that get diagnosed if they're left in the
2108 IL because we've attached range information to new
2109 SSA_NAMES. */
2110 update_stmt_if_modified (stmt);
2111 edge taken_edge = NULL;
2112 evrp_range_analyzer.vrp_visit_cond_stmt (as_a <gcond *> (stmt),
2113 &taken_edge);
2114 if (taken_edge)
2115 {
2116 if (taken_edge->flags & EDGE_TRUE_VALUE)
2117 gimple_cond_make_true (as_a <gcond *> (stmt));
2118 else if (taken_edge->flags & EDGE_FALSE_VALUE)
2119 gimple_cond_make_false (as_a <gcond *> (stmt));
2120 else
2121 gcc_unreachable ();
2122 gimple_set_modified (stmt, true);
2123 update_stmt (stmt);
2124 cfg_altered = true;
2125 return taken_edge;
2126 }
2127 }
2128 }
2129
2130 update_stmt_if_modified (stmt);
2131 eliminate_redundant_computations (si, m_const_and_copies,
2132 m_avail_exprs_stack);
2133 stmt = gsi_stmt (*si);
2134
2135 /* Perform simple redundant store elimination. */
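	  /* I.e. (illustrative) if a dominating load or store has
	     established that *p_1 already contains x_2, then a store
	     *p_1 = x_2 is redundant and is removed.  */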
2136 if (gimple_assign_single_p (stmt)
2137 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2138 {
2139 tree lhs = gimple_assign_lhs (stmt);
2140 tree rhs = gimple_assign_rhs1 (stmt);
2141 tree cached_lhs;
2142 gassign *new_stmt;
2143 rhs = dom_valueize (rhs);
2144 /* Build a new statement with the RHS and LHS exchanged. */
2145 if (TREE_CODE (rhs) == SSA_NAME)
2146 {
2147 gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
2148 new_stmt = gimple_build_assign (rhs, lhs);
2149 SSA_NAME_DEF_STMT (rhs) = defstmt;
2150 }
2151 else
2152 new_stmt = gimple_build_assign (rhs, lhs);
2153 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2154 cached_lhs = m_avail_exprs_stack->lookup_avail_expr (new_stmt, false,
2155 false);
2156 if (cached_lhs && operand_equal_p (rhs, cached_lhs, 0))
2157 {
2158 basic_block bb = gimple_bb (stmt);
2159 unlink_stmt_vdef (stmt);
2160 if (gsi_remove (si, true))
2161 {
2162 bitmap_set_bit (need_eh_cleanup, bb->index);
2163 if (dump_file && (dump_flags & TDF_DETAILS))
2164 fprintf (dump_file, " Flagged to clear EH edges.\n");
2165 }
2166 release_defs (stmt);
2167 *removed_p = true;
2168 return retval;
2169 }
2170 }
2171
2172 /* If this statement was not redundant, we may still be able to simplify
2173 it, which may in turn allow other parts of DOM or other passes to do
2174 a better job. */
2175 test_for_singularity (stmt, m_dummy_cond, m_avail_exprs_stack);
2176 }
2177
2178 /* Record any additional equivalences created by this statement. */
2179 if (is_gimple_assign (stmt))
2180 record_equivalences_from_stmt (stmt, may_optimize_p, m_avail_exprs_stack);
2181
2182 /* If STMT is a GIMPLE_COND or GIMPLE_SWITCH and it was modified, then
2183 we may know which outgoing edge it will take. */
2184 if (gimple_modified_p (stmt) || modified_p)
2185 {
2186 tree val = NULL;
2187
2188 if (gimple_code (stmt) == GIMPLE_COND)
2189 val = fold_binary_loc (gimple_location (stmt),
2190 gimple_cond_code (stmt), boolean_type_node,
2191 gimple_cond_lhs (stmt),
2192 gimple_cond_rhs (stmt));
2193 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2194 val = gimple_switch_index (swtch_stmt);
2195
2196 if (val && TREE_CODE (val) == INTEGER_CST)
2197 {
2198 retval = find_taken_edge (bb, val);
2199 if (retval)
2200 {
2201 /* Fix the condition to be either true or false. */
2202 if (gimple_code (stmt) == GIMPLE_COND)
2203 {
2204 if (integer_zerop (val))
2205 gimple_cond_make_false (as_a <gcond *> (stmt));
2206 else if (integer_onep (val))
2207 gimple_cond_make_true (as_a <gcond *> (stmt));
2208 else
2209 gcc_unreachable ();
2210
2211 gimple_set_modified (stmt, true);
2212 }
2213
2214 /* Further simplifications may be possible. */
2215 cfg_altered = true;
2216 }
2217 }
2218
2219 update_stmt_if_modified (stmt);
2220
2221 /* If we simplified the statement in such a way that it can no
2222 longer trap, update the EH information and the CFG to match. */
2223 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2224 {
2225 bitmap_set_bit (need_eh_cleanup, bb->index);
2226 if (dump_file && (dump_flags & TDF_DETAILS))
2227 fprintf (dump_file, " Flagged to clear EH edges.\n");
2228 }
2229
2230 if (!was_noreturn
2231 && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
2232 need_noreturn_fixup.safe_push (stmt);
2233 }
2234 return retval;
2235 }