/* Optimization of PHI nodes by converting them into straightline code.
   Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "timevar.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "tree-dump.h"
#include "langhooks.h"
#include "pointer-set.h"
#include "domwalk.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "tree-pretty-print.h"

static unsigned int tree_ssa_phiopt (void);
static unsigned int tree_ssa_phiopt_worker (bool);
static bool conditional_replacement (basic_block, basic_block,
                                     edge, edge, gimple, tree, tree);
static int value_replacement (basic_block, basic_block,
                              edge, edge, gimple, tree, tree);
static bool minmax_replacement (basic_block, basic_block,
                                edge, edge, gimple, tree, tree);
static bool abs_replacement (basic_block, basic_block,
                             edge, edge, gimple, tree, tree);
static bool cond_store_replacement (basic_block, basic_block, edge, edge,
                                    struct pointer_set_t *);
static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
static struct pointer_set_t * get_non_trapping (void);
static void replace_phi_edge_with_variable (basic_block, edge, gimple, tree);

/* This pass tries to replace an if-then-else block with an
   assignment.  We have four kinds of transformations.  Some of these
   transformations are also performed by the ifcvt RTL optimizer.

   Conditional Replacement
   -----------------------

   This transformation, implemented in conditional_replacement,
   replaces

     bb0:
      if (cond) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <0 (bb1), 1 (bb0), ...>;

   with

     bb0:
      x' = cond;
      goto bb2;
     bb2:
      x = PHI <x' (bb0), ...>;

   We remove bb1 as it becomes unreachable.  This occurs often due to
   gimplification of conditionals.

   Value Replacement
   -----------------

   This transformation, implemented in value_replacement, replaces

     bb0:
      if (a != b) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <a (bb1), b (bb0), ...>;

   with

     bb0:
     bb2:
      x = PHI <b (bb0), ...>;

   This opportunity can sometimes occur as a result of other
   optimizations.

   ABS Replacement
   ---------------

   This transformation, implemented in abs_replacement, replaces

     bb0:
      if (a >= 0) goto bb2; else goto bb1;
     bb1:
      x = -a;
     bb2:
      x = PHI <x (bb1), a (bb0), ...>;

   with

     bb0:
      x' = ABS_EXPR< a >;
     bb2:
      x = PHI <x' (bb0), ...>;

   MIN/MAX Replacement
   -------------------

   This transformation, implemented in minmax_replacement, replaces

     bb0:
      if (a <= b) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <b (bb1), a (bb0), ...>;

   with

     bb0:
      x' = MIN_EXPR (a, b)
     bb2:
      x = PHI <x' (bb0), ...>;

   A similar transformation is done for MAX_EXPR.  */

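/* As a concrete illustration (source-level sketches; the exact GIMPLE
   depends on the front end and earlier passes), the four transformations
   above fire on code such as:

     int is_neg (int a)        { return a < 0 ? 1 : 0; }    // conditional
     int pick (int a, int b)   { return a != b ? b : a; }   // value
     int iabs (int a)          { return a >= 0 ? a : -a; }  // abs
     int imin (int a, int b)   { return a <= b ? a : b; }   // min/max

   In each case the if-then-else and its PHI collapse into a single
   straight-line statement.  */
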
static unsigned int
tree_ssa_phiopt (void)
{
  return tree_ssa_phiopt_worker (false);
}

/* This pass tries to transform conditional stores into unconditional
   ones, enabling further simplifications with the simpler then and else
   blocks.  In particular it replaces this:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS;
     bb2:

   with

     bb0:
       if (cond) goto bb1; else goto bb2;
     bb1:
       condtmp' = *p;
     bb2:
       condtmp = PHI <RHS, condtmp'>
       *p = condtmp;

   This transformation can only be done under several constraints,
   documented below.  It also replaces:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS1;
       goto bb3;
     bb2:
       *p = RHS2;
     bb3:

   with

     bb0:
       if (cond) goto bb3; else goto bb1;
     bb1:
     bb3:
       condtmp = PHI <RHS1, RHS2>
       *p = condtmp;  */

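/* For instance (a source-level sketch), with -ftree-cselim the guarded
   store in

     void f (int *p, int cond)
     {
       *p = 0;
       if (cond)
         *p = 1;
     }

   is made unconditional: the old value of *p is loaded on the other
   path and a PHI of the two values is stored.  Note this is only valid
   because the dominating store *p = 0 proves the access cannot trap
   (see get_non_trapping below).  */
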
static unsigned int
tree_ssa_cs_elim (void)
{
  return tree_ssa_phiopt_worker (true);
}

/* Return the single PHI node in SEQ whose arguments for edges E0 and E1
   differ; if SEQ is a singleton, return that PHI.  Return NULL if there
   is no such PHI or if more than one qualifies.  */

static gimple
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  gimple phi = NULL;
  if (gimple_seq_singleton_p (seq))
    return gsi_stmt (gsi_start (seq));
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gimple p = gsi_stmt (i);
      /* If the PHI arguments are equal then we can skip this PHI. */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
                                       gimple_phi_arg_def (p, e1->dest_idx)))
        continue;

      /* If we already found a PHI whose arguments for the two edges
         differ, then there is more than one such PHI; return NULL.  */
      if (phi)
        return NULL;

      phi = p;
    }
  return phi;
}

/* For conditional store replacement we need a temporary to
   put the old contents of the memory in.  */
static tree condstoretemp;

/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.  */
static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;
  struct pointer_set_t *nontrap = 0;

  if (do_store_elim)
    {
      condstoretemp = NULL_TREE;
      /* Calculate the set of non-trapping memory accesses.  */
      nontrap = get_non_trapping ();
    }

  /* Search every basic block for a COND_EXPR we may be able to optimize.

     We walk the blocks in an order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = blocks_in_phiopt_order ();
  n = n_basic_blocks - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++)
    {
      gimple cond_stmt, phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;

      bb = bb_order[i];

      cond_stmt = last_stmt (bb);
      /* Check to see if the last statement is a GIMPLE_COND.  */
      if (!cond_stmt
          || gimple_code (cond_stmt) != GIMPLE_COND)
        continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
          || (e2->flags & EDGE_ABNORMAL) != 0)
        continue;

      /* If bb1 has no successors, or bb2 is missing or has no
         successors, there is nothing to do.  */
      if (EDGE_COUNT (bb1->succs) == 0
          || bb2 == NULL
          || EDGE_COUNT (bb2->succs) == 0)
        continue;

      /* Find which bb falls through to the other, and canonicalize so
         that bb1 is the one that does.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
        ;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
        {
          basic_block bb_tmp = bb1;
          edge e_tmp = e1;
          bb1 = bb2;
          bb2 = bb_tmp;
          e1 = e2;
          e2 = e_tmp;
        }
      else if (do_store_elim
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!single_succ_p (bb1)
              || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
              || !single_succ_p (bb2)
              || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
              || EDGE_COUNT (bb3->preds) != 2)
            continue;
          if (cond_if_else_store_replacement (bb1, bb2, bb3))
            cfgchanged = true;
          continue;
        }
      else
        continue;

      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
          || (e1->flags & EDGE_FALLTHRU) == 0)
        continue;

      /* Also make sure that bb1 has only one predecessor and that it
         is bb.  */
      if (!single_pred_p (bb1)
          || single_pred (bb1) != bb)
        continue;

      if (do_store_elim)
        {
          /* bb1 is the middle block, bb2 the join block, bb the split block,
             e1 the fallthrough edge from bb1 to bb2.  We can't do the
             optimization if the join block has more than two predecessors.  */
          if (EDGE_COUNT (bb2->preds) > 2)
            continue;
          if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
            cfgchanged = true;
        }
      else
        {
          gimple_seq phis = phi_nodes (bb2);
          gimple_stmt_iterator gsi;
          bool candorest = true;

          /* Value replacement can work with more than one PHI
             so try that first. */
          for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
            {
              phi = gsi_stmt (gsi);
              arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
              arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
              if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
                {
                  candorest = false;
                  cfgchanged = true;
                  break;
                }
            }

          if (!candorest)
            continue;

          phi = single_non_singleton_phi_for_edges (phis, e1, e2);
          if (!phi)
            continue;

          arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
          arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

          /* Something is wrong if we cannot find the arguments in the PHI
             node.  */
          gcc_assert (arg0 != NULL && arg1 != NULL);

          /* Do the replacement of conditional if it can be done.  */
          if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
        }
    }

  free (bb_order);

  if (do_store_elim)
    pointer_set_destroy (nontrap);
  /* If the CFG has changed, we should cleanup the CFG.  */
  if (cfgchanged && do_store_elim)
    {
      /* In cond-store replacement we have added some loads on edges
         and new VOPS (as we moved the store, and created a load).  */
      gsi_commit_edge_inserts ();
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
  else if (cfgchanged)
    return TODO_cleanup_cfg;
  return 0;
}

/* Returns the list of basic blocks in the function in an order that guarantees
   that if a block X has just a single predecessor Y, then Y is after X in the
   ordering.  */
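
/* For example (a sketch): if the CFG contains the chain A -> B -> C,
   where B's only predecessor is A and C's only predecessor is B, then
   the returned order lists C before B and B before A, so the worker
   above processes the innermost block first.  */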

basic_block *
blocks_in_phiopt_order (void)
{
  basic_block x, y;
  basic_block *order = XNEWVEC (basic_block, n_basic_blocks);
  unsigned n = n_basic_blocks - NUM_FIXED_BLOCKS;
  unsigned np, i;
  sbitmap visited = sbitmap_alloc (last_basic_block);

#define MARK_VISITED(BB) (SET_BIT (visited, (BB)->index))
#define VISITED_P(BB) (TEST_BIT (visited, (BB)->index))

  sbitmap_zero (visited);

  MARK_VISITED (ENTRY_BLOCK_PTR);
  FOR_EACH_BB (x)
    {
      if (VISITED_P (x))
        continue;

      /* Walk the predecessors of x as long as they have precisely one
         predecessor and add them to the list, so that they get stored
         after x.  */
      for (y = x, np = 1;
           single_pred_p (y) && !VISITED_P (single_pred (y));
           y = single_pred (y))
        np++;
      for (y = x, i = n - np;
           single_pred_p (y) && !VISITED_P (single_pred (y));
           y = single_pred (y), i++)
        {
          order[i] = y;
          MARK_VISITED (y);
        }
      order[i] = y;
      MARK_VISITED (y);

      gcc_assert (i == n - 1);
      n -= np;
    }

  sbitmap_free (visited);
  gcc_assert (n == 0);
  return order;

#undef MARK_VISITED
#undef VISITED_P
}


/* Return TRUE if block BB has no executable statements, otherwise return
   FALSE.  */

bool
empty_block_p (basic_block bb)
{
  /* BB must have no executable statements.  */
  gimple_stmt_iterator gsi = gsi_after_labels (bb);
  if (phi_nodes (bb))
    return false;
  if (gsi_end_p (gsi))
    return true;
  if (is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);
  return gsi_end_p (gsi);
}

/* Replace PHI node element whose edge is E in block BB with variable NEW.
   Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
   is known to have two edges, one of which must reach BB).  */

static void
replace_phi_edge_with_variable (basic_block cond_block,
                                edge e, gimple phi, tree new_tree)
{
  basic_block bb = gimple_bb (phi);
  basic_block block_to_remove;
  gimple_stmt_iterator gsi;

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
      EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 0)->probability = REG_BR_PROB_BASE;
      EDGE_SUCC (cond_block, 0)->count += EDGE_SUCC (cond_block, 1)->count;

      block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
    }
  else
    {
      EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 1)->flags
        &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 1)->probability = REG_BR_PROB_BASE;
      EDGE_SUCC (cond_block, 1)->count += EDGE_SUCC (cond_block, 0)->count;

      block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
    }
  delete_basic_block (block_to_remove);

  /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
  gsi = gsi_last_bb (cond_block);
  gsi_remove (&gsi, true);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
             "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
             cond_block->index,
             bb->index);
}

/* The function conditional_replacement does the main work of doing the
   conditional replacement.  Return true if the replacement is done.
   Otherwise return false.
   COND_BB is the basic block where the replacement is going to be done.
   ARG0 is argument 0 from PHI.  Likewise for ARG1.  */

static bool
conditional_replacement (basic_block cond_bb, basic_block middle_bb,
                         edge e0, edge e1, gimple phi,
                         tree arg0, tree arg1)
{
  tree result;
  gimple stmt, new_stmt;
  tree cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  tree new_var, new_var2;
  bool neg;

  /* FIXME: Gimplification of complex type is too hard for now.  */
  if (TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE
      || TREE_CODE (TREE_TYPE (arg1)) == COMPLEX_TYPE)
    return false;

  /* If the PHI arguments are the constants 0 and 1, or 0 and -1, then
     convert the PHI to a use of the condition.  */
  if ((integer_zerop (arg0) && integer_onep (arg1))
      || (integer_zerop (arg1) && integer_onep (arg0)))
    neg = false;
  else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
           || (integer_zerop (arg1) && integer_all_onesp (arg0)))
    neg = true;
  else
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB) and its arguments
     are constants (0, 1) or (0, -1).

     So, given the condition COND, and the two PHI arguments, we can
     rewrite this PHI into non-branching code:

       dest = (COND) or dest = COND'

     We use the condition as-is if the argument associated with the
     true edge has the value one or the argument associated with the
     false edge has the value zero.  Note that those conditions are not
     the same since only one of the outgoing edges from the GIMPLE_COND
     will directly reach BB and thus be associated with an argument.  */

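  /* For example (a sketch): for x = PHI <1 (true edge), 0 (false edge)>
     we can emit x = COND unchanged, whereas for
     x = PHI <0 (true edge), 1 (false edge)> we must invert it and emit
     x = !COND; with the (0, -1) pair, a conversion to the result type
     and a negation are emitted as well.  */
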
  stmt = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.  */
  cond = fold_build2_loc (gimple_location (stmt),
                          gimple_cond_code (stmt), boolean_type_node,
                          gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((e0 == true_edge && integer_zerop (arg0))
      || (e0 == false_edge && !integer_zerop (arg0))
      || (e1 == true_edge && integer_zerop (arg1))
      || (e1 == false_edge && !integer_zerop (arg1)))
    cond = fold_build1_loc (gimple_location (stmt),
                            TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);

  if (neg)
    {
      cond = fold_convert_loc (gimple_location (stmt),
                               TREE_TYPE (result), cond);
      cond = fold_build1_loc (gimple_location (stmt),
                              NEGATE_EXPR, TREE_TYPE (cond), cond);
    }

  /* Insert our new statements at the end of conditional block before the
     COND_STMT.  */
  gsi = gsi_for_stmt (stmt);
  new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
                                      GSI_SAME_STMT);

  if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
    {
      source_location locus_0, locus_1;

      new_var2 = create_tmp_var (TREE_TYPE (result), NULL);
      add_referenced_var (new_var2);
      new_stmt = gimple_build_assign_with_ops (CONVERT_EXPR, new_var2,
                                               new_var, NULL);
      new_var2 = make_ssa_name (new_var2, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_var2);
      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
      new_var = new_var2;

      /* Set the locus to the first argument, unless it doesn't have one.  */
      locus_0 = gimple_phi_arg_location (phi, 0);
      locus_1 = gimple_phi_arg_location (phi, 1);
      if (locus_0 == UNKNOWN_LOCATION)
        locus_0 = locus_1;
      gimple_set_location (new_stmt, locus_0);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);

  /* Note that we optimized this PHI.  */
  return true;
}

/* Update *ARG which is defined in STMT so that it contains the
   computed value if that seems profitable.  Return true if the
   statement is made dead by that rewriting.  */

static bool
jump_function_from_stmt (tree *arg, gimple stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (code == ADDR_EXPR)
    {
      /* For arg = &p->i transform it to p, if possible.  */
      tree rhs1 = gimple_assign_rhs1 (stmt);
      HOST_WIDE_INT offset;
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
                                                &offset);
      if (tem
          && TREE_CODE (tem) == MEM_REF
          && double_int_zero_p
               (double_int_add (mem_ref_offset (tem),
                                shwi_to_double_int (offset))))
        {
          *arg = TREE_OPERAND (tem, 0);
          return true;
        }
    }
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
     additions symbolically here, and we'd need to update the comparison
     code that compares the arg + cst tuples in our caller.  For now the
     code above exactly handles the VEC_BASE pattern from vec.h.  */
  return false;
}

/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done.  Otherwise return
   0.  If we remove the middle basic block, return 2.
   COND_BB is the basic block where the replacement is going to be done.
   ARG0 is argument 0 from the PHI.  Likewise for ARG1.  */

static int
value_replacement (basic_block cond_bb, basic_block middle_bb,
                   edge e0, edge e1, gimple phi,
                   tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  gimple cond;
  edge true_edge, false_edge;
  enum tree_code code;
  bool empty_or_with_defined_p = true;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1))))
    return 0;

  /* If there is a statement in MIDDLE_BB that defines one of the PHI
     arguments, then adjust arg0 or arg1.  */
  gsi = gsi_after_labels (middle_bb);
  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);
  while (!gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      tree lhs;
      gsi_next_nondebug (&gsi);
      if (!is_gimple_assign (stmt))
        {
          empty_or_with_defined_p = false;
          continue;
        }
      /* Now try to adjust arg0 or arg1 according to the computation
         in the statement.  If the statement adjusts neither argument,
         MIDDLE_BB is neither empty nor only defining the PHI
         arguments.  */
      lhs = gimple_assign_lhs (stmt);
      if (!(lhs == arg0
            && jump_function_from_stmt (&arg0, stmt))
          && !(lhs == arg1
               && jump_function_from_stmt (&arg1, stmt)))
        empty_or_with_defined_p = false;
    }

  cond = last_stmt (cond_bb);
  code = gimple_cond_code (cond);

  /* This transformation is only valid for equality comparisons.  */
  if (code != NE_EXPR && code != EQ_EXPR)
    return 0;

  /* We need to know which is the true edge and which is the false
     edge so that we know which PHI argument corresponds to each edge.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* At this point we know we have a COND_EXPR with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.

     There is a single PHI node at the join point (BB) with two arguments.

     We now need to verify that the two arguments in the PHI node match
     the two arguments to the equality comparison.  */

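  /* For example (a sketch): for

       if (a != b) goto bb2; else goto bb1;
       ...
       x = PHI <a (bb1), b (bb0)>

     the argument on the false (a == b) path is a, which equals b there,
     so x reduces to b on both paths.  */
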
  if ((operand_equal_for_phi_arg_p (arg0, gimple_cond_lhs (cond))
       && operand_equal_for_phi_arg_p (arg1, gimple_cond_rhs (cond)))
      || (operand_equal_for_phi_arg_p (arg1, gimple_cond_lhs (cond))
          && operand_equal_for_phi_arg_p (arg0, gimple_cond_rhs (cond))))
    {
      edge e;
      tree arg;

      /* For NE_EXPR, we want to build an assignment result = arg where
         arg is the PHI argument associated with the true edge.  For
         EQ_EXPR we want the PHI argument associated with the false edge.  */
      e = (code == NE_EXPR ? true_edge : false_edge);

      /* Unfortunately, E may not reach BB (it may instead have gone to
         OTHER_BLOCK).  If that is the case, then we want the single outgoing
         edge from OTHER_BLOCK which reaches BB and represents the desired
         path from COND_BLOCK.  */
      if (e->dest == middle_bb)
        e = single_succ_edge (e->dest);

      /* Now we know the incoming edge to BB that has the argument for the
         RHS of our new assignment statement.  */
      if (e0 == e)
        arg = arg0;
      else
        arg = arg1;

      /* If the middle basic block was empty or is defining the
         PHI arguments and this is a single phi where the args are different
         for the edges e0 and e1 then we can remove the middle basic block.  */
      if (empty_or_with_defined_p
          && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
                                                 e0, e1))
        {
          replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
          /* Note that we optimized this PHI.  */
          return 2;
        }
      else
        {
          /* Replace the PHI arguments with arg.  */
          SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
          SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "PHI ");
              print_generic_expr (dump_file, gimple_phi_result (phi), 0);
              fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
                       cond_bb->index);
              print_generic_expr (dump_file, arg, 0);
              fprintf (dump_file, ".\n");
            }
          return 1;
        }
    }
  return 0;
}

/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   COND_BB is the basic block where the replacement is going to be done.
   ARG0 is argument 0 from the PHI.  Likewise for ARG1.  */

static bool
minmax_replacement (basic_block cond_bb, basic_block middle_bb,
                    edge e0, edge e1, gimple phi,
                    tree arg0, tree arg1)
{
  tree result, type;
  gimple cond, new_stmt;
  edge true_edge, false_edge;
  enum tree_code cmp, minmax, ass_code;
  tree smaller, larger, arg_true, arg_false;
  gimple_stmt_iterator gsi, gsi_from;

  type = TREE_TYPE (PHI_RESULT (phi));

  /* The optimization may be unsafe due to NaNs.  */
  if (HONOR_NANS (TYPE_MODE (type)))
    return false;

  cond = last_stmt (cond_bb);
  cmp = gimple_cond_code (cond);

  /* This transformation is only valid for order comparisons.  Record which
     operand is smaller/larger if the result of the comparison is true.  */
  if (cmp == LT_EXPR || cmp == LE_EXPR)
    {
      smaller = gimple_cond_lhs (cond);
      larger = gimple_cond_rhs (cond);
    }
  else if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      smaller = gimple_cond_rhs (cond);
      larger = gimple_cond_lhs (cond);
    }
  else
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know whether to emit MIN_EXPR or MAX_EXPR.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* Forward the edges over the middle basic block.  */
  if (true_edge->dest == middle_bb)
    true_edge = EDGE_SUCC (true_edge->dest, 0);
  if (false_edge->dest == middle_bb)
    false_edge = EDGE_SUCC (false_edge->dest, 0);

  if (true_edge == e0)
    {
      gcc_assert (false_edge == e1);
      arg_true = arg0;
      arg_false = arg1;
    }
  else
    {
      gcc_assert (false_edge == e0);
      gcc_assert (true_edge == e1);
      arg_true = arg1;
      arg_false = arg0;
    }

  if (empty_block_p (middle_bb))
    {
      if (operand_equal_for_phi_arg_p (arg_true, smaller)
          && operand_equal_for_phi_arg_p (arg_false, larger))
        {
          /* Case

             if (smaller < larger)
               rslt = smaller;
             else
               rslt = larger;  */
          minmax = MIN_EXPR;
        }
      else if (operand_equal_for_phi_arg_p (arg_false, smaller)
               && operand_equal_for_phi_arg_p (arg_true, larger))
        minmax = MAX_EXPR;
      else
        return false;
    }
  else
    {
      /* Recognize the following case, assuming d <= u:

         if (a <= u)
           b = MAX (a, d);
         x = PHI <b, u>

         This is equivalent to

         b = MAX (a, d);
         x = MIN (b, u);  */

      gimple assign = last_and_only_stmt (middle_bb);
      tree lhs, op0, op1, bound;

      if (!assign
          || gimple_code (assign) != GIMPLE_ASSIGN)
        return false;

      lhs = gimple_assign_lhs (assign);
      ass_code = gimple_assign_rhs_code (assign);
      if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
        return false;
      op0 = gimple_assign_rhs1 (assign);
      op1 = gimple_assign_rhs2 (assign);

      if (true_edge->src == middle_bb)
        {
          /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
          if (!operand_equal_for_phi_arg_p (lhs, arg_true))
            return false;

          if (operand_equal_for_phi_arg_p (arg_false, larger))
            {
              /* Case

                 if (smaller < larger)
                   {
                     r' = MAX_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
              if (ass_code != MAX_EXPR)
                return false;

              minmax = MIN_EXPR;
              if (operand_equal_for_phi_arg_p (op0, smaller))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, smaller))
                bound = op0;
              else
                return false;

              /* We need BOUND <= LARGER.  */
              if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                  bound, larger)))
                return false;
            }
          else if (operand_equal_for_phi_arg_p (arg_false, smaller))
            {
              /* Case

                 if (smaller < larger)
                   {
                     r' = MIN_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MAX_EXPR.  */
              if (ass_code != MIN_EXPR)
                return false;

              minmax = MAX_EXPR;
              if (operand_equal_for_phi_arg_p (op0, larger))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, larger))
                bound = op0;
              else
                return false;

              /* We need BOUND >= SMALLER.  */
              if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                  bound, smaller)))
                return false;
            }
          else
            return false;
        }
      else
        {
          /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
          if (!operand_equal_for_phi_arg_p (lhs, arg_false))
            return false;

          if (operand_equal_for_phi_arg_p (arg_true, larger))
            {
              /* Case

                 if (smaller > larger)
                   {
                     r' = MIN_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
              if (ass_code != MIN_EXPR)
                return false;

              minmax = MAX_EXPR;
              if (operand_equal_for_phi_arg_p (op0, smaller))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, smaller))
                bound = op0;
              else
                return false;

              /* We need BOUND >= LARGER.  */
              if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                  bound, larger)))
                return false;
            }
          else if (operand_equal_for_phi_arg_p (arg_true, smaller))
            {
              /* Case

                 if (smaller > larger)
                   {
                     r' = MAX_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
              if (ass_code != MAX_EXPR)
                return false;

              minmax = MIN_EXPR;
              if (operand_equal_for_phi_arg_p (op0, larger))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, larger))
                bound = op0;
              else
                return false;

              /* We need BOUND <= SMALLER.  */
              if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                  bound, smaller)))
                return false;
            }
          else
            return false;
        }

      /* Move the statement from the middle block.  */
      gsi = gsi_last_bb (cond_bb);
      gsi_from = gsi_last_nondebug_bb (middle_bb);
      gsi_move_before (&gsi_from, &gsi);
    }

  /* Emit the statement to compute min/max.  */
  result = duplicate_ssa_name (PHI_RESULT (phi), NULL);
  new_stmt = gimple_build_assign_with_ops (minmax, result, arg0, arg1);
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);
  return true;
}

/* The function abs_replacement does the main work of doing the absolute
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   COND_BB is the basic block where the replacement is going to be done.
   ARG0 is argument 0 from the PHI.  Likewise for ARG1.  */

static bool
abs_replacement (basic_block cond_bb, basic_block middle_bb,
                 edge e0 ATTRIBUTE_UNUSED, edge e1,
                 gimple phi, tree arg0, tree arg1)
{
  tree result;
  gimple new_stmt, cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  gimple assign;
  edge e;
  tree rhs, lhs;
  bool negate;
  enum tree_code cond_code;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1))))
    return false;

  /* OTHER_BLOCK must have only one executable statement which must have the
     form arg0 = -arg1 or arg1 = -arg0.  */

  assign = last_and_only_stmt (middle_bb);
  /* If we did not find the proper negation assignment, then we cannot
     optimize.  */
  if (assign == NULL)
    return false;

  /* If we got here, then we have found the only executable statement
     in OTHER_BLOCK.  If it is anything other than arg0 = -arg1 or
     arg1 = -arg0, then we cannot optimize.  */
  if (gimple_code (assign) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (assign);

  if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
    return false;

  rhs = gimple_assign_rhs1 (assign);

  /* The assignment has to be arg0 = -arg1 or arg1 = -arg0.  */
  if (!(lhs == arg0 && rhs == arg1)
      && !(lhs == arg1 && rhs == arg0))
    return false;

  cond = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* Only relationals comparing arg[01] against zero are interesting.  */
  cond_code = gimple_cond_code (cond);
  if (cond_code != GT_EXPR && cond_code != GE_EXPR
      && cond_code != LT_EXPR && cond_code != LE_EXPR)
    return false;

  /* Make sure the conditional is arg[01] OP y.  */
  if (gimple_cond_lhs (cond) != rhs)
    return false;

  if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
        ? real_zerop (gimple_cond_rhs (cond))
        : integer_zerop (gimple_cond_rhs (cond)))
    ;
  else
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know if we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
     will need to negate the result.  Similarly for LT_EXPR/LE_EXPR if
     the false edge goes to OTHER_BLOCK.  */
  if (cond_code == GT_EXPR || cond_code == GE_EXPR)
    e = true_edge;
  else
    e = false_edge;

  if (e->dest == middle_bb)
    negate = true;
  else
    negate = false;

  result = duplicate_ssa_name (result, NULL);

  if (negate)
    {
      tree tmp = create_tmp_var (TREE_TYPE (result), NULL);
      add_referenced_var (tmp);
      lhs = make_ssa_name (tmp, NULL);
    }
  else
    lhs = result;

  /* Build the modify expression with abs expression.  */
  new_stmt = gimple_build_assign_with_ops (ABS_EXPR, lhs, rhs, NULL);

  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  if (negate)
    {
      /* Get the right GSI.  We want to insert after the recently
         added ABS_EXPR statement (which we know is the first statement
         in the block).  */
      new_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, result, lhs, NULL);

      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);

  /* Note that we optimized this PHI.  */
  return true;
}

/* Auxiliary functions to determine the set of memory accesses which
   can't trap because they are preceded by accesses to the same memory
   portion.  We do that for MEM_REFs, so we only need to track
   the SSA_NAME of the pointer indirectly referenced.  The algorithm
   simply is a walk over all instructions in dominator order.  When
   we see an MEM_REF we determine if we've already seen a same
   ref anywhere up to the root of the dominator tree.  If we have, the
   current access can't trap.  If we don't see any dominating access
   the current access might trap, but might also make later accesses
   non-trapping, so we remember it.  We need to be careful with loads
   or stores, for instance a load might not trap, while a store would,
   so if we see a dominating read access this doesn't mean that a later
   write access would not trap.  Hence we also need to differentiate the
   type of access(es) seen.

   ??? We currently are very conservative and assume that a load might
   trap even if a store doesn't (write-only memory).  This probably is
   overly conservative.  */

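/* For example (a sketch), in

     *p = 0;
     if (cond)
       *p = v;

   the store in the guarded block is dominated by a store through the
   same SSA_NAME pointer with the same offset and size, so it is entered
   into the non-trapping set and cond_store_replacement may sink it.  */
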
/* A hash-table of SSA_NAMEs, and in which basic block an MEM_REF
   through it was seen, which would constitute a no-trap region for
   same accesses.  */
struct name_to_bb
{
  unsigned int ssa_name_ver;
  bool store;
  HOST_WIDE_INT offset, size;
  basic_block bb;
};

/* The hash table for remembering what we've seen.  */
static htab_t seen_ssa_names;

/* The set of MEM_REFs which can't trap.  */
static struct pointer_set_t *nontrap_set;

/* The hash function.  */
static hashval_t
name_to_bb_hash (const void *p)
{
  const struct name_to_bb *n = (const struct name_to_bb *) p;
  return n->ssa_name_ver ^ (((hashval_t) n->store) << 31)
         ^ (n->offset << 6) ^ (n->size << 3);
}

/* The equality function of *P1 and *P2.  */
static int
name_to_bb_eq (const void *p1, const void *p2)
{
  const struct name_to_bb *n1 = (const struct name_to_bb *)p1;
  const struct name_to_bb *n2 = (const struct name_to_bb *)p2;

  return n1->ssa_name_ver == n2->ssa_name_ver
         && n1->store == n2->store
         && n1->offset == n2->offset
         && n1->size == n2->size;
}

/* We see the expression EXP in basic block BB.  If it's an interesting
   expression (an MEM_REF through an SSA_NAME) possibly insert the
   expression into the set NONTRAP or the hash table of seen expressions.
   STORE is true if this expression is on the LHS, otherwise it's on
   the RHS.  */
static void
add_or_mark_expr (basic_block bb, tree exp,
                  struct pointer_set_t *nontrap, bool store)
{
  HOST_WIDE_INT size;

  if (TREE_CODE (exp) == MEM_REF
      && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME
      && host_integerp (TREE_OPERAND (exp, 1), 0)
      && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
    {
      tree name = TREE_OPERAND (exp, 0);
      struct name_to_bb map;
      void **slot;
      struct name_to_bb *n2bb;
      basic_block found_bb = 0;

      /* Try to find the last seen MEM_REF through the same
         SSA_NAME, which can trap.  */
      map.ssa_name_ver = SSA_NAME_VERSION (name);
      map.bb = 0;
      map.store = store;
      map.offset = tree_low_cst (TREE_OPERAND (exp, 1), 0);
      map.size = size;

      slot = htab_find_slot (seen_ssa_names, &map, INSERT);
      n2bb = (struct name_to_bb *) *slot;
      if (n2bb)
        found_bb = n2bb->bb;

      /* If we've found a trapping MEM_REF, _and_ it dominates EXP
         (it's in a basic block on the path from us to the dominator root)
         then EXP can't trap.  */
      if (found_bb && found_bb->aux == (void *)1)
        {
          pointer_set_insert (nontrap, exp);
        }
      else
        {
          /* EXP might trap, so insert it into the hash table.  */
          if (n2bb)
            {
              n2bb->bb = bb;
            }
          else
            {
              n2bb = XNEW (struct name_to_bb);
              n2bb->ssa_name_ver = SSA_NAME_VERSION (name);
              n2bb->bb = bb;
              n2bb->store = store;
              n2bb->offset = map.offset;
              n2bb->size = size;
              *slot = n2bb;
            }
        }
    }
}

/* Called by walk_dominator_tree, when entering the block BB.  */
static void
nt_init_block (struct dom_walk_data *data ATTRIBUTE_UNUSED, basic_block bb)
{
  gimple_stmt_iterator gsi;
  /* Mark this BB as being on the path to dominator root.  */
  bb->aux = (void*)1;

  /* And walk the statements in order.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);

      if (gimple_assign_single_p (stmt))
        {
          add_or_mark_expr (bb, gimple_assign_lhs (stmt), nontrap_set, true);
          add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), nontrap_set, false);
        }
    }
}

/* Called by walk_dominator_tree, when basic block BB is exited.  */
static void
nt_fini_block (struct dom_walk_data *data ATTRIBUTE_UNUSED, basic_block bb)
{
  /* This BB isn't on the path to dominator root anymore.  */
  bb->aux = NULL;
}


/* This is the entry point of gathering non-trapping memory accesses.
   It will do a dominator walk over the whole function, and it will
   make use of the bb->aux pointers.  It returns a set of trees
   (the MEM_REFs themselves) which can't trap.  */
static struct pointer_set_t *
get_non_trapping (void)
{
  struct pointer_set_t *nontrap;
  struct dom_walk_data walk_data;

  nontrap = pointer_set_create ();
  seen_ssa_names = htab_create (128, name_to_bb_hash, name_to_bb_eq,
                                free);
  /* We're going to do a dominator walk, so ensure that we have
     dominance information.  */
  calculate_dominance_info (CDI_DOMINATORS);

  /* Setup callbacks for the generic dominator tree walker.  */
  nontrap_set = nontrap;
  walk_data.dom_direction = CDI_DOMINATORS;
  walk_data.initialize_block_local_data = NULL;
  walk_data.before_dom_children = nt_init_block;
  walk_data.after_dom_children = nt_fini_block;
  walk_data.global_data = NULL;
  walk_data.block_local_data_size = 0;

  init_walk_dominator_tree (&walk_data);
  walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);
  fini_walk_dominator_tree (&walk_data);
  htab_delete (seen_ssa_names);

  return nontrap;
}

/* Do the main work of conditional store replacement.  We already know
   that the recognized pattern looks like so:

   split:
     if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
   MIDDLE_BB:
     something
     fallthrough (edge E0)
   JOIN_BB:
     some more

   We check that MIDDLE_BB contains only one store, that that store
   doesn't trap (not via NOTRAP, but via checking if an access to the same
   memory location dominates us) and that the store has a "simple" RHS.  */

static bool
cond_store_replacement (basic_block middle_bb, basic_block join_bb,
                        edge e0, edge e1, struct pointer_set_t *nontrap)
{
  gimple assign = last_and_only_stmt (middle_bb);
  tree lhs, rhs, name;
  gimple newphi, new_stmt;
  gimple_stmt_iterator gsi;
  source_location locus;

  /* Check if middle_bb contains only one store.  */
  if (!assign
      || !gimple_assign_single_p (assign))
    return false;

  locus = gimple_location (assign);
  lhs = gimple_assign_lhs (assign);
  rhs = gimple_assign_rhs1 (assign);
  if (TREE_CODE (lhs) != MEM_REF
      || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
      || !is_gimple_reg_type (TREE_TYPE (lhs)))
    return false;

  /* Prove that we can move the store down.  We could also check
     TREE_THIS_NOTRAP here, but in that case we also could move stores,
     whose value is not available readily, which we want to avoid.  */
  if (!pointer_set_contains (nontrap, lhs))
    return false;

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the single store.  */
  gsi = gsi_for_stmt (assign);
  unlink_stmt_vdef (assign);
  gsi_remove (&gsi, true);
  release_defs (assign);

  /* 2) Create a temporary where we can store the old content
     of the memory touched by the store, if we need to.  */
  if (!condstoretemp || TREE_TYPE (lhs) != TREE_TYPE (condstoretemp))
    condstoretemp = create_tmp_reg (TREE_TYPE (lhs), "cstore");
  add_referenced_var (condstoretemp);

  /* 3) Insert a load from the memory of the store to the temporary
     on the edge which did not contain the store.  */
  lhs = unshare_expr (lhs);
  new_stmt = gimple_build_assign (condstoretemp, lhs);
  name = make_ssa_name (condstoretemp, new_stmt);
  gimple_assign_set_lhs (new_stmt, name);
  gimple_set_location (new_stmt, locus);
  gsi_insert_on_edge (e1, new_stmt);

  /* 4) Create a PHI node at the join block, with one argument
     holding the old RHS, and the other holding the temporary
     where we stored the old memory contents.  */
  newphi = create_phi_node (condstoretemp, join_bb);
  add_phi_arg (newphi, rhs, e0, locus);
  add_phi_arg (newphi, name, e1, locus);

  lhs = unshare_expr (lhs);
  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 5) Insert the new store of the PHI result.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  return true;
}

/* Do the main work of conditional store replacement.  */

static bool
cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
                                  basic_block join_bb, gimple then_assign,
                                  gimple else_assign)
{
  tree lhs_base, lhs, then_rhs, else_rhs;
  source_location then_locus, else_locus;
  gimple_stmt_iterator gsi;
  gimple newphi, new_stmt;

  if (then_assign == NULL
      || !gimple_assign_single_p (then_assign)
      || gimple_clobber_p (then_assign)
      || else_assign == NULL
      || !gimple_assign_single_p (else_assign)
      || gimple_clobber_p (else_assign))
    return false;

  lhs = gimple_assign_lhs (then_assign);
  if (!is_gimple_reg_type (TREE_TYPE (lhs))
      || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
    return false;

  lhs_base = get_base_address (lhs);
  if (lhs_base == NULL_TREE
      || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
    return false;

  then_rhs = gimple_assign_rhs1 (then_assign);
  else_rhs = gimple_assign_rhs1 (else_assign);
  then_locus = gimple_location (then_assign);
  else_locus = gimple_location (else_assign);

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the stores.  */
  gsi = gsi_for_stmt (then_assign);
  unlink_stmt_vdef (then_assign);
  gsi_remove (&gsi, true);
  release_defs (then_assign);

  gsi = gsi_for_stmt (else_assign);
  unlink_stmt_vdef (else_assign);
  gsi_remove (&gsi, true);
  release_defs (else_assign);

  /* 2) Create a temporary to hold the value to be stored, if we need
     a new one.  */
  if (!condstoretemp || TREE_TYPE (lhs) != TREE_TYPE (condstoretemp))
    condstoretemp = create_tmp_reg (TREE_TYPE (lhs), "cstore");
  add_referenced_var (condstoretemp);

  /* 3) Create a PHI node at the join block, with the two stored
     values (THEN_RHS and ELSE_RHS) as arguments.  */
  newphi = create_phi_node (condstoretemp, join_bb);
  add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
  add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);

  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 4) Insert the new store of the PHI result.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  return true;
}

/* Conditional store replacement.  We already know
   that the recognized pattern looks like so:

   split:
     if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
   THEN_BB:
     ...
     X = Y;
     ...
     goto JOIN_BB;
   ELSE_BB:
     ...
     X = Z;
     ...
     fallthrough (edge E0)
   JOIN_BB:
     some more

   We check that it is safe to sink the store to JOIN_BB by verifying that
   there are no read-after-write or write-after-write dependencies in
   THEN_BB and ELSE_BB.  */

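/* For instance (a sketch), in

     if (cond) { *p = 1; x = *p; } else { *p = 2; }

   the load x = *p in the then-block reads the just-stored value, a
   read-after-write dependence, so the store *p = 1 must not be moved
   past it down to the join block and the replacement is rejected.  */
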
static bool
cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
                                basic_block join_bb)
{
  gimple then_assign = last_and_only_stmt (then_bb);
  gimple else_assign = last_and_only_stmt (else_bb);
  VEC (data_reference_p, heap) *then_datarefs, *else_datarefs;
  VEC (ddr_p, heap) *then_ddrs, *else_ddrs;
  gimple then_store, else_store;
  bool found, ok = false, res;
  struct data_dependence_relation *ddr;
  data_reference_p then_dr, else_dr;
  int i, j;
  tree then_lhs, else_lhs;
  VEC (gimple, heap) *then_stores, *else_stores;
  basic_block blocks[3];

  if (MAX_STORES_TO_SINK == 0)
    return false;

  /* Handle the case with a single statement in THEN_BB and ELSE_BB.  */
  if (then_assign && else_assign)
    return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
                                             then_assign, else_assign);

  /* Find data references.  */
  then_datarefs = VEC_alloc (data_reference_p, heap, 1);
  else_datarefs = VEC_alloc (data_reference_p, heap, 1);
  if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
       == chrec_dont_know)
      || !VEC_length (data_reference_p, then_datarefs)
      || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
          == chrec_dont_know)
      || !VEC_length (data_reference_p, else_datarefs))
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }

  /* Find pairs of stores with equal LHS.  */
  then_stores = VEC_alloc (gimple, heap, 1);
  else_stores = VEC_alloc (gimple, heap, 1);
  FOR_EACH_VEC_ELT (data_reference_p, then_datarefs, i, then_dr)
    {
      if (DR_IS_READ (then_dr))
        continue;

      then_store = DR_STMT (then_dr);
      then_lhs = gimple_get_lhs (then_store);
      found = false;

      FOR_EACH_VEC_ELT (data_reference_p, else_datarefs, j, else_dr)
        {
          if (DR_IS_READ (else_dr))
            continue;

          else_store = DR_STMT (else_dr);
          else_lhs = gimple_get_lhs (else_store);

          if (operand_equal_p (then_lhs, else_lhs, 0))
            {
              found = true;
              break;
            }
        }

      if (!found)
        continue;

      VEC_safe_push (gimple, heap, then_stores, then_store);
      VEC_safe_push (gimple, heap, else_stores, else_store);
    }

  /* No pairs of stores found, or too many stores to sink.  */
  if (!VEC_length (gimple, then_stores)
      || VEC_length (gimple, then_stores) > (unsigned) MAX_STORES_TO_SINK)
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      VEC_free (gimple, heap, then_stores);
      VEC_free (gimple, heap, else_stores);
      return false;
    }

  /* Compute and check data dependencies in both basic blocks.  */
  then_ddrs = VEC_alloc (ddr_p, heap, 1);
  else_ddrs = VEC_alloc (ddr_p, heap, 1);
  if (!compute_all_dependences (then_datarefs, &then_ddrs, NULL, false)
      || !compute_all_dependences (else_datarefs, &else_ddrs, NULL, false))
    {
      free_dependence_relations (then_ddrs);
      free_dependence_relations (else_ddrs);
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      VEC_free (gimple, heap, then_stores);
      VEC_free (gimple, heap, else_stores);
      return false;
    }
  blocks[0] = then_bb;
  blocks[1] = else_bb;
  blocks[2] = join_bb;
  renumber_gimple_stmt_uids_in_blocks (blocks, 3);

  /* Check that there are no read-after-write or write-after-write dependencies
     in THEN_BB.  */
  FOR_EACH_VEC_ELT (ddr_p, then_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
          && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
               && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
              || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
                  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
              || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
        {
          free_dependence_relations (then_ddrs);
          free_dependence_relations (else_ddrs);
          free_data_refs (then_datarefs);
          free_data_refs (else_datarefs);
          VEC_free (gimple, heap, then_stores);
          VEC_free (gimple, heap, else_stores);
          return false;
        }
    }

  /* Check that there are no read-after-write or write-after-write dependencies
     in ELSE_BB.  */
  FOR_EACH_VEC_ELT (ddr_p, else_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
          && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
               && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
              || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
                  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
              || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
        {
          free_dependence_relations (then_ddrs);
          free_dependence_relations (else_ddrs);
          free_data_refs (then_datarefs);
          free_data_refs (else_datarefs);
          VEC_free (gimple, heap, then_stores);
          VEC_free (gimple, heap, else_stores);
          return false;
        }
    }

  /* Sink stores with the same LHS.  */
  FOR_EACH_VEC_ELT (gimple, then_stores, i, then_store)
    {
      else_store = VEC_index (gimple, else_stores, i);
      res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
                                              then_store, else_store);
      ok = ok || res;
    }

  free_dependence_relations (then_ddrs);
  free_dependence_relations (else_ddrs);
  free_data_refs (then_datarefs);
  free_data_refs (else_datarefs);
  VEC_free (gimple, heap, then_stores);
  VEC_free (gimple, heap, else_stores);

  return ok;
}

/* Always do these optimizations if we have SSA
   trees to work on.  */
static bool
gate_phiopt (void)
{
  return 1;
}

struct gimple_opt_pass pass_phiopt =
{
 {
  GIMPLE_PASS,
  "phiopt",				/* name */
  gate_phiopt,				/* gate */
  tree_ssa_phiopt,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_TREE_PHIOPT,			/* tv_id */
  PROP_cfg | PROP_ssa,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_ggc_collect
    | TODO_verify_ssa
    | TODO_verify_flow
    | TODO_verify_stmts			/* todo_flags_finish */
 }
};

static bool
gate_cselim (void)
{
  return flag_tree_cselim;
}

struct gimple_opt_pass pass_cselim =
{
 {
  GIMPLE_PASS,
  "cselim",				/* name */
  gate_cselim,				/* gate */
  tree_ssa_cs_elim,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_TREE_PHIOPT,			/* tv_id */
  PROP_cfg | PROP_ssa,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_ggc_collect
    | TODO_verify_ssa
    | TODO_verify_flow
    | TODO_verify_stmts			/* todo_flags_finish */
 }
};