re PR tree-optimization/67769 (VRP pass does wrong optimization)
[gcc.git] / gcc / tree-ssa-phiopt.c
1 /* Optimization of PHI nodes by converting them into straightline code.
2 Copyright (C) 2004-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "cfghooks.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "rtl.h"
28 #include "ssa.h"
29 #include "alias.h"
30 #include "fold-const.h"
31 #include "stor-layout.h"
32 #include "flags.h"
33 #include "tm_p.h"
34 #include "cfganal.h"
35 #include "internal-fn.h"
36 #include "gimplify.h"
37 #include "gimple-iterator.h"
38 #include "gimplify-me.h"
39 #include "tree-cfg.h"
40 #include "insn-config.h"
41 #include "tree-dfa.h"
42 #include "tree-pass.h"
43 #include "langhooks.h"
44 #include "domwalk.h"
45 #include "cfgloop.h"
46 #include "tree-data-ref.h"
47 #include "gimple-pretty-print.h"
48 #include "insn-codes.h"
49 #include "optabs-tree.h"
50 #include "tree-scalar-evolution.h"
51 #include "tree-inline.h"
52 #include "params.h"
53
54 static unsigned int tree_ssa_phiopt_worker (bool, bool);
55 static bool conditional_replacement (basic_block, basic_block,
56 edge, edge, gphi *, tree, tree);
57 static bool factor_out_conditional_conversion (edge, edge, gphi *, tree, tree);
58 static int value_replacement (basic_block, basic_block,
59 edge, edge, gimple *, tree, tree);
60 static bool minmax_replacement (basic_block, basic_block,
61 edge, edge, gimple *, tree, tree);
62 static bool abs_replacement (basic_block, basic_block,
63 edge, edge, gimple *, tree, tree);
64 static bool cond_store_replacement (basic_block, basic_block, edge, edge,
65 hash_set<tree> *);
66 static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
67 static hash_set<tree> * get_non_trapping ();
68 static void replace_phi_edge_with_variable (basic_block, edge, gimple *, tree);
69 static void hoist_adjacent_loads (basic_block, basic_block,
70 basic_block, basic_block);
71 static bool gate_hoist_loads (void);
72
73 /* This pass tries to transform conditional stores into unconditional
74 ones, enabling further simplifications with the simpler then and else
75 blocks. In particular it replaces this:
76
77 bb0:
78 if (cond) goto bb2; else goto bb1;
79 bb1:
80 *p = RHS;
81 bb2:
82
83 with
84
85 bb0:
86 if (cond) goto bb1; else goto bb2;
87 bb1:
88 condtmp' = *p;
89 bb2:
90 condtmp = PHI <RHS, condtmp'>
91 *p = condtmp;
92
93 This transformation can only be done under several constraints,
94 documented below. It also replaces:
95
96 bb0:
97 if (cond) goto bb2; else goto bb1;
98 bb1:
99 *p = RHS1;
100 goto bb3;
101 bb2:
102 *p = RHS2;
103 bb3:
104
105 with
106
107 bb0:
108 if (cond) goto bb3; else goto bb1;
109 bb1:
110 bb3:
111 condtmp = PHI <RHS1, RHS2>
112 *p = condtmp; */
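/* An illustrative source-level sketch of the first transformation
   above (not part of the pass itself), assuming the store to *p is
   known not to trap:

     if (cond)                    tmp = cond ? RHS : *p;
       *p = RHS;         -->      *p = tmp;

   i.e. the conditional store becomes an unconditional store of a
   conditionally selected value.  */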
113
114 static unsigned int
115 tree_ssa_cs_elim (void)
116 {
117 unsigned todo;
118 /* ??? We are not interested in loop related info, but the following
119 will create it, ICEing as we didn't init loops with pre-headers.
120 An interfacing issue of find_data_references_in_bb. */
121 loop_optimizer_init (LOOPS_NORMAL);
122 scev_initialize ();
123 todo = tree_ssa_phiopt_worker (true, false);
124 scev_finalize ();
125 loop_optimizer_finalize ();
126 return todo;
127 }
128
129 /* Return the sole PHI in SEQ if SEQ has exactly one PHI; otherwise return the single PHI in SEQ whose arguments for edges E0 and E1 differ, or NULL if there is none or more than one. */
130
131 static gphi *
132 single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
133 {
134 gimple_stmt_iterator i;
135 gphi *phi = NULL;
136 if (gimple_seq_singleton_p (seq))
137 return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
138 for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
139 {
140 gphi *p = as_a <gphi *> (gsi_stmt (i));
141 /* If the PHI arguments are equal then we can skip this PHI. */
142 if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
143 gimple_phi_arg_def (p, e1->dest_idx)))
144 continue;
145
146 /* If we already have a PHI whose arguments for the two edges
147 differ, then there is more than one such PHI: return NULL. */
148 if (phi)
149 return NULL;
150
151 phi = p;
152 }
153 return phi;
154 }
155
156 /* The core routine of conditional store replacement and normal
157 phi optimizations. Both share much of the infrastructure in how
158 to match applicable basic block patterns. DO_STORE_ELIM is true
159 when we want to do conditional store replacement, false otherwise.
160 DO_HOIST_LOADS is true when we want to hoist adjacent loads out
161 of diamond control flow patterns, false otherwise. */
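/* A sketch of the two CFG shapes matched below, for orientation:

     triangle (PHI opts,           diamond (if/else stores,
     cond-store elim):             load hoisting):

         bb                            bb
        /  \                          /  \
      bb1   |                       bb1   bb2
        \   |                         \   /
         bb2                           bb3

   bb1 (and bb2 in the diamond case) must be fallthrough blocks with a
   single predecessor and a single successor.  */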
162 static unsigned int
163 tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
164 {
165 basic_block bb;
166 basic_block *bb_order;
167 unsigned n, i;
168 bool cfgchanged = false;
169 hash_set<tree> *nontrap = 0;
170
171 if (do_store_elim)
172 /* Calculate the set of non-trapping memory accesses. */
173 nontrap = get_non_trapping ();
174
175 /* Search every basic block for a COND_EXPR we may be able to optimize.
176
177 We walk the blocks in an order that guarantees that a block with
178 a single predecessor is processed before its predecessor.
179 This ensures that we collapse inner ifs before visiting the
180 outer ones, and also that we do not try to visit a removed
181 block. */
182 bb_order = single_pred_before_succ_order ();
183 n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
184
185 for (i = 0; i < n; i++)
186 {
187 gimple *cond_stmt;
188 gphi *phi;
189 basic_block bb1, bb2;
190 edge e1, e2;
191 tree arg0, arg1;
192
193 bb = bb_order[i];
194
195 cond_stmt = last_stmt (bb);
196 /* Check to see if the last statement is a GIMPLE_COND. */
197 if (!cond_stmt
198 || gimple_code (cond_stmt) != GIMPLE_COND)
199 continue;
200
201 e1 = EDGE_SUCC (bb, 0);
202 bb1 = e1->dest;
203 e2 = EDGE_SUCC (bb, 1);
204 bb2 = e2->dest;
205
206 /* We cannot do the optimization on abnormal edges. */
207 if ((e1->flags & EDGE_ABNORMAL) != 0
208 || (e2->flags & EDGE_ABNORMAL) != 0)
209 continue;
210
211 /* Give up if bb1 has no successors, bb2 does not exist, or bb2 has no successors. */
212 if (EDGE_COUNT (bb1->succs) == 0
213 || bb2 == NULL
214 || EDGE_COUNT (bb2->succs) == 0)
215 continue;
216
217 /* Find the bb which is the fall through to the other. */
218 if (EDGE_SUCC (bb1, 0)->dest == bb2)
219 ;
220 else if (EDGE_SUCC (bb2, 0)->dest == bb1)
221 {
222 std::swap (bb1, bb2);
223 std::swap (e1, e2);
224 }
225 else if (do_store_elim
226 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
227 {
228 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
229
230 if (!single_succ_p (bb1)
231 || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
232 || !single_succ_p (bb2)
233 || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
234 || EDGE_COUNT (bb3->preds) != 2)
235 continue;
236 if (cond_if_else_store_replacement (bb1, bb2, bb3))
237 cfgchanged = true;
238 continue;
239 }
240 else if (do_hoist_loads
241 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
242 {
243 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
244
245 if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
246 && single_succ_p (bb1)
247 && single_succ_p (bb2)
248 && single_pred_p (bb1)
249 && single_pred_p (bb2)
250 && EDGE_COUNT (bb->succs) == 2
251 && EDGE_COUNT (bb3->preds) == 2
252 /* If one edge or the other is dominant, a conditional move
253 is likely to perform worse than the well-predicted branch. */
254 && !predictable_edge_p (EDGE_SUCC (bb, 0))
255 && !predictable_edge_p (EDGE_SUCC (bb, 1)))
256 hoist_adjacent_loads (bb, bb1, bb2, bb3);
257 continue;
258 }
259 else
260 continue;
261
262 e1 = EDGE_SUCC (bb1, 0);
263
264 /* Make sure that bb1 is just a fall through. */
265 if (!single_succ_p (bb1)
266 || (e1->flags & EDGE_FALLTHRU) == 0)
267 continue;
268
269 /* Also make sure that bb1 has only one predecessor and that it
270 is bb. */
271 if (!single_pred_p (bb1)
272 || single_pred (bb1) != bb)
273 continue;
274
275 if (do_store_elim)
276 {
277 /* bb1 is the middle block, bb2 the join block, bb the split block,
278 e1 the fallthrough edge from bb1 to bb2. We can't do the
279 optimization if the join block has more than two predecessors. */
280 if (EDGE_COUNT (bb2->preds) > 2)
281 continue;
282 if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
283 cfgchanged = true;
284 }
285 else
286 {
287 gimple_seq phis = phi_nodes (bb2);
288 gimple_stmt_iterator gsi;
289 bool candorest = true;
290
291 /* Value replacement can work with more than one PHI
292 so try that first. */
293 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
294 {
295 phi = as_a <gphi *> (gsi_stmt (gsi));
296 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
297 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
298 if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
299 {
300 candorest = false;
301 cfgchanged = true;
302 break;
303 }
304 }
305
306 if (!candorest)
307 continue;
308
309 phi = single_non_singleton_phi_for_edges (phis, e1, e2);
310 if (!phi)
311 continue;
312
313 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
314 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
315
316 /* Something is wrong if we cannot find the arguments in the PHI
317 node. */
318 gcc_assert (arg0 != NULL && arg1 != NULL);
319
320 if (factor_out_conditional_conversion (e1, e2, phi, arg0, arg1))
321 {
322 /* factor_out_conditional_conversion may create a new PHI in
323 BB2 and eliminate an existing PHI in BB2. Recompute values
324 that may be affected by that change. */
325 phis = phi_nodes (bb2);
326 phi = single_non_singleton_phi_for_edges (phis, e1, e2);
327 gcc_assert (phi);
328 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
329 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
330 gcc_assert (arg0 != NULL && arg1 != NULL);
331 }
332
333 /* Do the replacement of conditional if it can be done. */
334 if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
335 cfgchanged = true;
336 else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
337 cfgchanged = true;
338 else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
339 cfgchanged = true;
340 }
341 }
342
343 free (bb_order);
344
345 if (do_store_elim)
346 delete nontrap;
347 /* If the CFG has changed, we should cleanup the CFG. */
348 if (cfgchanged && do_store_elim)
349 {
350 /* In cond-store replacement we have added some loads on edges
351 and new VOPS (as we moved the store, and created a load). */
352 gsi_commit_edge_inserts ();
353 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
354 }
355 else if (cfgchanged)
356 return TODO_cleanup_cfg;
357 return 0;
358 }
359
360 /* Replace the PHI node element whose edge is E in block BB with variable NEW_TREE.
361 Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
362 is known to have two edges, one of which must reach BB). */
363
364 static void
365 replace_phi_edge_with_variable (basic_block cond_block,
366 edge e, gimple *phi, tree new_tree)
367 {
368 basic_block bb = gimple_bb (phi);
369 basic_block block_to_remove;
370 gimple_stmt_iterator gsi;
371
372 /* Change the PHI argument to new. */
373 SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);
374
375 /* Remove the empty basic block. */
376 if (EDGE_SUCC (cond_block, 0)->dest == bb)
377 {
378 EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
379 EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
380 EDGE_SUCC (cond_block, 0)->probability = REG_BR_PROB_BASE;
381 EDGE_SUCC (cond_block, 0)->count += EDGE_SUCC (cond_block, 1)->count;
382
383 block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
384 }
385 else
386 {
387 EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
388 EDGE_SUCC (cond_block, 1)->flags
389 &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
390 EDGE_SUCC (cond_block, 1)->probability = REG_BR_PROB_BASE;
391 EDGE_SUCC (cond_block, 1)->count += EDGE_SUCC (cond_block, 0)->count;
392
393 block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
394 }
395 delete_basic_block (block_to_remove);
396
397 /* Eliminate the COND_EXPR at the end of COND_BLOCK. */
398 gsi = gsi_last_bb (cond_block);
399 gsi_remove (&gsi, true);
400
401 if (dump_file && (dump_flags & TDF_DETAILS))
402 fprintf (dump_file,
403 "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
404 cond_block->index,
405 bb->index);
406 }
407
408 /* PR66726: Factor conversions out of COND_EXPR. If the arguments of the PHI
409 stmt are conversions with the same code, factor the conversion out and
410 apply it to the result of the PHI stmt instead. */
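/* For example, a source-level sketch of the transformation (the names
   below are illustrative only):

     x = cond ? (long) a : (long) b;        t = cond ? a : b;
                                   -->      x = (long) t;

   so the cast is applied once to the new PHI result instead of once
   per PHI argument.  */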
411
412 static bool
413 factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
414 tree arg0, tree arg1)
415 {
416 gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
417 tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
418 tree temp, result;
419 gphi *newphi;
420 gimple_stmt_iterator gsi, gsi_for_def;
421 source_location locus = gimple_location (phi);
422 enum tree_code convert_code;
423
424 /* Handle only PHI statements with two arguments. TODO: If all
425 other arguments to PHI are INTEGER_CST or if their defining
426 statements have the same unary operation, we can handle more
427 than two arguments too. */
428 if (gimple_phi_num_args (phi) != 2)
429 return false;
430
431 /* First canonicalize to simplify tests. */
432 if (TREE_CODE (arg0) != SSA_NAME)
433 {
434 std::swap (arg0, arg1);
435 std::swap (e0, e1);
436 }
437
438 if (TREE_CODE (arg0) != SSA_NAME
439 || (TREE_CODE (arg1) != SSA_NAME
440 && TREE_CODE (arg1) != INTEGER_CST))
441 return false;
442
443 /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
444 a conversion. */
445 arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
446 if (!is_gimple_assign (arg0_def_stmt)
447 || !gimple_assign_cast_p (arg0_def_stmt))
448 return false;
449
450 /* Use the RHS as new_arg0. */
451 convert_code = gimple_assign_rhs_code (arg0_def_stmt);
452 new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
453 if (convert_code == VIEW_CONVERT_EXPR)
454 new_arg0 = TREE_OPERAND (new_arg0, 0);
455
456 if (TREE_CODE (arg1) == SSA_NAME)
457 {
458 /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
459 is a conversion. */
460 arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
461 if (!is_gimple_assign (arg1_def_stmt)
462 || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)
463 return false;
464
465 /* Use the RHS as new_arg1. */
466 new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
467 if (convert_code == VIEW_CONVERT_EXPR)
468 new_arg1 = TREE_OPERAND (new_arg1, 0);
469 }
470 else
471 {
472 /* If arg1 is an INTEGER_CST, fold it to the new type. */
473 if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
474 && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
475 {
476 if (gimple_assign_cast_p (arg0_def_stmt))
477 new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
478 else
479 return false;
480 }
481 else
482 return false;
483 }
484
485 /* If arg0/arg1 have > 1 use, then this transformation actually increases
486 the number of expressions evaluated at runtime. */
487 if (!has_single_use (arg0)
488 || (arg1_def_stmt && !has_single_use (arg1)))
489 return false;
490
491 /* If the types of new_arg0 and new_arg1 are different, bail out. */
492 if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
493 return false;
494
495 /* Create a new PHI stmt. */
496 result = PHI_RESULT (phi);
497 temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
498 newphi = create_phi_node (temp, gimple_bb (phi));
499
500 if (dump_file && (dump_flags & TDF_DETAILS))
501 {
502 fprintf (dump_file, "PHI ");
503 print_generic_expr (dump_file, gimple_phi_result (phi), 0);
504 fprintf (dump_file,
505 " changed to factor conversion out from COND_EXPR.\n");
506 fprintf (dump_file, "New stmt with CAST that defines ");
507 print_generic_expr (dump_file, result, 0);
508 fprintf (dump_file, ".\n");
509 }
510
511 /* Remove the old cast(s) that have a single use. */
512 gsi_for_def = gsi_for_stmt (arg0_def_stmt);
513 gsi_remove (&gsi_for_def, true);
514 if (arg1_def_stmt)
515 {
516 gsi_for_def = gsi_for_stmt (arg1_def_stmt);
517 gsi_remove (&gsi_for_def, true);
518 }
519
520 add_phi_arg (newphi, new_arg0, e0, locus);
521 add_phi_arg (newphi, new_arg1, e1, locus);
522
523 /* Create the conversion stmt and insert it. */
524 if (convert_code == VIEW_CONVERT_EXPR)
525 temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
526 new_stmt = gimple_build_assign (result, convert_code, temp);
527 gsi = gsi_after_labels (gimple_bb (phi));
528 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
529
530 /* Remove the original PHI stmt. */
531 gsi = gsi_for_stmt (phi);
532 gsi_remove (&gsi, true);
533 return true;
534 }
535
536 /* The function conditional_replacement does the main work of doing the
537 conditional replacement. Return true if the replacement is done.
538 Otherwise return false.
539 BB is the basic block where the replacement is going to be done. ARG0
540 is argument 0 from PHI. Likewise for ARG1. */
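/* A source-level sketch of the two forms handled here (illustrative
   names only):

     x = (a == b) ? 1 : 0;    -->   x = a == b;
     x = (a == b) ? 0 : -1;   -->   x = -(int) (a != b);

   the condition is used as-is or inverted, and additionally negated
   for the 0 / -1 case.  */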
541
542 static bool
543 conditional_replacement (basic_block cond_bb, basic_block middle_bb,
544 edge e0, edge e1, gphi *phi,
545 tree arg0, tree arg1)
546 {
547 tree result;
548 gimple *stmt;
549 gassign *new_stmt;
550 tree cond;
551 gimple_stmt_iterator gsi;
552 edge true_edge, false_edge;
553 tree new_var, new_var2;
554 bool neg;
555
556 /* FIXME: Gimplification of complex type is too hard for now. */
557 /* We aren't prepared to handle vectors either (and it is a question
558 if it would be worthwhile anyway). */
559 if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
560 || POINTER_TYPE_P (TREE_TYPE (arg0)))
561 || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
562 || POINTER_TYPE_P (TREE_TYPE (arg1))))
563 return false;
564
565 /* If the PHI arguments are the constants 0 and 1, or 0 and -1, then
566 convert the PHI to the (possibly negated) condition. */
567 if ((integer_zerop (arg0) && integer_onep (arg1))
568 || (integer_zerop (arg1) && integer_onep (arg0)))
569 neg = false;
570 else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
571 || (integer_zerop (arg1) && integer_all_onesp (arg0)))
572 neg = true;
573 else
574 return false;
575
576 if (!empty_block_p (middle_bb))
577 return false;
578
579 /* At this point we know we have a GIMPLE_COND with two successors.
580 One successor is BB, the other successor is an empty block which
581 falls through into BB.
582
583 There is a single PHI node at the join point (BB) and its arguments
584 are constants (0, 1) or (0, -1).
585
586 So, given the condition COND, and the two PHI arguments, we can
587 rewrite this PHI into non-branching code:
588
589 dest = (COND) or dest = COND'
590
591 We use the condition as-is if the argument associated with the
592 true edge has the value one or the argument associated with the
593 false edge has the value zero. Note that those conditions are not
594 the same since only one of the outgoing edges from the GIMPLE_COND
595 will directly reach BB and thus be associated with an argument. */
596
597 stmt = last_stmt (cond_bb);
598 result = PHI_RESULT (phi);
599
600 /* To handle special cases like floating point comparison, it is easier and
601 less error-prone to build a tree and gimplify it on the fly though it is
602 less efficient. */
603 cond = fold_build2_loc (gimple_location (stmt),
604 gimple_cond_code (stmt), boolean_type_node,
605 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
606
607 /* We need to know which is the true edge and which is the false
608 edge so that we know when to invert the condition below. */
609 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
610 if ((e0 == true_edge && integer_zerop (arg0))
611 || (e0 == false_edge && !integer_zerop (arg0))
612 || (e1 == true_edge && integer_zerop (arg1))
613 || (e1 == false_edge && !integer_zerop (arg1)))
614 cond = fold_build1_loc (gimple_location (stmt),
615 TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);
616
617 if (neg)
618 {
619 cond = fold_convert_loc (gimple_location (stmt),
620 TREE_TYPE (result), cond);
621 cond = fold_build1_loc (gimple_location (stmt),
622 NEGATE_EXPR, TREE_TYPE (cond), cond);
623 }
624
625 /* Insert our new statements at the end of conditional block before the
626 COND_STMT. */
627 gsi = gsi_for_stmt (stmt);
628 new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
629 GSI_SAME_STMT);
630
631 if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
632 {
633 source_location locus_0, locus_1;
634
635 new_var2 = make_ssa_name (TREE_TYPE (result));
636 new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
637 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
638 new_var = new_var2;
639
640 /* Set the locus to the first argument, unless it doesn't have one. */
641 locus_0 = gimple_phi_arg_location (phi, 0);
642 locus_1 = gimple_phi_arg_location (phi, 1);
643 if (locus_0 == UNKNOWN_LOCATION)
644 locus_0 = locus_1;
645 gimple_set_location (new_stmt, locus_0);
646 }
647
648 replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);
649 reset_flow_sensitive_info_in_bb (cond_bb);
650
651 /* Note that we optimized this PHI. */
652 return true;
653 }
654
655 /* Update *ARG which is defined in STMT so that it contains the
656 computed value if that seems profitable. Return true if the
657 statement is made dead by that rewriting. */
658
659 static bool
660 jump_function_from_stmt (tree *arg, gimple *stmt)
661 {
662 enum tree_code code = gimple_assign_rhs_code (stmt);
663 if (code == ADDR_EXPR)
664 {
665 /* For arg = &p->i transform it to p, if possible. */
666 tree rhs1 = gimple_assign_rhs1 (stmt);
667 HOST_WIDE_INT offset;
668 tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
669 &offset);
670 if (tem
671 && TREE_CODE (tem) == MEM_REF
672 && (mem_ref_offset (tem) + offset) == 0)
673 {
674 *arg = TREE_OPERAND (tem, 0);
675 return true;
676 }
677 }
678 /* TODO: Much like IPA-CP jump-functions we want to handle constant
679 additions symbolically here, and we'd need to update the comparison
680 code that compares the arg + cst tuples in our caller. For now the
681 code above exactly handles the VEC_BASE pattern from vec.h. */
682 return false;
683 }
684
685 /* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
686 of the form SSA_NAME NE 0.
687
688 If RHS is fed by a simple EQ_EXPR comparison of two values, see if
689 the two input values of the EQ_EXPR match arg0 and arg1.
690
691 If so update *code and return TRUE. Otherwise return FALSE. */
692
693 static bool
694 rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
695 enum tree_code *code, const_tree rhs)
696 {
697 /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
698 statement. */
699 if (TREE_CODE (rhs) == SSA_NAME)
700 {
701 gimple *def1 = SSA_NAME_DEF_STMT (rhs);
702
703 /* Verify the defining statement has an EQ_EXPR on the RHS. */
704 if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
705 {
706 /* Finally verify the source operands of the EQ_EXPR are equal
707 to arg0 and arg1. */
708 tree op0 = gimple_assign_rhs1 (def1);
709 tree op1 = gimple_assign_rhs2 (def1);
710 if ((operand_equal_for_phi_arg_p (arg0, op0)
711 && operand_equal_for_phi_arg_p (arg1, op1))
712 || (operand_equal_for_phi_arg_p (arg0, op1)
713 && operand_equal_for_phi_arg_p (arg1, op0)))
714 {
715 /* We will perform the optimization. */
716 *code = gimple_assign_rhs_code (def1);
717 return true;
718 }
719 }
720 }
721 return false;
722 }
723
724 /* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.
725
726 Also return TRUE if arg0/arg1 are equal to the source arguments of
727 an EQ comparison feeding a BIT_AND_EXPR which feeds COND.
728
729 Return FALSE otherwise. */
730
731 static bool
732 operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
733 enum tree_code *code, gimple *cond)
734 {
735 gimple *def;
736 tree lhs = gimple_cond_lhs (cond);
737 tree rhs = gimple_cond_rhs (cond);
738
739 if ((operand_equal_for_phi_arg_p (arg0, lhs)
740 && operand_equal_for_phi_arg_p (arg1, rhs))
741 || (operand_equal_for_phi_arg_p (arg1, lhs)
742 && operand_equal_for_phi_arg_p (arg0, rhs)))
743 return true;
744
745 /* Now handle more complex case where we have an EQ comparison
746 which feeds a BIT_AND_EXPR which feeds COND.
747
748 First verify that COND is of the form SSA_NAME NE 0. */
749 if (*code != NE_EXPR || !integer_zerop (rhs)
750 || TREE_CODE (lhs) != SSA_NAME)
751 return false;
752
753 /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR. */
754 def = SSA_NAME_DEF_STMT (lhs);
755 if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
756 return false;
757
758 /* Now verify arg0/arg1 correspond to the source arguments of an
759 EQ comparison feeding the BIT_AND_EXPR. */
760
761 tree tmp = gimple_assign_rhs1 (def);
762 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
763 return true;
764
765 tmp = gimple_assign_rhs2 (def);
766 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
767 return true;
768
769 return false;
770 }
771
772 /* Returns true if ARG is a neutral element for operation CODE
773 on the RIGHT side. */
774
775 static bool
776 neutral_element_p (tree_code code, tree arg, bool right)
777 {
778 switch (code)
779 {
780 case PLUS_EXPR:
781 case BIT_IOR_EXPR:
782 case BIT_XOR_EXPR:
783 return integer_zerop (arg);
784
785 case LROTATE_EXPR:
786 case RROTATE_EXPR:
787 case LSHIFT_EXPR:
788 case RSHIFT_EXPR:
789 case MINUS_EXPR:
790 case POINTER_PLUS_EXPR:
791 return right && integer_zerop (arg);
792
793 case MULT_EXPR:
794 return integer_onep (arg);
795
796 case TRUNC_DIV_EXPR:
797 case CEIL_DIV_EXPR:
798 case FLOOR_DIV_EXPR:
799 case ROUND_DIV_EXPR:
800 case EXACT_DIV_EXPR:
801 return right && integer_onep (arg);
802
803 case BIT_AND_EXPR:
804 return integer_all_onesp (arg);
805
806 default:
807 return false;
808 }
809 }
810
811 /* Returns true if ARG is an absorbing element for operation CODE. */
812
813 static bool
814 absorbing_element_p (tree_code code, tree arg)
815 {
816 switch (code)
817 {
818 case BIT_IOR_EXPR:
819 return integer_all_onesp (arg);
820
821 case MULT_EXPR:
822 case BIT_AND_EXPR:
823 return integer_zerop (arg);
824
825 default:
826 return false;
827 }
828 }
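/* Some identities behind the two predicates above, for reference:

     neutral (x OP e == x):    x + 0, x | 0, x ^ 0, x * 1,
                               x - 0, x << 0, x / 1 (right side only,
                               since e.g. 0 - x != x)
     absorbing (x OP a == a):  x * 0, x & 0, x | -1  */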
829
830 /* The function value_replacement does the main work of doing the value
831 replacement. Return non-zero if the replacement is done. Otherwise return
832 0. If we remove the middle basic block, return 2.
833 BB is the basic block where the replacement is going to be done on. ARG0
834 is argument 0 from the PHI. Likewise for ARG1. */
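/* Two source-level sketches of what value_replacement handles
   (illustrative names only):

     x = (a != b) ? a : b;        -->   x = a;
       (on the false edge a == b, so a is the right value there too)

     x = (a != 0) ? a + y : y;    -->   x = a + y;
       (0 is the neutral element of +, so the values agree when a == 0)  */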
835
836 static int
837 value_replacement (basic_block cond_bb, basic_block middle_bb,
838 edge e0, edge e1, gimple *phi,
839 tree arg0, tree arg1)
840 {
841 gimple_stmt_iterator gsi;
842 gimple *cond;
843 edge true_edge, false_edge;
844 enum tree_code code;
845 bool empty_or_with_defined_p = true;
846
847 /* If the type says honor signed zeros we cannot do this
848 optimization. */
849 if (HONOR_SIGNED_ZEROS (arg1))
850 return 0;
851
852 /* If there is a statement in MIDDLE_BB that defines one of the PHI
853 arguments, then adjust arg0 or arg1. */
854 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
855 while (!gsi_end_p (gsi))
856 {
857 gimple *stmt = gsi_stmt (gsi);
858 tree lhs;
859 gsi_next_nondebug (&gsi);
860 if (!is_gimple_assign (stmt))
861 {
862 empty_or_with_defined_p = false;
863 continue;
864 }
865 /* Now try to adjust arg0 or arg1 according to the computation
866 in the statement. */
867 lhs = gimple_assign_lhs (stmt);
868 if (!(lhs == arg0
869 && jump_function_from_stmt (&arg0, stmt))
870 || (lhs == arg1
871 && jump_function_from_stmt (&arg1, stmt)))
872 empty_or_with_defined_p = false;
873 }
874
875 cond = last_stmt (cond_bb);
876 code = gimple_cond_code (cond);
877
878 /* This transformation is only valid for equality comparisons. */
879 if (code != NE_EXPR && code != EQ_EXPR)
880 return 0;
881
882 /* We need to know which is the true edge and which is the false
883 edge so that we know which PHI argument corresponds to which edge. */
884 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
885
886 /* At this point we know we have a COND_EXPR with two successors.
887 One successor is BB, the other successor is an empty block which
888 falls through into BB.
889
890 The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.
891
892 There is a single PHI node at the join point (BB) with two arguments.
893
894 We now need to verify that the two arguments in the PHI node match
895 the two arguments to the equality comparison. */
896
897 if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
898 {
899 edge e;
900 tree arg;
901
902 /* For NE_EXPR, we want to build an assignment result = arg where
903 arg is the PHI argument associated with the true edge. For
904 EQ_EXPR we want the PHI argument associated with the false edge. */
905 e = (code == NE_EXPR ? true_edge : false_edge);
906
907 /* Unfortunately, E may not reach BB (it may instead have gone to
908 OTHER_BLOCK). If that is the case, then we want the single outgoing
909 edge from OTHER_BLOCK which reaches BB and represents the desired
910 path from COND_BLOCK. */
911 if (e->dest == middle_bb)
912 e = single_succ_edge (e->dest);
913
914 /* Now we know the incoming edge to BB that has the argument for the
915 RHS of our new assignment statement. */
916 if (e0 == e)
917 arg = arg0;
918 else
919 arg = arg1;
920
921 /* If the middle basic block was empty or only defines the
922 PHI arguments, and this is the single PHI whose arguments differ
923 for the edges e0 and e1, then we can remove the middle basic block. */
924 if (empty_or_with_defined_p
925 && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
926 e0, e1) == phi)
927 {
928 replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
929 /* Note that we optimized this PHI. */
930 return 2;
931 }
932 else
933 {
934 /* Replace the PHI arguments with arg. */
935 SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
936 SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
937 if (dump_file && (dump_flags & TDF_DETAILS))
938 {
939 fprintf (dump_file, "PHI ");
940 print_generic_expr (dump_file, gimple_phi_result (phi), 0);
941 fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
942 cond_bb->index);
943 print_generic_expr (dump_file, arg, 0);
944 fprintf (dump_file, ".\n");
945 }
946 return 1;
947 }
948
949 }
950
951 /* Now optimize (x != 0) ? x + y : y to just x + y.
952 The following condition is too restrictive; there can easily be another
953 stmt in middle_bb, for instance a CONVERT_EXPR for the second argument. */
954 gimple *assign = last_and_only_stmt (middle_bb);
955 if (!assign || gimple_code (assign) != GIMPLE_ASSIGN
956 || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
957 || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
958 && !POINTER_TYPE_P (TREE_TYPE (arg0))))
959 return 0;
960
961 /* Punt if there are (degenerate) PHIs in middle_bb, there should not be. */
962 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
963 return 0;
964
965 /* Only transform if it removes the condition. */
966 if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
967 return 0;
968
969 /* Size-wise, this is always profitable. */
970 if (optimize_bb_for_speed_p (cond_bb)
971 /* The special case is useless if it has a low probability. */
972 && profile_status_for_fn (cfun) != PROFILE_ABSENT
973 && EDGE_PRED (middle_bb, 0)->probability < PROB_EVEN
974 /* If assign is cheap, there is no point avoiding it. */
975 && estimate_num_insns (assign, &eni_time_weights)
976 >= 3 * estimate_num_insns (cond, &eni_time_weights))
977 return 0;
978
979 tree lhs = gimple_assign_lhs (assign);
980 tree rhs1 = gimple_assign_rhs1 (assign);
981 tree rhs2 = gimple_assign_rhs2 (assign);
982 enum tree_code code_def = gimple_assign_rhs_code (assign);
983 tree cond_lhs = gimple_cond_lhs (cond);
984 tree cond_rhs = gimple_cond_rhs (cond);
985
986 if (((code == NE_EXPR && e1 == false_edge)
987 || (code == EQ_EXPR && e1 == true_edge))
988 && arg0 == lhs
989 && ((arg1 == rhs1
990 && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
991 && neutral_element_p (code_def, cond_rhs, true))
992 || (arg1 == rhs2
993 && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
994 && neutral_element_p (code_def, cond_rhs, false))
995 || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
996 && (operand_equal_for_phi_arg_p (rhs2, cond_lhs)
997 || operand_equal_for_phi_arg_p (rhs1, cond_lhs))
998 && absorbing_element_p (code_def, cond_rhs))))
999 {
1000 gsi = gsi_for_stmt (cond);
1001 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
1002 {
1003 /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
1004 def-stmt in:
1005 if (n_5 != 0)
1006 goto <bb 3>;
1007 else
1008 goto <bb 4>;
1009
1010 <bb 3>:
1011 # RANGE [0, 4294967294]
1012 u_6 = n_5 + 4294967295;
1013
1014 <bb 4>:
1015 # u_3 = PHI <u_6(3), 4294967295(2)> */
1016 SSA_NAME_RANGE_INFO (lhs) = NULL;
1017 SSA_NAME_ANTI_RANGE_P (lhs) = 0;
1018 /* If available, we can use VR of phi result at least. */
1019 tree phires = gimple_phi_result (phi);
1020 struct range_info_def *phires_range_info
1021 = SSA_NAME_RANGE_INFO (phires);
1022 if (phires_range_info)
1023 duplicate_ssa_name_range_info (lhs, SSA_NAME_RANGE_TYPE (phires),
1024 phires_range_info);
1025 }
1026 gimple_stmt_iterator gsi_from = gsi_for_stmt (assign);
1027 gsi_move_before (&gsi_from, &gsi);
1028 replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
1029 return 2;
1030 }
1031
1032 return 0;
1033 }
1034
1035 /* The function minmax_replacement does the main work of doing the minmax
1036 replacement. Return true if the replacement is done. Otherwise return
1037 false.
1038 BB is the basic block where the replacement is going to be done. ARG0
1039 is argument 0 from the PHI. Likewise for ARG1. */
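/* A source-level sketch of the simplest case handled below
   (illustrative names only):

     x = (a < b) ? a : b;   -->   x = MIN (a, b);
     x = (a > b) ? a : b;   -->   x = MAX (a, b);

   the non-empty middle-bb cases (nested MIN/MAX with a bound) are
   spelled out in the case comments inside the function.  */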
1040
1041 static bool
1042 minmax_replacement (basic_block cond_bb, basic_block middle_bb,
1043 edge e0, edge e1, gimple *phi,
1044 tree arg0, tree arg1)
1045 {
1046 tree result, type;
1047 gcond *cond;
1048 gassign *new_stmt;
1049 edge true_edge, false_edge;
1050 enum tree_code cmp, minmax, ass_code;
1051 tree smaller, larger, arg_true, arg_false;
1052 gimple_stmt_iterator gsi, gsi_from;
1053
1054 type = TREE_TYPE (PHI_RESULT (phi));
1055
1056 /* The optimization may be unsafe due to NaNs. */
1057 if (HONOR_NANS (type))
1058 return false;
1059
1060 cond = as_a <gcond *> (last_stmt (cond_bb));
1061 cmp = gimple_cond_code (cond);
1062
1063 /* This transformation is only valid for order comparisons. Record which
1064 operand is smaller/larger if the result of the comparison is true. */
1065 if (cmp == LT_EXPR || cmp == LE_EXPR)
1066 {
1067 smaller = gimple_cond_lhs (cond);
1068 larger = gimple_cond_rhs (cond);
1069 }
1070 else if (cmp == GT_EXPR || cmp == GE_EXPR)
1071 {
1072 smaller = gimple_cond_rhs (cond);
1073 larger = gimple_cond_lhs (cond);
1074 }
1075 else
1076 return false;
1077
1078 /* We need to know which is the true edge and which is the false
1079 edge so that we know which PHI argument corresponds to which edge. */
1080 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1081
1082 /* Forward the edges over the middle basic block. */
1083 if (true_edge->dest == middle_bb)
1084 true_edge = EDGE_SUCC (true_edge->dest, 0);
1085 if (false_edge->dest == middle_bb)
1086 false_edge = EDGE_SUCC (false_edge->dest, 0);
1087
1088 if (true_edge == e0)
1089 {
1090 gcc_assert (false_edge == e1);
1091 arg_true = arg0;
1092 arg_false = arg1;
1093 }
1094 else
1095 {
1096 gcc_assert (false_edge == e0);
1097 gcc_assert (true_edge == e1);
1098 arg_true = arg1;
1099 arg_false = arg0;
1100 }
1101
1102 if (empty_block_p (middle_bb))
1103 {
1104 if (operand_equal_for_phi_arg_p (arg_true, smaller)
1105 && operand_equal_for_phi_arg_p (arg_false, larger))
1106 {
1107 /* Case
1108
1109 if (smaller < larger)
1110 rslt = smaller;
1111 else
1112 rslt = larger; */
1113 minmax = MIN_EXPR;
1114 }
1115 else if (operand_equal_for_phi_arg_p (arg_false, smaller)
1116 && operand_equal_for_phi_arg_p (arg_true, larger))
1117 minmax = MAX_EXPR;
1118 else
1119 return false;
1120 }
1121 else
1122 {
1123 /* Recognize the following case, assuming d <= u:
1124
1125 if (a <= u)
1126 b = MAX (a, d);
1127 x = PHI <b, u>
1128
1129 This is equivalent to
1130
1131 b = MAX (a, d);
1132 x = MIN (b, u); */
1133
1134 gimple *assign = last_and_only_stmt (middle_bb);
1135 tree lhs, op0, op1, bound;
1136
1137 if (!assign
1138 || gimple_code (assign) != GIMPLE_ASSIGN)
1139 return false;
1140
1141 lhs = gimple_assign_lhs (assign);
1142 ass_code = gimple_assign_rhs_code (assign);
1143 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
1144 return false;
1145 op0 = gimple_assign_rhs1 (assign);
1146 op1 = gimple_assign_rhs2 (assign);
1147
1148 if (true_edge->src == middle_bb)
1149 {
1150 /* We got here if the condition is true, i.e., SMALLER < LARGER. */
1151 if (!operand_equal_for_phi_arg_p (lhs, arg_true))
1152 return false;
1153
1154 if (operand_equal_for_phi_arg_p (arg_false, larger))
1155 {
1156 /* Case
1157
1158 if (smaller < larger)
1159 {
1160 r' = MAX_EXPR (smaller, bound)
1161 }
1162 r = PHI <r', larger> --> to be turned to MIN_EXPR. */
1163 if (ass_code != MAX_EXPR)
1164 return false;
1165
1166 minmax = MIN_EXPR;
1167 if (operand_equal_for_phi_arg_p (op0, smaller))
1168 bound = op1;
1169 else if (operand_equal_for_phi_arg_p (op1, smaller))
1170 bound = op0;
1171 else
1172 return false;
1173
1174 /* We need BOUND <= LARGER. */
1175 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1176 bound, larger)))
1177 return false;
1178 }
1179 else if (operand_equal_for_phi_arg_p (arg_false, smaller))
1180 {
1181 /* Case
1182
1183 if (smaller < larger)
1184 {
1185 r' = MIN_EXPR (larger, bound)
1186 }
1187 r = PHI <r', smaller> --> to be turned to MAX_EXPR. */
1188 if (ass_code != MIN_EXPR)
1189 return false;
1190
1191 minmax = MAX_EXPR;
1192 if (operand_equal_for_phi_arg_p (op0, larger))
1193 bound = op1;
1194 else if (operand_equal_for_phi_arg_p (op1, larger))
1195 bound = op0;
1196 else
1197 return false;
1198
1199 /* We need BOUND >= SMALLER. */
1200 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1201 bound, smaller)))
1202 return false;
1203 }
1204 else
1205 return false;
1206 }
1207 else
1208 {
1209 /* We got here if the condition is false, i.e., SMALLER > LARGER. */
1210 if (!operand_equal_for_phi_arg_p (lhs, arg_false))
1211 return false;
1212
1213 if (operand_equal_for_phi_arg_p (arg_true, larger))
1214 {
1215 /* Case
1216
1217 if (smaller > larger)
1218 {
1219 r' = MIN_EXPR (smaller, bound)
1220 }
1221 r = PHI <r', larger> --> to be turned to MAX_EXPR. */
1222 if (ass_code != MIN_EXPR)
1223 return false;
1224
1225 minmax = MAX_EXPR;
1226 if (operand_equal_for_phi_arg_p (op0, smaller))
1227 bound = op1;
1228 else if (operand_equal_for_phi_arg_p (op1, smaller))
1229 bound = op0;
1230 else
1231 return false;
1232
1233 /* We need BOUND >= LARGER. */
1234 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1235 bound, larger)))
1236 return false;
1237 }
1238 else if (operand_equal_for_phi_arg_p (arg_true, smaller))
1239 {
1240 /* Case
1241
1242 if (smaller > larger)
1243 {
1244 r' = MAX_EXPR (larger, bound)
1245 }
1246 r = PHI <r', smaller> --> to be turned to MIN_EXPR. */
1247 if (ass_code != MAX_EXPR)
1248 return false;
1249
1250 minmax = MIN_EXPR;
1251 if (operand_equal_for_phi_arg_p (op0, larger))
1252 bound = op1;
1253 else if (operand_equal_for_phi_arg_p (op1, larger))
1254 bound = op0;
1255 else
1256 return false;
1257
1258 /* We need BOUND <= SMALLER. */
1259 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1260 bound, smaller)))
1261 return false;
1262 }
1263 else
1264 return false;
1265 }
1266
1267 /* Move the statement from the middle block. */
1268 gsi = gsi_last_bb (cond_bb);
1269 gsi_from = gsi_last_nondebug_bb (middle_bb);
1270 gsi_move_before (&gsi_from, &gsi);
1271 }
1272
1273 /* Create an SSA var to hold the min/max result. If we're the only
1274 thing setting the target PHI, then we can clone the PHI
1275 variable. Otherwise we must create a new one. */
1276 result = PHI_RESULT (phi);
1277 if (EDGE_COUNT (gimple_bb (phi)->preds) == 2)
1278 result = duplicate_ssa_name (result, NULL);
1279 else
1280 result = make_ssa_name (TREE_TYPE (result));
1281
1282 /* Emit the statement to compute min/max. */
1283 new_stmt = gimple_build_assign (result, minmax, arg0, arg1);
1284 gsi = gsi_last_bb (cond_bb);
1285 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1286
1287 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1288 reset_flow_sensitive_info_in_bb (cond_bb);
1289
1290 return true;
1291 }
1292
1293 /* The function abs_replacement does the main work of doing the absolute
1294 replacement. Return true if the replacement is done. Otherwise return
1295 false.
1296 BB is the basic block where the replacement is going to be done. ARG0
1297 is argument 0 from the PHI. Likewise for ARG1. */
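/* A source-level sketch of what abs_replacement produces (illustrative
   names only):

     x = (a < 0) ? -a : a;   -->   x = ABS (a);
     x = (a > 0) ? -a : a;   -->   x = -ABS (a);  */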
1298
1299 static bool
1300 abs_replacement (basic_block cond_bb, basic_block middle_bb,
1301 edge e0 ATTRIBUTE_UNUSED, edge e1,
1302 gimple *phi, tree arg0, tree arg1)
1303 {
1304 tree result;
1305 gassign *new_stmt;
1306 gimple *cond;
1307 gimple_stmt_iterator gsi;
1308 edge true_edge, false_edge;
1309 gimple *assign;
1310 edge e;
1311 tree rhs, lhs;
1312 bool negate;
1313 enum tree_code cond_code;
1314
1315 /* If the type says honor signed zeros we cannot do this
1316 optimization. */
1317 if (HONOR_SIGNED_ZEROS (arg1))
1318 return false;
1319
1320 /* OTHER_BLOCK must have only one executable statement which must have the
1321 form arg0 = -arg1 or arg1 = -arg0. */
1322
1323 assign = last_and_only_stmt (middle_bb);
1324 /* If we did not find the proper negation assignment, then we cannot
1325 optimize. */
1326 if (assign == NULL)
1327 return false;
1328
1329 /* If we got here, then we have found the only executable statement
1330 in OTHER_BLOCK. If it is anything other than arg0 = -arg1 or
1331 arg1 = -arg0, then we cannot optimize. */
1332 if (gimple_code (assign) != GIMPLE_ASSIGN)
1333 return false;
1334
1335 lhs = gimple_assign_lhs (assign);
1336
1337 if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
1338 return false;
1339
1340 rhs = gimple_assign_rhs1 (assign);
1341
1342 /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
1343 if (!(lhs == arg0 && rhs == arg1)
1344 && !(lhs == arg1 && rhs == arg0))
1345 return false;
1346
1347 cond = last_stmt (cond_bb);
1348 result = PHI_RESULT (phi);
1349
1350 /* Only relationals comparing arg[01] against zero are interesting. */
1351 cond_code = gimple_cond_code (cond);
1352 if (cond_code != GT_EXPR && cond_code != GE_EXPR
1353 && cond_code != LT_EXPR && cond_code != LE_EXPR)
1354 return false;
1355
1356 /* Make sure the conditional is arg[01] OP y. */
1357 if (gimple_cond_lhs (cond) != rhs)
1358 return false;
1359
1360 if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
1361 ? real_zerop (gimple_cond_rhs (cond))
1362 : integer_zerop (gimple_cond_rhs (cond)))
1363 ;
1364 else
1365 return false;
1366
1367 /* We need to know which is the true edge and which is the false
1368 edge so that we know whether we have abs or negative abs. */
1369 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1370
1371 /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
1372 will need to negate the result. Similarly for LT_EXPR/LE_EXPR if
1373 the false edge goes to OTHER_BLOCK. */
1374 if (cond_code == GT_EXPR || cond_code == GE_EXPR)
1375 e = true_edge;
1376 else
1377 e = false_edge;
1378
1379 if (e->dest == middle_bb)
1380 negate = true;
1381 else
1382 negate = false;
1383
1384 result = duplicate_ssa_name (result, NULL);
1385
1386 if (negate)
1387 lhs = make_ssa_name (TREE_TYPE (result));
1388 else
1389 lhs = result;
1390
1391 /* Build the modify expression with abs expression. */
1392 new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);
1393
1394 gsi = gsi_last_bb (cond_bb);
1395 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1396
1397 if (negate)
1398 {
1399 /* Get the right GSI. We want to insert after the recently
1400 added ABS_EXPR statement (which we know is the first statement
1401 in the block). */
1402 new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);
1403
1404 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1405 }
1406
1407 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1408 reset_flow_sensitive_info_in_bb (cond_bb);
1409
1410 /* Note that we optimized this PHI. */
1411 return true;
1412 }
1413
1414 /* Auxiliary functions to determine the set of memory accesses which
1415 can't trap because they are preceded by accesses to the same memory
1416 portion. We do that for MEM_REFs, so we only need to track
1417 the SSA_NAME of the pointer indirectly referenced. The algorithm
1418 is simply a walk over all instructions in dominator order. When
1419 we see a MEM_REF we determine if we've already seen the same
1420 ref anywhere up to the root of the dominator tree. If we have, the
1421 current access can't trap. If we don't see any dominating access,
1422 the current access might trap, but might also make later accesses
1423 non-trapping, so we remember it. We need to be careful with loads
1424 or stores, for instance a load might not trap, while a store would,
1425 so if we see a dominating read access this doesn't mean that a later
1426 write access would not trap. Hence we also need to differentiate the
1427 type of access(es) seen.
1428
1429 ??? We currently are very conservative and assume that a load might
1430 trap even if a store doesn't (write-only memory). This probably is
1431 overly conservative. */
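/* For example (illustrative only):

     *p = 0;
     if (cond)
       *p = v;

   the second store to *p cannot trap, because the dominating first
   store would already have trapped. A dominating *load* from *p would
   not prove the same for a later store (consider read-only memory),
   which is why the entries below also record the kind of access.  */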
1432
1433 /* A hash-table of SSA_NAMEs, and in which basic block a MEM_REF
1434 through it was seen, which would constitute a no-trap region for
1435 the same accesses. */
1436 struct name_to_bb
1437 {
1438 unsigned int ssa_name_ver;
1439 unsigned int phase;
1440 bool store;
1441 HOST_WIDE_INT offset, size;
1442 basic_block bb;
1443 };
1444
1445 /* Hashtable helpers. */
1446
1447 struct ssa_names_hasher : free_ptr_hash <name_to_bb>
1448 {
1449 static inline hashval_t hash (const name_to_bb *);
1450 static inline bool equal (const name_to_bb *, const name_to_bb *);
1451 };
1452
1453 /* Used for quick clearing of the hash-table when we see calls.
1454 Hash entries with phase < nt_call_phase are invalid. */
1455 static unsigned int nt_call_phase;
1456
1457 /* The hash function. */
1458
1459 inline hashval_t
1460 ssa_names_hasher::hash (const name_to_bb *n)
1461 {
1462 return n->ssa_name_ver ^ (((hashval_t) n->store) << 31)
1463 ^ (n->offset << 6) ^ (n->size << 3);
1464 }
1465
1466 /* The equality function of *P1 and *P2. */
1467
1468 inline bool
1469 ssa_names_hasher::equal (const name_to_bb *n1, const name_to_bb *n2)
1470 {
1471 return n1->ssa_name_ver == n2->ssa_name_ver
1472 && n1->store == n2->store
1473 && n1->offset == n2->offset
1474 && n1->size == n2->size;
1475 }
1476
1477 class nontrapping_dom_walker : public dom_walker
1478 {
1479 public:
1480 nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
1481 : dom_walker (direction), m_nontrapping (ps), m_seen_ssa_names (128) {}
1482
1483 virtual void before_dom_children (basic_block);
1484 virtual void after_dom_children (basic_block);
1485
1486 private:
1487
1488 /* We see the expression EXP in basic block BB. If it's an interesting
1489 expression (a MEM_REF through an SSA_NAME) possibly insert the
1490 expression into the set NONTRAP or the hash table of seen expressions.
1491 STORE is true if this expression is on the LHS, otherwise it's on
1492 the RHS. */
1493 void add_or_mark_expr (basic_block, tree, bool);
1494
1495 hash_set<tree> *m_nontrapping;
1496
1497 /* The hash table for remembering what we've seen. */
1498 hash_table<ssa_names_hasher> m_seen_ssa_names;
1499 };
1500
1501 /* Called by walk_dominator_tree, when entering the block BB. */
1502 void
1503 nontrapping_dom_walker::before_dom_children (basic_block bb)
1504 {
1505 edge e;
1506 edge_iterator ei;
1507 gimple_stmt_iterator gsi;
1508
1509 /* If we haven't seen all our predecessors, clear the hash-table. */
1510 FOR_EACH_EDGE (e, ei, bb->preds)
1511 if ((((size_t)e->src->aux) & 2) == 0)
1512 {
1513 nt_call_phase++;
1514 break;
1515 }
1516
1517 /* Mark this BB as being on the path to dominator root and as visited. */
1518 bb->aux = (void*)(1 | 2);
1519
1520 /* And walk the statements in order. */
1521 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1522 {
1523 gimple *stmt = gsi_stmt (gsi);
1524
1525 if (is_gimple_call (stmt) && !nonfreeing_call_p (stmt))
1526 nt_call_phase++;
1527 else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
1528 {
1529 add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
1530 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
1531 }
1532 }
1533 }
1534
1535 /* Called by walk_dominator_tree, when basic block BB is exited. */
1536 void
1537 nontrapping_dom_walker::after_dom_children (basic_block bb)
1538 {
1539 /* This BB isn't on the path to dominator root anymore. */
1540 bb->aux = (void*)2;
1541 }
1542
1543 /* We see the expression EXP in basic block BB. If it's an interesting
1544 expression (a MEM_REF through an SSA_NAME) possibly insert the
1545 expression into the set NONTRAP or the hash table of seen expressions.
1546 STORE is true if this expression is on the LHS, otherwise it's on
1547 the RHS. */
1548 void
1549 nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
1550 {
1551 HOST_WIDE_INT size;
1552
1553 if (TREE_CODE (exp) == MEM_REF
1554 && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME
1555 && tree_fits_shwi_p (TREE_OPERAND (exp, 1))
1556 && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
1557 {
1558 tree name = TREE_OPERAND (exp, 0);
1559 struct name_to_bb map;
1560 name_to_bb **slot;
1561 struct name_to_bb *n2bb;
1562 basic_block found_bb = 0;
1563
1564 /* Try to find the last seen MEM_REF through the same
1565 SSA_NAME, which can trap. */
1566 map.ssa_name_ver = SSA_NAME_VERSION (name);
1567 map.phase = 0;
1568 map.bb = 0;
1569 map.store = store;
1570 map.offset = tree_to_shwi (TREE_OPERAND (exp, 1));
1571 map.size = size;
1572
1573 slot = m_seen_ssa_names.find_slot (&map, INSERT);
1574 n2bb = *slot;
1575 if (n2bb && n2bb->phase >= nt_call_phase)
1576 found_bb = n2bb->bb;
1577
1578 /* If we've found a trapping MEM_REF, _and_ it dominates EXP
1579 (it's in a basic block on the path from us to the dominator root)
1580 then the current access can't trap. */
1581 if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
1582 {
1583 m_nontrapping->add (exp);
1584 }
1585 else
1586 {
1587 /* EXP might trap, so insert it into the hash table. */
1588 if (n2bb)
1589 {
1590 n2bb->phase = nt_call_phase;
1591 n2bb->bb = bb;
1592 }
1593 else
1594 {
1595 n2bb = XNEW (struct name_to_bb);
1596 n2bb->ssa_name_ver = SSA_NAME_VERSION (name);
1597 n2bb->phase = nt_call_phase;
1598 n2bb->bb = bb;
1599 n2bb->store = store;
1600 n2bb->offset = map.offset;
1601 n2bb->size = size;
1602 *slot = n2bb;
1603 }
1604 }
1605 }
1606 }
1607
1608 /* This is the entry point for gathering non-trapping memory accesses.
1609 It will do a dominator walk over the whole function, and it will
1610 make use of the bb->aux pointers. It returns a set of trees
1611 (the MEM_REFs themselves) which can't trap. */
1612 static hash_set<tree> *
1613 get_non_trapping (void)
1614 {
1615 nt_call_phase = 0;
1616 hash_set<tree> *nontrap = new hash_set<tree>;
1617 /* We're going to do a dominator walk, so ensure that we have
1618 dominance information. */
1619 calculate_dominance_info (CDI_DOMINATORS);
1620
1621 nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
1622 .walk (cfun->cfg->x_entry_block_ptr);
1623
1624 clear_aux_for_blocks ();
1625 return nontrap;
1626 }
1627
1628 /* Do the main work of conditional store replacement. We already know
1629 that the recognized pattern looks like so:
1630
1631 split:
1632 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
1633 MIDDLE_BB:
1634 something
1635 fallthrough (edge E0)
1636 JOIN_BB:
1637 some more
1638
1639 We check that MIDDLE_BB contains only one store, that that store
1640 doesn't trap (not via NOTRAP, but via checking if an access to the same
1641 memory location dominates us) and that the store has a "simple" RHS. */
1642
1643 static bool
1644 cond_store_replacement (basic_block middle_bb, basic_block join_bb,
1645 edge e0, edge e1, hash_set<tree> *nontrap)
1646 {
1647 gimple *assign = last_and_only_stmt (middle_bb);
1648 tree lhs, rhs, name, name2;
1649 gphi *newphi;
1650 gassign *new_stmt;
1651 gimple_stmt_iterator gsi;
1652 source_location locus;
1653
1654 /* Check if middle_bb contains only one store. */
1655 if (!assign
1656 || !gimple_assign_single_p (assign)
1657 || gimple_has_volatile_ops (assign))
1658 return false;
1659
1660 locus = gimple_location (assign);
1661 lhs = gimple_assign_lhs (assign);
1662 rhs = gimple_assign_rhs1 (assign);
1663 if (TREE_CODE (lhs) != MEM_REF
1664 || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
1665 || !is_gimple_reg_type (TREE_TYPE (lhs)))
1666 return false;
1667
1668 /* Prove that we can move the store down. We could also check
1669 TREE_THIS_NOTRAP here, but in that case we could also move stores
1670 whose value is not readily available, which we want to avoid. */
1671 if (!nontrap->contains (lhs))
1672 return false;
1673
1674 /* Now we've checked the constraints, so do the transformation:
1675 1) Remove the single store. */
1676 gsi = gsi_for_stmt (assign);
1677 unlink_stmt_vdef (assign);
1678 gsi_remove (&gsi, true);
1679 release_defs (assign);
1680
1681 /* 2) Insert a load from the memory of the store to the temporary
1682 on the edge which did not contain the store. */
1683 lhs = unshare_expr (lhs);
1684 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1685 new_stmt = gimple_build_assign (name, lhs);
1686 gimple_set_location (new_stmt, locus);
1687 gsi_insert_on_edge (e1, new_stmt);
1688
1689 /* 3) Create a PHI node at the join block, with one argument
1690 holding the old RHS, and the other holding the temporary
1691 where we stored the old memory contents. */
1692 name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1693 newphi = create_phi_node (name2, join_bb);
1694 add_phi_arg (newphi, rhs, e0, locus);
1695 add_phi_arg (newphi, name, e1, locus);
1696
1697 lhs = unshare_expr (lhs);
1698 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
1699
1700 /* 4) Insert the store of the PHI result. */
1701 gsi = gsi_after_labels (join_bb);
1702 if (gsi_end_p (gsi))
1703 {
1704 gsi = gsi_last_bb (join_bb);
1705 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1706 }
1707 else
1708 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1709
1710 return true;
1711 }
1712
1713 /* Do the main work of conditional store replacement. */
1714
1715 static bool
1716 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
1717 basic_block join_bb, gimple *then_assign,
1718 gimple *else_assign)
1719 {
1720 tree lhs_base, lhs, then_rhs, else_rhs, name;
1721 source_location then_locus, else_locus;
1722 gimple_stmt_iterator gsi;
1723 gphi *newphi;
1724 gassign *new_stmt;
1725
1726 if (then_assign == NULL
1727 || !gimple_assign_single_p (then_assign)
1728 || gimple_clobber_p (then_assign)
1729 || gimple_has_volatile_ops (then_assign)
1730 || else_assign == NULL
1731 || !gimple_assign_single_p (else_assign)
1732 || gimple_clobber_p (else_assign)
1733 || gimple_has_volatile_ops (else_assign))
1734 return false;
1735
1736 lhs = gimple_assign_lhs (then_assign);
1737 if (!is_gimple_reg_type (TREE_TYPE (lhs))
1738 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
1739 return false;
1740
1741 lhs_base = get_base_address (lhs);
1742 if (lhs_base == NULL_TREE
1743 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
1744 return false;
1745
1746 then_rhs = gimple_assign_rhs1 (then_assign);
1747 else_rhs = gimple_assign_rhs1 (else_assign);
1748 then_locus = gimple_location (then_assign);
1749 else_locus = gimple_location (else_assign);
1750
1751 /* Now we've checked the constraints, so do the transformation:
1752 1) Remove the stores. */
1753 gsi = gsi_for_stmt (then_assign);
1754 unlink_stmt_vdef (then_assign);
1755 gsi_remove (&gsi, true);
1756 release_defs (then_assign);
1757
1758 gsi = gsi_for_stmt (else_assign);
1759 unlink_stmt_vdef (else_assign);
1760 gsi_remove (&gsi, true);
1761 release_defs (else_assign);
1762
1763 /* 2) Create a PHI node at the join block, with one argument
1764 holding the RHS stored by the then-block store and the other
1765 holding the RHS stored by the else-block store. */
1766 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1767 newphi = create_phi_node (name, join_bb);
1768 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
1769 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
1770
1771 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
1772
1773 /* 3) Insert that PHI node. */
1774 gsi = gsi_after_labels (join_bb);
1775 if (gsi_end_p (gsi))
1776 {
1777 gsi = gsi_last_bb (join_bb);
1778 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1779 }
1780 else
1781 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1782
1783 return true;
1784 }
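
/* A hypothetical source-level sketch of the two-store case above
   (the names cond, p, a and b are invented for this example):

     if (cond)
       *p = a;
     else
       *p = b;

   becomes

     *p = cond ? a : b;

   Unlike the single-store case, no load of the old memory contents is
   needed, because a store happens on both paths.  */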
1785
1786 /* Conditional store replacement. We already know
1787 that the recognized pattern looks like so:
1788
1789 split:
1790 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
1791 THEN_BB:
1792 ...
1793 X = Y;
1794 ...
1795 goto JOIN_BB;
1796 ELSE_BB:
1797 ...
1798 X = Z;
1799 ...
1800 fallthrough (edge E0)
1801 JOIN_BB:
1802 some more
1803
1804 We check that it is safe to sink the store to JOIN_BB by verifying that
1805 there are no read-after-write or write-after-write dependencies in
1806 THEN_BB and ELSE_BB. */
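
/* For instance (a made-up fragment), sinking would be unsafe here
   because of the read-after-write dependence on *p in THEN_BB:

     if (cond)
       {
         *p = a;
         b = *p + 1;   // reads the value just stored
       }
     else
       *p = c;

   Moving the store to *p down to the join block would change the value
   the read observes, so such blocks are rejected by the dependence
   checks below.  */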
1807
1808 static bool
1809 cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
1810 basic_block join_bb)
1811 {
1812 gimple *then_assign = last_and_only_stmt (then_bb);
1813 gimple *else_assign = last_and_only_stmt (else_bb);
1814 vec<data_reference_p> then_datarefs, else_datarefs;
1815 vec<ddr_p> then_ddrs, else_ddrs;
1816 gimple *then_store, *else_store;
1817 bool found, ok = false, res;
1818 struct data_dependence_relation *ddr;
1819 data_reference_p then_dr, else_dr;
1820 int i, j;
1821 tree then_lhs, else_lhs;
1822 basic_block blocks[3];
1823
1824 if (MAX_STORES_TO_SINK == 0)
1825 return false;
1826
1827 /* Handle the case of a single statement in each of THEN_BB and ELSE_BB. */
1828 if (then_assign && else_assign)
1829 return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
1830 then_assign, else_assign);
1831
1832 /* Find data references. */
1833 then_datarefs.create (1);
1834 else_datarefs.create (1);
1835 if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
1836 == chrec_dont_know)
1837 || !then_datarefs.length ()
1838 || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
1839 == chrec_dont_know)
1840 || !else_datarefs.length ())
1841 {
1842 free_data_refs (then_datarefs);
1843 free_data_refs (else_datarefs);
1844 return false;
1845 }
1846
1847 /* Find pairs of stores with equal LHS. */
1848 auto_vec<gimple *, 1> then_stores, else_stores;
1849 FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
1850 {
1851 if (DR_IS_READ (then_dr))
1852 continue;
1853
1854 then_store = DR_STMT (then_dr);
1855 then_lhs = gimple_get_lhs (then_store);
1856 if (then_lhs == NULL_TREE)
1857 continue;
1858 found = false;
1859
1860 FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
1861 {
1862 if (DR_IS_READ (else_dr))
1863 continue;
1864
1865 else_store = DR_STMT (else_dr);
1866 else_lhs = gimple_get_lhs (else_store);
1867 if (else_lhs == NULL_TREE)
1868 continue;
1869
1870 if (operand_equal_p (then_lhs, else_lhs, 0))
1871 {
1872 found = true;
1873 break;
1874 }
1875 }
1876
1877 if (!found)
1878 continue;
1879
1880 then_stores.safe_push (then_store);
1881 else_stores.safe_push (else_store);
1882 }
1883
1884 /* No pairs of stores found, or too many stores to sink. */
1885 if (!then_stores.length ()
1886 || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
1887 {
1888 free_data_refs (then_datarefs);
1889 free_data_refs (else_datarefs);
1890 return false;
1891 }
1892
1893 /* Compute and check data dependencies in both basic blocks. */
1894 then_ddrs.create (1);
1895 else_ddrs.create (1);
1896 if (!compute_all_dependences (then_datarefs, &then_ddrs,
1897 vNULL, false)
1898 || !compute_all_dependences (else_datarefs, &else_ddrs,
1899 vNULL, false))
1900 {
1901 free_dependence_relations (then_ddrs);
1902 free_dependence_relations (else_ddrs);
1903 free_data_refs (then_datarefs);
1904 free_data_refs (else_datarefs);
1905 return false;
1906 }
1907 blocks[0] = then_bb;
1908 blocks[1] = else_bb;
1909 blocks[2] = join_bb;
1910 renumber_gimple_stmt_uids_in_blocks (blocks, 3);
1911
1912 /* Check that there are no read-after-write or write-after-write dependencies
1913 in THEN_BB. */
1914 FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
1915 {
1916 struct data_reference *dra = DDR_A (ddr);
1917 struct data_reference *drb = DDR_B (ddr);
1918
1919 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
1920 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
1921 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
1922 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
1923 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
1924 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
1925 {
1926 free_dependence_relations (then_ddrs);
1927 free_dependence_relations (else_ddrs);
1928 free_data_refs (then_datarefs);
1929 free_data_refs (else_datarefs);
1930 return false;
1931 }
1932 }
1933
1934 /* Check that there are no read-after-write or write-after-write dependencies
1935 in ELSE_BB. */
1936 FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
1937 {
1938 struct data_reference *dra = DDR_A (ddr);
1939 struct data_reference *drb = DDR_B (ddr);
1940
1941 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
1942 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
1943 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
1944 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
1945 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
1946 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
1947 {
1948 free_dependence_relations (then_ddrs);
1949 free_dependence_relations (else_ddrs);
1950 free_data_refs (then_datarefs);
1951 free_data_refs (else_datarefs);
1952 return false;
1953 }
1954 }
1955
1956 /* Sink each pair of stores with the same LHS. */
1957 FOR_EACH_VEC_ELT (then_stores, i, then_store)
1958 {
1959 else_store = else_stores[i];
1960 res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
1961 then_store, else_store);
1962 ok = ok || res;
1963 }
1964
1965 free_dependence_relations (then_ddrs);
1966 free_dependence_relations (else_ddrs);
1967 free_data_refs (then_datarefs);
1968 free_data_refs (else_datarefs);
1969
1970 return ok;
1971 }
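
/* A hypothetical sketch of the multi-store case (all names invented):
   both branches store to *p and to *q, so the two pairs of stores are
   sunk independently:

     if (cond) { *p = a1; *q = b1; }
     else      { *p = a2; *q = b2; }

   becomes

     *p = cond ? a1 : a2;
     *q = cond ? b1 : b2;

   provided the dependence checks above show that reordering is safe.  */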
1972
1973 /* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB. */
1974
1975 static bool
1976 local_mem_dependence (gimple *stmt, basic_block bb)
1977 {
1978 tree vuse = gimple_vuse (stmt);
1979 gimple *def;
1980
1981 if (!vuse)
1982 return false;
1983
1984 def = SSA_NAME_DEF_STMT (vuse);
1985 return (def && gimple_bb (def) == bb);
1986 }
1987
1988 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
1989 BB1 and BB2 are "then" and "else" blocks dependent on this test,
1990 and BB3 rejoins control flow following BB1 and BB2, look for
1991 opportunities to hoist loads as follows. If BB3 contains a PHI of
1992 two loads, one occurring in BB1 and the other in BB2, and the loads are
1993 provably of adjacent fields in the same structure, then move both
1994 loads into BB0. Of course this can only be done if there are no
1995 dependencies preventing such motion.
1996
1997 One of the hoisted loads will always be speculative, so the
1998 transformation is currently conservative:
1999
2000 - The fields must be strictly adjacent.
2001 - The two fields must occupy a single memory block that is
2002 guaranteed not to cross a page boundary.
2003
2004 The last is difficult to prove, as such memory blocks should be
2005 aligned on the minimum of the stack alignment boundary and the
2006 alignment guaranteed by heap allocation interfaces. Thus we rely
2007 on a parameter for the alignment value.
2008
2009 Provided a good value is used for the last case, the first
2010 restriction could possibly be relaxed. */
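
/* For example (a hypothetical type; the cache-line size is only
   illustrative), given

     struct node { struct node *left; struct node *right; };

   the two loads in

     x = cond ? n->left : n->right;

   read strictly adjacent fields, and with a 64-byte line size both
   pointers fit in a single cache line when the structure is suitably
   aligned, so both loads may be hoisted above the branch.  */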
2011
2012 static void
2013 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
2014 basic_block bb2, basic_block bb3)
2015 {
2016 int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
2017 unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
2018 gphi_iterator gsi;
2019
2020 /* Walk the phis in bb3 looking for an opportunity. We are looking
2021 for phis of two SSA names, one defined in bb1 and the other
2022 defined in bb2. */
2023 for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
2024 {
2025 gphi *phi_stmt = gsi.phi ();
2026 gimple *def1, *def2;
2027 tree arg1, arg2, ref1, ref2, field1, field2;
2028 tree tree_offset1, tree_offset2, tree_size2, next;
2029 int offset1, offset2, size2;
2030 unsigned align1;
2031 gimple_stmt_iterator gsi2;
2032 basic_block bb_for_def1, bb_for_def2;
2033
2034 if (gimple_phi_num_args (phi_stmt) != 2
2035 || virtual_operand_p (gimple_phi_result (phi_stmt)))
2036 continue;
2037
2038 arg1 = gimple_phi_arg_def (phi_stmt, 0);
2039 arg2 = gimple_phi_arg_def (phi_stmt, 1);
2040
2041 if (TREE_CODE (arg1) != SSA_NAME
2042 || TREE_CODE (arg2) != SSA_NAME
2043 || SSA_NAME_IS_DEFAULT_DEF (arg1)
2044 || SSA_NAME_IS_DEFAULT_DEF (arg2))
2045 continue;
2046
2047 def1 = SSA_NAME_DEF_STMT (arg1);
2048 def2 = SSA_NAME_DEF_STMT (arg2);
2049
2050 if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
2051 && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
2052 continue;
2053
2054 /* Check the mode of the arguments to be sure a conditional move
2055 can be generated for it. */
2056 if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
2057 == CODE_FOR_nothing)
2058 continue;
2059
2060 /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
2061 if (!gimple_assign_single_p (def1)
2062 || !gimple_assign_single_p (def2)
2063 || gimple_has_volatile_ops (def1)
2064 || gimple_has_volatile_ops (def2))
2065 continue;
2066
2067 ref1 = gimple_assign_rhs1 (def1);
2068 ref2 = gimple_assign_rhs1 (def2);
2069
2070 if (TREE_CODE (ref1) != COMPONENT_REF
2071 || TREE_CODE (ref2) != COMPONENT_REF)
2072 continue;
2073
2074 /* The zeroth operand of the two component references must be
2075 identical. It is not sufficient to compare get_base_address of
2076 the two references, because this could allow for different
2077 elements of the same array in the two trees. It is not safe to
2078 assume that the existence of one array element implies the
2079 existence of a different one. */
2080 if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
2081 continue;
2082
2083 field1 = TREE_OPERAND (ref1, 1);
2084 field2 = TREE_OPERAND (ref2, 1);
2085
2086 /* Check for field adjacency, and ensure field1 comes first. */
2087 for (next = DECL_CHAIN (field1);
2088 next && TREE_CODE (next) != FIELD_DECL;
2089 next = DECL_CHAIN (next))
2090 ;
2091
2092 if (next != field2)
2093 {
2094 for (next = DECL_CHAIN (field2);
2095 next && TREE_CODE (next) != FIELD_DECL;
2096 next = DECL_CHAIN (next))
2097 ;
2098
2099 if (next != field1)
2100 continue;
2101
2102 std::swap (field1, field2);
2103 std::swap (def1, def2);
2104 }
2105
2106 bb_for_def1 = gimple_bb (def1);
2107 bb_for_def2 = gimple_bb (def2);
2108
2109 /* Check for proper alignment of the first field. */
2110 tree_offset1 = bit_position (field1);
2111 tree_offset2 = bit_position (field2);
2112 tree_size2 = DECL_SIZE (field2);
2113
2114 if (!tree_fits_uhwi_p (tree_offset1)
2115 || !tree_fits_uhwi_p (tree_offset2)
2116 || !tree_fits_uhwi_p (tree_size2))
2117 continue;
2118
2119 offset1 = tree_to_uhwi (tree_offset1);
2120 offset2 = tree_to_uhwi (tree_offset2);
2121 size2 = tree_to_uhwi (tree_size2);
2122 align1 = DECL_ALIGN (field1) % param_align_bits;
2123
2124 if (offset1 % BITS_PER_UNIT != 0)
2125 continue;
2126
2127 /* For profitability, the two field references should fit within
2128 a single cache line. */
2129 if (align1 + offset2 - offset1 + size2 > param_align_bits)
2130 continue;
2131
2132 /* The two expressions must not depend on vdefs defined
2133 in bb1/bb2. */
2134 if (local_mem_dependence (def1, bb_for_def1)
2135 || local_mem_dependence (def2, bb_for_def2))
2136 continue;
2137
2138 /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
2139 bb0. We hoist the first one first so that a cache miss is handled
2140 efficiently regardless of hardware cache-fill policy. */
2141 gsi2 = gsi_for_stmt (def1);
2142 gsi_move_to_bb_end (&gsi2, bb0);
2143 gsi2 = gsi_for_stmt (def2);
2144 gsi_move_to_bb_end (&gsi2, bb0);
2145
2146 if (dump_file && (dump_flags & TDF_DETAILS))
2147 {
2148 fprintf (dump_file,
2149 "\nHoisting adjacent loads from %d and %d into %d: \n",
2150 bb_for_def1->index, bb_for_def2->index, bb0->index);
2151 print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
2152 print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
2153 }
2154 }
2155 }
2156
2157 /* Determine whether we should attempt to hoist adjacent loads out of
2158 diamond patterns in pass_phiopt. Always hoist loads if
2159 -fhoist-adjacent-loads is specified and the target machine has
2160 both a conditional move instruction and a defined cache line size. */
2161
2162 static bool
2163 gate_hoist_loads (void)
2164 {
2165 return (flag_hoist_adjacent_loads == 1
2166 && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
2167 && HAVE_conditional_move);
2168 }
2169
2170 /* This pass tries to replace an if-then-else block with an
2171 assignment. We have four kinds of transformations. Some of these
2172 transformations are also performed by the ifcvt RTL optimizer.
2173
2174 Conditional Replacement
2175 -----------------------
2176
2177 This transformation, implemented in conditional_replacement,
2178 replaces
2179
2180 bb0:
2181 if (cond) goto bb2; else goto bb1;
2182 bb1:
2183 bb2:
2184 x = PHI <0 (bb1), 1 (bb0), ...>;
2185
2186 with
2187
2188 bb0:
2189 x' = cond;
2190 goto bb2;
2191 bb2:
2192 x = PHI <x' (bb0), ...>;
2193
2194 We remove bb1 as it becomes unreachable. This pattern often arises
2195 from the gimplification of conditionals.
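
   At the source level (an invented example) this corresponds to
   rewriting

     int x;
     if (a > b) x = 1; else x = 0;

   into the single assignment

     x = a > b;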
2196
2197 Value Replacement
2198 -----------------
2199
2200 This transformation, implemented in value_replacement, replaces
2201
2202 bb0:
2203 if (a != b) goto bb2; else goto bb1;
2204 bb1:
2205 bb2:
2206 x = PHI <a (bb1), b (bb0), ...>;
2207
2208 with
2209
2210 bb0:
2211 bb2:
2212 x = PHI <b (bb0), ...>;
2213
2214 This opportunity can sometimes occur as a result of other
2215 optimizations.
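
   At the source level (an invented example) this corresponds to

     x = (a != b) ? b : a;

   which always yields b, because on the path where the test is false
   a is equal to b; so it is replaced by plain x = b.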
2216
2217
2218 Another case caught by value replacement looks like this:
2219
2220 bb0:
2221 t1 = a == CONST;
2222 t2 = b > c;
2223 t3 = t1 & t2;
2224 if (t3 != 0) goto bb1; else goto bb2;
2225 bb1:
2226 bb2:
2227 x = PHI <CONST, a>
2228
2229 This gets replaced with:
2230 bb0:
2231 bb2:
2232 t1 = a == CONST;
2233 t2 = b > c;
2234 t3 = t1 & t2;
2235 x = a;
2236
2237 ABS Replacement
2238 ---------------
2239
2240 This transformation, implemented in abs_replacement, replaces
2241
2242 bb0:
2243 if (a >= 0) goto bb2; else goto bb1;
2244 bb1:
2245 x = -a;
2246 bb2:
2247 x = PHI <x (bb1), a (bb0), ...>;
2248
2249 with
2250
2251 bb0:
2252 x' = ABS_EXPR< a >;
2253 bb2:
2254 x = PHI <x' (bb0), ...>;
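
   At the source level (an invented example) this corresponds to
   rewriting

     x = (a >= 0) ? a : -a;

   into a single absolute-value computation of a.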
2255
2256 MIN/MAX Replacement
2257 -------------------
2258
2259 This transformation, implemented in minmax_replacement, replaces
2260
2261 bb0:
2262 if (a <= b) goto bb2; else goto bb1;
2263 bb1:
2264 bb2:
2265 x = PHI <b (bb1), a (bb0), ...>;
2266
2267 with
2268
2269 bb0:
2270 x' = MIN_EXPR (a, b)
2271 bb2:
2272 x = PHI <x' (bb0), ...>;
2273
2274 A similar transformation is done for MAX_EXPR.
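
   At the source level (an invented example) the MIN case corresponds
   to rewriting

     x = (a <= b) ? a : b;

   into a single computation of the minimum of a and b.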
2275
2276
2277 This pass also performs a fifth transformation of a slightly different
2278 flavor.
2279
2280 Factor conversion in COND_EXPR
2281 ------------------------------
2282
2283 This transformation factors the conversion out of COND_EXPR with
2284 factor_out_conditional_conversion.
2285
2286 For example:
2287 if (a <= CST) goto <bb 3>; else goto <bb 4>;
2288 <bb 3>:
2289 tmp = (int) a;
2290 <bb 4>:
2291 tmp = PHI <tmp, CST>
2292
2293 Into:
2294 if (a <= CST) goto <bb 3>; else goto <bb 4>;
2295 <bb 3>:
2296 <bb 4>:
2297 a = PHI <a, CST>
2298 tmp = (int) a;
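
   At the source level (an invented example; assume CST is exactly
   representable in a's type) this corresponds to rewriting

     tmp = (a <= CST) ? (int) a : CST;

   into

     tmp = (int) ((a <= CST) ? a : CST);

   which exposes the inner conditional to the other transformations
   above, e.g. the MIN/MAX replacement.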
2299
2300 Adjacent Load Hoisting
2301 ----------------------
2302
2303 This transformation replaces
2304
2305 bb0:
2306 if (...) goto bb2; else goto bb1;
2307 bb1:
2308 x1 = (<expr>).field1;
2309 goto bb3;
2310 bb2:
2311 x2 = (<expr>).field2;
2312 bb3:
2313 # x = PHI <x1, x2>;
2314
2315 with
2316
2317 bb0:
2318 x1 = (<expr>).field1;
2319 x2 = (<expr>).field2;
2320 if (...) goto bb2; else goto bb1;
2321 bb1:
2322 goto bb3;
2323 bb2:
2324 bb3:
2325 # x = PHI <x1, x2>;
2326
2327 The purpose of this transformation is to enable generation of conditional
2328 move instructions such as Intel CMOV or PowerPC ISEL. Because one of
2329 the loads is speculative, the transformation is restricted to very
2330 specific cases to avoid introducing a page fault. We are looking for
2331 the common idiom:
2332
2333 if (...)
2334 x = y->left;
2335 else
2336 x = y->right;
2337
2338 where left and right are typically adjacent pointers in a tree structure. */
2339
2340 namespace {
2341
2342 const pass_data pass_data_phiopt =
2343 {
2344 GIMPLE_PASS, /* type */
2345 "phiopt", /* name */
2346 OPTGROUP_NONE, /* optinfo_flags */
2347 TV_TREE_PHIOPT, /* tv_id */
2348 ( PROP_cfg | PROP_ssa ), /* properties_required */
2349 0, /* properties_provided */
2350 0, /* properties_destroyed */
2351 0, /* todo_flags_start */
2352 0, /* todo_flags_finish */
2353 };
2354
2355 class pass_phiopt : public gimple_opt_pass
2356 {
2357 public:
2358 pass_phiopt (gcc::context *ctxt)
2359 : gimple_opt_pass (pass_data_phiopt, ctxt)
2360 {}
2361
2362 /* opt_pass methods: */
2363 opt_pass * clone () { return new pass_phiopt (m_ctxt); }
2364 virtual bool gate (function *) { return flag_ssa_phiopt; }
2365 virtual unsigned int execute (function *)
2366 {
2367 return tree_ssa_phiopt_worker (false, gate_hoist_loads ());
2368 }
2369
2370 }; // class pass_phiopt
2371
2372 } // anon namespace
2373
2374 gimple_opt_pass *
2375 make_pass_phiopt (gcc::context *ctxt)
2376 {
2377 return new pass_phiopt (ctxt);
2378 }
2379
2380 namespace {
2381
2382 const pass_data pass_data_cselim =
2383 {
2384 GIMPLE_PASS, /* type */
2385 "cselim", /* name */
2386 OPTGROUP_NONE, /* optinfo_flags */
2387 TV_TREE_PHIOPT, /* tv_id */
2388 ( PROP_cfg | PROP_ssa ), /* properties_required */
2389 0, /* properties_provided */
2390 0, /* properties_destroyed */
2391 0, /* todo_flags_start */
2392 0, /* todo_flags_finish */
2393 };
2394
2395 class pass_cselim : public gimple_opt_pass
2396 {
2397 public:
2398 pass_cselim (gcc::context *ctxt)
2399 : gimple_opt_pass (pass_data_cselim, ctxt)
2400 {}
2401
2402 /* opt_pass methods: */
2403 virtual bool gate (function *) { return flag_tree_cselim; }
2404 virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }
2405
2406 }; // class pass_cselim
2407
2408 } // anon namespace
2409
2410 gimple_opt_pass *
2411 make_pass_cselim (gcc::context *ctxt)
2412 {
2413 return new pass_cselim (ctxt);
2414 }