1 /* Optimization of PHI nodes by converting them into straightline code.
2 Copyright (C) 2004-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "insn-codes.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "cfghooks.h"
29 #include "tree-pass.h"
30 #include "ssa.h"
31 #include "optabs-tree.h"
32 #include "insn-config.h"
33 #include "gimple-pretty-print.h"
34 #include "fold-const.h"
35 #include "stor-layout.h"
36 #include "cfganal.h"
37 #include "gimplify.h"
38 #include "gimple-iterator.h"
39 #include "gimplify-me.h"
40 #include "tree-cfg.h"
41 #include "tree-dfa.h"
42 #include "domwalk.h"
43 #include "cfgloop.h"
44 #include "tree-data-ref.h"
45 #include "tree-scalar-evolution.h"
46 #include "tree-inline.h"
47 #include "params.h"
48
49 static unsigned int tree_ssa_phiopt_worker (bool, bool);
50 static bool conditional_replacement (basic_block, basic_block,
51 edge, edge, gphi *, tree, tree);
52 static gphi *factor_out_conditional_conversion (edge, edge, gphi *, tree, tree);
53 static int value_replacement (basic_block, basic_block,
54 edge, edge, gimple *, tree, tree);
55 static bool minmax_replacement (basic_block, basic_block,
56 edge, edge, gimple *, tree, tree);
57 static bool abs_replacement (basic_block, basic_block,
58 edge, edge, gimple *, tree, tree);
59 static bool cond_store_replacement (basic_block, basic_block, edge, edge,
60 hash_set<tree> *);
61 static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
62 static hash_set<tree> * get_non_trapping ();
63 static void replace_phi_edge_with_variable (basic_block, edge, gimple *, tree);
64 static void hoist_adjacent_loads (basic_block, basic_block,
65 basic_block, basic_block);
66 static bool gate_hoist_loads (void);
67
68 /* This pass tries to transform conditional stores into unconditional
69 ones, enabling further simplifications with the simpler then and else
70 blocks. In particular it replaces this:
71
72 bb0:
73 if (cond) goto bb2; else goto bb1;
74 bb1:
75 *p = RHS;
76 bb2:
77
78 with
79
80 bb0:
81 if (cond) goto bb1; else goto bb2;
82 bb1:
83 condtmp' = *p;
84 bb2:
85 condtmp = PHI <RHS, condtmp'>
86 *p = condtmp;
87
88 This transformation can only be done under several constraints,
89 documented below. It also replaces:
90
91 bb0:
92 if (cond) goto bb2; else goto bb1;
93 bb1:
94 *p = RHS1;
95 goto bb3;
96 bb2:
97 *p = RHS2;
98 bb3:
99
100 with
101
102 bb0:
103 if (cond) goto bb3; else goto bb1;
104 bb1:
105 bb3:
106 condtmp = PHI <RHS1, RHS2>
107 *p = condtmp; */
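/* For illustration only: at the source level the first pattern above
   corresponds to something like

     if (!cond)
       *p = v;

   which becomes an unconditional store of a PHI-selected value, and the
   second pattern to

     if (cond) *p = a; else *p = b;

   which becomes a single unconditional store of a PHI of A and B.  This
   is a sketch; the constraints documented below still apply.  */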
108
109 static unsigned int
110 tree_ssa_cs_elim (void)
111 {
112 unsigned todo;
113 /* ??? We are not interested in loop related info, but the following
114 will create it, ICEing as we didn't init loops with pre-headers.
115 An interfacing issue of find_data_references_in_bb. */
116 loop_optimizer_init (LOOPS_NORMAL);
117 scev_initialize ();
118 todo = tree_ssa_phiopt_worker (true, false);
119 scev_finalize ();
120 loop_optimizer_finalize ();
121 return todo;
122 }
123
124 /* Return the single PHI in the SEQ of PHIs whose arguments for edges E0 and E1 differ; a singleton SEQ is returned as-is. */
125
126 static gphi *
127 single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
128 {
129 gimple_stmt_iterator i;
130 gphi *phi = NULL;
131 if (gimple_seq_singleton_p (seq))
132 return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
133 for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
134 {
135 gphi *p = as_a <gphi *> (gsi_stmt (i));
136 /* If the PHI arguments are equal then we can skip this PHI. */
137 if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
138 gimple_phi_arg_def (p, e1->dest_idx)))
139 continue;
140
141 /* If we already have a PHI whose two edge arguments differ, then
142 there is no single such PHI; return NULL. */
143 if (phi)
144 return NULL;
145
146 phi = p;
147 }
148 return phi;
149 }
150
151 /* The core routine of conditional store replacement and normal
152 phi optimizations. Both share much of the infrastructure in how
153 to match applicable basic block patterns. DO_STORE_ELIM is true
154 when we want to do conditional store replacement, false otherwise.
155 DO_HOIST_LOADS is true when we want to hoist adjacent loads out
156 of diamond control flow patterns, false otherwise. */
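/* A sketch of the two CFG shapes matched below, with illustrative
   source (names made up):

     x = a; if (c) x = b;          a triangle: BB with the condition,
                                   one middle block, and the join block
                                   holding the PHI.

     if (c) x = a; else x = b;     a diamond: BB, two middle blocks, and
                                   the join block; only handled here for
                                   store elimination and load hoisting.  */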
157 static unsigned int
158 tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
159 {
160 basic_block bb;
161 basic_block *bb_order;
162 unsigned n, i;
163 bool cfgchanged = false;
164 hash_set<tree> *nontrap = 0;
165
166 if (do_store_elim)
167 /* Calculate the set of non-trapping memory accesses. */
168 nontrap = get_non_trapping ();
169
170 /* Search every basic block for COND_EXPR we may be able to optimize.
171
172 We walk the blocks in order that guarantees that a block with
173 a single predecessor is processed before the predecessor.
174 This ensures that we collapse inner ifs before visiting the
175 outer ones, and also that we do not try to visit a removed
176 block. */
177 bb_order = single_pred_before_succ_order ();
178 n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
179
180 for (i = 0; i < n; i++)
181 {
182 gimple *cond_stmt;
183 gphi *phi;
184 basic_block bb1, bb2;
185 edge e1, e2;
186 tree arg0, arg1;
187
188 bb = bb_order[i];
189
190 cond_stmt = last_stmt (bb);
191 /* Check to see if the last statement is a GIMPLE_COND. */
192 if (!cond_stmt
193 || gimple_code (cond_stmt) != GIMPLE_COND)
194 continue;
195
196 e1 = EDGE_SUCC (bb, 0);
197 bb1 = e1->dest;
198 e2 = EDGE_SUCC (bb, 1);
199 bb2 = e2->dest;
200
201 /* We cannot do the optimization on abnormal edges. */
202 if ((e1->flags & EDGE_ABNORMAL) != 0
203 || (e2->flags & EDGE_ABNORMAL) != 0)
204 continue;
205
206 /* Punt if bb1 has no successors, or if bb2 is NULL or has no successors. */
207 if (EDGE_COUNT (bb1->succs) == 0
208 || bb2 == NULL
209 || EDGE_COUNT (bb2->succs) == 0)
210 continue;
211
212 /* Find the bb which falls through to the other. */
213 if (EDGE_SUCC (bb1, 0)->dest == bb2)
214 ;
215 else if (EDGE_SUCC (bb2, 0)->dest == bb1)
216 {
217 std::swap (bb1, bb2);
218 std::swap (e1, e2);
219 }
220 else if (do_store_elim
221 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
222 {
223 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
224
225 if (!single_succ_p (bb1)
226 || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
227 || !single_succ_p (bb2)
228 || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
229 || EDGE_COUNT (bb3->preds) != 2)
230 continue;
231 if (cond_if_else_store_replacement (bb1, bb2, bb3))
232 cfgchanged = true;
233 continue;
234 }
235 else if (do_hoist_loads
236 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
237 {
238 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
239
240 if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
241 && single_succ_p (bb1)
242 && single_succ_p (bb2)
243 && single_pred_p (bb1)
244 && single_pred_p (bb2)
245 && EDGE_COUNT (bb->succs) == 2
246 && EDGE_COUNT (bb3->preds) == 2
247 /* If one edge or the other is dominant, a conditional move
248 is likely to perform worse than the well-predicted branch. */
249 && !predictable_edge_p (EDGE_SUCC (bb, 0))
250 && !predictable_edge_p (EDGE_SUCC (bb, 1)))
251 hoist_adjacent_loads (bb, bb1, bb2, bb3);
252 continue;
253 }
254 else
255 continue;
256
257 e1 = EDGE_SUCC (bb1, 0);
258
259 /* Make sure that bb1 is just a fall through. */
260 if (!single_succ_p (bb1)
261 || (e1->flags & EDGE_FALLTHRU) == 0)
262 continue;
263
264 /* Also make sure that bb1 only has one predecessor and that it
265 is bb. */
266 if (!single_pred_p (bb1)
267 || single_pred (bb1) != bb)
268 continue;
269
270 if (do_store_elim)
271 {
272 /* bb1 is the middle block, bb2 the join block, bb the split block,
273 e1 the fallthrough edge from bb1 to bb2. We can't do the
274 optimization if the join block has more than two predecessors. */
275 if (EDGE_COUNT (bb2->preds) > 2)
276 continue;
277 if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
278 cfgchanged = true;
279 }
280 else
281 {
282 gimple_seq phis = phi_nodes (bb2);
283 gimple_stmt_iterator gsi;
284 bool candorest = true;
285
286 /* Value replacement can work with more than one PHI
287 so try that first. */
288 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
289 {
290 phi = as_a <gphi *> (gsi_stmt (gsi));
291 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
292 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
293 if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
294 {
295 candorest = false;
296 cfgchanged = true;
297 break;
298 }
299 }
300
301 if (!candorest)
302 continue;
303
304 phi = single_non_singleton_phi_for_edges (phis, e1, e2);
305 if (!phi)
306 continue;
307
308 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
309 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
310
311 /* Something is wrong if we cannot find the arguments in the PHI
312 node. */
313 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
314
315 gphi *newphi = factor_out_conditional_conversion (e1, e2, phi,
316 arg0, arg1);
317 if (newphi != NULL)
318 {
319 phi = newphi;
320 /* factor_out_conditional_conversion may create a new PHI in
321 BB2 and eliminate an existing PHI in BB2. Recompute values
322 that may be affected by that change. */
323 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
324 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
325 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
326 }
327
328 /* Do the replacement of conditional if it can be done. */
329 if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
330 cfgchanged = true;
331 else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
332 cfgchanged = true;
333 else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
334 cfgchanged = true;
335 }
336 }
337
338 free (bb_order);
339
340 if (do_store_elim)
341 delete nontrap;
342 /* If the CFG has changed, we should clean up the CFG. */
343 if (cfgchanged && do_store_elim)
344 {
345 /* In cond-store replacement we have added some loads on edges
346 and new VOPS (as we moved the store, and created a load). */
347 gsi_commit_edge_inserts ();
348 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
349 }
350 else if (cfgchanged)
351 return TODO_cleanup_cfg;
352 return 0;
353 }
354
355 /* Replace PHI node element whose edge is E in block BB with variable NEW.
356 Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
357 is known to have two edges, one of which must reach BB). */
358
359 static void
360 replace_phi_edge_with_variable (basic_block cond_block,
361 edge e, gimple *phi, tree new_tree)
362 {
363 basic_block bb = gimple_bb (phi);
364 basic_block block_to_remove;
365 gimple_stmt_iterator gsi;
366
367 /* Change the PHI argument to new. */
368 SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);
369
370 /* Remove the empty basic block. */
371 if (EDGE_SUCC (cond_block, 0)->dest == bb)
372 {
373 EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
374 EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
375 EDGE_SUCC (cond_block, 0)->probability = REG_BR_PROB_BASE;
376 EDGE_SUCC (cond_block, 0)->count += EDGE_SUCC (cond_block, 1)->count;
377
378 block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
379 }
380 else
381 {
382 EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
383 EDGE_SUCC (cond_block, 1)->flags
384 &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
385 EDGE_SUCC (cond_block, 1)->probability = REG_BR_PROB_BASE;
386 EDGE_SUCC (cond_block, 1)->count += EDGE_SUCC (cond_block, 0)->count;
387
388 block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
389 }
390 delete_basic_block (block_to_remove);
391
392 /* Eliminate the COND_EXPR at the end of COND_BLOCK. */
393 gsi = gsi_last_bb (cond_block);
394 gsi_remove (&gsi, true);
395
396 if (dump_file && (dump_flags & TDF_DETAILS))
397 fprintf (dump_file,
398 "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
399 cond_block->index,
400 bb->index);
401 }
402
403 /* PR66726: Factor conversion out of COND_EXPR. If the arguments of the PHI
404 stmt are conversions, factor the conversion out and apply it to the
405 result of the PHI stmt instead. Return the newly-created PHI, if any. */
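/* An illustrative sketch of the rewrite, with made-up SSA names:

     before:                            after:
       bb1:  t1_5 = (int) s_4;            bb2:  t_9 = PHI <s_4(1), 7(0)>
       bb2:  x_6 = PHI <t1_5(1), 7(0)>          x_6 = (int) t_9;

   i.e. the conversion is applied once to the PHI result instead of to
   each argument, with an INTEGER_CST argument folded to the source
   type first.  */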
406
407 static gphi *
408 factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
409 tree arg0, tree arg1)
410 {
411 gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
412 tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
413 tree temp, result;
414 gphi *newphi;
415 gimple_stmt_iterator gsi, gsi_for_def;
416 source_location locus = gimple_location (phi);
417 enum tree_code convert_code;
418
419 /* Handle only PHI statements with two arguments. TODO: If all
420 other arguments to PHI are INTEGER_CST or if their defining
421 statements have the same unary operation, we can handle more
422 than two arguments too. */
423 if (gimple_phi_num_args (phi) != 2)
424 return NULL;
425
426 /* First canonicalize to simplify tests. */
427 if (TREE_CODE (arg0) != SSA_NAME)
428 {
429 std::swap (arg0, arg1);
430 std::swap (e0, e1);
431 }
432
433 if (TREE_CODE (arg0) != SSA_NAME
434 || (TREE_CODE (arg1) != SSA_NAME
435 && TREE_CODE (arg1) != INTEGER_CST))
436 return NULL;
437
438 /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
439 a conversion. */
440 arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
441 if (!is_gimple_assign (arg0_def_stmt)
442 || !gimple_assign_cast_p (arg0_def_stmt))
443 return NULL;
444
445 /* Use the RHS as new_arg0. */
446 convert_code = gimple_assign_rhs_code (arg0_def_stmt);
447 new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
448 if (convert_code == VIEW_CONVERT_EXPR)
449 new_arg0 = TREE_OPERAND (new_arg0, 0);
450
451 if (TREE_CODE (arg1) == SSA_NAME)
452 {
453 /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
454 is a conversion. */
455 arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
456 if (!is_gimple_assign (arg1_def_stmt)
457 || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)
458 return NULL;
459
460 /* Use the RHS as new_arg1. */
461 new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
462 if (convert_code == VIEW_CONVERT_EXPR)
463 new_arg1 = TREE_OPERAND (new_arg1, 0);
464 }
465 else
466 {
467 /* If arg1 is an INTEGER_CST, fold it to the new type. */
468 if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
469 && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
470 {
471 if (gimple_assign_cast_p (arg0_def_stmt))
472 new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
473 else
474 return NULL;
475 }
476 else
477 return NULL;
478 }
479
480 /* If arg0/arg1 have > 1 use, then this transformation actually increases
481 the number of expressions evaluated at runtime. */
482 if (!has_single_use (arg0)
483 || (arg1_def_stmt && !has_single_use (arg1)))
484 return NULL;
485
486 /* If the types of new_arg0 and new_arg1 are different, bail out. */
487 if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
488 return NULL;
489
490 /* Create a new PHI stmt. */
491 result = PHI_RESULT (phi);
492 temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
493 newphi = create_phi_node (temp, gimple_bb (phi));
494
495 if (dump_file && (dump_flags & TDF_DETAILS))
496 {
497 fprintf (dump_file, "PHI ");
498 print_generic_expr (dump_file, gimple_phi_result (phi), 0);
499 fprintf (dump_file,
500 " changed to factor conversion out from COND_EXPR.\n");
501 fprintf (dump_file, "New stmt with CAST that defines ");
502 print_generic_expr (dump_file, result, 0);
503 fprintf (dump_file, ".\n");
504 }
505
506 /* Remove the old cast(s), each of which has a single use. */
507 gsi_for_def = gsi_for_stmt (arg0_def_stmt);
508 gsi_remove (&gsi_for_def, true);
509 release_defs (arg0_def_stmt);
510
511 if (arg1_def_stmt)
512 {
513 gsi_for_def = gsi_for_stmt (arg1_def_stmt);
514 gsi_remove (&gsi_for_def, true);
515 release_defs (arg1_def_stmt);
516 }
517
518 add_phi_arg (newphi, new_arg0, e0, locus);
519 add_phi_arg (newphi, new_arg1, e1, locus);
520
521 /* Create the conversion stmt and insert it. */
522 if (convert_code == VIEW_CONVERT_EXPR)
523 temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
524 new_stmt = gimple_build_assign (result, convert_code, temp);
525 gsi = gsi_after_labels (gimple_bb (phi));
526 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
527
528 /* Remove the original PHI stmt. */
529 gsi = gsi_for_stmt (phi);
530 gsi_remove (&gsi, true);
531 return newphi;
532 }
533
534 /* The function conditional_replacement does the main work of doing the
535 conditional replacement. Return true if the replacement is done.
536 Otherwise return false.
537 BB is the basic block where the replacement is going to be done. ARG0
538 is argument 0 from PHI. Likewise for ARG1. */
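/* E.g. (an illustrative sketch) for source like

     int f (int a, int b)
     {
       return a < b ? 1 : 0;
     }

   the PHI <1, 0> guarded by the comparison is rewritten into a plain
   t = a < b, and a PHI of 0/-1 additionally gets a NEGATE_EXPR of the
   converted condition.  */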
539
540 static bool
541 conditional_replacement (basic_block cond_bb, basic_block middle_bb,
542 edge e0, edge e1, gphi *phi,
543 tree arg0, tree arg1)
544 {
545 tree result;
546 gimple *stmt;
547 gassign *new_stmt;
548 tree cond;
549 gimple_stmt_iterator gsi;
550 edge true_edge, false_edge;
551 tree new_var, new_var2;
552 bool neg;
553
554 /* FIXME: Gimplification of complex type is too hard for now. */
555 /* We aren't prepared to handle vectors either (and it is a question
556 if it would be worthwhile anyway). */
557 if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
558 || POINTER_TYPE_P (TREE_TYPE (arg0)))
559 || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
560 || POINTER_TYPE_P (TREE_TYPE (arg1))))
561 return false;
562
563 /* If the PHI arguments are the constants 0 and 1, or 0 and -1, then
564 convert the PHI into the conditional. */
565 if ((integer_zerop (arg0) && integer_onep (arg1))
566 || (integer_zerop (arg1) && integer_onep (arg0)))
567 neg = false;
568 else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
569 || (integer_zerop (arg1) && integer_all_onesp (arg0)))
570 neg = true;
571 else
572 return false;
573
574 if (!empty_block_p (middle_bb))
575 return false;
576
577 /* At this point we know we have a GIMPLE_COND with two successors.
578 One successor is BB, the other successor is an empty block which
579 falls through into BB.
580
581 There is a single PHI node at the join point (BB) and its arguments
582 are constants (0, 1) or (0, -1).
583
584 So, given the condition COND, and the two PHI arguments, we can
585 rewrite this PHI into non-branching code:
586
587 dest = (COND) or dest = COND'
588
589 We use the condition as-is if the argument associated with the
590 true edge has the value one or the argument associated with the
591 false edge has the value zero. Note that those conditions are not
592 the same since only one of the outgoing edges from the GIMPLE_COND
593 will directly reach BB and thus be associated with an argument. */
594
595 stmt = last_stmt (cond_bb);
596 result = PHI_RESULT (phi);
597
598 /* To handle special cases like floating point comparison, it is easier and
599 less error-prone to build a tree and gimplify it on the fly though it is
600 less efficient. */
601 cond = fold_build2_loc (gimple_location (stmt),
602 gimple_cond_code (stmt), boolean_type_node,
603 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
604
605 /* We need to know which is the true edge and which is the false
606 edge so that we know when to invert the condition below. */
607 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
608 if ((e0 == true_edge && integer_zerop (arg0))
609 || (e0 == false_edge && !integer_zerop (arg0))
610 || (e1 == true_edge && integer_zerop (arg1))
611 || (e1 == false_edge && !integer_zerop (arg1)))
612 cond = fold_build1_loc (gimple_location (stmt),
613 TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);
614
615 if (neg)
616 {
617 cond = fold_convert_loc (gimple_location (stmt),
618 TREE_TYPE (result), cond);
619 cond = fold_build1_loc (gimple_location (stmt),
620 NEGATE_EXPR, TREE_TYPE (cond), cond);
621 }
622
623 /* Insert our new statements at the end of conditional block before the
624 COND_STMT. */
625 gsi = gsi_for_stmt (stmt);
626 new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
627 GSI_SAME_STMT);
628
629 if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
630 {
631 source_location locus_0, locus_1;
632
633 new_var2 = make_ssa_name (TREE_TYPE (result));
634 new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
635 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
636 new_var = new_var2;
637
638 /* Set the locus to the first argument, unless it doesn't have one. */
639 locus_0 = gimple_phi_arg_location (phi, 0);
640 locus_1 = gimple_phi_arg_location (phi, 1);
641 if (locus_0 == UNKNOWN_LOCATION)
642 locus_0 = locus_1;
643 gimple_set_location (new_stmt, locus_0);
644 }
645
646 replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);
647 reset_flow_sensitive_info_in_bb (cond_bb);
648
649 /* Note that we optimized this PHI. */
650 return true;
651 }
652
653 /* Update *ARG which is defined in STMT so that it contains the
654 computed value if that seems profitable. Return true if the
655 statement is made dead by that rewriting. */
656
657 static bool
658 jump_function_from_stmt (tree *arg, gimple *stmt)
659 {
660 enum tree_code code = gimple_assign_rhs_code (stmt);
661 if (code == ADDR_EXPR)
662 {
663 /* For arg = &p->i transform it to p, if possible. */
664 tree rhs1 = gimple_assign_rhs1 (stmt);
665 HOST_WIDE_INT offset;
666 tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
667 &offset);
668 if (tem
669 && TREE_CODE (tem) == MEM_REF
670 && (mem_ref_offset (tem) + offset) == 0)
671 {
672 *arg = TREE_OPERAND (tem, 0);
673 return true;
674 }
675 }
676 /* TODO: Much like IPA-CP jump-functions we want to handle constant
677 additions symbolically here, and we'd need to update the comparison
678 code that compares the arg + cst tuples in our caller. For now the
679 code above exactly handles the VEC_BASE pattern from vec.h. */
680 return false;
681 }
682
683 /* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
684 of the form SSA_NAME NE 0.
685
686 If RHS is fed by a simple EQ_EXPR comparison of two values, see if
687 the two input values of the EQ_EXPR match arg0 and arg1.
688
689 If so update *code and return TRUE. Otherwise return FALSE. */
690
691 static bool
692 rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
693 enum tree_code *code, const_tree rhs)
694 {
695 /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
696 statement. */
697 if (TREE_CODE (rhs) == SSA_NAME)
698 {
699 gimple *def1 = SSA_NAME_DEF_STMT (rhs);
700
701 /* Verify the defining statement has an EQ_EXPR on the RHS. */
702 if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
703 {
704 /* Finally verify the source operands of the EQ_EXPR are equal
705 to arg0 and arg1. */
706 tree op0 = gimple_assign_rhs1 (def1);
707 tree op1 = gimple_assign_rhs2 (def1);
708 if ((operand_equal_for_phi_arg_p (arg0, op0)
709 && operand_equal_for_phi_arg_p (arg1, op1))
710 || (operand_equal_for_phi_arg_p (arg0, op1)
711 && operand_equal_for_phi_arg_p (arg1, op0)))
712 {
713 /* We will perform the optimization. */
714 *code = gimple_assign_rhs_code (def1);
715 return true;
716 }
717 }
718 }
719 return false;
720 }
721
722 /* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.
723
724 Also return TRUE if arg0/arg1 are equal to the source arguments of
725 an EQ comparison feeding a BIT_AND_EXPR which feeds COND.
726
727 Return FALSE otherwise. */
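/* An illustrative sketch of the BIT_AND_EXPR form handled here, with
   made-up SSA names:

     t1_3 = a_1 == b_2;
     t2_6 = t1_3 & c_5;
     if (t2_6 != 0) goto ...;

   If ARG0/ARG1 match a_1/b_2 (in either order), *CODE is rewritten to
   EQ_EXPR.  */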
728
729 static bool
730 operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
731 enum tree_code *code, gimple *cond)
732 {
733 gimple *def;
734 tree lhs = gimple_cond_lhs (cond);
735 tree rhs = gimple_cond_rhs (cond);
736
737 if ((operand_equal_for_phi_arg_p (arg0, lhs)
738 && operand_equal_for_phi_arg_p (arg1, rhs))
739 || (operand_equal_for_phi_arg_p (arg1, lhs)
740 && operand_equal_for_phi_arg_p (arg0, rhs)))
741 return true;
742
743 /* Now handle more complex case where we have an EQ comparison
744 which feeds a BIT_AND_EXPR which feeds COND.
745
746 First verify that COND is of the form SSA_NAME NE 0. */
747 if (*code != NE_EXPR || !integer_zerop (rhs)
748 || TREE_CODE (lhs) != SSA_NAME)
749 return false;
750
751 /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR. */
752 def = SSA_NAME_DEF_STMT (lhs);
753 if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
754 return false;
755
756 /* Now verify arg0/arg1 correspond to the source arguments of an
757 EQ comparison feeding the BIT_AND_EXPR. */
758
759 tree tmp = gimple_assign_rhs1 (def);
760 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
761 return true;
762
763 tmp = gimple_assign_rhs2 (def);
764 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
765 return true;
766
767 return false;
768 }
769
770 /* Returns true if ARG is a neutral element for operation CODE
771 on the RIGHT side. */
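/* For instance x + 0, x | 0 and x ^ 0 equal x with the zero on either
   side, while x - 0 and x >> 0 equal x only when the zero is the right
   operand (0 - x is not x); hence the RIGHT parameter.  A sketch:

     neutral_element_p (PLUS_EXPR, integer_zero_node, false)    true
     neutral_element_p (MINUS_EXPR, integer_zero_node, false)   false
     neutral_element_p (MINUS_EXPR, integer_zero_node, true)    true  */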
772
773 static bool
774 neutral_element_p (tree_code code, tree arg, bool right)
775 {
776 switch (code)
777 {
778 case PLUS_EXPR:
779 case BIT_IOR_EXPR:
780 case BIT_XOR_EXPR:
781 return integer_zerop (arg);
782
783 case LROTATE_EXPR:
784 case RROTATE_EXPR:
785 case LSHIFT_EXPR:
786 case RSHIFT_EXPR:
787 case MINUS_EXPR:
788 case POINTER_PLUS_EXPR:
789 return right && integer_zerop (arg);
790
791 case MULT_EXPR:
792 return integer_onep (arg);
793
794 case TRUNC_DIV_EXPR:
795 case CEIL_DIV_EXPR:
796 case FLOOR_DIV_EXPR:
797 case ROUND_DIV_EXPR:
798 case EXACT_DIV_EXPR:
799 return right && integer_onep (arg);
800
801 case BIT_AND_EXPR:
802 return integer_all_onesp (arg);
803
804 default:
805 return false;
806 }
807 }
808
809 /* Returns true if ARG is an absorbing element for operation CODE. */
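/* E.g. x * 0 and x & 0 are 0 and x | -1 is -1 regardless of x, so a
   PHI like (x != 0) ? x * z : 0 can be computed unconditionally as
   x * z (an illustrative sketch of the use in value_replacement).  */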
810
811 static bool
812 absorbing_element_p (tree_code code, tree arg)
813 {
814 switch (code)
815 {
816 case BIT_IOR_EXPR:
817 return integer_all_onesp (arg);
818
819 case MULT_EXPR:
820 case BIT_AND_EXPR:
821 return integer_zerop (arg);
822
823 default:
824 return false;
825 }
826 }
827
828 /* The function value_replacement does the main work of doing the value
829 replacement. Return non-zero if the replacement is done. Otherwise return
830 0. If we remove the middle basic block, return 2.
831 BB is the basic block where the replacement is going to be done. ARG0
832 is argument 0 from the PHI. Likewise for ARG1. */
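/* Two illustrative sketches of what this catches:

     y = (x == 4) ? 4 : x;         becomes   y = x;
     y = (x != 0) ? x + z : z;     becomes   y = x + z;

   the latter because 0 + z == z, using the neutral/absorbing element
   checks further below.  */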
833
834 static int
835 value_replacement (basic_block cond_bb, basic_block middle_bb,
836 edge e0, edge e1, gimple *phi,
837 tree arg0, tree arg1)
838 {
839 gimple_stmt_iterator gsi;
840 gimple *cond;
841 edge true_edge, false_edge;
842 enum tree_code code;
843 bool empty_or_with_defined_p = true;
844
845 /* If the type says honor signed zeros we cannot do this
846 optimization. */
847 if (HONOR_SIGNED_ZEROS (arg1))
848 return 0;
849
850 /* If there is a statement in MIDDLE_BB that defines one of the PHI
851 arguments, then adjust arg0 or arg1. */
852 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
853 while (!gsi_end_p (gsi))
854 {
855 gimple *stmt = gsi_stmt (gsi);
856 tree lhs;
857 gsi_next_nondebug (&gsi);
858 if (!is_gimple_assign (stmt))
859 {
860 empty_or_with_defined_p = false;
861 continue;
862 }
863 /* Now try to adjust arg0 or arg1 according to the computation
864 in the statement. */
865 lhs = gimple_assign_lhs (stmt);
866 if (!(lhs == arg0
867 && jump_function_from_stmt (&arg0, stmt))
868 || (lhs == arg1
869 && jump_function_from_stmt (&arg1, stmt)))
870 empty_or_with_defined_p = false;
871 }
872
873 cond = last_stmt (cond_bb);
874 code = gimple_cond_code (cond);
875
876 /* This transformation is only valid for equality comparisons. */
877 if (code != NE_EXPR && code != EQ_EXPR)
878 return 0;
879
880 /* We need to know which is the true edge and which is the false
881 edge so that we know which PHI argument each edge selects. */
882 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
883
884 /* At this point we know we have a COND_EXPR with two successors.
885 One successor is BB, the other successor is an empty block which
886 falls through into BB.
887
888 The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.
889
890 There is a single PHI node at the join point (BB) with two arguments.
891
892 We now need to verify that the two arguments in the PHI node match
893 the two arguments to the equality comparison. */
894
895 if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
896 {
897 edge e;
898 tree arg;
899
900 /* For NE_EXPR, we want to build an assignment result = arg where
901 arg is the PHI argument associated with the true edge. For
902 EQ_EXPR we want the PHI argument associated with the false edge. */
903 e = (code == NE_EXPR ? true_edge : false_edge);
904
905 /* Unfortunately, E may not reach BB (it may instead have gone to
906 OTHER_BLOCK). If that is the case, then we want the single outgoing
907 edge from OTHER_BLOCK which reaches BB and represents the desired
908 path from COND_BLOCK. */
909 if (e->dest == middle_bb)
910 e = single_succ_edge (e->dest);
911
912 /* Now we know the incoming edge to BB that has the argument for the
913 RHS of our new assignment statement. */
914 if (e0 == e)
915 arg = arg0;
916 else
917 arg = arg1;
918
919 /* If the middle basic block was empty or is defining the
920 PHI arguments and this is a single phi where the args are different
921 for the edges e0 and e1, then we can remove the middle basic block. */
922 if (empty_or_with_defined_p
923 && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
924 e0, e1) == phi)
925 {
926 replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
927 /* Note that we optimized this PHI. */
928 return 2;
929 }
930 else
931 {
932 /* Replace the PHI arguments with arg. */
933 SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
934 SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
935 if (dump_file && (dump_flags & TDF_DETAILS))
936 {
937 fprintf (dump_file, "PHI ");
938 print_generic_expr (dump_file, gimple_phi_result (phi), 0);
939 fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
940 cond_bb->index);
941 print_generic_expr (dump_file, arg, 0);
942 fprintf (dump_file, ".\n");
943 }
944 return 1;
945 }
946
947 }
948
949 /* Now optimize (x != 0) ? x + y : y to just x + y.
950 The following condition is too restrictive; there can easily be another
951 stmt in middle_bb, for instance a CONVERT_EXPR for the second argument. */
952 gimple *assign = last_and_only_stmt (middle_bb);
953 if (!assign || gimple_code (assign) != GIMPLE_ASSIGN
954 || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
955 || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
956 && !POINTER_TYPE_P (TREE_TYPE (arg0))))
957 return 0;
958
959 /* Punt if there are (degenerate) PHIs in middle_bb, there should not be. */
960 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
961 return 0;
962
963 /* Only transform if it removes the condition. */
964 if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
965 return 0;
966
967 /* Size-wise, this is always profitable. */
968 if (optimize_bb_for_speed_p (cond_bb)
969 /* The special case is useless if it has a low probability. */
970 && profile_status_for_fn (cfun) != PROFILE_ABSENT
971 && EDGE_PRED (middle_bb, 0)->probability < PROB_EVEN
972 /* If assign is cheap, there is no point avoiding it. */
973 && estimate_num_insns (assign, &eni_time_weights)
974 >= 3 * estimate_num_insns (cond, &eni_time_weights))
975 return 0;
976
977 tree lhs = gimple_assign_lhs (assign);
978 tree rhs1 = gimple_assign_rhs1 (assign);
979 tree rhs2 = gimple_assign_rhs2 (assign);
980 enum tree_code code_def = gimple_assign_rhs_code (assign);
981 tree cond_lhs = gimple_cond_lhs (cond);
982 tree cond_rhs = gimple_cond_rhs (cond);
983
984 if (((code == NE_EXPR && e1 == false_edge)
985 || (code == EQ_EXPR && e1 == true_edge))
986 && arg0 == lhs
987 && ((arg1 == rhs1
988 && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
989 && neutral_element_p (code_def, cond_rhs, true))
990 || (arg1 == rhs2
991 && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
992 && neutral_element_p (code_def, cond_rhs, false))
993 || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
994 && (operand_equal_for_phi_arg_p (rhs2, cond_lhs)
995 || operand_equal_for_phi_arg_p (rhs1, cond_lhs))
996 && absorbing_element_p (code_def, cond_rhs))))
997 {
998 gsi = gsi_for_stmt (cond);
999 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
1000 {
1001 /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
1002 def-stmt in:
1003 if (n_5 != 0)
1004 goto <bb 3>;
1005 else
1006 goto <bb 4>;
1007
1008 <bb 3>:
1009 # RANGE [0, 4294967294]
1010 u_6 = n_5 + 4294967295;
1011
1012 <bb 4>:
1013 # u_3 = PHI <u_6(3), 4294967295(2)> */
1014 SSA_NAME_RANGE_INFO (lhs) = NULL;
1015 /* If available, we can use VR of phi result at least. */
1016 tree phires = gimple_phi_result (phi);
1017 struct range_info_def *phires_range_info
1018 = SSA_NAME_RANGE_INFO (phires);
1019 if (phires_range_info)
1020 duplicate_ssa_name_range_info (lhs, SSA_NAME_RANGE_TYPE (phires),
1021 phires_range_info);
1022 }
1023 gimple_stmt_iterator gsi_from = gsi_for_stmt (assign);
1024 gsi_move_before (&gsi_from, &gsi);
1025 replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
1026 return 2;
1027 }
1028
1029 return 0;
1030 }
1031
1032 /* The function minmax_replacement does the main work of doing the minmax
1033 replacement. Return true if the replacement is done. Otherwise return
1034 false.
1035 BB is the basic block where the replacement is going to be done. ARG0
1036 is argument 0 from the PHI. Likewise for ARG1. */
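/* E.g. (illustrative):

     int f (int a, int b)
     {
       return a < b ? a : b;
     }

   is rewritten into a branchless MIN_EXPR <a, b>; the GT_EXPR/GE_EXPR
   forms map to MAX_EXPR analogously.  */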
1037
1038 static bool
1039 minmax_replacement (basic_block cond_bb, basic_block middle_bb,
1040 edge e0, edge e1, gimple *phi,
1041 tree arg0, tree arg1)
1042 {
1043 tree result, type;
1044 gcond *cond;
1045 gassign *new_stmt;
1046 edge true_edge, false_edge;
1047 enum tree_code cmp, minmax, ass_code;
1048 tree smaller, larger, arg_true, arg_false;
1049 gimple_stmt_iterator gsi, gsi_from;
1050
1051 type = TREE_TYPE (PHI_RESULT (phi));
1052
1053 /* The optimization may be unsafe due to NaNs. */
1054 if (HONOR_NANS (type))
1055 return false;
1056
1057 cond = as_a <gcond *> (last_stmt (cond_bb));
1058 cmp = gimple_cond_code (cond);
1059
1060 /* This transformation is only valid for order comparisons. Record which
1061 operand is smaller/larger if the result of the comparison is true. */
1062 if (cmp == LT_EXPR || cmp == LE_EXPR)
1063 {
1064 smaller = gimple_cond_lhs (cond);
1065 larger = gimple_cond_rhs (cond);
1066 }
1067 else if (cmp == GT_EXPR || cmp == GE_EXPR)
1068 {
1069 smaller = gimple_cond_rhs (cond);
1070 larger = gimple_cond_lhs (cond);
1071 }
1072 else
1073 return false;
1074
1075 /* We need to know which is the true edge and which is the false
1076 edge so that we know which PHI argument each edge selects. */
1077 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1078
1079 /* Forward the edges over the middle basic block. */
1080 if (true_edge->dest == middle_bb)
1081 true_edge = EDGE_SUCC (true_edge->dest, 0);
1082 if (false_edge->dest == middle_bb)
1083 false_edge = EDGE_SUCC (false_edge->dest, 0);
1084
1085 if (true_edge == e0)
1086 {
1087 gcc_assert (false_edge == e1);
1088 arg_true = arg0;
1089 arg_false = arg1;
1090 }
1091 else
1092 {
1093 gcc_assert (false_edge == e0);
1094 gcc_assert (true_edge == e1);
1095 arg_true = arg1;
1096 arg_false = arg0;
1097 }
1098
1099 if (empty_block_p (middle_bb))
1100 {
1101 if (operand_equal_for_phi_arg_p (arg_true, smaller)
1102 && operand_equal_for_phi_arg_p (arg_false, larger))
1103 {
1104 /* Case
1105
1106 if (smaller < larger)
1107 rslt = smaller;
1108 else
1109 rslt = larger; */
1110 minmax = MIN_EXPR;
1111 }
1112 else if (operand_equal_for_phi_arg_p (arg_false, smaller)
1113 && operand_equal_for_phi_arg_p (arg_true, larger))
1114 minmax = MAX_EXPR;
1115 else
1116 return false;
1117 }
1118 else
1119 {
1120 /* Recognize the following case, assuming d <= u:
1121
1122 if (a <= u)
1123 b = MAX (a, d);
1124 x = PHI <b, u>
1125
1126 This is equivalent to
1127
1128 b = MAX (a, d);
1129 x = MIN (b, u); */
1130
1131 gimple *assign = last_and_only_stmt (middle_bb);
1132 tree lhs, op0, op1, bound;
1133
1134 if (!assign
1135 || gimple_code (assign) != GIMPLE_ASSIGN)
1136 return false;
1137
1138 lhs = gimple_assign_lhs (assign);
1139 ass_code = gimple_assign_rhs_code (assign);
1140 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
1141 return false;
1142 op0 = gimple_assign_rhs1 (assign);
1143 op1 = gimple_assign_rhs2 (assign);
1144
1145 if (true_edge->src == middle_bb)
1146 {
1147 /* We got here if the condition is true, i.e., SMALLER < LARGER. */
1148 if (!operand_equal_for_phi_arg_p (lhs, arg_true))
1149 return false;
1150
1151 if (operand_equal_for_phi_arg_p (arg_false, larger))
1152 {
1153 /* Case
1154
1155 if (smaller < larger)
1156 {
1157 r' = MAX_EXPR (smaller, bound)
1158 }
1159 r = PHI <r', larger> --> to be turned to MIN_EXPR. */
1160 if (ass_code != MAX_EXPR)
1161 return false;
1162
1163 minmax = MIN_EXPR;
1164 if (operand_equal_for_phi_arg_p (op0, smaller))
1165 bound = op1;
1166 else if (operand_equal_for_phi_arg_p (op1, smaller))
1167 bound = op0;
1168 else
1169 return false;
1170
1171 /* We need BOUND <= LARGER. */
1172 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1173 bound, larger)))
1174 return false;
1175 }
1176 else if (operand_equal_for_phi_arg_p (arg_false, smaller))
1177 {
1178 /* Case
1179
1180 if (smaller < larger)
1181 {
1182 r' = MIN_EXPR (larger, bound)
1183 }
1184 r = PHI <r', smaller> --> to be turned to MAX_EXPR. */
1185 if (ass_code != MIN_EXPR)
1186 return false;
1187
1188 minmax = MAX_EXPR;
1189 if (operand_equal_for_phi_arg_p (op0, larger))
1190 bound = op1;
1191 else if (operand_equal_for_phi_arg_p (op1, larger))
1192 bound = op0;
1193 else
1194 return false;
1195
1196 /* We need BOUND >= SMALLER. */
1197 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1198 bound, smaller)))
1199 return false;
1200 }
1201 else
1202 return false;
1203 }
1204 else
1205 {
1206 /* We got here if the condition is false, i.e., SMALLER > LARGER. */
1207 if (!operand_equal_for_phi_arg_p (lhs, arg_false))
1208 return false;
1209
1210 if (operand_equal_for_phi_arg_p (arg_true, larger))
1211 {
1212 /* Case
1213
1214 if (smaller > larger)
1215 {
1216 r' = MIN_EXPR (smaller, bound)
1217 }
1218 r = PHI <r', larger> --> to be turned to MAX_EXPR. */
1219 if (ass_code != MIN_EXPR)
1220 return false;
1221
1222 minmax = MAX_EXPR;
1223 if (operand_equal_for_phi_arg_p (op0, smaller))
1224 bound = op1;
1225 else if (operand_equal_for_phi_arg_p (op1, smaller))
1226 bound = op0;
1227 else
1228 return false;
1229
1230 /* We need BOUND >= LARGER. */
1231 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1232 bound, larger)))
1233 return false;
1234 }
1235 else if (operand_equal_for_phi_arg_p (arg_true, smaller))
1236 {
1237 /* Case
1238
1239 if (smaller > larger)
1240 {
1241 r' = MAX_EXPR (larger, bound)
1242 }
1243 r = PHI <r', smaller> --> to be turned to MIN_EXPR. */
1244 if (ass_code != MAX_EXPR)
1245 return false;
1246
1247 minmax = MIN_EXPR;
1248 if (operand_equal_for_phi_arg_p (op0, larger))
1249 bound = op1;
1250 else if (operand_equal_for_phi_arg_p (op1, larger))
1251 bound = op0;
1252 else
1253 return false;
1254
1255 /* We need BOUND <= SMALLER. */
1256 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1257 bound, smaller)))
1258 return false;
1259 }
1260 else
1261 return false;
1262 }
1263
1264 /* Move the statement from the middle block. */
1265 gsi = gsi_last_bb (cond_bb);
1266 gsi_from = gsi_last_nondebug_bb (middle_bb);
1267 gsi_move_before (&gsi_from, &gsi);
1268 }
1269
1270 /* Create an SSA var to hold the min/max result. If we're the only
1271 thing setting the target PHI, then we can clone the PHI
1272 variable. Otherwise we must create a new one. */
1273 result = PHI_RESULT (phi);
1274 if (EDGE_COUNT (gimple_bb (phi)->preds) == 2)
1275 result = duplicate_ssa_name (result, NULL);
1276 else
1277 result = make_ssa_name (TREE_TYPE (result));
1278
1279 /* Emit the statement to compute min/max. */
1280 new_stmt = gimple_build_assign (result, minmax, arg0, arg1);
1281 gsi = gsi_last_bb (cond_bb);
1282 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1283
1284 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1285 reset_flow_sensitive_info_in_bb (cond_bb);
1286
1287 return true;
1288 }
1289
1290 /* The function abs_replacement does the main work of doing the absolute
1291 replacement. Return true if the replacement is done. Otherwise return
1292 false.
1293 BB is the basic block where the replacement is going to be done. ARG0
1294 is argument 0 from the PHI. Likewise for ARG1. */
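/* E.g. (illustrative):

     int f (int a)
     {
       return a < 0 ? -a : a;
     }

   is rewritten into ABS_EXPR <a>; when the negated value is the one
   selected, a NEGATE_EXPR of the ABS_EXPR result is emitted instead.  */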
1295
1296 static bool
1297 abs_replacement (basic_block cond_bb, basic_block middle_bb,
1298 edge e0 ATTRIBUTE_UNUSED, edge e1,
1299 gimple *phi, tree arg0, tree arg1)
1300 {
1301 tree result;
1302 gassign *new_stmt;
1303 gimple *cond;
1304 gimple_stmt_iterator gsi;
1305 edge true_edge, false_edge;
1306 gimple *assign;
1307 edge e;
1308 tree rhs, lhs;
1309 bool negate;
1310 enum tree_code cond_code;
1311
1312 /* If the type says honor signed zeros we cannot do this
1313 optimization. */
1314 if (HONOR_SIGNED_ZEROS (arg1))
1315 return false;
1316
1317 /* OTHER_BLOCK must have only one executable statement which must have the
1318 form arg0 = -arg1 or arg1 = -arg0. */
1319
1320 assign = last_and_only_stmt (middle_bb);
1321 /* If we did not find the proper negation assignment, then we cannot
1322 optimize. */
1323 if (assign == NULL)
1324 return false;
1325
1326 /* If we got here, then we have found the only executable statement
1327 in OTHER_BLOCK. If it is anything other than arg0 = -arg1 or
1328 arg1 = -arg0, then we cannot optimize. */
1329 if (gimple_code (assign) != GIMPLE_ASSIGN)
1330 return false;
1331
1332 lhs = gimple_assign_lhs (assign);
1333
1334 if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
1335 return false;
1336
1337 rhs = gimple_assign_rhs1 (assign);
1338
1339 /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
1340 if (!(lhs == arg0 && rhs == arg1)
1341 && !(lhs == arg1 && rhs == arg0))
1342 return false;
1343
1344 cond = last_stmt (cond_bb);
1345 result = PHI_RESULT (phi);
1346
1347 /* Only relationals comparing arg[01] against zero are interesting. */
1348 cond_code = gimple_cond_code (cond);
1349 if (cond_code != GT_EXPR && cond_code != GE_EXPR
1350 && cond_code != LT_EXPR && cond_code != LE_EXPR)
1351 return false;
1352
1353 /* Make sure the conditional is arg[01] OP y. */
1354 if (gimple_cond_lhs (cond) != rhs)
1355 return false;
1356
1357 if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
1358 ? real_zerop (gimple_cond_rhs (cond))
1359 : integer_zerop (gimple_cond_rhs (cond)))
1360 ;
1361 else
1362 return false;
1363
1364 /* We need to know which is the true edge and which is the false
1365 edge so that we know if we have abs or negative abs. */
1366 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1367
1368 /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
1369 will need to negate the result. Similarly for LT_EXPR/LE_EXPR if
1370 the false edge goes to OTHER_BLOCK. */
1371 if (cond_code == GT_EXPR || cond_code == GE_EXPR)
1372 e = true_edge;
1373 else
1374 e = false_edge;
1375
1376 if (e->dest == middle_bb)
1377 negate = true;
1378 else
1379 negate = false;
1380
1381 result = duplicate_ssa_name (result, NULL);
1382
1383 if (negate)
1384 lhs = make_ssa_name (TREE_TYPE (result));
1385 else
1386 lhs = result;
1387
1388 /* Build the modify expression with abs expression. */
1389 new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);
1390
1391 gsi = gsi_last_bb (cond_bb);
1392 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1393
1394 if (negate)
1395 {
1396 /* Get the right GSI. We want to insert after the recently
1397 added ABS_EXPR statement (which we know is the first statement
1398 in the block). */
1399 new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);
1400
1401 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1402 }
1403
1404 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1405 reset_flow_sensitive_info_in_bb (cond_bb);
1406
1407 /* Note that we optimized this PHI. */
1408 return true;
1409 }
1410
1411 /* Auxiliary functions to determine the set of memory accesses which
1412 can't trap because they are preceded by accesses to the same memory
1413 portion. We do that for MEM_REFs, so we only need to track
1414 the SSA_NAME of the pointer indirectly referenced. The algorithm
1415 simply is a walk over all instructions in dominator order. When
1416 we see an MEM_REF we determine if we've already seen the same
1417 ref anywhere up to the root of the dominator tree. If we have, the
1418 current access can't trap. If we don't see any dominating access
1419 the current access might trap, but might also make later accesses
1420 non-trapping, so we remember it. We need to be careful with loads
1421 or stores, for instance a load might not trap, while a store would,
1422 so if we see a dominating read access this doesn't mean that a later
1423 write access would not trap. Hence we also need to differentiate the
1424 type of access(es) seen.
1425
1426 ??? We currently are very conservative and assume that a load might
1427 trap even if a store doesn't (write-only memory). This probably is
1428 overly conservative. */
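/* An illustrative sketch with made-up SSA names: in

     *p_2 = u_3;
     if (c_4) goto bb3; else goto bb4;
     <bb 3>: *p_2 = v_5;

   the second store cannot trap, because a store through the same
   SSA_NAME with the same offset and size dominates it.  Loads and
   stores are tracked separately, per the caveat above.  */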
1429
1430 /* A hash-table of SSA_NAMEs, and in which basic block an MEM_REF
1431 through it was seen, which would constitute a no-trap region for
1432 same accesses. */
1433 struct name_to_bb
1434 {
1435 unsigned int ssa_name_ver;
1436 unsigned int phase;
1437 bool store;
1438 HOST_WIDE_INT offset, size;
1439 basic_block bb;
1440 };
1441
1442 /* Hashtable helpers. */
1443
1444 struct ssa_names_hasher : free_ptr_hash <name_to_bb>
1445 {
1446 static inline hashval_t hash (const name_to_bb *);
1447 static inline bool equal (const name_to_bb *, const name_to_bb *);
1448 };
1449
1450 /* Used for quick clearing of the hash-table when we see calls.
1451 Hash entries with phase < nt_call_phase are invalid. */
1452 static unsigned int nt_call_phase;
1453
1454 /* The hash function. */
1455
1456 inline hashval_t
1457 ssa_names_hasher::hash (const name_to_bb *n)
1458 {
1459 return n->ssa_name_ver ^ (((hashval_t) n->store) << 31)
1460 ^ (n->offset << 6) ^ (n->size << 3);
1461 }
1462
1463 /* The equality function of *P1 and *P2. */
1464
1465 inline bool
1466 ssa_names_hasher::equal (const name_to_bb *n1, const name_to_bb *n2)
1467 {
1468 return n1->ssa_name_ver == n2->ssa_name_ver
1469 && n1->store == n2->store
1470 && n1->offset == n2->offset
1471 && n1->size == n2->size;
1472 }
1473
1474 class nontrapping_dom_walker : public dom_walker
1475 {
1476 public:
1477 nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
1478 : dom_walker (direction), m_nontrapping (ps), m_seen_ssa_names (128) {}
1479
1480 virtual void before_dom_children (basic_block);
1481 virtual void after_dom_children (basic_block);
1482
1483 private:
1484
1485 /* We see the expression EXP in basic block BB. If it's an interesting
1486 expression (an MEM_REF through an SSA_NAME) possibly insert the
1487 expression into the set NONTRAP or the hash table of seen expressions.
1488 STORE is true if this expression is on the LHS, otherwise it's on
1489 the RHS. */
1490 void add_or_mark_expr (basic_block, tree, bool);
1491
1492 hash_set<tree> *m_nontrapping;
1493
1494 /* The hash table for remembering what we've seen. */
1495 hash_table<ssa_names_hasher> m_seen_ssa_names;
1496 };
1497
1498 /* Called by walk_dominator_tree, when entering the block BB. */
1499 void
1500 nontrapping_dom_walker::before_dom_children (basic_block bb)
1501 {
1502 edge e;
1503 edge_iterator ei;
1504 gimple_stmt_iterator gsi;
1505
1506 /* If we haven't seen all our predecessors, clear the hash-table. */
1507 FOR_EACH_EDGE (e, ei, bb->preds)
1508 if ((((size_t)e->src->aux) & 2) == 0)
1509 {
1510 nt_call_phase++;
1511 break;
1512 }
1513
1514 /* Mark this BB as being on the path to dominator root and as visited. */
1515 bb->aux = (void*)(1 | 2);
1516
1517 /* And walk the statements in order. */
1518 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1519 {
1520 gimple *stmt = gsi_stmt (gsi);
1521
1522 if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
1523 || (is_gimple_call (stmt)
1524 && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
1525 nt_call_phase++;
1526 else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
1527 {
1528 add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
1529 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
1530 }
1531 }
1532 }
1533
1534 /* Called by walk_dominator_tree, when basic block BB is exited. */
1535 void
1536 nontrapping_dom_walker::after_dom_children (basic_block bb)
1537 {
1538 /* This BB isn't on the path to dominator root anymore. */
1539 bb->aux = (void*)2;
1540 }
1541
1542 /* We see the expression EXP in basic block BB. If it's an interesting
1543 expression (an MEM_REF through an SSA_NAME) possibly insert the
1544 expression into the set NONTRAP or the hash table of seen expressions.
1545 STORE is true if this expression is on the LHS, otherwise it's on
1546 the RHS. */
1547 void
1548 nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
1549 {
1550 HOST_WIDE_INT size;
1551
1552 if (TREE_CODE (exp) == MEM_REF
1553 && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME
1554 && tree_fits_shwi_p (TREE_OPERAND (exp, 1))
1555 && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
1556 {
1557 tree name = TREE_OPERAND (exp, 0);
1558 struct name_to_bb map;
1559 name_to_bb **slot;
1560 struct name_to_bb *n2bb;
1561 basic_block found_bb = 0;
1562
1563 /* Try to find the last seen MEM_REF through the same
1564 SSA_NAME, which can trap. */
1565 map.ssa_name_ver = SSA_NAME_VERSION (name);
1566 map.phase = 0;
1567 map.bb = 0;
1568 map.store = store;
1569 map.offset = tree_to_shwi (TREE_OPERAND (exp, 1));
1570 map.size = size;
1571
1572 slot = m_seen_ssa_names.find_slot (&map, INSERT);
1573 n2bb = *slot;
1574 if (n2bb && n2bb->phase >= nt_call_phase)
1575 found_bb = n2bb->bb;
1576
1577 /* If we've found a trapping MEM_REF, _and_ it dominates EXP
1578 (it's in a basic block on the path from us to the dominator root)
1579 then EXP can't trap. */
1580 if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
1581 {
1582 m_nontrapping->add (exp);
1583 }
1584 else
1585 {
1586 /* EXP might trap, so insert it into the hash table. */
1587 if (n2bb)
1588 {
1589 n2bb->phase = nt_call_phase;
1590 n2bb->bb = bb;
1591 }
1592 else
1593 {
1594 n2bb = XNEW (struct name_to_bb);
1595 n2bb->ssa_name_ver = SSA_NAME_VERSION (name);
1596 n2bb->phase = nt_call_phase;
1597 n2bb->bb = bb;
1598 n2bb->store = store;
1599 n2bb->offset = map.offset;
1600 n2bb->size = size;
1601 *slot = n2bb;
1602 }
1603 }
1604 }
1605 }
1606
1607 /* This is the entry point of gathering non-trapping memory accesses.
1608 It will do a dominator walk over the whole function, and it will
1609 make use of the bb->aux pointers. It returns a set of trees
1610 (the MEM_REFs themselves) which can't trap. */
1611 static hash_set<tree> *
1612 get_non_trapping (void)
1613 {
1614 nt_call_phase = 0;
1615 hash_set<tree> *nontrap = new hash_set<tree>;
1616 /* We're going to do a dominator walk, so ensure that we have
1617 dominance information. */
1618 calculate_dominance_info (CDI_DOMINATORS);
1619
1620 nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
1621 .walk (cfun->cfg->x_entry_block_ptr);
1622
1623 clear_aux_for_blocks ();
1624 return nontrap;
1625 }
1626
1627 /* Do the main work of conditional store replacement. We already know
1628 that the recognized pattern looks like so:
1629
1630 split:
1631 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
1632 MIDDLE_BB:
1633 something
1634 fallthrough (edge E0)
1635 JOIN_BB:
1636 some more
1637
1638 We check that MIDDLE_BB contains only one store, that that store
1639 doesn't trap (not via NOTRAP, but via checking if an access to the same
1640 memory location dominates us) and that the store has a "simple" RHS. */
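/* E.g. (illustrative):

     void f (int *p, int c, int v)
     {
       *p = 0;
       if (c)
         *p = v;
     }

   The dominating store proves *p non-trapping, so the guarded store is
   sunk to the join block, conceptually becoming
   tmp = c ? v : *p; *p = tmp;  */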
1641
1642 static bool
1643 cond_store_replacement (basic_block middle_bb, basic_block join_bb,
1644 edge e0, edge e1, hash_set<tree> *nontrap)
1645 {
1646 gimple *assign = last_and_only_stmt (middle_bb);
1647 tree lhs, rhs, name, name2;
1648 gphi *newphi;
1649 gassign *new_stmt;
1650 gimple_stmt_iterator gsi;
1651 source_location locus;
1652
1653 /* Check if middle_bb contains only one store. */
1654 if (!assign
1655 || !gimple_assign_single_p (assign)
1656 || gimple_has_volatile_ops (assign))
1657 return false;
1658
1659 locus = gimple_location (assign);
1660 lhs = gimple_assign_lhs (assign);
1661 rhs = gimple_assign_rhs1 (assign);
1662 if (TREE_CODE (lhs) != MEM_REF
1663 || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
1664 || !is_gimple_reg_type (TREE_TYPE (lhs)))
1665 return false;
1666
1667 /* Prove that we can move the store down. We could also check
1668 TREE_THIS_NOTRAP here, but in that case we also could move stores
1669 whose value is not readily available, which we want to avoid. */
1670 if (!nontrap->contains (lhs))
1671 return false;
1672
1673 /* Now we've checked the constraints, so do the transformation:
1674 1) Remove the single store. */
1675 gsi = gsi_for_stmt (assign);
1676 unlink_stmt_vdef (assign);
1677 gsi_remove (&gsi, true);
1678 release_defs (assign);
1679
1680 /* 2) Insert a load from the memory of the store to the temporary
1681 on the edge which did not contain the store. */
1682 lhs = unshare_expr (lhs);
1683 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1684 new_stmt = gimple_build_assign (name, lhs);
1685 gimple_set_location (new_stmt, locus);
1686 gsi_insert_on_edge (e1, new_stmt);
1687
1688 /* 3) Create a PHI node at the join block, with one argument
1689 holding the old RHS, and the other holding the temporary
1690 where we stored the old memory contents. */
1691 name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1692 newphi = create_phi_node (name2, join_bb);
1693 add_phi_arg (newphi, rhs, e0, locus);
1694 add_phi_arg (newphi, name, e1, locus);
1695
1696 lhs = unshare_expr (lhs);
1697 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
1698
1699 /* 4) Insert the store statement that uses the PHI result. */
1700 gsi = gsi_after_labels (join_bb);
1701 if (gsi_end_p (gsi))
1702 {
1703 gsi = gsi_last_bb (join_bb);
1704 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1705 }
1706 else
1707 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1708
1709 return true;
1710 }
1711
1712 /* Do the main work of conditional store replacement, given a single store in each of THEN_BB and ELSE_BB. */
1713
1714 static bool
1715 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
1716 basic_block join_bb, gimple *then_assign,
1717 gimple *else_assign)
1718 {
1719 tree lhs_base, lhs, then_rhs, else_rhs, name;
1720 source_location then_locus, else_locus;
1721 gimple_stmt_iterator gsi;
1722 gphi *newphi;
1723 gassign *new_stmt;
1724
1725 if (then_assign == NULL
1726 || !gimple_assign_single_p (then_assign)
1727 || gimple_clobber_p (then_assign)
1728 || gimple_has_volatile_ops (then_assign)
1729 || else_assign == NULL
1730 || !gimple_assign_single_p (else_assign)
1731 || gimple_clobber_p (else_assign)
1732 || gimple_has_volatile_ops (else_assign))
1733 return false;
1734
1735 lhs = gimple_assign_lhs (then_assign);
1736 if (!is_gimple_reg_type (TREE_TYPE (lhs))
1737 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
1738 return false;
1739
1740 lhs_base = get_base_address (lhs);
1741 if (lhs_base == NULL_TREE
1742 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
1743 return false;
1744
1745 then_rhs = gimple_assign_rhs1 (then_assign);
1746 else_rhs = gimple_assign_rhs1 (else_assign);
1747 then_locus = gimple_location (then_assign);
1748 else_locus = gimple_location (else_assign);
1749
1750 /* Now we've checked the constraints, so do the transformation:
1751 1) Remove the stores. */
1752 gsi = gsi_for_stmt (then_assign);
1753 unlink_stmt_vdef (then_assign);
1754 gsi_remove (&gsi, true);
1755 release_defs (then_assign);
1756
1757 gsi = gsi_for_stmt (else_assign);
1758 unlink_stmt_vdef (else_assign);
1759 gsi_remove (&gsi, true);
1760 release_defs (else_assign);
1761
1762 /* 2) Create a PHI node at the join block, with one argument
1763 holding the RHS of the store from THEN_BB and the other holding
1764 the RHS of the store from ELSE_BB. */
1765 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1766 newphi = create_phi_node (name, join_bb);
1767 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
1768 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
1769
1770 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
1771
1772 /* 3) Insert the store of the PHI result. */
1773 gsi = gsi_after_labels (join_bb);
1774 if (gsi_end_p (gsi))
1775 {
1776 gsi = gsi_last_bb (join_bb);
1777 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1778 }
1779 else
1780 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1781
1782 return true;
1783 }
1784
1785 /* Conditional store replacement. We already know
1786 that the recognized pattern looks like so:
1787
1788 split:
1789 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
1790 THEN_BB:
1791 ...
1792 X = Y;
1793 ...
1794 goto JOIN_BB;
1795 ELSE_BB:
1796 ...
1797 X = Z;
1798 ...
1799 fallthrough (edge E0)
1800 JOIN_BB:
1801 some more
1802
1803 We check that it is safe to sink the store to JOIN_BB by verifying that
1804 there are no read-after-write or write-after-write dependencies in
1805 THEN_BB and ELSE_BB. */
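/* For illustration only (hypothetical source code), the pattern above
   typically originates from

     if (cond)
       *p = f ();
     else
       *p = g ();

   and the rewrite leaves JOIN_BB with a single unconditional store of a
   PHI temporary selecting between the two right-hand sides. */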
1806
1807 static bool
1808 cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
1809 basic_block join_bb)
1810 {
1811 gimple *then_assign = last_and_only_stmt (then_bb);
1812 gimple *else_assign = last_and_only_stmt (else_bb);
1813 vec<data_reference_p> then_datarefs, else_datarefs;
1814 vec<ddr_p> then_ddrs, else_ddrs;
1815 gimple *then_store, *else_store;
1816 bool found, ok = false, res;
1817 struct data_dependence_relation *ddr;
1818 data_reference_p then_dr, else_dr;
1819 int i, j;
1820 tree then_lhs, else_lhs;
1821 basic_block blocks[3];
1822
1823 if (MAX_STORES_TO_SINK == 0)
1824 return false;
1825
1826 /* Handle the case with a single statement in each of THEN_BB and ELSE_BB. */
1827 if (then_assign && else_assign)
1828 return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
1829 then_assign, else_assign);
1830
1831 /* Find data references. */
1832 then_datarefs.create (1);
1833 else_datarefs.create (1);
1834 if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
1835 == chrec_dont_know)
1836 || !then_datarefs.length ()
1837 || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
1838 == chrec_dont_know)
1839 || !else_datarefs.length ())
1840 {
1841 free_data_refs (then_datarefs);
1842 free_data_refs (else_datarefs);
1843 return false;
1844 }
1845
1846 /* Find pairs of stores with equal LHS. */
1847 auto_vec<gimple *, 1> then_stores, else_stores;
1848 FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
1849 {
1850 if (DR_IS_READ (then_dr))
1851 continue;
1852
1853 then_store = DR_STMT (then_dr);
1854 then_lhs = gimple_get_lhs (then_store);
1855 if (then_lhs == NULL_TREE)
1856 continue;
1857 found = false;
1858
1859 FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
1860 {
1861 if (DR_IS_READ (else_dr))
1862 continue;
1863
1864 else_store = DR_STMT (else_dr);
1865 else_lhs = gimple_get_lhs (else_store);
1866 if (else_lhs == NULL_TREE)
1867 continue;
1868
1869 if (operand_equal_p (then_lhs, else_lhs, 0))
1870 {
1871 found = true;
1872 break;
1873 }
1874 }
1875
1876 if (!found)
1877 continue;
1878
1879 then_stores.safe_push (then_store);
1880 else_stores.safe_push (else_store);
1881 }
1882
1883 /* No pairs of stores found, or more pairs than we may profitably sink. */
1884 if (!then_stores.length ()
1885 || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
1886 {
1887 free_data_refs (then_datarefs);
1888 free_data_refs (else_datarefs);
1889 return false;
1890 }
1891
1892 /* Compute and check data dependencies in both basic blocks. */
1893 then_ddrs.create (1);
1894 else_ddrs.create (1);
1895 if (!compute_all_dependences (then_datarefs, &then_ddrs,
1896 vNULL, false)
1897 || !compute_all_dependences (else_datarefs, &else_ddrs,
1898 vNULL, false))
1899 {
1900 free_dependence_relations (then_ddrs);
1901 free_dependence_relations (else_ddrs);
1902 free_data_refs (then_datarefs);
1903 free_data_refs (else_datarefs);
1904 return false;
1905 }
1906 blocks[0] = then_bb;
1907 blocks[1] = else_bb;
1908 blocks[2] = join_bb;
1909 renumber_gimple_stmt_uids_in_blocks (blocks, 3);
1910
1911 /* Check that there are no read-after-write or write-after-write dependencies
1912 in THEN_BB. */
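/* The uids were just renumbered in source order, so a greater uid means
   a later statement; the test below therefore flags a read ordered
   after a write (RAW, with either operand first) and any pair of
   writes (WAW). */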
1913 FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
1914 {
1915 struct data_reference *dra = DDR_A (ddr);
1916 struct data_reference *drb = DDR_B (ddr);
1917
1918 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
1919 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
1920 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
1921 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
1922 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
1923 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
1924 {
1925 free_dependence_relations (then_ddrs);
1926 free_dependence_relations (else_ddrs);
1927 free_data_refs (then_datarefs);
1928 free_data_refs (else_datarefs);
1929 return false;
1930 }
1931 }
1932
1933 /* Check that there are no read-after-write or write-after-write dependencies
1934 in ELSE_BB. */
1935 FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
1936 {
1937 struct data_reference *dra = DDR_A (ddr);
1938 struct data_reference *drb = DDR_B (ddr);
1939
1940 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
1941 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
1942 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
1943 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
1944 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
1945 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
1946 {
1947 free_dependence_relations (then_ddrs);
1948 free_dependence_relations (else_ddrs);
1949 free_data_refs (then_datarefs);
1950 free_data_refs (else_datarefs);
1951 return false;
1952 }
1953 }
1954
1955 /* Sink stores with the same LHS. */
1956 FOR_EACH_VEC_ELT (then_stores, i, then_store)
1957 {
1958 else_store = else_stores[i];
1959 res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
1960 then_store, else_store);
1961 ok = ok || res;
1962 }
1963
1964 free_dependence_relations (then_ddrs);
1965 free_dependence_relations (else_ddrs);
1966 free_data_refs (then_datarefs);
1967 free_data_refs (else_datarefs);
1968
1969 return ok;
1970 }
1971
1972 /* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB. */
1973
1974 static bool
1975 local_mem_dependence (gimple *stmt, basic_block bb)
1976 {
1977 tree vuse = gimple_vuse (stmt);
1978 gimple *def;
1979
1980 if (!vuse)
1981 return false;
1982
1983 def = SSA_NAME_DEF_STMT (vuse);
1984 return (def && gimple_bb (def) == bb);
1985 }
1986
1987 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
1988 BB1 and BB2 are "then" and "else" blocks dependent on this test,
1989 and BB3 rejoins control flow following BB1 and BB2, look for
1990 opportunities to hoist loads as follows. If BB3 contains a PHI of
1991 two loads, one each occurring in BB1 and BB2, and the loads are
1992 provably of adjacent fields in the same structure, then move both
1993 loads into BB0. Of course this can only be done if there are no
1994 dependencies preventing such motion.
1995
1996 One of the hoisted loads will always be speculative, so the
1997 transformation is currently conservative:
1998
1999 - The fields must be strictly adjacent.
2000 - The two fields must occupy a single memory block that is
2001 guaranteed to not cross a page boundary.
2002
2003 The latter is difficult to prove, as such memory blocks should be
2004 aligned on the minimum of the stack alignment boundary and the
2005 alignment guaranteed by heap allocation interfaces. Thus we rely
2006 on a parameter for the alignment value.
2007
2008 Provided a good value is used for that parameter, the first
2009 restriction could possibly be relaxed. */
2010
2011 static void
2012 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
2013 basic_block bb2, basic_block bb3)
2014 {
2015 int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
2016 unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
2017 gphi_iterator gsi;
2018
2019 /* Walk the phis in bb3 looking for an opportunity. We are looking
2020 for phis of two SSA names, one defined in bb1 and the other in
2021 bb2. */
2022 for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
2023 {
2024 gphi *phi_stmt = gsi.phi ();
2025 gimple *def1, *def2;
2026 tree arg1, arg2, ref1, ref2, field1, field2;
2027 tree tree_offset1, tree_offset2, tree_size2, next;
2028 int offset1, offset2, size2;
2029 unsigned align1;
2030 gimple_stmt_iterator gsi2;
2031 basic_block bb_for_def1, bb_for_def2;
2032
2033 if (gimple_phi_num_args (phi_stmt) != 2
2034 || virtual_operand_p (gimple_phi_result (phi_stmt)))
2035 continue;
2036
2037 arg1 = gimple_phi_arg_def (phi_stmt, 0);
2038 arg2 = gimple_phi_arg_def (phi_stmt, 1);
2039
2040 if (TREE_CODE (arg1) != SSA_NAME
2041 || TREE_CODE (arg2) != SSA_NAME
2042 || SSA_NAME_IS_DEFAULT_DEF (arg1)
2043 || SSA_NAME_IS_DEFAULT_DEF (arg2))
2044 continue;
2045
2046 def1 = SSA_NAME_DEF_STMT (arg1);
2047 def2 = SSA_NAME_DEF_STMT (arg2);
2048
2049 if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
2050 && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
2051 continue;
2052
2053 /* Check the mode of the arguments to be sure a conditional move
2054 can be generated for that mode. */
2055 if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
2056 == CODE_FOR_nothing)
2057 continue;
2058
2059 /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
2060 if (!gimple_assign_single_p (def1)
2061 || !gimple_assign_single_p (def2)
2062 || gimple_has_volatile_ops (def1)
2063 || gimple_has_volatile_ops (def2))
2064 continue;
2065
2066 ref1 = gimple_assign_rhs1 (def1);
2067 ref2 = gimple_assign_rhs1 (def2);
2068
2069 if (TREE_CODE (ref1) != COMPONENT_REF
2070 || TREE_CODE (ref2) != COMPONENT_REF)
2071 continue;
2072
2073 /* The zeroth operand of the two component references must be
2074 identical. It is not sufficient to compare get_base_address of
2075 the two references, because this could allow for different
2076 elements of the same array in the two trees. It is not safe to
2077 assume that the existence of one array element implies the
2078 existence of a different one. */
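/* (For instance, hypothetically, a[i].f1 and a[j].f2 share a base
   address yet may name different elements of a.) */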
2079 if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
2080 continue;
2081
2082 field1 = TREE_OPERAND (ref1, 1);
2083 field2 = TREE_OPERAND (ref2, 1);
2084
2085 /* Check for field adjacency, and ensure field1 comes first. */
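/* DECL_CHAIN may interpose non-FIELD_DECL entries between two fields
   (e.g. a TYPE_DECL in C++), so step over those when testing
   adjacency. */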
2086 for (next = DECL_CHAIN (field1);
2087 next && TREE_CODE (next) != FIELD_DECL;
2088 next = DECL_CHAIN (next))
2089 ;
2090
2091 if (next != field2)
2092 {
2093 for (next = DECL_CHAIN (field2);
2094 next && TREE_CODE (next) != FIELD_DECL;
2095 next = DECL_CHAIN (next))
2096 ;
2097
2098 if (next != field1)
2099 continue;
2100
2101 std::swap (field1, field2);
2102 std::swap (def1, def2);
2103 }
2104
2105 bb_for_def1 = gimple_bb (def1);
2106 bb_for_def2 = gimple_bb (def2);
2107
2108 /* Check for proper alignment of the first field. */
2109 tree_offset1 = bit_position (field1);
2110 tree_offset2 = bit_position (field2);
2111 tree_size2 = DECL_SIZE (field2);
2112
2113 if (!tree_fits_uhwi_p (tree_offset1)
2114 || !tree_fits_uhwi_p (tree_offset2)
2115 || !tree_fits_uhwi_p (tree_size2))
2116 continue;
2117
2118 offset1 = tree_to_uhwi (tree_offset1);
2119 offset2 = tree_to_uhwi (tree_offset2);
2120 size2 = tree_to_uhwi (tree_size2);
2121 align1 = DECL_ALIGN (field1) % param_align_bits;
2122
2123 if (offset1 % BITS_PER_UNIT != 0)
2124 continue;
2125
2126 /* For profitability, the two field references should fit within
2127 a single cache line. */
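/* A worked instance with assumed numbers: a 64-byte line gives
   param_align_bits == 512; two adjacent 32-bit fields at bit offsets
   0 and 32 with align1 == 0 yield 0 + 32 - 0 + 32 == 64, well within
   the line, so the pair qualifies. */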
2128 if (align1 + offset2 - offset1 + size2 > param_align_bits)
2129 continue;
2130
2131 /* The two expressions must not depend on vdefs defined in
2132 bb1/bb2. */
2133 if (local_mem_dependence (def1, bb_for_def1)
2134 || local_mem_dependence (def2, bb_for_def2))
2135 continue;
2136
2137 /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
2138 bb0. We hoist the first one first so that a cache miss is handled
2139 efficiently regardless of hardware cache-fill policy. */
2140 gsi2 = gsi_for_stmt (def1);
2141 gsi_move_to_bb_end (&gsi2, bb0);
2142 gsi2 = gsi_for_stmt (def2);
2143 gsi_move_to_bb_end (&gsi2, bb0);
2144
2145 if (dump_file && (dump_flags & TDF_DETAILS))
2146 {
2147 fprintf (dump_file,
2148 "\nHoisting adjacent loads from %d and %d into %d: \n",
2149 bb_for_def1->index, bb_for_def2->index, bb0->index);
2150 print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
2151 print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
2152 }
2153 }
2154 }
2155
2156 /* Determine whether we should attempt to hoist adjacent loads out of
2157 diamond patterns in pass_phiopt. Always hoist loads if
2158 -fhoist-adjacent-loads is specified and the target machine has
2159 both a conditional move instruction and a defined cache line size. */
2160
2161 static bool
2162 gate_hoist_loads (void)
2163 {
2164 return (flag_hoist_adjacent_loads == 1
2165 && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
2166 && HAVE_conditional_move);
2167 }
2168
2169 /* This pass tries to replace an if-then-else block with an
2170 assignment. We have four kinds of transformations. Some of these
2171 transformations are also performed by the ifcvt RTL optimizer.
2172
2173 Conditional Replacement
2174 -----------------------
2175
2176 This transformation, implemented in conditional_replacement,
2177 replaces
2178
2179 bb0:
2180 if (cond) goto bb2; else goto bb1;
2181 bb1:
2182 bb2:
2183 x = PHI <0 (bb1), 1 (bb0), ...>;
2184
2185 with
2186
2187 bb0:
2188 x' = cond;
2189 goto bb2;
2190 bb2:
2191 x = PHI <x' (bb0), ...>;
2192
2193 We remove bb1 as it becomes unreachable. This occurs often due to
2194 gimplification of conditionals.
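
   For example (a hypothetical source fragment), a short-circuit
   expression such as

     x = (a != 0 && b != 0);

   gimplifies into branches feeding a PHI <0, 1>, which this
   transformation turns back into straight-line code.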
2195
2196 Value Replacement
2197 -----------------
2198
2199 This transformation, implemented in value_replacement, replaces
2200
2201 bb0:
2202 if (a != b) goto bb2; else goto bb1;
2203 bb1:
2204 bb2:
2205 x = PHI <a (bb1), b (bb0), ...>;
2206
2207 with
2208
2209 bb0:
2210 bb2:
2211 x = PHI <b (bb0), ...>;
2212
2213 This opportunity can sometimes occur as a result of other
2214 optimizations.
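
   At the source level (hypothetical fragment), this amounts to folding

     x = (a != b) ? b : a;

   to "x = b;", since on the path where the condition is false, a and b
   are equal.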
2215
2216
2217 Another case caught by value replacement looks like this:
2218
2219 bb0:
2220 t1 = a == CONST;
2221 t2 = b > c;
2222 t3 = t1 & t2;
2223 if (t3 != 0) goto bb1; else goto bb2;
2224 bb1:
2225 bb2:
2226 x = PHI <CONST, a>
2227
2228 Gets replaced with:
2229 bb0:
2230 bb2:
2231 t1 = a == CONST;
2232 t2 = b > c;
2233 t3 = t1 & t2;
2234 x = a;
2235
2236 ABS Replacement
2237 ---------------
2238
2239 This transformation, implemented in abs_replacement, replaces
2240
2241 bb0:
2242 if (a >= 0) goto bb2; else goto bb1;
2243 bb1:
2244 x = -a;
2245 bb2:
2246 x = PHI <x (bb1), a (bb0), ...>;
2247
2248 with
2249
2250 bb0:
2251 x' = ABS_EXPR< a >;
2252 bb2:
2253 x = PHI <x' (bb0), ...>;
2254
2255 MIN/MAX Replacement
2256 -------------------
2257
2258 This transformation, implemented in minmax_replacement, replaces
2259
2260 bb0:
2261 if (a <= b) goto bb2; else goto bb1;
2262 bb1:
2263 bb2:
2264 x = PHI <b (bb1), a (bb0), ...>;
2265
2266 with
2267
2268 bb0:
2269 x' = MIN_EXPR (a, b)
2270 bb2:
2271 x = PHI <x' (bb0), ...>;
2272
2273 A similar transformation is done for MAX_EXPR.
2274
2275
2276 This pass also performs a fifth transformation of a slightly different
2277 flavor.
2278
2279 Factor conversion in COND_EXPR
2280 ------------------------------
2281
2282 This transformation factors the conversion out of COND_EXPR with
2283 factor_out_conditional_conversion.
2284
2285 For example:
2286 if (a <= CST) goto <bb 3>; else goto <bb 4>;
2287 <bb 3>:
2288 tmp = (int) a;
2289 <bb 4>:
2290 tmp = PHI <tmp, CST>
2291
2292 Into:
2293 if (a <= CST) goto <bb 3>; else goto <bb 4>;
2294 <bb 3>:
2295 <bb 4>:
2296 a = PHI <a, CST>
2297 tmp = (int) a;
2298
2299 Adjacent Load Hoisting
2300 ----------------------
2301
2302 This transformation replaces
2303
2304 bb0:
2305 if (...) goto bb2; else goto bb1;
2306 bb1:
2307 x1 = (<expr>).field1;
2308 goto bb3;
2309 bb2:
2310 x2 = (<expr>).field2;
2311 bb3:
2312 # x = PHI <x1, x2>;
2313
2314 with
2315
2316 bb0:
2317 x1 = (<expr>).field1;
2318 x2 = (<expr>).field2;
2319 if (...) goto bb2; else goto bb1;
2320 bb1:
2321 goto bb3;
2322 bb2:
2323 bb3:
2324 # x = PHI <x1, x2>;
2325
2326 The purpose of this transformation is to enable generation of conditional
2327 move instructions such as Intel CMOVE or PowerPC ISEL. Because one of
2328 the loads is speculative, the transformation is restricted to very
2329 specific cases to avoid introducing a page fault. We are looking for
2330 the common idiom:
2331
2332 if (...)
2333 x = y->left;
2334 else
2335 x = y->right;
2336
2337 where left and right are typically adjacent pointers in a tree structure. */
2338
2339 namespace {
2340
2341 const pass_data pass_data_phiopt =
2342 {
2343 GIMPLE_PASS, /* type */
2344 "phiopt", /* name */
2345 OPTGROUP_NONE, /* optinfo_flags */
2346 TV_TREE_PHIOPT, /* tv_id */
2347 ( PROP_cfg | PROP_ssa ), /* properties_required */
2348 0, /* properties_provided */
2349 0, /* properties_destroyed */
2350 0, /* todo_flags_start */
2351 0, /* todo_flags_finish */
2352 };
2353
2354 class pass_phiopt : public gimple_opt_pass
2355 {
2356 public:
2357 pass_phiopt (gcc::context *ctxt)
2358 : gimple_opt_pass (pass_data_phiopt, ctxt)
2359 {}
2360
2361 /* opt_pass methods: */
2362 opt_pass * clone () { return new pass_phiopt (m_ctxt); }
2363 virtual bool gate (function *) { return flag_ssa_phiopt; }
2364 virtual unsigned int execute (function *)
2365 {
2366 return tree_ssa_phiopt_worker (false, gate_hoist_loads ());
2367 }
2368
2369 }; // class pass_phiopt
2370
2371 } // anon namespace
2372
2373 gimple_opt_pass *
2374 make_pass_phiopt (gcc::context *ctxt)
2375 {
2376 return new pass_phiopt (ctxt);
2377 }
2378
2379 namespace {
2380
2381 const pass_data pass_data_cselim =
2382 {
2383 GIMPLE_PASS, /* type */
2384 "cselim", /* name */
2385 OPTGROUP_NONE, /* optinfo_flags */
2386 TV_TREE_PHIOPT, /* tv_id */
2387 ( PROP_cfg | PROP_ssa ), /* properties_required */
2388 0, /* properties_provided */
2389 0, /* properties_destroyed */
2390 0, /* todo_flags_start */
2391 0, /* todo_flags_finish */
2392 };
2393
2394 class pass_cselim : public gimple_opt_pass
2395 {
2396 public:
2397 pass_cselim (gcc::context *ctxt)
2398 : gimple_opt_pass (pass_data_cselim, ctxt)
2399 {}
2400
2401 /* opt_pass methods: */
2402 virtual bool gate (function *) { return flag_tree_cselim; }
2403 virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }
2404
2405 }; // class pass_cselim
2406
2407 } // anon namespace
2408
2409 gimple_opt_pass *
2410 make_pass_cselim (gcc::context *ctxt)
2411 {
2412 return new pass_cselim (ctxt);
2413 }