gcc/tree-ssa-phiopt.c
1 /* Optimization of PHI nodes by converting them into straightline code.
2 Copyright (C) 2004-2014 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "hash-table.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "stor-layout.h"
27 #include "flags.h"
28 #include "tm_p.h"
29 #include "predict.h"
30 #include "vec.h"
31 #include "hashtab.h"
32 #include "hash-set.h"
33 #include "machmode.h"
34 #include "hard-reg-set.h"
35 #include "input.h"
36 #include "function.h"
37 #include "dominance.h"
38 #include "cfg.h"
39 #include "cfganal.h"
40 #include "basic-block.h"
41 #include "tree-ssa-alias.h"
42 #include "internal-fn.h"
43 #include "gimple-expr.h"
44 #include "is-a.h"
45 #include "gimple.h"
46 #include "gimplify.h"
47 #include "gimple-iterator.h"
48 #include "gimplify-me.h"
49 #include "gimple-ssa.h"
50 #include "tree-cfg.h"
51 #include "tree-phinodes.h"
52 #include "ssa-iterators.h"
53 #include "stringpool.h"
54 #include "tree-ssanames.h"
55 #include "expr.h"
56 #include "tree-dfa.h"
57 #include "tree-pass.h"
58 #include "langhooks.h"
59 #include "domwalk.h"
60 #include "cfgloop.h"
61 #include "tree-data-ref.h"
62 #include "gimple-pretty-print.h"
63 #include "insn-config.h"
64 #include "expr.h"
65 #include "optabs.h"
66 #include "tree-scalar-evolution.h"
67 #include "tree-inline.h"
68
69 #ifndef HAVE_conditional_move
70 #define HAVE_conditional_move (0)
71 #endif
72
73 static unsigned int tree_ssa_phiopt_worker (bool, bool);
74 static bool conditional_replacement (basic_block, basic_block,
75 edge, edge, gimple, tree, tree);
76 static int value_replacement (basic_block, basic_block,
77 edge, edge, gimple, tree, tree);
78 static bool minmax_replacement (basic_block, basic_block,
79 edge, edge, gimple, tree, tree);
80 static bool abs_replacement (basic_block, basic_block,
81 edge, edge, gimple, tree, tree);
82 static bool neg_replacement (basic_block, basic_block,
83 edge, edge, gimple, tree, tree);
84 static bool cond_store_replacement (basic_block, basic_block, edge, edge,
85 hash_set<tree> *);
86 static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
87 static hash_set<tree> * get_non_trapping ();
88 static void replace_phi_edge_with_variable (basic_block, edge, gimple, tree);
89 static void hoist_adjacent_loads (basic_block, basic_block,
90 basic_block, basic_block);
91 static bool gate_hoist_loads (void);
92
93 /* This pass tries to transform conditional stores into unconditional
94 ones, enabling further simplifications with the simpler then and else
95 blocks. In particular it replaces this:
96
97 bb0:
98 if (cond) goto bb2; else goto bb1;
99 bb1:
100 *p = RHS;
101 bb2:
102
103 with
104
105 bb0:
106 if (cond) goto bb1; else goto bb2;
107 bb1:
108 condtmp' = *p;
109 bb2:
110 condtmp = PHI <RHS, condtmp'>
111 *p = condtmp;
112
113 This transformation can only be done under several constraints,
114 documented below. It also replaces:
115
116 bb0:
117 if (cond) goto bb2; else goto bb1;
118 bb1:
119 *p = RHS1;
120 goto bb3;
121 bb2:
122 *p = RHS2;
123 bb3:
124
125 with
126
127 bb0:
128 if (cond) goto bb3; else goto bb1;
129 bb1:
130 bb3:
131 condtmp = PHI <RHS1, RHS2>
132 *p = condtmp; */
133
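/* Illustrative source-level view (an added sketch, not part of the
   original comments): the first pattern above corresponds to code of
   the shape

     if (cond)
       *p = v;

   which, once a dominating access proves *p cannot trap, becomes an
   unconditional store of a PHI-selected value as diagrammed above.  */
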
134 static unsigned int
135 tree_ssa_cs_elim (void)
136 {
137 unsigned todo;
138 /* ??? We are not interested in loop related info, but the following
139 will create it, ICEing as we didn't init loops with pre-headers.
140 An interfacing issue of find_data_references_in_bb. */
141 loop_optimizer_init (LOOPS_NORMAL);
142 scev_initialize ();
143 todo = tree_ssa_phiopt_worker (true, false);
144 scev_finalize ();
145 loop_optimizer_finalize ();
146 return todo;
147 }
148
/* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1,
   i.e. the single PHI whose arguments for those edges differ; return
   NULL if more than one PHI qualifies.  */
150
151 static gimple
152 single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
153 {
154 gimple_stmt_iterator i;
155 gimple phi = NULL;
156 if (gimple_seq_singleton_p (seq))
157 return gsi_stmt (gsi_start (seq));
158 for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
159 {
160 gimple p = gsi_stmt (i);
161 /* If the PHI arguments are equal then we can skip this PHI. */
162 if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
163 gimple_phi_arg_def (p, e1->dest_idx)))
164 continue;
165
      /* If we already have a PHI whose arguments for the two edges
	 differ, then there is no single such PHI; return NULL.  */
168 if (phi)
169 return NULL;
170
171 phi = p;
172 }
173 return phi;
174 }
175
176 /* The core routine of conditional store replacement and normal
177 phi optimizations. Both share much of the infrastructure in how
178 to match applicable basic block patterns. DO_STORE_ELIM is true
179 when we want to do conditional store replacement, false otherwise.
180 DO_HOIST_LOADS is true when we want to hoist adjacent loads out
181 of diamond control flow patterns, false otherwise. */
182 static unsigned int
183 tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
184 {
185 basic_block bb;
186 basic_block *bb_order;
187 unsigned n, i;
188 bool cfgchanged = false;
189 hash_set<tree> *nontrap = 0;
190
191 if (do_store_elim)
192 /* Calculate the set of non-trapping memory accesses. */
193 nontrap = get_non_trapping ();
194
195 /* The replacement of conditional negation with a non-branching
196 sequence is really only a win when optimizing for speed and we
197 can avoid transformations by gimple if-conversion that result
198 in poor RTL generation.
199
200 Ideally either gimple if-conversion or the RTL expanders will
201 be improved and the code to emit branchless conditional negation
202 can be removed. */
203 bool replace_conditional_negation = false;
204 if (!do_store_elim)
205 replace_conditional_negation
206 = ((!optimize_size && optimize >= 2)
207 || (((flag_tree_loop_vectorize || cfun->has_force_vectorize_loops)
208 && flag_tree_loop_if_convert != 0)
209 || flag_tree_loop_if_convert == 1
210 || flag_tree_loop_if_convert_stores == 1));
211
  /* Search every basic block for a COND_EXPR we may be able to optimize.

     We walk the blocks in an order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
219 bb_order = single_pred_before_succ_order ();
220 n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
221
222 for (i = 0; i < n; i++)
223 {
224 gimple cond_stmt, phi;
225 basic_block bb1, bb2;
226 edge e1, e2;
227 tree arg0, arg1;
228
229 bb = bb_order[i];
230
231 cond_stmt = last_stmt (bb);
232 /* Check to see if the last statement is a GIMPLE_COND. */
233 if (!cond_stmt
234 || gimple_code (cond_stmt) != GIMPLE_COND)
235 continue;
236
237 e1 = EDGE_SUCC (bb, 0);
238 bb1 = e1->dest;
239 e2 = EDGE_SUCC (bb, 1);
240 bb2 = e2->dest;
241
242 /* We cannot do the optimization on abnormal edges. */
243 if ((e1->flags & EDGE_ABNORMAL) != 0
244 || (e2->flags & EDGE_ABNORMAL) != 0)
245 continue;
246
      /* Punt if bb1 has no successors, or bb2 is missing or has no
	 successors.  */
248 if (EDGE_COUNT (bb1->succs) == 0
249 || bb2 == NULL
250 || EDGE_COUNT (bb2->succs) == 0)
251 continue;
252
      /* Find the bb which is the fall-through to the other.  */
254 if (EDGE_SUCC (bb1, 0)->dest == bb2)
255 ;
256 else if (EDGE_SUCC (bb2, 0)->dest == bb1)
257 {
258 basic_block bb_tmp = bb1;
259 edge e_tmp = e1;
260 bb1 = bb2;
261 bb2 = bb_tmp;
262 e1 = e2;
263 e2 = e_tmp;
264 }
265 else if (do_store_elim
266 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
267 {
268 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
269
270 if (!single_succ_p (bb1)
271 || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
272 || !single_succ_p (bb2)
273 || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
274 || EDGE_COUNT (bb3->preds) != 2)
275 continue;
276 if (cond_if_else_store_replacement (bb1, bb2, bb3))
277 cfgchanged = true;
278 continue;
279 }
280 else if (do_hoist_loads
281 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
282 {
283 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
284
285 if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
286 && single_succ_p (bb1)
287 && single_succ_p (bb2)
288 && single_pred_p (bb1)
289 && single_pred_p (bb2)
290 && EDGE_COUNT (bb->succs) == 2
291 && EDGE_COUNT (bb3->preds) == 2
292 /* If one edge or the other is dominant, a conditional move
293 is likely to perform worse than the well-predicted branch. */
294 && !predictable_edge_p (EDGE_SUCC (bb, 0))
295 && !predictable_edge_p (EDGE_SUCC (bb, 1)))
296 hoist_adjacent_loads (bb, bb1, bb2, bb3);
297 continue;
298 }
299 else
300 continue;
301
302 e1 = EDGE_SUCC (bb1, 0);
303
304 /* Make sure that bb1 is just a fall through. */
305 if (!single_succ_p (bb1)
306 || (e1->flags & EDGE_FALLTHRU) == 0)
307 continue;
308
      /* Also make sure that bb1 has only one predecessor and that it
	 is bb.  */
311 if (!single_pred_p (bb1)
312 || single_pred (bb1) != bb)
313 continue;
314
315 if (do_store_elim)
316 {
317 /* bb1 is the middle block, bb2 the join block, bb the split block,
318 e1 the fallthrough edge from bb1 to bb2. We can't do the
319 optimization if the join block has more than two predecessors. */
320 if (EDGE_COUNT (bb2->preds) > 2)
321 continue;
322 if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
323 cfgchanged = true;
324 }
325 else
326 {
327 gimple_seq phis = phi_nodes (bb2);
328 gimple_stmt_iterator gsi;
329 bool candorest = true;
330
331 /* Value replacement can work with more than one PHI
332 so try that first. */
333 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
334 {
335 phi = gsi_stmt (gsi);
336 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
337 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
338 if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
339 {
340 candorest = false;
341 cfgchanged = true;
342 break;
343 }
344 }
345
346 if (!candorest)
347 continue;
348
349 phi = single_non_singleton_phi_for_edges (phis, e1, e2);
350 if (!phi)
351 continue;
352
353 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
354 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
355
356 /* Something is wrong if we cannot find the arguments in the PHI
357 node. */
358 gcc_assert (arg0 != NULL && arg1 != NULL);
359
360 /* Do the replacement of conditional if it can be done. */
361 if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
362 cfgchanged = true;
363 else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
364 cfgchanged = true;
365 else if (replace_conditional_negation
366 && neg_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
367 cfgchanged = true;
368 else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
369 cfgchanged = true;
370 }
371 }
372
373 free (bb_order);
374
375 if (do_store_elim)
376 delete nontrap;
377 /* If the CFG has changed, we should cleanup the CFG. */
378 if (cfgchanged && do_store_elim)
379 {
380 /* In cond-store replacement we have added some loads on edges
381 and new VOPS (as we moved the store, and created a load). */
382 gsi_commit_edge_inserts ();
383 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
384 }
385 else if (cfgchanged)
386 return TODO_cleanup_cfg;
387 return 0;
388 }
389
390 /* Replace PHI node element whose edge is E in block BB with variable NEW.
391 Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
392 is known to have two edges, one of which must reach BB). */
393
394 static void
395 replace_phi_edge_with_variable (basic_block cond_block,
396 edge e, gimple phi, tree new_tree)
397 {
398 basic_block bb = gimple_bb (phi);
399 basic_block block_to_remove;
400 gimple_stmt_iterator gsi;
401
402 /* Change the PHI argument to new. */
403 SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);
404
405 /* Remove the empty basic block. */
406 if (EDGE_SUCC (cond_block, 0)->dest == bb)
407 {
408 EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
409 EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
410 EDGE_SUCC (cond_block, 0)->probability = REG_BR_PROB_BASE;
411 EDGE_SUCC (cond_block, 0)->count += EDGE_SUCC (cond_block, 1)->count;
412
413 block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
414 }
415 else
416 {
417 EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
418 EDGE_SUCC (cond_block, 1)->flags
419 &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
420 EDGE_SUCC (cond_block, 1)->probability = REG_BR_PROB_BASE;
421 EDGE_SUCC (cond_block, 1)->count += EDGE_SUCC (cond_block, 0)->count;
422
423 block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
424 }
425 delete_basic_block (block_to_remove);
426
427 /* Eliminate the COND_EXPR at the end of COND_BLOCK. */
428 gsi = gsi_last_bb (cond_block);
429 gsi_remove (&gsi, true);
430
431 if (dump_file && (dump_flags & TDF_DETAILS))
432 fprintf (dump_file,
433 "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
434 cond_block->index,
435 bb->index);
436 }
437
/* The function conditional_replacement does the main work of doing the
   conditional replacement.  Return true if the replacement is done.
   Otherwise return false.
   COND_BB is the basic block where the replacement is going to be done.
   ARG0 is argument 0 from PHI.  Likewise for ARG1.  */
443
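/* For instance (an illustrative sketch, not taken from the sources):

     x = (a > b) ? 1 : 0;    becomes    x = a > b;
     x = (a > b) ? 0 : -1;   becomes    x = -(a <= b);

   modulo the needed type conversions, and with the comparison possibly
   inverted depending on which edge carries which constant.  */
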
444 static bool
445 conditional_replacement (basic_block cond_bb, basic_block middle_bb,
446 edge e0, edge e1, gimple phi,
447 tree arg0, tree arg1)
448 {
449 tree result;
450 gimple stmt, new_stmt;
451 tree cond;
452 gimple_stmt_iterator gsi;
453 edge true_edge, false_edge;
454 tree new_var, new_var2;
455 bool neg;
456
457 /* FIXME: Gimplification of complex type is too hard for now. */
458 /* We aren't prepared to handle vectors either (and it is a question
459 if it would be worthwhile anyway). */
460 if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
461 || POINTER_TYPE_P (TREE_TYPE (arg0)))
462 || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
463 || POINTER_TYPE_P (TREE_TYPE (arg1))))
464 return false;
465
  /* If the PHI arguments are the constants 0 and 1, or 0 and -1, then
     convert the PHI into a use of the condition.  */
468 if ((integer_zerop (arg0) && integer_onep (arg1))
469 || (integer_zerop (arg1) && integer_onep (arg0)))
470 neg = false;
471 else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
472 || (integer_zerop (arg1) && integer_all_onesp (arg0)))
473 neg = true;
474 else
475 return false;
476
477 if (!empty_block_p (middle_bb))
478 return false;
479
480 /* At this point we know we have a GIMPLE_COND with two successors.
481 One successor is BB, the other successor is an empty block which
482 falls through into BB.
483
484 There is a single PHI node at the join point (BB) and its arguments
485 are constants (0, 1) or (0, -1).
486
487 So, given the condition COND, and the two PHI arguments, we can
488 rewrite this PHI into non-branching code:
489
490 dest = (COND) or dest = COND'
491
     We use the condition as-is if the argument associated with the
     true edge has the value one or the argument associated with the
     false edge has the value zero.  Note that those conditions are not
495 the same since only one of the outgoing edges from the GIMPLE_COND
496 will directly reach BB and thus be associated with an argument. */
497
498 stmt = last_stmt (cond_bb);
499 result = PHI_RESULT (phi);
500
501 /* To handle special cases like floating point comparison, it is easier and
502 less error-prone to build a tree and gimplify it on the fly though it is
503 less efficient. */
504 cond = fold_build2_loc (gimple_location (stmt),
505 gimple_cond_code (stmt), boolean_type_node,
506 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
507
508 /* We need to know which is the true edge and which is the false
509 edge so that we know when to invert the condition below. */
510 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
511 if ((e0 == true_edge && integer_zerop (arg0))
512 || (e0 == false_edge && !integer_zerop (arg0))
513 || (e1 == true_edge && integer_zerop (arg1))
514 || (e1 == false_edge && !integer_zerop (arg1)))
515 cond = fold_build1_loc (gimple_location (stmt),
516 TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);
517
518 if (neg)
519 {
520 cond = fold_convert_loc (gimple_location (stmt),
521 TREE_TYPE (result), cond);
522 cond = fold_build1_loc (gimple_location (stmt),
523 NEGATE_EXPR, TREE_TYPE (cond), cond);
524 }
525
526 /* Insert our new statements at the end of conditional block before the
527 COND_STMT. */
528 gsi = gsi_for_stmt (stmt);
529 new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
530 GSI_SAME_STMT);
531
532 if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
533 {
534 source_location locus_0, locus_1;
535
536 new_var2 = make_ssa_name (TREE_TYPE (result), NULL);
537 new_stmt = gimple_build_assign_with_ops (CONVERT_EXPR, new_var2,
538 new_var, NULL);
539 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
540 new_var = new_var2;
541
      /* Set the locus to the first argument, unless it doesn't have one.  */
543 locus_0 = gimple_phi_arg_location (phi, 0);
544 locus_1 = gimple_phi_arg_location (phi, 1);
545 if (locus_0 == UNKNOWN_LOCATION)
546 locus_0 = locus_1;
547 gimple_set_location (new_stmt, locus_0);
548 }
549
550 replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);
551
552 /* Note that we optimized this PHI. */
553 return true;
554 }
555
556 /* Update *ARG which is defined in STMT so that it contains the
557 computed value if that seems profitable. Return true if the
558 statement is made dead by that rewriting. */
559
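/* E.g. (illustrative): for  arg = &p->f  where field F sits at offset
   zero within *P, *ARG can be rewritten to P and the ADDR_EXPR
   statement becomes dead.  */
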
560 static bool
561 jump_function_from_stmt (tree *arg, gimple stmt)
562 {
563 enum tree_code code = gimple_assign_rhs_code (stmt);
564 if (code == ADDR_EXPR)
565 {
566 /* For arg = &p->i transform it to p, if possible. */
567 tree rhs1 = gimple_assign_rhs1 (stmt);
568 HOST_WIDE_INT offset;
569 tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
570 &offset);
571 if (tem
572 && TREE_CODE (tem) == MEM_REF
573 && (mem_ref_offset (tem) + offset) == 0)
574 {
575 *arg = TREE_OPERAND (tem, 0);
576 return true;
577 }
578 }
579 /* TODO: Much like IPA-CP jump-functions we want to handle constant
580 additions symbolically here, and we'd need to update the comparison
581 code that compares the arg + cst tuples in our caller. For now the
582 code above exactly handles the VEC_BASE pattern from vec.h. */
583 return false;
584 }
585
586 /* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
587 of the form SSA_NAME NE 0.
588
589 If RHS is fed by a simple EQ_EXPR comparison of two values, see if
590 the two input values of the EQ_EXPR match arg0 and arg1.
591
592 If so update *code and return TRUE. Otherwise return FALSE. */
593
594 static bool
595 rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
596 enum tree_code *code, const_tree rhs)
597 {
598 /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
599 statement. */
600 if (TREE_CODE (rhs) == SSA_NAME)
601 {
602 gimple def1 = SSA_NAME_DEF_STMT (rhs);
603
604 /* Verify the defining statement has an EQ_EXPR on the RHS. */
605 if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
606 {
607 /* Finally verify the source operands of the EQ_EXPR are equal
608 to arg0 and arg1. */
609 tree op0 = gimple_assign_rhs1 (def1);
610 tree op1 = gimple_assign_rhs2 (def1);
611 if ((operand_equal_for_phi_arg_p (arg0, op0)
612 && operand_equal_for_phi_arg_p (arg1, op1))
613 || (operand_equal_for_phi_arg_p (arg0, op1)
614 && operand_equal_for_phi_arg_p (arg1, op0)))
615 {
616 /* We will perform the optimization. */
617 *code = gimple_assign_rhs_code (def1);
618 return true;
619 }
620 }
621 }
622 return false;
623 }
624
625 /* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.
626
   Also return TRUE if arg0/arg1 are equal to the source arguments of
   an EQ comparison feeding a BIT_AND_EXPR which feeds COND.
629
630 Return FALSE otherwise. */
631
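/* A sketch of the more complex case (illustrative only):

     t1 = (a == b);
     t2 = t1 & c;
     if (t2 != 0) ...

   with PHI arguments a and b: the EQ_EXPR feeding the BIT_AND_EXPR
   matches the PHI arguments, so *CODE is updated to EQ_EXPR.  */
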
632 static bool
633 operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
634 enum tree_code *code, gimple cond)
635 {
636 gimple def;
637 tree lhs = gimple_cond_lhs (cond);
638 tree rhs = gimple_cond_rhs (cond);
639
640 if ((operand_equal_for_phi_arg_p (arg0, lhs)
641 && operand_equal_for_phi_arg_p (arg1, rhs))
642 || (operand_equal_for_phi_arg_p (arg1, lhs)
643 && operand_equal_for_phi_arg_p (arg0, rhs)))
644 return true;
645
646 /* Now handle more complex case where we have an EQ comparison
647 which feeds a BIT_AND_EXPR which feeds COND.
648
649 First verify that COND is of the form SSA_NAME NE 0. */
650 if (*code != NE_EXPR || !integer_zerop (rhs)
651 || TREE_CODE (lhs) != SSA_NAME)
652 return false;
653
654 /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR. */
655 def = SSA_NAME_DEF_STMT (lhs);
656 if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
657 return false;
658
659 /* Now verify arg0/arg1 correspond to the source arguments of an
660 EQ comparison feeding the BIT_AND_EXPR. */
661
662 tree tmp = gimple_assign_rhs1 (def);
663 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
664 return true;
665
666 tmp = gimple_assign_rhs2 (def);
667 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
668 return true;
669
670 return false;
671 }
672
673 /* Returns true if ARG is a neutral element for operation CODE
674 on the RIGHT side. */
675
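/* For example, 0 is neutral for PLUS_EXPR on either side, but only on
   the RIGHT for MINUS_EXPR: x - 0 == x, while 0 - x != x in general.
   Likewise 1 is neutral for MULT_EXPR and only right-neutral for the
   division codes.  */
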
676 static bool
677 neutral_element_p (tree_code code, tree arg, bool right)
678 {
679 switch (code)
680 {
681 case PLUS_EXPR:
682 case BIT_IOR_EXPR:
683 case BIT_XOR_EXPR:
684 return integer_zerop (arg);
685
686 case LROTATE_EXPR:
687 case RROTATE_EXPR:
688 case LSHIFT_EXPR:
689 case RSHIFT_EXPR:
690 case MINUS_EXPR:
691 case POINTER_PLUS_EXPR:
692 return right && integer_zerop (arg);
693
694 case MULT_EXPR:
695 return integer_onep (arg);
696
697 case TRUNC_DIV_EXPR:
698 case CEIL_DIV_EXPR:
699 case FLOOR_DIV_EXPR:
700 case ROUND_DIV_EXPR:
701 case EXACT_DIV_EXPR:
702 return right && integer_onep (arg);
703
704 case BIT_AND_EXPR:
705 return integer_all_onesp (arg);
706
707 default:
708 return false;
709 }
710 }
711
712 /* Returns true if ARG is an absorbing element for operation CODE. */
713
714 static bool
715 absorbing_element_p (tree_code code, tree arg)
716 {
717 switch (code)
718 {
719 case BIT_IOR_EXPR:
720 return integer_all_onesp (arg);
721
722 case MULT_EXPR:
723 case BIT_AND_EXPR:
724 return integer_zerop (arg);
725
726 default:
727 return false;
728 }
729 }
730
/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done.  Otherwise
   return 0.  If we remove the middle basic block, return 2.
   COND_BB is the basic block where the replacement is going to be done.
   ARG0 is argument 0 from the PHI.  Likewise for ARG1.  */
736
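/* An illustrative example: for

     if (a == b) x = a; else x = b;

   on the edge where a is selected we know a == b, so the PHI <a, b>
   collapses to b and the branch goes away.  */
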
737 static int
738 value_replacement (basic_block cond_bb, basic_block middle_bb,
739 edge e0, edge e1, gimple phi,
740 tree arg0, tree arg1)
741 {
742 gimple_stmt_iterator gsi;
743 gimple cond;
744 edge true_edge, false_edge;
745 enum tree_code code;
  bool empty_or_with_defined_p = true;
747
748 /* If the type says honor signed zeros we cannot do this
749 optimization. */
750 if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1))))
751 return 0;
752
753 /* If there is a statement in MIDDLE_BB that defines one of the PHI
754 arguments, then adjust arg0 or arg1. */
755 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
756 while (!gsi_end_p (gsi))
757 {
758 gimple stmt = gsi_stmt (gsi);
759 tree lhs;
760 gsi_next_nondebug (&gsi);
761 if (!is_gimple_assign (stmt))
762 {
	  empty_or_with_defined_p = false;
764 continue;
765 }
766 /* Now try to adjust arg0 or arg1 according to the computation
767 in the statement. */
768 lhs = gimple_assign_lhs (stmt);
769 if (!(lhs == arg0
770 && jump_function_from_stmt (&arg0, stmt))
771 || (lhs == arg1
772 && jump_function_from_stmt (&arg1, stmt)))
	empty_or_with_defined_p = false;
774 }
775
776 cond = last_stmt (cond_bb);
777 code = gimple_cond_code (cond);
778
779 /* This transformation is only valid for equality comparisons. */
780 if (code != NE_EXPR && code != EQ_EXPR)
781 return 0;
782
  /* We need to know which is the true edge and which is the false
     edge so that we know which PHI argument goes with which edge.  */
785 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
786
787 /* At this point we know we have a COND_EXPR with two successors.
788 One successor is BB, the other successor is an empty block which
789 falls through into BB.
790
791 The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.
792
793 There is a single PHI node at the join point (BB) with two arguments.
794
795 We now need to verify that the two arguments in the PHI node match
796 the two arguments to the equality comparison. */
797
798 if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
799 {
800 edge e;
801 tree arg;
802
803 /* For NE_EXPR, we want to build an assignment result = arg where
804 arg is the PHI argument associated with the true edge. For
805 EQ_EXPR we want the PHI argument associated with the false edge. */
806 e = (code == NE_EXPR ? true_edge : false_edge);
807
808 /* Unfortunately, E may not reach BB (it may instead have gone to
809 OTHER_BLOCK). If that is the case, then we want the single outgoing
810 edge from OTHER_BLOCK which reaches BB and represents the desired
811 path from COND_BLOCK. */
812 if (e->dest == middle_bb)
813 e = single_succ_edge (e->dest);
814
815 /* Now we know the incoming edge to BB that has the argument for the
816 RHS of our new assignment statement. */
817 if (e0 == e)
818 arg = arg0;
819 else
820 arg = arg1;
821
      /* If the middle basic block was empty or only defines the PHI
	 arguments, and this is the single PHI whose arguments differ
	 for the edges e0 and e1, then we can remove the middle basic
	 block.  */
      if (empty_or_with_defined_p
826 && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
827 e0, e1) == phi)
828 {
829 replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
830 /* Note that we optimized this PHI. */
831 return 2;
832 }
833 else
834 {
835 /* Replace the PHI arguments with arg. */
836 SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
837 SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
838 if (dump_file && (dump_flags & TDF_DETAILS))
839 {
840 fprintf (dump_file, "PHI ");
841 print_generic_expr (dump_file, gimple_phi_result (phi), 0);
842 fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
843 cond_bb->index);
844 print_generic_expr (dump_file, arg, 0);
845 fprintf (dump_file, ".\n");
846 }
847 return 1;
848 }
849
850 }
851
  /* Now optimize (x != 0) ? x + y : y to just x + y.
     The following condition is too restrictive; there can easily be
     another stmt in middle_bb, for instance a CONVERT_EXPR for the
     second argument.  */
855 gimple assign = last_and_only_stmt (middle_bb);
856 if (!assign || gimple_code (assign) != GIMPLE_ASSIGN
857 || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
858 || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
859 && !POINTER_TYPE_P (TREE_TYPE (arg0))))
860 return 0;
861
862 /* Punt if there are (degenerate) PHIs in middle_bb, there should not be. */
863 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
864 return 0;
865
866 /* Only transform if it removes the condition. */
867 if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
868 return 0;
869
870 /* Size-wise, this is always profitable. */
871 if (optimize_bb_for_speed_p (cond_bb)
872 /* The special case is useless if it has a low probability. */
873 && profile_status_for_fn (cfun) != PROFILE_ABSENT
874 && EDGE_PRED (middle_bb, 0)->probability < PROB_EVEN
875 /* If assign is cheap, there is no point avoiding it. */
876 && estimate_num_insns (assign, &eni_time_weights)
877 >= 3 * estimate_num_insns (cond, &eni_time_weights))
878 return 0;
879
880 tree lhs = gimple_assign_lhs (assign);
881 tree rhs1 = gimple_assign_rhs1 (assign);
882 tree rhs2 = gimple_assign_rhs2 (assign);
883 enum tree_code code_def = gimple_assign_rhs_code (assign);
884 tree cond_lhs = gimple_cond_lhs (cond);
885 tree cond_rhs = gimple_cond_rhs (cond);
886
887 if (((code == NE_EXPR && e1 == false_edge)
888 || (code == EQ_EXPR && e1 == true_edge))
889 && arg0 == lhs
890 && ((arg1 == rhs1
891 && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
892 && neutral_element_p (code_def, cond_rhs, true))
893 || (arg1 == rhs2
894 && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
895 && neutral_element_p (code_def, cond_rhs, false))
896 || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
897 && (operand_equal_for_phi_arg_p (rhs2, cond_lhs)
898 || operand_equal_for_phi_arg_p (rhs1, cond_lhs))
899 && absorbing_element_p (code_def, cond_rhs))))
900 {
901 gsi = gsi_for_stmt (cond);
902 gimple_stmt_iterator gsi_from = gsi_for_stmt (assign);
903 gsi_move_before (&gsi_from, &gsi);
904 replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
905 return 2;
906 }
907
908 return 0;
909 }
910
/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   COND_BB is the basic block where the replacement is going to be done.
   ARG0 is argument 0 from the PHI.  Likewise for ARG1.  */
916
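/* Illustratively:  "x = (a < b) ? a : b"  becomes  "x = MIN_EXPR <a, b>",
   and with a single bounded MIN/MAX in the middle block the result is a
   MIN/MAX pair, as worked out case by case below.  */
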
917 static bool
918 minmax_replacement (basic_block cond_bb, basic_block middle_bb,
919 edge e0, edge e1, gimple phi,
920 tree arg0, tree arg1)
921 {
922 tree result, type;
923 gimple cond, new_stmt;
924 edge true_edge, false_edge;
925 enum tree_code cmp, minmax, ass_code;
926 tree smaller, larger, arg_true, arg_false;
927 gimple_stmt_iterator gsi, gsi_from;
928
929 type = TREE_TYPE (PHI_RESULT (phi));
930
931 /* The optimization may be unsafe due to NaNs. */
932 if (HONOR_NANS (TYPE_MODE (type)))
933 return false;
934
935 cond = last_stmt (cond_bb);
936 cmp = gimple_cond_code (cond);
937
938 /* This transformation is only valid for order comparisons. Record which
939 operand is smaller/larger if the result of the comparison is true. */
940 if (cmp == LT_EXPR || cmp == LE_EXPR)
941 {
942 smaller = gimple_cond_lhs (cond);
943 larger = gimple_cond_rhs (cond);
944 }
945 else if (cmp == GT_EXPR || cmp == GE_EXPR)
946 {
947 smaller = gimple_cond_rhs (cond);
948 larger = gimple_cond_lhs (cond);
949 }
950 else
951 return false;
952
  /* We need to know which is the true edge and which is the false
     edge so that we know which PHI argument is taken on which path.  */
955 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
956
957 /* Forward the edges over the middle basic block. */
958 if (true_edge->dest == middle_bb)
959 true_edge = EDGE_SUCC (true_edge->dest, 0);
960 if (false_edge->dest == middle_bb)
961 false_edge = EDGE_SUCC (false_edge->dest, 0);
962
963 if (true_edge == e0)
964 {
965 gcc_assert (false_edge == e1);
966 arg_true = arg0;
967 arg_false = arg1;
968 }
969 else
970 {
971 gcc_assert (false_edge == e0);
972 gcc_assert (true_edge == e1);
973 arg_true = arg1;
974 arg_false = arg0;
975 }
976
977 if (empty_block_p (middle_bb))
978 {
979 if (operand_equal_for_phi_arg_p (arg_true, smaller)
980 && operand_equal_for_phi_arg_p (arg_false, larger))
981 {
982 /* Case
983
984 if (smaller < larger)
985 rslt = smaller;
986 else
987 rslt = larger; */
988 minmax = MIN_EXPR;
989 }
990 else if (operand_equal_for_phi_arg_p (arg_false, smaller)
991 && operand_equal_for_phi_arg_p (arg_true, larger))
992 minmax = MAX_EXPR;
993 else
994 return false;
995 }
996 else
997 {
998 /* Recognize the following case, assuming d <= u:
999
1000 if (a <= u)
1001 b = MAX (a, d);
1002 x = PHI <b, u>
1003
1004 This is equivalent to
1005
1006 b = MAX (a, d);
1007 x = MIN (b, u); */
1008
1009 gimple assign = last_and_only_stmt (middle_bb);
1010 tree lhs, op0, op1, bound;
1011
1012 if (!assign
1013 || gimple_code (assign) != GIMPLE_ASSIGN)
1014 return false;
1015
1016 lhs = gimple_assign_lhs (assign);
1017 ass_code = gimple_assign_rhs_code (assign);
1018 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
1019 return false;
1020 op0 = gimple_assign_rhs1 (assign);
1021 op1 = gimple_assign_rhs2 (assign);
1022
1023 if (true_edge->src == middle_bb)
1024 {
1025 /* We got here if the condition is true, i.e., SMALLER < LARGER. */
1026 if (!operand_equal_for_phi_arg_p (lhs, arg_true))
1027 return false;
1028
1029 if (operand_equal_for_phi_arg_p (arg_false, larger))
1030 {
1031 /* Case
1032
1033 if (smaller < larger)
1034 {
1035 r' = MAX_EXPR (smaller, bound)
1036 }
1037 r = PHI <r', larger> --> to be turned to MIN_EXPR. */
1038 if (ass_code != MAX_EXPR)
1039 return false;
1040
1041 minmax = MIN_EXPR;
1042 if (operand_equal_for_phi_arg_p (op0, smaller))
1043 bound = op1;
1044 else if (operand_equal_for_phi_arg_p (op1, smaller))
1045 bound = op0;
1046 else
1047 return false;
1048
1049 /* We need BOUND <= LARGER. */
1050 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1051 bound, larger)))
1052 return false;
1053 }
1054 else if (operand_equal_for_phi_arg_p (arg_false, smaller))
1055 {
1056 /* Case
1057
1058 if (smaller < larger)
1059 {
1060 r' = MIN_EXPR (larger, bound)
1061 }
1062 r = PHI <r', smaller> --> to be turned to MAX_EXPR. */
1063 if (ass_code != MIN_EXPR)
1064 return false;
1065
1066 minmax = MAX_EXPR;
1067 if (operand_equal_for_phi_arg_p (op0, larger))
1068 bound = op1;
1069 else if (operand_equal_for_phi_arg_p (op1, larger))
1070 bound = op0;
1071 else
1072 return false;
1073
1074 /* We need BOUND >= SMALLER. */
1075 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1076 bound, smaller)))
1077 return false;
1078 }
1079 else
1080 return false;
1081 }
1082 else
1083 {
1084 /* We got here if the condition is false, i.e., SMALLER > LARGER. */
1085 if (!operand_equal_for_phi_arg_p (lhs, arg_false))
1086 return false;
1087
1088 if (operand_equal_for_phi_arg_p (arg_true, larger))
1089 {
1090 /* Case
1091
1092 if (smaller > larger)
1093 {
1094 r' = MIN_EXPR (smaller, bound)
1095 }
1096 r = PHI <r', larger> --> to be turned to MAX_EXPR. */
1097 if (ass_code != MIN_EXPR)
1098 return false;
1099
1100 minmax = MAX_EXPR;
1101 if (operand_equal_for_phi_arg_p (op0, smaller))
1102 bound = op1;
1103 else if (operand_equal_for_phi_arg_p (op1, smaller))
1104 bound = op0;
1105 else
1106 return false;
1107
1108 /* We need BOUND >= LARGER. */
1109 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1110 bound, larger)))
1111 return false;
1112 }
1113 else if (operand_equal_for_phi_arg_p (arg_true, smaller))
1114 {
1115 /* Case
1116
1117 if (smaller > larger)
1118 {
1119 r' = MAX_EXPR (larger, bound)
1120 }
1121 r = PHI <r', smaller> --> to be turned to MIN_EXPR. */
1122 if (ass_code != MAX_EXPR)
1123 return false;
1124
1125 minmax = MIN_EXPR;
1126 if (operand_equal_for_phi_arg_p (op0, larger))
1127 bound = op1;
1128 else if (operand_equal_for_phi_arg_p (op1, larger))
1129 bound = op0;
1130 else
1131 return false;
1132
1133 /* We need BOUND <= SMALLER. */
1134 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1135 bound, smaller)))
1136 return false;
1137 }
1138 else
1139 return false;
1140 }
1141
1142 /* Move the statement from the middle block. */
1143 gsi = gsi_last_bb (cond_bb);
1144 gsi_from = gsi_last_nondebug_bb (middle_bb);
1145 gsi_move_before (&gsi_from, &gsi);
1146 }
1147
1148 /* Emit the statement to compute min/max. */
1149 result = duplicate_ssa_name (PHI_RESULT (phi), NULL);
1150 new_stmt = gimple_build_assign_with_ops (minmax, result, arg0, arg1);
1151 gsi = gsi_last_bb (cond_bb);
1152 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1153
1154 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1155 return true;
1156 }
1157
/* The function abs_replacement does the main work of doing the absolute
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   COND_BB is the basic block where the replacement is going to be done.
   ARG0 is argument 0 from the PHI.  Likewise for ARG1.  */
1163
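/* Illustratively:  "if (a < 0) b = -a; else b = a;"  with the PHI at
   the join block becomes  "b = ABS_EXPR <a>";  if the comparison sense
   instead selects the negated arm for nonnegative values, the ABS
   result is negated afterwards (negative abs).  */
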
1164 static bool
1165 abs_replacement (basic_block cond_bb, basic_block middle_bb,
1166 edge e0 ATTRIBUTE_UNUSED, edge e1,
1167 gimple phi, tree arg0, tree arg1)
1168 {
1169 tree result;
1170 gimple new_stmt, cond;
1171 gimple_stmt_iterator gsi;
1172 edge true_edge, false_edge;
1173 gimple assign;
1174 edge e;
1175 tree rhs, lhs;
1176 bool negate;
1177 enum tree_code cond_code;
1178
1179 /* If the type says honor signed zeros we cannot do this
1180 optimization. */
1181 if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1))))
1182 return false;
1183
1184 /* OTHER_BLOCK must have only one executable statement which must have the
1185 form arg0 = -arg1 or arg1 = -arg0. */
1186
1187 assign = last_and_only_stmt (middle_bb);
1188 /* If we did not find the proper negation assignment, then we can not
1189 optimize. */
1190 if (assign == NULL)
1191 return false;
1192
  /* If we got here, then we have found the only executable statement
     in OTHER_BLOCK.  If it is anything other than arg0 = -arg1 or
     arg1 = -arg0, then we cannot optimize.  */
1196 if (gimple_code (assign) != GIMPLE_ASSIGN)
1197 return false;
1198
1199 lhs = gimple_assign_lhs (assign);
1200
1201 if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
1202 return false;
1203
1204 rhs = gimple_assign_rhs1 (assign);
1205
1206 /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
1207 if (!(lhs == arg0 && rhs == arg1)
1208 && !(lhs == arg1 && rhs == arg0))
1209 return false;
1210
1211 cond = last_stmt (cond_bb);
1212 result = PHI_RESULT (phi);
1213
1214 /* Only relationals comparing arg[01] against zero are interesting. */
1215 cond_code = gimple_cond_code (cond);
1216 if (cond_code != GT_EXPR && cond_code != GE_EXPR
1217 && cond_code != LT_EXPR && cond_code != LE_EXPR)
1218 return false;
1219
1220 /* Make sure the conditional is arg[01] OP y. */
1221 if (gimple_cond_lhs (cond) != rhs)
1222 return false;
1223
1224 if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
1225 ? real_zerop (gimple_cond_rhs (cond))
1226 : integer_zerop (gimple_cond_rhs (cond)))
1227 ;
1228 else
1229 return false;
1230
  /* We need to know which is the true edge and which is the false
     edge so that we know whether we have abs or negative abs.  */
1233 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1234
1235 /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
1236 will need to negate the result. Similarly for LT_EXPR/LE_EXPR if
1237 the false edge goes to OTHER_BLOCK. */
1238 if (cond_code == GT_EXPR || cond_code == GE_EXPR)
1239 e = true_edge;
1240 else
1241 e = false_edge;
1242
1243 if (e->dest == middle_bb)
1244 negate = true;
1245 else
1246 negate = false;
1247
1248 result = duplicate_ssa_name (result, NULL);
1249
1250 if (negate)
1251 lhs = make_ssa_name (TREE_TYPE (result), NULL);
1252 else
1253 lhs = result;
1254
1255 /* Build the modify expression with abs expression. */
1256 new_stmt = gimple_build_assign_with_ops (ABS_EXPR, lhs, rhs, NULL);
1257
1258 gsi = gsi_last_bb (cond_bb);
1259 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1260
1261 if (negate)
1262 {
      /* Get the right GSI.  We want to insert after the recently
	 added ABS_EXPR statement (which we know is the first statement
	 in the block).  */
1266 new_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, result, lhs, NULL);
1267
1268 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1269 }
1270
1271 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1272
1273 /* Note that we optimized this PHI. */
1274 return true;
1275 }
1276
1277 /* The function neg_replacement replaces conditional negation with
1278 equivalent straight line code. Returns TRUE if replacement is done,
1279 otherwise returns FALSE.
1280
   COND_BB branches around the negation occurring in MIDDLE_BB.

   E0 and E1 are edges out of COND_BB.  E0 reaches MIDDLE_BB and
   E1 reaches the other successor which should contain a PHI with
   arguments ARG0 and ARG1.
1286
1287 Assuming negation is to occur when the condition is true,
1288 then the non-branching sequence is:
1289
1290 result = (rhs ^ -cond) + cond
1291
1292 Inverting the condition or its result gives us negation
1293 when the original condition is false. */
1294
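/* To see why the identity holds (two's complement arithmetic): when
   cond is 1, -cond is all ones, so rhs ^ -cond is ~rhs and ~rhs + 1 is
   -rhs; when cond is 0, the expression reduces to (rhs ^ 0) + 0, which
   is rhs.  */
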
1295 static bool
1296 neg_replacement (basic_block cond_bb, basic_block middle_bb,
1297 edge e0 ATTRIBUTE_UNUSED, edge e1,
1298 gimple phi, tree arg0, tree arg1)
1299 {
1300 gimple new_stmt, cond;
1301 gimple_stmt_iterator gsi;
1302 gimple assign;
1303 edge true_edge, false_edge;
1304 tree rhs, lhs;
1305 enum tree_code cond_code;
1306 bool invert = false;
1307
  /* This transformation performs logical operations on the incoming
     arguments, so they are required to have integral types.  */
1310 if (!INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
1311 return false;
1312
1313 /* OTHER_BLOCK must have only one executable statement which must have the
1314 form arg0 = -arg1 or arg1 = -arg0. */
1315
1316 assign = last_and_only_stmt (middle_bb);
1317 /* If we did not find the proper negation assignment, then we can not
1318 optimize. */
1319 if (assign == NULL)
1320 return false;
1321
1322 /* If we got here, then we have found the only executable statement
1323 in OTHER_BLOCK. If it is anything other than arg0 = -arg1 or
1324 arg1 = -arg0, then we can not optimize. */
1325 if (gimple_code (assign) != GIMPLE_ASSIGN)
1326 return false;
1327
1328 lhs = gimple_assign_lhs (assign);
1329
1330 if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
1331 return false;
1332
1333 rhs = gimple_assign_rhs1 (assign);
1334
1335 /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
1336 if (!(lhs == arg0 && rhs == arg1)
1337 && !(lhs == arg1 && rhs == arg0))
1338 return false;
1339
1340 /* The basic sequence assumes we negate when the condition is true.
1341 If we need the opposite, then we will either need to invert the
1342 condition or its result. */
1343 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1344 invert = false_edge->dest == middle_bb;
1345
1346 /* Unlike abs_replacement, we can handle arbitrary conditionals here. */
1347 cond = last_stmt (cond_bb);
1348 cond_code = gimple_cond_code (cond);
1349
1350 /* If inversion is needed, first try to invert the test since
1351 that's cheapest. */
1352 if (invert)
1353 {
1354 bool honor_nans
1355 = HONOR_NANS (TYPE_MODE (TREE_TYPE (gimple_cond_lhs (cond))));
1356 enum tree_code new_code = invert_tree_comparison (cond_code, honor_nans);
1357
1358 /* If invert_tree_comparison was successful, then use its return
1359 value as the new code and note that inversion is no longer
1360 needed. */
1361 if (new_code != ERROR_MARK)
1362 {
1363 cond_code = new_code;
1364 invert = false;
1365 }
1366 }
1367
1368 tree cond_val = make_ssa_name (boolean_type_node, NULL);
1369 new_stmt = gimple_build_assign_with_ops (cond_code, cond_val,
1370 gimple_cond_lhs (cond),
1371 gimple_cond_rhs (cond));
1372 gsi = gsi_last_bb (cond_bb);
1373 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1374
1375 /* If we still need inversion, then invert the result of the
1376 condition. */
1377 if (invert)
1378 {
1379 tree tmp = make_ssa_name (boolean_type_node, NULL);
1380 new_stmt = gimple_build_assign_with_ops (BIT_XOR_EXPR, tmp,
1381 cond_val, boolean_true_node);
1382 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1383 cond_val = tmp;
1384 }
1385
1386 /* Get the condition in the right type so that we can perform
1387 logical and arithmetic operations on it. */
1388 tree cond_val_converted = make_ssa_name (TREE_TYPE (rhs), NULL);
1389 new_stmt = gimple_build_assign_with_ops (NOP_EXPR, cond_val_converted,
1390 cond_val, NULL_TREE);
1391 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1392
1393 tree neg_cond_val_converted = make_ssa_name (TREE_TYPE (rhs), NULL);
1394 new_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, neg_cond_val_converted,
1395 cond_val_converted, NULL_TREE);
1396 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1397
1398 tree tmp = make_ssa_name (TREE_TYPE (rhs), NULL);
1399 new_stmt = gimple_build_assign_with_ops (BIT_XOR_EXPR, tmp,
1400 rhs, neg_cond_val_converted);
1401 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1402
1403 tree new_lhs = make_ssa_name (TREE_TYPE (rhs), NULL);
1404 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, new_lhs,
1405 tmp, cond_val_converted);
1406 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1407
1408 replace_phi_edge_with_variable (cond_bb, e1, phi, new_lhs);
1409
1410 /* Note that we optimized this PHI. */
1411 return true;
1412 }
1413
1414 /* Auxiliary functions to determine the set of memory accesses which
1415 can't trap because they are preceded by accesses to the same memory
1416 portion. We do that for MEM_REFs, so we only need to track
1417 the SSA_NAME of the pointer indirectly referenced. The algorithm
1418 simply is a walk over all instructions in dominator order. When
   we see a MEM_REF we determine if we've already seen the same
   ref anywhere up to the root of the dominator tree.  If we have, the
   current access can't trap.  If we don't see any dominating access
1422 the current access might trap, but might also make later accesses
1423 non-trapping, so we remember it. We need to be careful with loads
1424 or stores, for instance a load might not trap, while a store would,
1425 so if we see a dominating read access this doesn't mean that a later
1426 write access would not trap. Hence we also need to differentiate the
1427 type of access(es) seen.
1428
1429 ??? We currently are very conservative and assume that a load might
1430 trap even if a store doesn't (write-only memory). This probably is
1431 overly conservative. */
1432
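/* For example (illustrative), in

     *p = x;
     if (c)
       *p = y;

   the dominating store through p proves the conditional store cannot
   trap, which is what later allows it to be sunk and made
   unconditional.  */
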
/* A hash-table of SSA_NAMEs, and in which basic block a MEM_REF
1434 through it was seen, which would constitute a no-trap region for
1435 same accesses. */
1436 struct name_to_bb
1437 {
1438 unsigned int ssa_name_ver;
1439 unsigned int phase;
1440 bool store;
1441 HOST_WIDE_INT offset, size;
1442 basic_block bb;
1443 };
1444
1445 /* Hashtable helpers. */
1446
1447 struct ssa_names_hasher : typed_free_remove <name_to_bb>
1448 {
1449 typedef name_to_bb value_type;
1450 typedef name_to_bb compare_type;
1451 static inline hashval_t hash (const value_type *);
1452 static inline bool equal (const value_type *, const compare_type *);
1453 };
1454
1455 /* Used for quick clearing of the hash-table when we see calls.
1456 Hash entries with phase < nt_call_phase are invalid. */
1457 static unsigned int nt_call_phase;
1458
1459 /* The hash function. */
1460
1461 inline hashval_t
1462 ssa_names_hasher::hash (const value_type *n)
1463 {
1464 return n->ssa_name_ver ^ (((hashval_t) n->store) << 31)
1465 ^ (n->offset << 6) ^ (n->size << 3);
1466 }
1467
1468 /* The equality function of *P1 and *P2. */
1469
1470 inline bool
1471 ssa_names_hasher::equal (const value_type *n1, const compare_type *n2)
1472 {
1473 return n1->ssa_name_ver == n2->ssa_name_ver
1474 && n1->store == n2->store
1475 && n1->offset == n2->offset
1476 && n1->size == n2->size;
1477 }
1478
1479 class nontrapping_dom_walker : public dom_walker
1480 {
1481 public:
1482 nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
1483 : dom_walker (direction), m_nontrapping (ps), m_seen_ssa_names (128) {}
1484
1485 virtual void before_dom_children (basic_block);
1486 virtual void after_dom_children (basic_block);
1487
1488 private:
1489
1490 /* We see the expression EXP in basic block BB. If it's an interesting
     expression (a MEM_REF through an SSA_NAME) possibly insert the
1492 expression into the set NONTRAP or the hash table of seen expressions.
1493 STORE is true if this expression is on the LHS, otherwise it's on
1494 the RHS. */
1495 void add_or_mark_expr (basic_block, tree, bool);
1496
1497 hash_set<tree> *m_nontrapping;
1498
1499 /* The hash table for remembering what we've seen. */
1500 hash_table<ssa_names_hasher> m_seen_ssa_names;
1501 };
1502
1503 /* Called by walk_dominator_tree, when entering the block BB. */
1504 void
1505 nontrapping_dom_walker::before_dom_children (basic_block bb)
1506 {
1507 edge e;
1508 edge_iterator ei;
1509 gimple_stmt_iterator gsi;
1510
1511 /* If we haven't seen all our predecessors, clear the hash-table. */
1512 FOR_EACH_EDGE (e, ei, bb->preds)
1513 if ((((size_t)e->src->aux) & 2) == 0)
1514 {
1515 nt_call_phase++;
1516 break;
1517 }
1518
1519 /* Mark this BB as being on the path to dominator root and as visited. */
1520 bb->aux = (void*)(1 | 2);
1521
1522 /* And walk the statements in order. */
1523 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1524 {
1525 gimple stmt = gsi_stmt (gsi);
1526
1527 if (is_gimple_call (stmt) && !nonfreeing_call_p (stmt))
1528 nt_call_phase++;
1529 else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
1530 {
1531 add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
1532 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
1533 }
1534 }
1535 }
1536
1537 /* Called by walk_dominator_tree, when basic block BB is exited. */
1538 void
1539 nontrapping_dom_walker::after_dom_children (basic_block bb)
1540 {
1541 /* This BB isn't on the path to dominator root anymore. */
1542 bb->aux = (void*)2;
1543 }
1544
1545 /* We see the expression EXP in basic block BB. If it's an interesting
   expression (a MEM_REF through an SSA_NAME) possibly insert the
1547 expression into the set NONTRAP or the hash table of seen expressions.
1548 STORE is true if this expression is on the LHS, otherwise it's on
1549 the RHS. */
1550 void
1551 nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
1552 {
1553 HOST_WIDE_INT size;
1554
1555 if (TREE_CODE (exp) == MEM_REF
1556 && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME
1557 && tree_fits_shwi_p (TREE_OPERAND (exp, 1))
1558 && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
1559 {
1560 tree name = TREE_OPERAND (exp, 0);
1561 struct name_to_bb map;
1562 name_to_bb **slot;
1563 struct name_to_bb *n2bb;
1564 basic_block found_bb = 0;
1565
1566 /* Try to find the last seen MEM_REF through the same
1567 SSA_NAME, which can trap. */
1568 map.ssa_name_ver = SSA_NAME_VERSION (name);
1569 map.phase = 0;
1570 map.bb = 0;
1571 map.store = store;
1572 map.offset = tree_to_shwi (TREE_OPERAND (exp, 1));
1573 map.size = size;
1574
1575 slot = m_seen_ssa_names.find_slot (&map, INSERT);
1576 n2bb = *slot;
1577 if (n2bb && n2bb->phase >= nt_call_phase)
1578 found_bb = n2bb->bb;
1579
1580 /* If we've found a trapping MEM_REF, _and_ it dominates EXP
1581 (it's in a basic block on the path from us to the dominator root)
1582 then we can't trap. */
1583 if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
1584 {
1585 m_nontrapping->add (exp);
1586 }
1587 else
1588 {
1589 /* EXP might trap, so insert it into the hash table. */
1590 if (n2bb)
1591 {
1592 n2bb->phase = nt_call_phase;
1593 n2bb->bb = bb;
1594 }
1595 else
1596 {
1597 n2bb = XNEW (struct name_to_bb);
1598 n2bb->ssa_name_ver = SSA_NAME_VERSION (name);
1599 n2bb->phase = nt_call_phase;
1600 n2bb->bb = bb;
1601 n2bb->store = store;
1602 n2bb->offset = map.offset;
1603 n2bb->size = size;
1604 *slot = n2bb;
1605 }
1606 }
1607 }
1608 }
1609
/* This is the entry point of gathering non-trapping memory accesses.
   It will do a dominator walk over the whole function, and it will
   make use of the bb->aux pointers.  It returns a set of trees
   (the MEM_REFs themselves) which can't trap.  */
1614 static hash_set<tree> *
1615 get_non_trapping (void)
1616 {
1617 nt_call_phase = 0;
1618 hash_set<tree> *nontrap = new hash_set<tree>;
1619 /* We're going to do a dominator walk, so ensure that we have
1620 dominance information. */
1621 calculate_dominance_info (CDI_DOMINATORS);
1622
1623 nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
1624 .walk (cfun->cfg->x_entry_block_ptr);
1625
1626 clear_aux_for_blocks ();
1627 return nontrap;
1628 }
1629
1630 /* Do the main work of conditional store replacement. We already know
1631 that the recognized pattern looks like so:
1632
1633 split:
1634 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
1635 MIDDLE_BB:
1636 something
1637 fallthrough (edge E0)
1638 JOIN_BB:
1639 some more
1640
1641 We check that MIDDLE_BB contains only one store, that that store
1642 doesn't trap (not via NOTRAP, but via checking if an access to the same
1643 memory location dominates us) and that the store has a "simple" RHS. */
1644
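/* The non-trapping check matters because the transformation introduces
   a load of the destination on the path that previously did not touch
   it; e.g. (illustrative) if P may be null whenever the store is
   skipped, loading *P on that edge would introduce a fault that the
   original program never had.  */
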
1645 static bool
1646 cond_store_replacement (basic_block middle_bb, basic_block join_bb,
1647 edge e0, edge e1, hash_set<tree> *nontrap)
1648 {
1649 gimple assign = last_and_only_stmt (middle_bb);
1650 tree lhs, rhs, name, name2;
1651 gimple newphi, new_stmt;
1652 gimple_stmt_iterator gsi;
1653 source_location locus;
1654
  /* Check if middle_bb contains only one store.  */
1656 if (!assign
1657 || !gimple_assign_single_p (assign)
1658 || gimple_has_volatile_ops (assign))
1659 return false;
1660
1661 locus = gimple_location (assign);
1662 lhs = gimple_assign_lhs (assign);
1663 rhs = gimple_assign_rhs1 (assign);
1664 if (TREE_CODE (lhs) != MEM_REF
1665 || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
1666 || !is_gimple_reg_type (TREE_TYPE (lhs)))
1667 return false;
1668
  /* Prove that we can move the store down.  We could also check
     TREE_THIS_NOTRAP here, but in that case we also could move stores
     whose value is not readily available, which we want to avoid.  */
1672 if (!nontrap->contains (lhs))
1673 return false;
1674
1675 /* Now we've checked the constraints, so do the transformation:
1676 1) Remove the single store. */
1677 gsi = gsi_for_stmt (assign);
1678 unlink_stmt_vdef (assign);
1679 gsi_remove (&gsi, true);
1680 release_defs (assign);
1681
1682 /* 2) Insert a load from the memory of the store to the temporary
1683 on the edge which did not contain the store. */
1684 lhs = unshare_expr (lhs);
1685 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1686 new_stmt = gimple_build_assign (name, lhs);
1687 gimple_set_location (new_stmt, locus);
1688 gsi_insert_on_edge (e1, new_stmt);
1689
1690 /* 3) Create a PHI node at the join block, with one argument
1691 holding the old RHS, and the other holding the temporary
1692 where we stored the old memory contents. */
1693 name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1694 newphi = create_phi_node (name2, join_bb);
1695 add_phi_arg (newphi, rhs, e0, locus);
1696 add_phi_arg (newphi, name, e1, locus);
1697
1698 lhs = unshare_expr (lhs);
1699 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
1700
  /* 4) Insert the store that uses the PHI result.  */
1702 gsi = gsi_after_labels (join_bb);
1703 if (gsi_end_p (gsi))
1704 {
1705 gsi = gsi_last_bb (join_bb);
1706 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1707 }
1708 else
1709 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1710
1711 return true;
1712 }

/* Do the main work of conditional store replacement.  */

static bool
cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
                                  basic_block join_bb, gimple then_assign,
                                  gimple else_assign)
{
  tree lhs_base, lhs, then_rhs, else_rhs, name;
  source_location then_locus, else_locus;
  gimple_stmt_iterator gsi;
  gimple newphi, new_stmt;

  if (then_assign == NULL
      || !gimple_assign_single_p (then_assign)
      || gimple_clobber_p (then_assign)
      || gimple_has_volatile_ops (then_assign)
      || else_assign == NULL
      || !gimple_assign_single_p (else_assign)
      || gimple_clobber_p (else_assign)
      || gimple_has_volatile_ops (else_assign))
    return false;

  lhs = gimple_assign_lhs (then_assign);
  if (!is_gimple_reg_type (TREE_TYPE (lhs))
      || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
    return false;

  lhs_base = get_base_address (lhs);
  if (lhs_base == NULL_TREE
      || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
    return false;

  then_rhs = gimple_assign_rhs1 (then_assign);
  else_rhs = gimple_assign_rhs1 (else_assign);
  then_locus = gimple_location (then_assign);
  else_locus = gimple_location (else_assign);

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the stores.  */
  gsi = gsi_for_stmt (then_assign);
  unlink_stmt_vdef (then_assign);
  gsi_remove (&gsi, true);
  release_defs (then_assign);

  gsi = gsi_for_stmt (else_assign);
  unlink_stmt_vdef (else_assign);
  gsi_remove (&gsi, true);
  release_defs (else_assign);

  /* 2) Create a PHI node at the join block, with one argument holding
     the RHS of the THEN branch's store and the other holding the RHS
     of the ELSE branch's store.  */
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name, join_bb);
  add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
  add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);

  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 3) Insert the new store of the PHI result at the start of the
     join block.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  return true;
}

/* Conditional store replacement.  We already know
   that the recognized pattern looks like so:

   split:
     if (cond) goto THEN_BB; else goto ELSE_BB
   THEN_BB:
     ...
     X = Y;
     ...
     goto JOIN_BB;
   ELSE_BB:
     ...
     X = Z;
     ...
     fallthrough to JOIN_BB
   JOIN_BB:
     some more statements

   We check that it is safe to sink the store to JOIN_BB by verifying
   that there are no read-after-write or write-after-write dependencies
   in THEN_BB and ELSE_BB.  */

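/* As a hedged source-level sketch of the transformation (the names are
   made up), the pattern above corresponds to

     if (cond)
       *p = a;
     else
       *p = b;

   which is rewritten into

     cstore_1 = cond ? a : b;      (the PHI node in JOIN_BB)
     *p = cstore_1;                (a single unconditional store)

   Unlike the single-store case, no load of the old *p is needed,
   because a store happens on both paths.  */
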
static bool
cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
                                basic_block join_bb)
{
  gimple then_assign = last_and_only_stmt (then_bb);
  gimple else_assign = last_and_only_stmt (else_bb);
  vec<data_reference_p> then_datarefs, else_datarefs;
  vec<ddr_p> then_ddrs, else_ddrs;
  gimple then_store, else_store;
  bool found, ok = false, res;
  struct data_dependence_relation *ddr;
  data_reference_p then_dr, else_dr;
  int i, j;
  tree then_lhs, else_lhs;
  basic_block blocks[3];

  if (MAX_STORES_TO_SINK == 0)
    return false;

  /* Handle the case with a single statement in each of THEN_BB and
     ELSE_BB.  */
  if (then_assign && else_assign)
    return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
                                             then_assign, else_assign);

  /* Find data references.  */
  then_datarefs.create (1);
  else_datarefs.create (1);
  if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
       == chrec_dont_know)
      || !then_datarefs.length ()
      || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
          == chrec_dont_know)
      || !else_datarefs.length ())
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }

  /* Find pairs of stores with equal LHS.  */
  auto_vec<gimple, 1> then_stores, else_stores;
  FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
    {
      if (DR_IS_READ (then_dr))
        continue;

      then_store = DR_STMT (then_dr);
      then_lhs = gimple_get_lhs (then_store);
      if (then_lhs == NULL_TREE)
        continue;
      found = false;

      FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
        {
          if (DR_IS_READ (else_dr))
            continue;

          else_store = DR_STMT (else_dr);
          else_lhs = gimple_get_lhs (else_store);
          if (else_lhs == NULL_TREE)
            continue;

          if (operand_equal_p (then_lhs, else_lhs, 0))
            {
              found = true;
              break;
            }
        }

      if (!found)
        continue;

      then_stores.safe_push (then_store);
      else_stores.safe_push (else_store);
    }

  /* Bail out if no pairs of stores were found, or if there are too
     many of them to sink profitably.  */
  if (!then_stores.length ()
      || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }

  /* Compute and check data dependencies in both basic blocks.  */
  then_ddrs.create (1);
  else_ddrs.create (1);
  if (!compute_all_dependences (then_datarefs, &then_ddrs,
                                vNULL, false)
      || !compute_all_dependences (else_datarefs, &else_ddrs,
                                   vNULL, false))
    {
      free_dependence_relations (then_ddrs);
      free_dependence_relations (else_ddrs);
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }
  blocks[0] = then_bb;
  blocks[1] = else_bb;
  blocks[2] = join_bb;
  renumber_gimple_stmt_uids_in_blocks (blocks, 3);

  /* Check that there are no read-after-write or write-after-write
     dependencies in THEN_BB.  */
  FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
          && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
               && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
              || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
                  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
              || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
        {
          free_dependence_relations (then_ddrs);
          free_dependence_relations (else_ddrs);
          free_data_refs (then_datarefs);
          free_data_refs (else_datarefs);
          return false;
        }
    }

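  /* For instance (a hedged example), a THEN_BB such as
     "*p = x; tmp = *p;" has a read-after-write dependence: sinking the
     store to JOIN_BB would change the value TMP observes, so the loop
     above gives up on such blocks.  */
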
  /* Check that there are no read-after-write or write-after-write
     dependencies in ELSE_BB.  */
  FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
          && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
               && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
              || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
                  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
              || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
        {
          free_dependence_relations (then_ddrs);
          free_dependence_relations (else_ddrs);
          free_data_refs (then_datarefs);
          free_data_refs (else_datarefs);
          return false;
        }
    }

  /* Sink stores with the same LHS.  */
  FOR_EACH_VEC_ELT (then_stores, i, then_store)
    {
      else_store = else_stores[i];
      res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
                                              then_store, else_store);
      ok = ok || res;
    }

  free_dependence_relations (then_ddrs);
  free_dependence_relations (else_ddrs);
  free_data_refs (then_datarefs);
  free_data_refs (else_datarefs);

  return ok;
}

/* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB.  */

static bool
local_mem_dependence (gimple stmt, basic_block bb)
{
  tree vuse = gimple_vuse (stmt);
  gimple def;

  if (!vuse)
    return false;

  def = SSA_NAME_DEF_STMT (vuse);
  return (def && gimple_bb (def) == bb);
}

/* Given a "diamond" control-flow pattern where BB0 tests a condition,
   BB1 and BB2 are "then" and "else" blocks dependent on this test,
   and BB3 rejoins control flow following BB1 and BB2, look for
   opportunities to hoist loads as follows.  If BB3 contains a PHI of
   two loads, one each occurring in BB1 and BB2, and the loads are
   provably of adjacent fields in the same structure, then move both
   loads into BB0.  Of course this can only be done if there are no
   dependencies preventing such motion.

   One of the hoisted loads will always be speculative, so the
   transformation is currently conservative:

   - The fields must be strictly adjacent.
   - The two fields must occupy a single memory block that is
     guaranteed to not cross a page boundary.

   The second condition is difficult to prove, as such memory blocks
   should be aligned on the minimum of the stack alignment boundary
   and the alignment guaranteed by heap allocation interfaces.  Thus
   we rely on a parameter for the alignment value.
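
   For instance (an illustrative scenario): if the first field ended
   exactly at a page boundary and the second began on the next page,
   the speculative load of the second field could fault even though
   the original program never touched it; keeping both fields inside
   one suitably aligned block rules this out.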

   Provided a good value is used for that parameter, the first
   restriction could possibly be relaxed.  */

static void
hoist_adjacent_loads (basic_block bb0, basic_block bb1,
                      basic_block bb2, basic_block bb3)
{
  int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
  unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
  gimple_stmt_iterator gsi;

  /* Walk the phis in bb3 looking for an opportunity.  We are looking
     for phis of two SSA names, one each of which is defined in bb1 and
     bb2.  */
  for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi_stmt = gsi_stmt (gsi);
      gimple def1, def2, defswap;
      tree arg1, arg2, ref1, ref2, field1, field2, fieldswap;
      tree tree_offset1, tree_offset2, tree_size2, next;
      int offset1, offset2, size2;
      unsigned align1;
      gimple_stmt_iterator gsi2;
      basic_block bb_for_def1, bb_for_def2;

      if (gimple_phi_num_args (phi_stmt) != 2
          || virtual_operand_p (gimple_phi_result (phi_stmt)))
        continue;

      arg1 = gimple_phi_arg_def (phi_stmt, 0);
      arg2 = gimple_phi_arg_def (phi_stmt, 1);

      if (TREE_CODE (arg1) != SSA_NAME
          || TREE_CODE (arg2) != SSA_NAME
          || SSA_NAME_IS_DEFAULT_DEF (arg1)
          || SSA_NAME_IS_DEFAULT_DEF (arg2))
        continue;

      def1 = SSA_NAME_DEF_STMT (arg1);
      def2 = SSA_NAME_DEF_STMT (arg2);

      if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
          && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
        continue;

      /* Check the mode of the arguments to be sure a conditional move
         can be generated for them.  */
      if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
          == CODE_FOR_nothing)
        continue;

      /* Both statements must be assignments whose RHS is a COMPONENT_REF.  */
      if (!gimple_assign_single_p (def1)
          || !gimple_assign_single_p (def2)
          || gimple_has_volatile_ops (def1)
          || gimple_has_volatile_ops (def2))
        continue;

      ref1 = gimple_assign_rhs1 (def1);
      ref2 = gimple_assign_rhs1 (def2);

      if (TREE_CODE (ref1) != COMPONENT_REF
          || TREE_CODE (ref2) != COMPONENT_REF)
        continue;

      /* The zeroth operand of the two component references must be
         identical.  It is not sufficient to compare get_base_address of
         the two references, because this could allow for different
         elements of the same array in the two trees.  It is not safe to
         assume that the existence of one array element implies the
         existence of a different one.  */
      if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
        continue;

      field1 = TREE_OPERAND (ref1, 1);
      field2 = TREE_OPERAND (ref2, 1);

      /* Check for field adjacency, and ensure field1 comes first.  */
      for (next = DECL_CHAIN (field1);
           next && TREE_CODE (next) != FIELD_DECL;
           next = DECL_CHAIN (next))
        ;

      if (next != field2)
        {
          for (next = DECL_CHAIN (field2);
               next && TREE_CODE (next) != FIELD_DECL;
               next = DECL_CHAIN (next))
            ;

          if (next != field1)
            continue;

          fieldswap = field1;
          field1 = field2;
          field2 = fieldswap;
          defswap = def1;
          def1 = def2;
          def2 = defswap;
        }

      bb_for_def1 = gimple_bb (def1);
      bb_for_def2 = gimple_bb (def2);

      /* Check for proper alignment of the first field.  */
      tree_offset1 = bit_position (field1);
      tree_offset2 = bit_position (field2);
      tree_size2 = DECL_SIZE (field2);

      if (!tree_fits_uhwi_p (tree_offset1)
          || !tree_fits_uhwi_p (tree_offset2)
          || !tree_fits_uhwi_p (tree_size2))
        continue;

      offset1 = tree_to_uhwi (tree_offset1);
      offset2 = tree_to_uhwi (tree_offset2);
      size2 = tree_to_uhwi (tree_size2);
      align1 = DECL_ALIGN (field1) % param_align_bits;

      if (offset1 % BITS_PER_UNIT != 0)
        continue;

      /* For profitability, the two field references should fit within
         a single cache line.  */
      if (align1 + offset2 - offset1 + size2 > param_align_bits)
        continue;
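
      /* A worked example with assumed numbers: for a 64-byte (512-bit)
         cache line, two adjacent 64-bit fields at bit offsets 0 and 64
         with ALIGN1 == 0 give 0 + 64 - 0 + 64 = 128 <= 512, so the
         pair qualifies.  */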

      /* The two expressions cannot be dependent upon vdefs defined
         in bb1/bb2.  */
      if (local_mem_dependence (def1, bb_for_def1)
          || local_mem_dependence (def2, bb_for_def2))
        continue;

      /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
         bb0.  We hoist the first one first so that a cache miss is handled
         efficiently regardless of hardware cache-fill policy.  */
      gsi2 = gsi_for_stmt (def1);
      gsi_move_to_bb_end (&gsi2, bb0);
      gsi2 = gsi_for_stmt (def2);
      gsi_move_to_bb_end (&gsi2, bb0);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file,
                   "\nHoisting adjacent loads from %d and %d into %d: \n",
                   bb_for_def1->index, bb_for_def2->index, bb0->index);
          print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
          print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
        }
    }
}

/* Determine whether we should attempt to hoist adjacent loads out of
   diamond patterns in pass_phiopt.  Always hoist loads if
   -fhoist-adjacent-loads is specified and the target machine has
   both a conditional move instruction and a defined cache line size.  */

static bool
gate_hoist_loads (void)
{
  return (flag_hoist_adjacent_loads == 1
          && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
          && HAVE_conditional_move);
}

/* This pass tries to replace an if-then-else block with an
   assignment.  We have four kinds of transformations.  Some of these
   transformations are also performed by the ifcvt RTL optimizer.

   Conditional Replacement
   -----------------------

   This transformation, implemented in conditional_replacement,
   replaces

     bb0:
      if (cond) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <0 (bb1), 1 (bb0), ...>;

   with

     bb0:
      x' = cond;
      goto bb2;
     bb2:
      x = PHI <x' (bb0), ...>;

   We remove bb1 as it becomes unreachable.  This occurs often due to
   gimplification of conditionals.
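
   For example (a hedged source-level sketch): "if (a > b) x = 1;
   else x = 0;" becomes simply "x = a > b;".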

   Value Replacement
   -----------------

   This transformation, implemented in value_replacement, replaces

     bb0:
      if (a != b) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <a (bb1), b (bb0), ...>;

   with

     bb0:
     bb2:
      x = PHI <b (bb0), ...>;

   This opportunity can sometimes occur as a result of other
   optimizations.
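
   At the source level this corresponds to something like
   "x = (a != b) ? b : a;": on the else path a equals b, so x is b
   either way.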

   Another case caught by value replacement looks like this:

     bb0:
       t1 = a == CONST;
       t2 = b > c;
       t3 = t1 & t2;
       if (t3 != 0) goto bb1; else goto bb2;
     bb1:
     bb2:
       x = PHI <CONST (bb1), a (bb0)>

   Gets replaced with:
     bb0:
     bb2:
       t1 = a == CONST;
       t2 = b > c;
       t3 = t1 & t2;
       x = a;
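
   At the source level (sketch): in "x = (a == CONST && b > c)
   ? CONST : a;" the true arm assigns CONST only when a == CONST
   already holds, so x = a in either case.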

   ABS Replacement
   ---------------

   This transformation, implemented in abs_replacement, replaces

     bb0:
      if (a >= 0) goto bb2; else goto bb1;
     bb1:
      x = -a;
     bb2:
      x = PHI <x (bb1), a (bb0), ...>;

   with

     bb0:
      x' = ABS_EXPR< a >;
     bb2:
      x = PHI <x' (bb0), ...>;
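
   I.e., the familiar source-level idiom "x = (a >= 0) ? a : -a;"
   becomes an ABS_EXPR.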

   MIN/MAX Replacement
   -------------------

   This transformation, implemented in minmax_replacement, replaces

     bb0:
      if (a <= b) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <b (bb1), a (bb0), ...>;

   with

     bb0:
      x' = MIN_EXPR (a, b)
     bb2:
      x = PHI <x' (bb0), ...>;

   A similar transformation is done for MAX_EXPR.
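
   E.g., "x = (a <= b) ? a : b;" at the source level becomes a single
   MIN_EXPR (a source-level sketch).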

   This pass also performs a fifth transformation of a slightly
   different flavor.

   Adjacent Load Hoisting
   ----------------------

   This transformation replaces

     bb0:
      if (...) goto bb2; else goto bb1;
     bb1:
      x1 = (<expr>).field1;
      goto bb3;
     bb2:
      x2 = (<expr>).field2;
     bb3:
      # x = PHI <x1, x2>;

   with

     bb0:
      x1 = (<expr>).field1;
      x2 = (<expr>).field2;
      if (...) goto bb2; else goto bb1;
     bb1:
      goto bb3;
     bb2:
     bb3:
      # x = PHI <x1, x2>;

   The purpose of this transformation is to enable generation of conditional
   move instructions such as Intel CMOV or PowerPC ISEL.  Because one of
   the loads is speculative, the transformation is restricted to very
   specific cases to avoid introducing a page fault.  We are looking for
   the common idiom:

     if (...)
       x = y->left;
     else
       x = y->right;

   where left and right are typically adjacent pointers in a tree
   structure.  */

namespace {

const pass_data pass_data_phiopt =
{
  GIMPLE_PASS, /* type */
  "phiopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_phiopt : public gimple_opt_pass
{
public:
  pass_phiopt (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_phiopt, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_phiopt (m_ctxt); }
  virtual bool gate (function *) { return flag_ssa_phiopt; }
  virtual unsigned int execute (function *)
  {
    return tree_ssa_phiopt_worker (false, gate_hoist_loads ());
  }

}; // class pass_phiopt

} // anon namespace

gimple_opt_pass *
make_pass_phiopt (gcc::context *ctxt)
{
  return new pass_phiopt (ctxt);
}

namespace {

const pass_data pass_data_cselim =
{
  GIMPLE_PASS, /* type */
  "cselim", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_cselim : public gimple_opt_pass
{
public:
  pass_cselim (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cselim, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tree_cselim; }
  virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }

}; // class pass_cselim

} // anon namespace

gimple_opt_pass *
make_pass_cselim (gcc::context *ctxt)
{
  return new pass_cselim (ctxt);
}