re PR tree-optimization/84178 (ICE in release_bb_predicate)
[gcc.git] / gcc / tree-if-conv.c
1 /* If-conversion for vectorizer.
2 Copyright (C) 2004-2018 Free Software Foundation, Inc.
3 Contributed by Devang Patel <dpatel@apple.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* This pass implements a tree level if-conversion of loops. Its
22 initial goal is to help the vectorizer to vectorize loops with
23 conditions.
24
25 A short description of if-conversion:
26
27 o Decide if a loop is if-convertible or not.
28 o Walk all loop basic blocks in breadth first order (BFS order).
29 o Remove conditional statements (at the end of basic block)
30 and propagate condition into destination basic blocks'
31 predicate list.
32 o Replace modify expression with conditional modify expression
33 using current basic block's condition.
34 o Merge all basic blocks
35 o Replace phi nodes with conditional modify expr
36 o Merge all basic blocks into header
37
38 Sample transformation:
39
40 INPUT
41 -----
42
43 # i_23 = PHI <0(0), i_18(10)>;
44 <L0>:;
45 j_15 = A[i_23];
46 if (j_15 > 41) goto <L1>; else goto <L17>;
47
48 <L17>:;
49 goto <bb 3> (<L3>);
50
51 <L1>:;
52
53 # iftmp.2_4 = PHI <0(8), 42(2)>;
54 <L3>:;
55 A[i_23] = iftmp.2_4;
56 i_18 = i_23 + 1;
57 if (i_18 <= 15) goto <L19>; else goto <L18>;
58
59 <L19>:;
60 goto <bb 1> (<L0>);
61
62 <L18>:;
63
64 OUTPUT
65 ------
66
67 # i_23 = PHI <0(0), i_18(10)>;
68 <L0>:;
69 j_15 = A[i_23];
70
71 <L3>:;
72 iftmp.2_4 = j_15 > 41 ? 42 : 0;
73 A[i_23] = iftmp.2_4;
74 i_18 = i_23 + 1;
75 if (i_18 <= 15) goto <L19>; else goto <L18>;
76
77 <L19>:;
78 goto <bb 1> (<L0>);
79
80 <L18>:;
81 */
82
83 #include "config.h"
84 #include "system.h"
85 #include "coretypes.h"
86 #include "backend.h"
87 #include "rtl.h"
88 #include "tree.h"
89 #include "gimple.h"
90 #include "cfghooks.h"
91 #include "tree-pass.h"
92 #include "ssa.h"
93 #include "expmed.h"
94 #include "optabs-query.h"
95 #include "gimple-pretty-print.h"
96 #include "alias.h"
97 #include "fold-const.h"
98 #include "stor-layout.h"
99 #include "gimple-fold.h"
100 #include "gimplify.h"
101 #include "gimple-iterator.h"
102 #include "gimplify-me.h"
103 #include "tree-cfg.h"
104 #include "tree-into-ssa.h"
105 #include "tree-ssa.h"
106 #include "cfgloop.h"
107 #include "tree-data-ref.h"
108 #include "tree-scalar-evolution.h"
109 #include "tree-ssa-loop.h"
110 #include "tree-ssa-loop-niter.h"
111 #include "tree-ssa-loop-ivopts.h"
112 #include "tree-ssa-address.h"
113 #include "dbgcnt.h"
114 #include "tree-hash-traits.h"
115 #include "varasm.h"
116 #include "builtins.h"
117 #include "params.h"
118 #include "cfganal.h"
119
/* Only handle PHIs with no more arguments unless we are asked to by
   simd pragma.  */
#define MAX_PHI_ARG_NUM \
  ((unsigned) PARAM_VALUE (PARAM_MAX_TREE_IF_CONVERSION_PHI_ARGS))

/* Indicate if new load/store that needs to be predicated is introduced
   during if conversion.  Set by if_convertible_gimple_assign_stmt_p.  */
static bool any_pred_load_store;

/* Indicate if there are any complicated PHIs that need to be handled in
   if-conversion.  Complicated PHI has more than two arguments and can't
   be degenerated to two arguments PHI.  See more information in comment
   before phi_convertible_by_degenerating_args.  */
static bool any_complicated_phi;
134
/* Hash for struct innermost_loop_behavior.  It depends on the user to
   free the memory.  */

struct innermost_loop_behavior_hash : nofree_ptr_hash <innermost_loop_behavior>
{
  /* Hash the four address components: base_address, offset, init, step.  */
  static inline hashval_t hash (const value_type &);
  /* Field-wise equality using operand_equal_p.  */
  static inline bool equal (const value_type &,
			    const compare_type &);
};
144
145 inline hashval_t
146 innermost_loop_behavior_hash::hash (const value_type &e)
147 {
148 hashval_t hash;
149
150 hash = iterative_hash_expr (e->base_address, 0);
151 hash = iterative_hash_expr (e->offset, hash);
152 hash = iterative_hash_expr (e->init, hash);
153 return iterative_hash_expr (e->step, hash);
154 }
155
156 inline bool
157 innermost_loop_behavior_hash::equal (const value_type &e1,
158 const compare_type &e2)
159 {
160 if ((e1->base_address && !e2->base_address)
161 || (!e1->base_address && e2->base_address)
162 || (!e1->offset && e2->offset)
163 || (e1->offset && !e2->offset)
164 || (!e1->init && e2->init)
165 || (e1->init && !e2->init)
166 || (!e1->step && e2->step)
167 || (e1->step && !e2->step))
168 return false;
169
170 if (e1->base_address && e2->base_address
171 && !operand_equal_p (e1->base_address, e2->base_address, 0))
172 return false;
173 if (e1->offset && e2->offset
174 && !operand_equal_p (e1->offset, e2->offset, 0))
175 return false;
176 if (e1->init && e2->init
177 && !operand_equal_p (e1->init, e2->init, 0))
178 return false;
179 if (e1->step && e2->step
180 && !operand_equal_p (e1->step, e2->step, 0))
181 return false;
182
183 return true;
184 }
185
/* List of basic blocks in if-conversion-suitable order.  */
static basic_block *ifc_bbs;

/* Hash table to store <DR's innermost loop behavior, DR> pairs.  The
   first DR inserted for a behavior acts as the "master" DR.  */
static hash_map<innermost_loop_behavior_hash,
		data_reference_p> *innermost_DR_map;

/* Hash table to store <base reference, DR> pairs.  */
static hash_map<tree_operand_hash, data_reference_p> *baseref_DR_map;
195
/* Structure used to predicate basic blocks.  This is attached to the
   ->aux field of the BBs in the loop to be if-converted.  Allocated by
   init_bb_predicate and freed by free_bb_predicate.  */
struct bb_predicate {

  /* The condition under which this basic block is executed.  */
  tree predicate;

  /* PREDICATE is gimplified, and the sequence of statements is
     recorded here, in order to avoid the duplication of computations
     that occur in previous conditions.  See PR44483.  */
  gimple_seq predicate_gimplified_stmts;
};
208
/* Returns true when the basic block BB has a predicate, i.e. when a
   bb_predicate structure has been attached to BB->aux.  */

static inline bool
bb_has_predicate (basic_block bb)
{
  return bb->aux != NULL;
}
216
/* Returns the gimplified predicate for basic block BB.  BB must have
   had init_bb_predicate called on it.  */

static inline tree
bb_predicate (basic_block bb)
{
  return ((struct bb_predicate *) bb->aux)->predicate;
}
224
/* Sets the gimplified predicate COND for basic block BB.  COND must be
   a valid GIMPLE condition expression, optionally wrapped in a single
   TRUTH_NOT_EXPR; anything else trips the assert.  */

static inline void
set_bb_predicate (basic_block bb, tree cond)
{
  gcc_assert ((TREE_CODE (cond) == TRUTH_NOT_EXPR
	       && is_gimple_condexpr (TREE_OPERAND (cond, 0)))
	      || is_gimple_condexpr (cond));
  ((struct bb_predicate *) bb->aux)->predicate = cond;
}
235
/* Returns the sequence of statements of the gimplification of the
   predicate for basic block BB.  May be NULL when nothing has been
   gimplified for BB yet.  */

static inline gimple_seq
bb_predicate_gimplified_stmts (basic_block bb)
{
  return ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts;
}
244
/* Sets the sequence of statements STMTS of the gimplification of the
   predicate for basic block BB.  Overwrites any previous sequence
   without freeing it, so callers release the old one first.  */

static inline void
set_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
{
  ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts = stmts;
}
253
/* Adds the sequence of statements STMTS to the sequence of statements
   of the predicate for basic block BB.  The statements are appended to
   the side sequence; they are not inserted into any basic block.  */

static inline void
add_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
{
  gimple_seq_add_seq_without_update
    (&(((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts), stmts);
}
263
264 /* Initializes to TRUE the predicate of basic block BB. */
265
266 static inline void
267 init_bb_predicate (basic_block bb)
268 {
269 bb->aux = XNEW (struct bb_predicate);
270 set_bb_predicate_gimplified_stmts (bb, NULL);
271 set_bb_predicate (bb, boolean_true_node);
272 }
273
/* Release the SSA_NAMEs associated with the predicate of basic block BB.  */

static inline void
release_bb_predicate (basic_block bb)
{
  gimple_seq stmts = bb_predicate_gimplified_stmts (bb);
  if (stmts)
    {
      /* Ensure that these stmts haven't yet been added to a bb:
	 discarding a statement that already belongs to a basic block
	 would leave a dangling reference there.  */
      if (flag_checking)
	for (gimple_stmt_iterator i = gsi_start (stmts);
	     !gsi_end_p (i); gsi_next (&i))
	  gcc_assert (! gimple_bb (gsi_stmt (i)));

      /* Discard them.  */
      gimple_seq_discard (stmts);
      set_bb_predicate_gimplified_stmts (bb, NULL);
    }
}
293
294 /* Free the predicate of basic block BB. */
295
296 static inline void
297 free_bb_predicate (basic_block bb)
298 {
299 if (!bb_has_predicate (bb))
300 return;
301
302 release_bb_predicate (bb);
303 free (bb->aux);
304 bb->aux = NULL;
305 }
306
307 /* Reinitialize predicate of BB with the true predicate. */
308
309 static inline void
310 reset_bb_predicate (basic_block bb)
311 {
312 if (!bb_has_predicate (bb))
313 init_bb_predicate (bb);
314 else
315 {
316 release_bb_predicate (bb);
317 set_bb_predicate (bb, boolean_true_node);
318 }
319 }
320
/* Returns a new SSA_NAME of type TYPE that is assigned the value of
   the expression EXPR.  Inserts the statement created for this
   computation before GSI and leaves the iterator GSI at the same
   statement.  */

static tree
ifc_temp_var (tree type, tree expr, gimple_stmt_iterator *gsi)
{
  tree new_name = make_temp_ssa_name (type, NULL, "_ifc_");
  gimple *stmt = gimple_build_assign (new_name, expr);
  /* Copy the virtual use of the statement we insert before, so any
     memory read performed by EXPR is anchored at this point of the
     vuse chain.  */
  gimple_set_vuse (stmt, gimple_vuse (gsi_stmt (*gsi)));
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return new_name;
}
335
336 /* Return true when COND is a false predicate. */
337
338 static inline bool
339 is_false_predicate (tree cond)
340 {
341 return (cond != NULL_TREE
342 && (cond == boolean_false_node
343 || integer_zerop (cond)));
344 }
345
346 /* Return true when COND is a true predicate. */
347
348 static inline bool
349 is_true_predicate (tree cond)
350 {
351 return (cond == NULL_TREE
352 || cond == boolean_true_node
353 || integer_onep (cond));
354 }
355
356 /* Returns true when BB has a predicate that is not trivial: true or
357 NULL_TREE. */
358
359 static inline bool
360 is_predicated (basic_block bb)
361 {
362 return !is_true_predicate (bb_predicate (bb));
363 }
364
/* Parses the predicate COND and returns its comparison code and
   operands OP0 and OP1.  Returns ERROR_MARK when COND cannot be
   reduced to a single comparison.  */

static enum tree_code
parse_predicate (tree cond, tree *op0, tree *op1)
{
  gimple *s;

  /* For an SSA name, look through its defining assignment.  */
  if (TREE_CODE (cond) == SSA_NAME
      && is_gimple_assign (s = SSA_NAME_DEF_STMT (cond)))
    {
      if (TREE_CODE_CLASS (gimple_assign_rhs_code (s)) == tcc_comparison)
	{
	  *op0 = gimple_assign_rhs1 (s);
	  *op1 = gimple_assign_rhs2 (s);
	  return gimple_assign_rhs_code (s);
	}

      else if (gimple_assign_rhs_code (s) == TRUTH_NOT_EXPR)
	{
	  /* Recurse on the negated operand and invert the resulting
	     comparison; invert_tree_comparison may itself yield
	     ERROR_MARK (e.g. for float compares when NaNs are
	     honored).  */
	  tree op = gimple_assign_rhs1 (s);
	  tree type = TREE_TYPE (op);
	  enum tree_code code = parse_predicate (op, op0, op1);

	  return code == ERROR_MARK ? ERROR_MARK
	    : invert_tree_comparison (code, HONOR_NANS (type));
	}

      return ERROR_MARK;
    }

  /* A bare comparison tree.  */
  if (COMPARISON_CLASS_P (cond))
    {
      *op0 = TREE_OPERAND (cond, 0);
      *op1 = TREE_OPERAND (cond, 1);
      return TREE_CODE (cond);
    }

  return ERROR_MARK;
}
405
406 /* Returns the fold of predicate C1 OR C2 at location LOC. */
407
408 static tree
409 fold_or_predicates (location_t loc, tree c1, tree c2)
410 {
411 tree op1a, op1b, op2a, op2b;
412 enum tree_code code1 = parse_predicate (c1, &op1a, &op1b);
413 enum tree_code code2 = parse_predicate (c2, &op2a, &op2b);
414
415 if (code1 != ERROR_MARK && code2 != ERROR_MARK)
416 {
417 tree t = maybe_fold_or_comparisons (code1, op1a, op1b,
418 code2, op2a, op2b);
419 if (t)
420 return t;
421 }
422
423 return fold_build2_loc (loc, TRUTH_OR_EXPR, boolean_type_node, c1, c2);
424 }
425
426 /* Returns either a COND_EXPR or the folded expression if the folded
427 expression is a MIN_EXPR, a MAX_EXPR, an ABS_EXPR,
428 a constant or a SSA_NAME. */
429
430 static tree
431 fold_build_cond_expr (tree type, tree cond, tree rhs, tree lhs)
432 {
433 tree rhs1, lhs1, cond_expr;
434
435 /* If COND is comparison r != 0 and r has boolean type, convert COND
436 to SSA_NAME to accept by vect bool pattern. */
437 if (TREE_CODE (cond) == NE_EXPR)
438 {
439 tree op0 = TREE_OPERAND (cond, 0);
440 tree op1 = TREE_OPERAND (cond, 1);
441 if (TREE_CODE (op0) == SSA_NAME
442 && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
443 && (integer_zerop (op1)))
444 cond = op0;
445 }
446 cond_expr = fold_ternary (COND_EXPR, type, cond, rhs, lhs);
447
448 if (cond_expr == NULL_TREE)
449 return build3 (COND_EXPR, type, cond, rhs, lhs);
450
451 STRIP_USELESS_TYPE_CONVERSION (cond_expr);
452
453 if (is_gimple_val (cond_expr))
454 return cond_expr;
455
456 if (TREE_CODE (cond_expr) == ABS_EXPR)
457 {
458 rhs1 = TREE_OPERAND (cond_expr, 1);
459 STRIP_USELESS_TYPE_CONVERSION (rhs1);
460 if (is_gimple_val (rhs1))
461 return build1 (ABS_EXPR, type, rhs1);
462 }
463
464 if (TREE_CODE (cond_expr) == MIN_EXPR
465 || TREE_CODE (cond_expr) == MAX_EXPR)
466 {
467 lhs1 = TREE_OPERAND (cond_expr, 0);
468 STRIP_USELESS_TYPE_CONVERSION (lhs1);
469 rhs1 = TREE_OPERAND (cond_expr, 1);
470 STRIP_USELESS_TYPE_CONVERSION (rhs1);
471 if (is_gimple_val (rhs1) && is_gimple_val (lhs1))
472 return build2 (TREE_CODE (cond_expr), type, lhs1, rhs1);
473 }
474 return build3 (COND_EXPR, type, cond, rhs, lhs);
475 }
476
/* Add condition NC to the predicate list of basic block BB.  LOOP is
   the loop to be if-converted.  Use predicate of cd-equivalent block
   for join bb if it exists: we call basic blocks bb1 and bb2
   cd-equivalent if they are executed under the same condition.  */

static inline void
add_to_predicate_list (struct loop *loop, basic_block bb, tree nc)
{
  tree bc, *tp;
  basic_block dom_bb;

  /* A trivially true condition adds no information.  */
  if (is_true_predicate (nc))
    return;

  /* If dominance tells us this basic block is always executed,
     don't record any predicates for it.  */
  if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
    return;

  dom_bb = get_immediate_dominator (CDI_DOMINATORS, bb);
  /* We use notion of cd equivalence to get simpler predicate for
     join block, e.g. if join block has 2 predecessors with predicates
     p1 & p2 and p1 & !p2, we'd like to get p1 for it instead of
     p1 & p2 | p1 & !p2.  */
  if (dom_bb != loop->header
      && get_immediate_dominator (CDI_POST_DOMINATORS, dom_bb) == bb)
    {
      gcc_assert (flow_bb_inside_loop_p (loop, dom_bb));
      bc = bb_predicate (dom_bb);
      if (!is_true_predicate (bc))
	set_bb_predicate (bb, bc);
      else
	gcc_assert (is_true_predicate (bb_predicate (bb)));
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Use predicate of bb#%d for bb#%d\n",
		 dom_bb->index, bb->index);
      return;
    }

  /* OR the new condition NC into whatever predicate BB already has;
     if the disjunction folds to true the block is unconditional.  */
  if (!is_predicated (bb))
    bc = nc;
  else
    {
      bc = bb_predicate (bb);
      bc = fold_or_predicates (EXPR_LOCATION (bc), nc, bc);
      if (is_true_predicate (bc))
	{
	  reset_bb_predicate (bb);
	  return;
	}
    }

  /* Allow a TRUTH_NOT_EXPR around the main predicate.  */
  if (TREE_CODE (bc) == TRUTH_NOT_EXPR)
    tp = &TREE_OPERAND (bc, 0);
  else
    tp = &bc;
  if (!is_gimple_condexpr (*tp))
    {
      gimple_seq stmts;
      /* Gimplify the predicate and keep the generated statements on
	 BB's side sequence (see bb_predicate).  */
      *tp = force_gimple_operand_1 (*tp, &stmts, is_gimple_condexpr, NULL_TREE);
      add_bb_predicate_gimplified_stmts (bb, stmts);
    }
  set_bb_predicate (bb, bc);
}
542
/* Add the condition COND to the previous condition PREV_COND, and add
   this to the predicate list of the destination of edge E.  LOOP is
   the loop to be if-converted.  */

static void
add_to_dst_predicate_list (struct loop *loop, edge e,
			   tree prev_cond, tree cond)
{
  /* Ignore edges that leave the loop.  */
  if (!flow_bb_inside_loop_p (loop, e->dest))
    return;

  /* Conjoin COND with the predicate guarding the source block, unless
     that predicate is trivially true.  */
  if (!is_true_predicate (prev_cond))
    cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
			prev_cond, cond);

  /* A destination dominating the latch is always executed and needs
     no predicate.  */
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, e->dest))
    add_to_predicate_list (loop, e->dest, cond);
}
561
562 /* Return true if one of the successor edges of BB exits LOOP. */
563
564 static bool
565 bb_with_exit_edge_p (struct loop *loop, basic_block bb)
566 {
567 edge e;
568 edge_iterator ei;
569
570 FOR_EACH_EDGE (e, ei, bb->succs)
571 if (loop_exit_edge_p (loop, e))
572 return true;
573
574 return false;
575 }
576
/* Given PHI which has more than two arguments, this function checks if
   it's if-convertible by degenerating its arguments.  Specifically, if
   below two conditions are satisfied:

     1) Number of PHI arguments with different values equals to 2 and one
	argument has the only occurrence.
     2) The edge corresponding to the unique argument isn't critical edge.

   Such PHI can be handled as PHIs have only two arguments.  For example,
   below PHI:

     res = PHI <A_1(e1), A_1(e2), A_2(e3)>;

   can be transformed into:

     res = (predicate of e3) ? A_2 : A_1;

   Return TRUE if it is the case, FALSE otherwise.  */

static bool
phi_convertible_by_degenerating_args (gphi *phi)
{
  edge e;
  tree arg, t1 = NULL, t2 = NULL;
  /* T1/T2 hold the (at most) two distinct argument values; N1/N2 count
     their occurrences and I1/I2 remember an argument index for each.  */
  unsigned int i, i1 = 0, i2 = 0, n1 = 0, n2 = 0;
  unsigned int num_args = gimple_phi_num_args (phi);

  gcc_assert (num_args > 2);

  /* Bucket the arguments into at most two distinct values; a third
     distinct value disqualifies the PHI.  */
  for (i = 0; i < num_args; i++)
    {
      arg = gimple_phi_arg_def (phi, i);
      if (t1 == NULL || operand_equal_p (t1, arg, 0))
	{
	  n1++;
	  i1 = i;
	  t1 = arg;
	}
      else if (t2 == NULL || operand_equal_p (t2, arg, 0))
	{
	  n2++;
	  i2 = i;
	  t2 = arg;
	}
      else
	return false;
    }

  /* One of the two values must occur exactly once.  */
  if (n1 != 1 && n2 != 1)
    return false;

  /* Check if the edge corresponding to the unique arg is critical.  */
  e = gimple_phi_arg_edge (phi, (n1 == 1) ? i1 : i2);
  if (EDGE_COUNT (e->src->succs) > 1)
    return false;

  return true;
}
635
636 /* Return true when PHI is if-convertible. PHI is part of loop LOOP
637 and it belongs to basic block BB. Note at this point, it is sure
638 that PHI is if-convertible. This function updates global variable
639 ANY_COMPLICATED_PHI if PHI is complicated. */
640
641 static bool
642 if_convertible_phi_p (struct loop *loop, basic_block bb, gphi *phi)
643 {
644 if (dump_file && (dump_flags & TDF_DETAILS))
645 {
646 fprintf (dump_file, "-------------------------\n");
647 print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
648 }
649
650 if (bb != loop->header
651 && gimple_phi_num_args (phi) > 2
652 && !phi_convertible_by_degenerating_args (phi))
653 any_complicated_phi = true;
654
655 return true;
656 }
657
/* Records the status of a data reference.  This struct is attached to
   each DR->aux field.  */

struct ifc_dr {
  /* True when the reference is read or written unconditionally (its
     accumulated rw_predicate folded to true).  */
  bool rw_unconditionally;
  /* True when the reference is written unconditionally.  */
  bool w_unconditionally;
  /* True when the base object is written at least once unconditionally.  */
  bool written_at_least_once;

  /* OR of the predicates of all reads and writes of this reference.  */
  tree rw_predicate;
  /* OR of the predicates of all writes of this reference.  */
  tree w_predicate;
  /* OR of the predicates of all writes to the same base object.  */
  tree base_w_predicate;
};

#define IFC_DR(DR) ((struct ifc_dr *) (DR)->aux)
#define DR_BASE_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->written_at_least_once)
#define DR_RW_UNCONDITIONALLY(DR) (IFC_DR (DR)->rw_unconditionally)
#define DR_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->w_unconditionally)
675
/* Iterates over DR's and stores refs, DR and base refs, DR pairs in
   HASH tables.  While storing them in HASH table, it checks if the
   reference is unconditionally read or written and stores that as a flag
   information.  For base reference it checks if it is written at least
   once unconditionally and stores it as flag information along with DR.
   In other words for every data reference A in STMT there exist other
   accesses to a data reference with the same base with predicates that
   add up (OR-up) to the true predicate: this ensures that the data
   reference A is touched (read or written) on every iteration of the
   if-converted loop.  */
static void
hash_memrefs_baserefs_and_store_DRs_read_written_info (data_reference_p a)
{

  data_reference_p *master_dr, *base_master_dr;
  tree base_ref = DR_BASE_OBJECT (a);
  innermost_loop_behavior *innermost = &DR_INNERMOST (a);
  /* Predicate of the basic block containing the reference.  */
  tree ca = bb_predicate (gimple_bb (DR_STMT (a)));
  bool exist1, exist2;

  /* The first DR recorded for a given innermost behavior becomes the
     "master" under which predicates of all matching DRs are OR-ed.  */
  master_dr = &innermost_DR_map->get_or_insert (innermost, &exist1);
  if (!exist1)
    *master_dr = a;

  if (DR_IS_WRITE (a))
    {
      IFC_DR (*master_dr)->w_predicate
	= fold_or_predicates (UNKNOWN_LOCATION, ca,
			      IFC_DR (*master_dr)->w_predicate);
      if (is_true_predicate (IFC_DR (*master_dr)->w_predicate))
	DR_W_UNCONDITIONALLY (*master_dr) = true;
    }
  IFC_DR (*master_dr)->rw_predicate
    = fold_or_predicates (UNKNOWN_LOCATION, ca,
			  IFC_DR (*master_dr)->rw_predicate);
  if (is_true_predicate (IFC_DR (*master_dr)->rw_predicate))
    DR_RW_UNCONDITIONALLY (*master_dr) = true;

  if (DR_IS_WRITE (a))
    {
      /* Likewise accumulate write predicates per base object.  */
      base_master_dr = &baseref_DR_map->get_or_insert (base_ref, &exist2);
      if (!exist2)
	*base_master_dr = a;
      IFC_DR (*base_master_dr)->base_w_predicate
	= fold_or_predicates (UNKNOWN_LOCATION, ca,
			      IFC_DR (*base_master_dr)->base_w_predicate);
      if (is_true_predicate (IFC_DR (*base_master_dr)->base_w_predicate))
	DR_BASE_W_UNCONDITIONALLY (*base_master_dr) = true;
    }
}
726
/* Return TRUE if can prove the index IDX of an array reference REF is
   within array bound.  Return false otherwise.  DTA is the loop being
   analyzed (passed through for_each_index).  */

static bool
idx_within_array_bound (tree ref, tree *idx, void *dta)
{
  bool overflow;
  widest_int niter, valid_niter, delta, wi_step;
  tree ev, init, step;
  tree low, high;
  struct loop *loop = (struct loop*) dta;

  /* Only support within-bound access for array references.  */
  if (TREE_CODE (ref) != ARRAY_REF)
    return false;

  /* For arrays at the end of the structure, we are not guaranteed that they
     do not really extend over their declared size.  However, for arrays of
     size greater than one, this is unlikely to be intended.  */
  if (array_at_struct_end_p (ref))
    return false;

  /* Derive the index's evolution in LOOP: initial value and per
     iteration step, both of which must be compile-time constants.  */
  ev = analyze_scalar_evolution (loop, *idx);
  ev = instantiate_parameters (loop, ev);
  init = initial_condition (ev);
  step = evolution_part_in_loop_num (ev, loop->num);

  if (!init || TREE_CODE (init) != INTEGER_CST
      || (step && TREE_CODE (step) != INTEGER_CST))
    return false;

  low = array_ref_low_bound (ref);
  high = array_ref_up_bound (ref);

  /* The case of nonconstant bounds could be handled, but it would be
     complicated.  */
  if (TREE_CODE (low) != INTEGER_CST
      || !high || TREE_CODE (high) != INTEGER_CST)
    return false;

  /* Check if the initial idx is within bound.  */
  if (wi::to_widest (init) < wi::to_widest (low)
      || wi::to_widest (init) > wi::to_widest (high))
    return false;

  /* The idx is always within bound.  */
  if (!step || integer_zerop (step))
    return true;

  if (!max_loop_iterations (loop, &niter))
    return false;

  /* Distance from the initial index to the bound being approached,
     and the absolute value of the step.  */
  if (wi::to_widest (step) < 0)
    {
      delta = wi::to_widest (init) - wi::to_widest (low);
      wi_step = -wi::to_widest (step);
    }
  else
    {
      delta = wi::to_widest (high) - wi::to_widest (init);
      wi_step = wi::to_widest (step);
    }

  valid_niter = wi::div_floor (delta, wi_step, SIGNED, &overflow);
  /* The iteration space of idx is within array bound.  */
  if (!overflow && niter <= valid_niter)
    return true;

  return false;
}
797
/* Return TRUE if ref is a within bound array reference, i.e. every
   index used by REF stays inside its array's declared bounds for all
   iterations of the containing loop.  */

static bool
ref_within_array_bound (gimple *stmt, tree ref)
{
  struct loop *loop = loop_containing_stmt (stmt);

  gcc_assert (loop != NULL);
  return for_each_index (&ref, idx_within_array_bound, loop);
}
808
809
810 /* Given a memory reference expression T, return TRUE if base object
811 it refers to is writable. The base object of a memory reference
812 is the main object being referenced, which is returned by function
813 get_base_address. */
814
815 static bool
816 base_object_writable (tree ref)
817 {
818 tree base_tree = get_base_address (ref);
819
820 return (base_tree
821 && DECL_P (base_tree)
822 && decl_binds_to_current_def_p (base_tree)
823 && !TREE_READONLY (base_tree));
824 }
825
/* Return true when the memory references of STMT won't trap in the
   if-converted code.  There are two things that we have to check for:

   - writes to memory occur to writable memory: if-conversion of
   memory writes transforms the conditional memory writes into
   unconditional writes, i.e. "if (cond) A[i] = foo" is transformed
   into "A[i] = cond ? foo : A[i]", and as the write to memory may not
   be executed at all in the original code, it may be a readonly
   memory.  To check that A is not const-qualified, we check that
   there exists at least an unconditional write to A in the current
   function.

   - reads or writes to memory are valid memory accesses for every
   iteration.  To check that the memory accesses are correctly formed
   and that we are allowed to read and write in these locations, we
   check that the memory accesses to be if-converted occur at every
   iteration unconditionally.

   Returns true for the memory reference in STMT, same memory reference
   is read or written unconditionally at least once and the base memory
   reference is written unconditionally once.  This is to check reference
   will not write fault.  Also returns true if the memory reference is
   unconditionally read once then we are conditionally writing to memory
   which is defined as read and write and is bound to the definition
   we are seeing.  */
static bool
ifcvt_memrefs_wont_trap (gimple *stmt, vec<data_reference_p> drs)
{
  data_reference_p *master_dr, *base_master_dr;
  /* DRs were numbered via gimple_uid during analysis; recover STMT's DR.  */
  data_reference_p a = drs[gimple_uid (stmt) - 1];

  tree base = DR_BASE_OBJECT (a);
  innermost_loop_behavior *innermost = &DR_INNERMOST (a);

  gcc_assert (DR_STMT (a) == stmt);
  gcc_assert (DR_BASE_ADDRESS (a) || DR_OFFSET (a)
              || DR_INIT (a) || DR_STEP (a));

  master_dr = innermost_DR_map->get (innermost);
  gcc_assert (master_dr != NULL);

  base_master_dr = baseref_DR_map->get (base);

  /* If a is unconditionally written to it doesn't trap.  */
  if (DR_W_UNCONDITIONALLY (*master_dr))
    return true;

  /* If a is unconditionally accessed then ...

     Even if a is a conditional access, we can treat it as an
     unconditional one if it's an array reference and all its indices
     are within array bounds.  */
  if (DR_RW_UNCONDITIONALLY (*master_dr)
      || ref_within_array_bound (stmt, DR_REF (a)))
    {
      /* an unconditional read won't trap.  */
      if (DR_IS_READ (a))
	return true;

      /* an unconditional write won't trap if the base is written
         to unconditionally.  */
      if (base_master_dr
	  && DR_BASE_W_UNCONDITIONALLY (*base_master_dr))
	return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES);
      /* or the base is known to be not readonly.  */
      else if (base_object_writable (DR_REF (a)))
	return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES);
    }

  return false;
}
897
/* Return true if STMT could be converted into a masked load or store
   (conditional load or store based on a mask computed from bb predicate).  */

static bool
ifcvt_can_use_mask_load_store (gimple *stmt)
{
  tree lhs, ref;
  machine_mode mode;
  basic_block bb = gimple_bb (stmt);
  bool is_load;

  /* Only consider masked accesses when vectorization of this loop is
     requested and STMT is a plain, non-volatile single assignment.  */
  if (!(flag_tree_loop_vectorize || bb->loop_father->force_vectorize)
      || bb->loop_father->dont_vectorize
      || !gimple_assign_single_p (stmt)
      || gimple_has_volatile_ops (stmt))
    return false;

  /* Check whether this is a load or store.  */
  lhs = gimple_assign_lhs (stmt);
  if (gimple_store_p (stmt))
    {
      if (!is_gimple_val (gimple_assign_rhs1 (stmt)))
	return false;
      is_load = false;
      ref = lhs;
    }
  else if (gimple_assign_load_p (stmt))
    {
      is_load = true;
      ref = gimple_assign_rhs1 (stmt);
    }
  else
    return false;

  if (may_be_nonaddressable_p (ref))
    return false;

  /* Mask should be integer mode of the same size as the load/store
     mode.  */
  mode = TYPE_MODE (TREE_TYPE (lhs));
  if (!int_mode_for_mode (mode).exists () || VECTOR_MODE_P (mode))
    return false;

  /* Finally ask the target whether a masked load/store of this mode
     is supported at all.  */
  if (can_vec_mask_load_store_p (mode, VOIDmode, is_load))
    return true;

  return false;
}
946
/* Return true when STMT is if-convertible.

   GIMPLE_ASSIGN statement is not if-convertible if,
   - it is not movable,
   - it could trap,
   - LHS is not var decl.  */

static bool
if_convertible_gimple_assign_stmt_p (gimple *stmt,
				     vec<data_reference_p> refs)
{
  tree lhs = gimple_assign_lhs (stmt);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "-------------------------\n");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
    }

  if (!is_gimple_reg_type (TREE_TYPE (lhs)))
    return false;

  /* Some of these constraints might be too conservative.  */
  if (stmt_ends_bb_p (stmt)
      || gimple_has_volatile_ops (stmt)
      || (TREE_CODE (lhs) == SSA_NAME
          && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
      || gimple_has_side_effects (stmt))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "stmt not suitable for ifcvt\n");
      return false;
    }

  /* tree-into-ssa.c uses GF_PLF_1, so avoid it, because
     in between if_convertible_loop_p and combine_blocks
     we can perform loop versioning.  */
  gimple_set_plf (stmt, GF_PLF_2, false);

  if ((! gimple_vuse (stmt)
       || gimple_could_trap_p_1 (stmt, false, false)
       || ! ifcvt_memrefs_wont_trap (stmt, refs))
      && gimple_could_trap_p (stmt))
    {
      /* A potentially trapping memory access may still be acceptable
	 if the target can do it as a masked load/store; mark the stmt
	 with GF_PLF_2 so it is predicated later.  */
      if (ifcvt_can_use_mask_load_store (stmt))
	{
	  gimple_set_plf (stmt, GF_PLF_2, true);
	  any_pred_load_store = true;
	  return true;
	}
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "tree could trap...\n");
      return false;
    }

  /* When if-converting stores force versioning, likewise if we
     ended up generating store data races.  */
  if (gimple_vdef (stmt))
    any_pred_load_store = true;

  return true;
}
1009
1010 /* Return true when STMT is if-convertible.
1011
1012 A statement is if-convertible if:
1013 - it is an if-convertible GIMPLE_ASSIGN,
1014 - it is a GIMPLE_LABEL or a GIMPLE_COND,
1015 - it is builtins call. */
1016
1017 static bool
1018 if_convertible_stmt_p (gimple *stmt, vec<data_reference_p> refs)
1019 {
1020 switch (gimple_code (stmt))
1021 {
1022 case GIMPLE_LABEL:
1023 case GIMPLE_DEBUG:
1024 case GIMPLE_COND:
1025 return true;
1026
1027 case GIMPLE_ASSIGN:
1028 return if_convertible_gimple_assign_stmt_p (stmt, refs);
1029
1030 case GIMPLE_CALL:
1031 {
1032 tree fndecl = gimple_call_fndecl (stmt);
1033 if (fndecl)
1034 {
1035 int flags = gimple_call_flags (stmt);
1036 if ((flags & ECF_CONST)
1037 && !(flags & ECF_LOOPING_CONST_OR_PURE)
1038 /* We can only vectorize some builtins at the moment,
1039 so restrict if-conversion to those. */
1040 && DECL_BUILT_IN (fndecl))
1041 return true;
1042 }
1043 return false;
1044 }
1045
1046 default:
1047 /* Don't know what to do with 'em so don't do anything. */
1048 if (dump_file && (dump_flags & TDF_DETAILS))
1049 {
1050 fprintf (dump_file, "don't know what to do\n");
1051 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1052 }
1053 return false;
1054 }
1055
1056 return true;
1057 }
1058
/* Assumes that BB has more than 1 predecessor.
   Returns false if at least one predecessor edge is not critical
   and true otherwise.  */
1062
1063 static inline bool
1064 all_preds_critical_p (basic_block bb)
1065 {
1066 edge e;
1067 edge_iterator ei;
1068
1069 FOR_EACH_EDGE (e, ei, bb->preds)
1070 if (EDGE_COUNT (e->src->succs) == 1)
1071 return false;
1072 return true;
1073 }
1074
/* Returns true if at least one predecessor edge of BB is critical.  */
1076 static inline bool
1077 has_pred_critical_p (basic_block bb)
1078 {
1079 edge e;
1080 edge_iterator ei;
1081
1082 FOR_EACH_EDGE (e, ei, bb->preds)
1083 if (EDGE_COUNT (e->src->succs) > 1)
1084 return true;
1085 return false;
1086 }
1087
1088 /* Return true when BB is if-convertible. This routine does not check
1089 basic block's statements and phis.
1090
1091 A basic block is not if-convertible if:
1092 - it is non-empty and it is after the exit block (in BFS order),
1093 - it is after the exit block but before the latch,
1094 - its edges are not normal.
1095
1096 EXIT_BB is the basic block containing the exit of the LOOP. BB is
1097 inside LOOP. */
1098
1099 static bool
1100 if_convertible_bb_p (struct loop *loop, basic_block bb, basic_block exit_bb)
1101 {
1102 edge e;
1103 edge_iterator ei;
1104
1105 if (dump_file && (dump_flags & TDF_DETAILS))
1106 fprintf (dump_file, "----------[%d]-------------\n", bb->index);
1107
1108 if (EDGE_COUNT (bb->succs) > 2)
1109 return false;
1110
1111 if (exit_bb)
1112 {
1113 if (bb != loop->latch)
1114 {
1115 if (dump_file && (dump_flags & TDF_DETAILS))
1116 fprintf (dump_file, "basic block after exit bb but before latch\n");
1117 return false;
1118 }
1119 else if (!empty_block_p (bb))
1120 {
1121 if (dump_file && (dump_flags & TDF_DETAILS))
1122 fprintf (dump_file, "non empty basic block after exit bb\n");
1123 return false;
1124 }
1125 else if (bb == loop->latch
1126 && bb != exit_bb
1127 && !dominated_by_p (CDI_DOMINATORS, bb, exit_bb))
1128 {
1129 if (dump_file && (dump_flags & TDF_DETAILS))
1130 fprintf (dump_file, "latch is not dominated by exit_block\n");
1131 return false;
1132 }
1133 }
1134
1135 /* Be less adventurous and handle only normal edges. */
1136 FOR_EACH_EDGE (e, ei, bb->succs)
1137 if (e->flags & (EDGE_EH | EDGE_ABNORMAL | EDGE_IRREDUCIBLE_LOOP))
1138 {
1139 if (dump_file && (dump_flags & TDF_DETAILS))
1140 fprintf (dump_file, "Difficult to handle edges\n");
1141 return false;
1142 }
1143
1144 return true;
1145 }
1146
1147 /* Return true when all predecessor blocks of BB are visited. The
1148 VISITED bitmap keeps track of the visited blocks. */
1149
1150 static bool
1151 pred_blocks_visited_p (basic_block bb, bitmap *visited)
1152 {
1153 edge e;
1154 edge_iterator ei;
1155 FOR_EACH_EDGE (e, ei, bb->preds)
1156 if (!bitmap_bit_p (*visited, e->src->index))
1157 return false;
1158
1159 return true;
1160 }
1161
1162 /* Get body of a LOOP in suitable order for if-conversion. It is
1163 caller's responsibility to deallocate basic block list.
1164 If-conversion suitable order is, breadth first sort (BFS) order
1165 with an additional constraint: select a block only if all its
1166 predecessors are already selected. */
1167
1168 static basic_block *
1169 get_loop_body_in_if_conv_order (const struct loop *loop)
1170 {
1171 basic_block *blocks, *blocks_in_bfs_order;
1172 basic_block bb;
1173 bitmap visited;
1174 unsigned int index = 0;
1175 unsigned int visited_count = 0;
1176
1177 gcc_assert (loop->num_nodes);
1178 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1179
1180 blocks = XCNEWVEC (basic_block, loop->num_nodes);
1181 visited = BITMAP_ALLOC (NULL);
1182
1183 blocks_in_bfs_order = get_loop_body_in_bfs_order (loop);
1184
1185 index = 0;
1186 while (index < loop->num_nodes)
1187 {
1188 bb = blocks_in_bfs_order [index];
1189
1190 if (bb->flags & BB_IRREDUCIBLE_LOOP)
1191 {
1192 free (blocks_in_bfs_order);
1193 BITMAP_FREE (visited);
1194 free (blocks);
1195 return NULL;
1196 }
1197
1198 if (!bitmap_bit_p (visited, bb->index))
1199 {
1200 if (pred_blocks_visited_p (bb, &visited)
1201 || bb == loop->header)
1202 {
1203 /* This block is now visited. */
1204 bitmap_set_bit (visited, bb->index);
1205 blocks[visited_count++] = bb;
1206 }
1207 }
1208
1209 index++;
1210
1211 if (index == loop->num_nodes
1212 && visited_count != loop->num_nodes)
1213 /* Not done yet. */
1214 index = 0;
1215 }
1216 free (blocks_in_bfs_order);
1217 BITMAP_FREE (visited);
1218 return blocks;
1219 }
1220
1221 /* Returns true when the analysis of the predicates for all the basic
1222 blocks in LOOP succeeded.
1223
1224 predicate_bbs first allocates the predicates of the basic blocks.
1225 These fields are then initialized with the tree expressions
1226 representing the predicates under which a basic block is executed
1227 in the LOOP. As the loop->header is executed at each iteration, it
1228 has the "true" predicate. Other statements executed under a
1229 condition are predicated with that condition, for example
1230
1231 | if (x)
1232 | S1;
1233 | else
1234 | S2;
1235
1236 S1 will be predicated with "x", and
1237 S2 will be predicated with "!x". */
1238
static void
predicate_bbs (loop_p loop)
{
  unsigned int i;

  /* Start every block of the loop with a fresh (true) predicate.  */
  for (i = 0; i < loop->num_nodes; i++)
    init_bb_predicate (ifc_bbs[i]);

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = ifc_bbs[i];
      tree cond;
      gimple *stmt;

      /* The loop latch and loop exit block are always executed and
	 have no extra conditions to be processed: skip them.  */
      if (bb == loop->latch
	  || bb_with_exit_edge_p (loop, bb))
	{
	  reset_bb_predicate (bb);
	  continue;
	}

      cond = bb_predicate (bb);
      stmt = last_stmt (bb);
      if (stmt && gimple_code (stmt) == GIMPLE_COND)
	{
	  tree c2;
	  edge true_edge, false_edge;
	  location_t loc = gimple_location (stmt);
	  /* Rebuild the comparison of the GIMPLE_COND as a tree
	     expression C so it can be propagated to the successors.  */
	  tree c = build2_loc (loc, gimple_cond_code (stmt),
			       boolean_type_node,
			       gimple_cond_lhs (stmt),
			       gimple_cond_rhs (stmt));

	  /* Add new condition into destination's predicate list.  */
	  extract_true_false_edges_from_block (gimple_bb (stmt),
					       &true_edge, &false_edge);

	  /* If C is true, then TRUE_EDGE is taken.  */
	  add_to_dst_predicate_list (loop, true_edge, unshare_expr (cond),
				     unshare_expr (c));

	  /* If C is false, then FALSE_EDGE is taken.  */
	  c2 = build1_loc (loc, TRUTH_NOT_EXPR, boolean_type_node,
			   unshare_expr (c));
	  add_to_dst_predicate_list (loop, false_edge,
				     unshare_expr (cond), c2);

	  /* The condition has been consumed by the two edges above.  */
	  cond = NULL_TREE;
	}

      /* If current bb has only one successor, then consider it as an
	 unconditional goto.  */
      if (single_succ_p (bb))
	{
	  basic_block bb_n = single_succ (bb);

	  /* The successor bb inherits the predicate of its
	     predecessor.  If there is no predicate in the predecessor
	     bb, then consider the successor bb as always executed.  */
	  if (cond == NULL_TREE)
	    cond = boolean_true_node;

	  add_to_predicate_list (loop, bb_n, cond);
	}
    }

  /* The loop header is always executed.  */
  reset_bb_predicate (loop->header);
  /* Neither the header nor the latch should have accumulated any
     gimplified predicate statements at this point.  */
  gcc_assert (bb_predicate_gimplified_stmts (loop->header) == NULL
	      && bb_predicate_gimplified_stmts (loop->latch) == NULL);
}
1312
1313 /* Build region by adding loop pre-header and post-header blocks. */
1314
1315 static vec<basic_block>
1316 build_region (struct loop *loop)
1317 {
1318 vec<basic_block> region = vNULL;
1319 basic_block exit_bb = NULL;
1320
1321 gcc_assert (ifc_bbs);
1322 /* The first element is loop pre-header. */
1323 region.safe_push (loop_preheader_edge (loop)->src);
1324
1325 for (unsigned int i = 0; i < loop->num_nodes; i++)
1326 {
1327 basic_block bb = ifc_bbs[i];
1328 region.safe_push (bb);
1329 /* Find loop postheader. */
1330 edge e;
1331 edge_iterator ei;
1332 FOR_EACH_EDGE (e, ei, bb->succs)
1333 if (loop_exit_edge_p (loop, e))
1334 {
1335 exit_bb = e->dest;
1336 break;
1337 }
1338 }
1339 /* The last element is loop post-header. */
1340 gcc_assert (exit_bb);
1341 region.safe_push (exit_bb);
1342 return region;
1343 }
1344
1345 /* Return true when LOOP is if-convertible. This is a helper function
1346 for if_convertible_loop_p. REFS and DDRS are initialized and freed
1347 in if_convertible_loop_p. */
1348
static bool
if_convertible_loop_p_1 (struct loop *loop, vec<data_reference_p> *refs)
{
  unsigned int i;
  basic_block exit_bb = NULL;
  vec<basic_block> region;

  /* Collect the loop's data references into REFS; give up if the
     analysis cannot determine them.  */
  if (find_data_references_in_loop (loop, refs) == chrec_dont_know)
    return false;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Allow statements that can be handled during if-conversion.  */
  ifc_bbs = get_loop_body_in_if_conv_order (loop);
  if (!ifc_bbs)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Irreducible loop\n");
      return false;
    }

  /* Check each basic block's shape; remember the exit block so that
     blocks coming after it can be constrained further.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = ifc_bbs[i];

      if (!if_convertible_bb_p (loop, bb, exit_bb))
	return false;

      if (bb_with_exit_edge_p (loop, bb))
	exit_bb = bb;
    }

  /* Reject any statement kind we do not handle, and clear the uids of
     the ones we do (the uids are reused below to number DR stmts).  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = ifc_bbs[i];
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	switch (gimple_code (gsi_stmt (gsi)))
	  {
	  case GIMPLE_LABEL:
	  case GIMPLE_ASSIGN:
	  case GIMPLE_CALL:
	  case GIMPLE_DEBUG:
	  case GIMPLE_COND:
	    gimple_set_uid (gsi_stmt (gsi), 0);
	    break;
	  default:
	    return false;
	  }
    }

  data_reference_p dr;

  /* These maps are consulted by the trap analysis done while checking
     statements below; they are deleted by the caller.  */
  innermost_DR_map
	  = new hash_map<innermost_loop_behavior_hash, data_reference_p>;
  baseref_DR_map = new hash_map<tree_operand_hash, data_reference_p>;

  /* Compute post-dominator tree locally.  */
  region = build_region (loop);
  calculate_dominance_info_for_region (CDI_POST_DOMINATORS, region);

  predicate_bbs (loop);

  /* Free post-dominator tree since it is not used after predication.  */
  free_dominance_info_for_region (cfun, CDI_POST_DOMINATORS, region);
  region.release ();

  /* Initialize the per-DR if-conversion data (freed by the caller)
     and record read/write predicates for each data reference.  */
  for (i = 0; refs->iterate (i, &dr); i++)
    {
      tree ref = DR_REF (dr);

      dr->aux = XNEW (struct ifc_dr);
      DR_BASE_W_UNCONDITIONALLY (dr) = false;
      DR_RW_UNCONDITIONALLY (dr) = false;
      DR_W_UNCONDITIONALLY (dr) = false;
      IFC_DR (dr)->rw_predicate = boolean_false_node;
      IFC_DR (dr)->w_predicate = boolean_false_node;
      IFC_DR (dr)->base_w_predicate = boolean_false_node;
      /* Give the DR's statement a 1-based uid so it can be related
	 back to its data reference later.  */
      if (gimple_uid (DR_STMT (dr)) == 0)
	gimple_set_uid (DR_STMT (dr), i + 1);

      /* If DR doesn't have innermost loop behavior or it's a compound
	 memory reference, we synthesize its innermost loop behavior
	 for hashing.  */
      if (TREE_CODE (ref) == COMPONENT_REF
	  || TREE_CODE (ref) == IMAGPART_EXPR
	  || TREE_CODE (ref) == REALPART_EXPR
	  || !(DR_BASE_ADDRESS (dr) || DR_OFFSET (dr)
	       || DR_INIT (dr) || DR_STEP (dr)))
	{
	  while (TREE_CODE (ref) == COMPONENT_REF
		 || TREE_CODE (ref) == IMAGPART_EXPR
		 || TREE_CODE (ref) == REALPART_EXPR)
	    ref = TREE_OPERAND (ref, 0);

	  memset (&DR_INNERMOST (dr), 0, sizeof (DR_INNERMOST (dr)));
	  DR_BASE_ADDRESS (dr) = ref;
	}
      hash_memrefs_baserefs_and_store_DRs_read_written_info (dr);
    }

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = ifc_bbs[i];
      gimple_stmt_iterator itr;

      /* Check the if-convertibility of statements in predicated BBs.  */
      if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
	for (itr = gsi_start_bb (bb); !gsi_end_p (itr); gsi_next (&itr))
	  if (!if_convertible_stmt_p (gsi_stmt (itr), *refs))
	    return false;
    }

  /* Checking PHIs needs to be done after stmts, as the fact whether there
     are any masked loads or stores affects the tests.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = ifc_bbs[i];
      gphi_iterator itr;

      for (itr = gsi_start_phis (bb); !gsi_end_p (itr); gsi_next (&itr))
	if (!if_convertible_phi_p (loop, bb, itr.phi ()))
	  return false;
    }

  if (dump_file)
    fprintf (dump_file, "Applying if-conversion\n");

  return true;
}
1480
1481 /* Return true when LOOP is if-convertible.
1482 LOOP is if-convertible if:
1483 - it is innermost,
1484 - it has two or more basic blocks,
1485 - it has only one exit,
1486 - loop header is not the exit edge,
1487 - if its basic blocks and phi nodes are if convertible. */
1488
1489 static bool
1490 if_convertible_loop_p (struct loop *loop)
1491 {
1492 edge e;
1493 edge_iterator ei;
1494 bool res = false;
1495 vec<data_reference_p> refs;
1496
1497 /* Handle only innermost loop. */
1498 if (!loop || loop->inner)
1499 {
1500 if (dump_file && (dump_flags & TDF_DETAILS))
1501 fprintf (dump_file, "not innermost loop\n");
1502 return false;
1503 }
1504
1505 /* If only one block, no need for if-conversion. */
1506 if (loop->num_nodes <= 2)
1507 {
1508 if (dump_file && (dump_flags & TDF_DETAILS))
1509 fprintf (dump_file, "less than 2 basic blocks\n");
1510 return false;
1511 }
1512
1513 /* More than one loop exit is too much to handle. */
1514 if (!single_exit (loop))
1515 {
1516 if (dump_file && (dump_flags & TDF_DETAILS))
1517 fprintf (dump_file, "multiple exits\n");
1518 return false;
1519 }
1520
1521 /* If one of the loop header's edge is an exit edge then do not
1522 apply if-conversion. */
1523 FOR_EACH_EDGE (e, ei, loop->header->succs)
1524 if (loop_exit_edge_p (loop, e))
1525 return false;
1526
1527 refs.create (5);
1528 res = if_convertible_loop_p_1 (loop, &refs);
1529
1530 data_reference_p dr;
1531 unsigned int i;
1532 for (i = 0; refs.iterate (i, &dr); i++)
1533 free (dr->aux);
1534
1535 free_data_refs (refs);
1536
1537 delete innermost_DR_map;
1538 innermost_DR_map = NULL;
1539
1540 delete baseref_DR_map;
1541 baseref_DR_map = NULL;
1542
1543 return res;
1544 }
1545
1546 /* Returns true if def-stmt for phi argument ARG is simple increment/decrement
1547 which is in predicated basic block.
1548 In fact, the following PHI pattern is searching:
1549 loop-header:
1550 reduc_1 = PHI <..., reduc_2>
1551 ...
1552 if (...)
1553 reduc_3 = ...
1554 reduc_2 = PHI <reduc_1, reduc_3>
1555
1556 ARG_0 and ARG_1 are correspondent PHI arguments.
1557 REDUC, OP0 and OP1 contain reduction stmt and its operands.
1558 EXTENDED is true if PHI has > 2 arguments. */
1559
static bool
is_cond_scalar_reduction (gimple *phi, gimple **reduc, tree arg_0, tree arg_1,
			  tree *op0, tree *op1, bool extended)
{
  tree lhs, r_op1, r_op2;
  gimple *stmt;
  gimple *header_phi = NULL;
  enum tree_code reduction_op;
  basic_block bb = gimple_bb (phi);
  struct loop *loop = bb->loop_father;
  edge latch_e = loop_latch_edge (loop);
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  edge e;
  edge_iterator ei;
  bool result = false;
  /* Both PHI arguments must be SSA names for the pattern to apply.  */
  if (TREE_CODE (arg_0) != SSA_NAME || TREE_CODE (arg_1) != SSA_NAME)
    return false;

  /* Identify which argument is defined by the loop-header PHI
     (reduc_1 in the pattern) and which by the reduction statement
     (reduc_3).  In the extended (> 2 args) case only the second
     arrangement is considered.  */
  if (!extended && gimple_code (SSA_NAME_DEF_STMT (arg_0)) == GIMPLE_PHI)
    {
      lhs = arg_1;
      header_phi = SSA_NAME_DEF_STMT (arg_0);
      stmt = SSA_NAME_DEF_STMT (arg_1);
    }
  else if (gimple_code (SSA_NAME_DEF_STMT (arg_1)) == GIMPLE_PHI)
    {
      lhs = arg_0;
      header_phi = SSA_NAME_DEF_STMT (arg_1);
      stmt = SSA_NAME_DEF_STMT (arg_0);
    }
  else
    return false;
  if (gimple_bb (header_phi) != loop->header)
    return false;

  /* The value flowing back over the latch must be this PHI's result,
     closing the reduction cycle.  */
  if (PHI_ARG_DEF_FROM_EDGE (header_phi, latch_e) != PHI_RESULT (phi))
    return false;

  if (gimple_code (stmt) != GIMPLE_ASSIGN
      || gimple_has_volatile_ops (stmt))
    return false;

  if (!flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
    return false;

  if (!is_predicated (gimple_bb (stmt)))
    return false;

  /* Check that stmt-block is predecessor of phi-block.  */
  FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
    if (e->dest == bb)
      {
	result = true;
	break;
      }
  if (!result)
    return false;

  if (!has_single_use (lhs))
    return false;

  /* Only simple +/- reductions are recognized.  */
  reduction_op = gimple_assign_rhs_code (stmt);
  if (reduction_op != PLUS_EXPR && reduction_op != MINUS_EXPR)
    return false;
  r_op1 = gimple_assign_rhs1 (stmt);
  r_op2 = gimple_assign_rhs2 (stmt);

  /* Make R_OP1 to hold reduction variable.  */
  if (r_op2 == PHI_RESULT (header_phi)
      && reduction_op == PLUS_EXPR)
    std::swap (r_op1, r_op2);
  else if (r_op1 != PHI_RESULT (header_phi))
    return false;

  /* Check that R_OP1 is used in reduction stmt or in PHI only.  */
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, r_op1)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
	continue;
      if (use_stmt == stmt)
	continue;
      if (gimple_code (use_stmt) != GIMPLE_PHI)
	return false;
    }

  /* Success: report the reduction variable, the addend and the stmt.  */
  *op0 = r_op1; *op1 = r_op2;
  *reduc = stmt;
  return true;
}
1651
1652 /* Converts conditional scalar reduction into unconditional form, e.g.
1653 bb_4
1654 if (_5 != 0) goto bb_5 else goto bb_6
1655 end_bb_4
1656 bb_5
1657 res_6 = res_13 + 1;
1658 end_bb_5
1659 bb_6
1660 # res_2 = PHI <res_13(4), res_6(5)>
1661 end_bb_6
1662
1663 will be converted into sequence
1664 _ifc__1 = _5 != 0 ? 1 : 0;
1665 res_2 = res_13 + _ifc__1;
1666 Argument SWAP tells that arguments of conditional expression should be
1667 swapped.
1668 Returns rhs of resulting PHI assignment. */
1669
1670 static tree
1671 convert_scalar_cond_reduction (gimple *reduc, gimple_stmt_iterator *gsi,
1672 tree cond, tree op0, tree op1, bool swap)
1673 {
1674 gimple_stmt_iterator stmt_it;
1675 gimple *new_assign;
1676 tree rhs;
1677 tree rhs1 = gimple_assign_rhs1 (reduc);
1678 tree tmp = make_temp_ssa_name (TREE_TYPE (rhs1), NULL, "_ifc_");
1679 tree c;
1680 tree zero = build_zero_cst (TREE_TYPE (rhs1));
1681
1682 if (dump_file && (dump_flags & TDF_DETAILS))
1683 {
1684 fprintf (dump_file, "Found cond scalar reduction.\n");
1685 print_gimple_stmt (dump_file, reduc, 0, TDF_SLIM);
1686 }
1687
1688 /* Build cond expression using COND and constant operand
1689 of reduction rhs. */
1690 c = fold_build_cond_expr (TREE_TYPE (rhs1),
1691 unshare_expr (cond),
1692 swap ? zero : op1,
1693 swap ? op1 : zero);
1694
1695 /* Create assignment stmt and insert it at GSI. */
1696 new_assign = gimple_build_assign (tmp, c);
1697 gsi_insert_before (gsi, new_assign, GSI_SAME_STMT);
1698 /* Build rhs for unconditional increment/decrement. */
1699 rhs = fold_build2 (gimple_assign_rhs_code (reduc),
1700 TREE_TYPE (rhs1), op0, tmp);
1701
1702 /* Delete original reduction stmt. */
1703 stmt_it = gsi_for_stmt (reduc);
1704 gsi_remove (&stmt_it, true);
1705 release_defs (reduc);
1706 return rhs;
1707 }
1708
1709 /* Produce condition for all occurrences of ARG in PHI node. */
1710
1711 static tree
1712 gen_phi_arg_condition (gphi *phi, vec<int> *occur,
1713 gimple_stmt_iterator *gsi)
1714 {
1715 int len;
1716 int i;
1717 tree cond = NULL_TREE;
1718 tree c;
1719 edge e;
1720
1721 len = occur->length ();
1722 gcc_assert (len > 0);
1723 for (i = 0; i < len; i++)
1724 {
1725 e = gimple_phi_arg_edge (phi, (*occur)[i]);
1726 c = bb_predicate (e->src);
1727 if (is_true_predicate (c))
1728 {
1729 cond = c;
1730 break;
1731 }
1732 c = force_gimple_operand_gsi_1 (gsi, unshare_expr (c),
1733 is_gimple_condexpr, NULL_TREE,
1734 true, GSI_SAME_STMT);
1735 if (cond != NULL_TREE)
1736 {
1737 /* Must build OR expression. */
1738 cond = fold_or_predicates (EXPR_LOCATION (c), c, cond);
1739 cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
1740 is_gimple_condexpr, NULL_TREE,
1741 true, GSI_SAME_STMT);
1742 }
1743 else
1744 cond = c;
1745 }
1746 gcc_assert (cond != NULL_TREE);
1747 return cond;
1748 }
1749
1750 /* Local valueization callback that follows all-use SSA edges. */
1751
static tree
ifcvt_follow_ssa_use_edges (tree val)
{
  /* Returning VAL unchanged tells the folder that every SSA use edge
     may be followed — no valueization barrier is imposed.  */
  return val;
}
1757
1758 /* Replace a scalar PHI node with a COND_EXPR using COND as condition.
1759 This routine can handle PHI nodes with more than two arguments.
1760
1761 For example,
1762 S1: A = PHI <x1(1), x2(5)>
1763 is converted into,
1764 S2: A = cond ? x1 : x2;
1765
1766 The generated code is inserted at GSI that points to the top of
1767 basic block's statement list.
1768 If PHI node has more than two arguments a chain of conditional
1769 expression is produced. */
1770
1771
static void
predicate_scalar_phi (gphi *phi, gimple_stmt_iterator *gsi)
{
  gimple *new_stmt = NULL, *reduc;
  tree rhs, res, arg0, arg1, op0, op1, scev;
  tree cond;
  unsigned int index0;
  unsigned int max, args_len;
  edge e;
  basic_block bb;
  unsigned int i;

  /* Virtual PHIs are handled elsewhere; nothing to do here.  */
  res = gimple_phi_result (phi);
  if (virtual_operand_p (res))
    return;

  /* A degenerate PHI (all arguments equal, or a result fully described
     by scalar evolution) needs no condition: emit a plain copy.  */
  if ((rhs = degenerate_phi_result (phi))
      || ((scev = analyze_scalar_evolution (gimple_bb (phi)->loop_father,
					    res))
	  && !chrec_contains_undetermined (scev)
	  && scev != res
	  && (rhs = gimple_phi_arg_def (phi, 0))))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Degenerate phi!\n");
	  print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
	}
      new_stmt = gimple_build_assign (res, rhs);
      gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
      update_stmt (new_stmt);
      return;
    }

  bb = gimple_bb (phi);
  if (EDGE_COUNT (bb->preds) == 2)
    {
      /* Predicate ordinary PHI node with 2 arguments.  */
      edge first_edge, second_edge;
      basic_block true_bb;
      first_edge = EDGE_PRED (bb, 0);
      second_edge = EDGE_PRED (bb, 1);
      cond = bb_predicate (first_edge->src);
      /* Arrange for FIRST_EDGE to be the edge taken when the (possibly
	 negated) predicate is true.  */
      if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
	std::swap (first_edge, second_edge);
      if (EDGE_COUNT (first_edge->src->succs) > 1)
	{
	  cond = bb_predicate (second_edge->src);
	  if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
	    cond = TREE_OPERAND (cond, 0);
	  else
	    first_edge = second_edge;
	}
      else
	cond = bb_predicate (first_edge->src);
      /* Gimplify the condition to a valid cond-expr conditonal operand.  */
      cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
					 is_gimple_condexpr, NULL_TREE,
					 true, GSI_SAME_STMT);
      true_bb = first_edge->src;
      /* ARG0 is the value selected when COND holds.  */
      if (EDGE_PRED (bb, 1)->src == true_bb)
	{
	  arg0 = gimple_phi_arg_def (phi, 1);
	  arg1 = gimple_phi_arg_def (phi, 0);
	}
      else
	{
	  arg0 = gimple_phi_arg_def (phi, 0);
	  arg1 = gimple_phi_arg_def (phi, 1);
	}
      if (is_cond_scalar_reduction (phi, &reduc, arg0, arg1,
				    &op0, &op1, false))
	/* Convert reduction stmt into vectorizable form.  */
	rhs = convert_scalar_cond_reduction (reduc, gsi, cond, op0, op1,
					     true_bb != gimple_bb (reduc));
      else
	/* Build new RHS using selected condition and arguments.  */
	rhs = fold_build_cond_expr (TREE_TYPE (res), unshare_expr (cond),
				    arg0, arg1);
      new_stmt = gimple_build_assign (res, rhs);
      gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
      gimple_stmt_iterator new_gsi = gsi_for_stmt (new_stmt);
      if (fold_stmt (&new_gsi, ifcvt_follow_ssa_use_edges))
	{
	  new_stmt = gsi_stmt (new_gsi);
	  update_stmt (new_stmt);
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "new phi replacement stmt\n");
	  print_gimple_stmt (dump_file, new_stmt, 0, TDF_SLIM);
	}
      return;
    }

  /* Create hashmap for PHI node which contain vector of argument indexes
     having the same value.  */
  bool swap = false;
  hash_map<tree_operand_hash, auto_vec<int> > phi_arg_map;
  unsigned int num_args = gimple_phi_num_args (phi);
  int max_ind = -1;
  /* Vector of different PHI argument values.  */
  auto_vec<tree> args (num_args);

  /* Compute phi_arg_map.  */
  for (i = 0; i < num_args; i++)
    {
      tree arg;

      arg = gimple_phi_arg_def (phi, i);
      if (!phi_arg_map.get (arg))
	args.quick_push (arg);
      phi_arg_map.get_or_insert (arg).safe_push (i);
    }

  /* Determine element with max number of occurrences.  */
  max_ind = -1;
  max = 1;
  args_len = args.length ();
  for (i = 0; i < args_len; i++)
    {
      unsigned int len;
      if ((len = phi_arg_map.get (args[i])->length ()) > max)
	{
	  max_ind = (int) i;
	  max = len;
	}
    }

  /* Put element with max number of occurences to the end of ARGS.  */
  if (max_ind != -1 && max_ind +1 != (int) args_len)
    std::swap (args[args_len - 1], args[max_ind]);

  /* Handle one special case when number of arguments with different values
     is equal 2 and one argument has the only occurrence.  Such PHI can be
     handled as if would have only 2 arguments.  */
  if (args_len == 2 && phi_arg_map.get (args[0])->length () == 1)
    {
      vec<int> *indexes;
      indexes = phi_arg_map.get (args[0]);
      index0 = (*indexes)[0];
      arg0 = args[0];
      arg1 = args[1];
      e = gimple_phi_arg_edge (phi, index0);
      cond = bb_predicate (e->src);
      if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
	{
	  swap = true;
	  cond = TREE_OPERAND (cond, 0);
	}
      /* Gimplify the condition to a valid cond-expr conditonal operand.  */
      cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
					 is_gimple_condexpr, NULL_TREE,
					 true, GSI_SAME_STMT);
      if (!(is_cond_scalar_reduction (phi, &reduc, arg0 , arg1,
				      &op0, &op1, true)))
	rhs = fold_build_cond_expr (TREE_TYPE (res), unshare_expr (cond),
				    swap? arg1 : arg0,
				    swap? arg0 : arg1);
      else
	/* Convert reduction stmt into vectorizable form.  */
	rhs = convert_scalar_cond_reduction (reduc, gsi, cond, op0, op1,
					     swap);
      new_stmt = gimple_build_assign (res, rhs);
      gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
      update_stmt (new_stmt);
    }
  else
    {
      /* Common case.  */
      vec<int> *indexes;
      tree type = TREE_TYPE (gimple_phi_result (phi));
      tree lhs;
      arg1 = args[1];
      /* Build a chain of COND_EXPRs, one per distinct argument value;
	 the last one assigns directly to the PHI result.  */
      for (i = 0; i < args_len; i++)
	{
	  arg0 = args[i];
	  indexes = phi_arg_map.get (args[i]);
	  if (i != args_len - 1)
	    lhs = make_temp_ssa_name (type, NULL, "_ifc_");
	  else
	    lhs = res;
	  cond = gen_phi_arg_condition (phi, indexes, gsi);
	  rhs = fold_build_cond_expr (type, unshare_expr (cond),
				      arg0, arg1);
	  new_stmt = gimple_build_assign (lhs, rhs);
	  gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
	  update_stmt (new_stmt);
	  arg1 = lhs;
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "new extended phi replacement stmt\n");
      print_gimple_stmt (dump_file, new_stmt, 0, TDF_SLIM);
    }
}
1971
1972 /* Replaces in LOOP all the scalar phi nodes other than those in the
1973 LOOP->header block with conditional modify expressions. */
1974
1975 static void
1976 predicate_all_scalar_phis (struct loop *loop)
1977 {
1978 basic_block bb;
1979 unsigned int orig_loop_num_nodes = loop->num_nodes;
1980 unsigned int i;
1981
1982 for (i = 1; i < orig_loop_num_nodes; i++)
1983 {
1984 gphi *phi;
1985 gimple_stmt_iterator gsi;
1986 gphi_iterator phi_gsi;
1987 bb = ifc_bbs[i];
1988
1989 if (bb == loop->header)
1990 continue;
1991
1992 phi_gsi = gsi_start_phis (bb);
1993 if (gsi_end_p (phi_gsi))
1994 continue;
1995
1996 gsi = gsi_after_labels (bb);
1997 while (!gsi_end_p (phi_gsi))
1998 {
1999 phi = phi_gsi.phi ();
2000 if (virtual_operand_p (gimple_phi_result (phi)))
2001 gsi_next (&phi_gsi);
2002 else
2003 {
2004 predicate_scalar_phi (phi, &gsi);
2005 remove_phi_node (&phi_gsi, false);
2006 }
2007 }
2008 }
2009 }
2010
2011 /* Insert in each basic block of LOOP the statements produced by the
2012 gimplification of the predicates. */
2013
2014 static void
2015 insert_gimplified_predicates (loop_p loop)
2016 {
2017 unsigned int i;
2018
2019 for (i = 0; i < loop->num_nodes; i++)
2020 {
2021 basic_block bb = ifc_bbs[i];
2022 gimple_seq stmts;
2023 if (!is_predicated (bb))
2024 gcc_assert (bb_predicate_gimplified_stmts (bb) == NULL);
2025 if (!is_predicated (bb))
2026 {
2027 /* Do not insert statements for a basic block that is not
2028 predicated. Also make sure that the predicate of the
2029 basic block is set to true. */
2030 reset_bb_predicate (bb);
2031 continue;
2032 }
2033
2034 stmts = bb_predicate_gimplified_stmts (bb);
2035 if (stmts)
2036 {
2037 if (any_pred_load_store)
2038 {
2039 /* Insert the predicate of the BB just after the label,
2040 as the if-conversion of memory writes will use this
2041 predicate. */
2042 gimple_stmt_iterator gsi = gsi_after_labels (bb);
2043 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
2044 }
2045 else
2046 {
2047 /* Insert the predicate of the BB at the end of the BB
2048 as this would reduce the register pressure: the only
2049 use of this predicate will be in successor BBs. */
2050 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2051
2052 if (gsi_end_p (gsi)
2053 || stmt_ends_bb_p (gsi_stmt (gsi)))
2054 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
2055 else
2056 gsi_insert_seq_after (&gsi, stmts, GSI_SAME_STMT);
2057 }
2058
2059 /* Once the sequence is code generated, set it to NULL. */
2060 set_bb_predicate_gimplified_stmts (bb, NULL);
2061 }
2062 }
2063 }
2064
2065 /* Helper function for predicate_mem_writes. Returns index of existent
2066 mask if it was created for given SIZE and -1 otherwise. */
2067
2068 static int
2069 mask_exists (int size, vec<int> vec)
2070 {
2071 unsigned int ix;
2072 int v;
2073 FOR_EACH_VEC_ELT (vec, ix, v)
2074 if (v == size)
2075 return (int) ix;
2076 return -1;
2077 }
2078
2079 /* Predicate each write to memory in LOOP.
2080
2081 This function transforms control flow constructs containing memory
2082 writes of the form:
2083
2084 | for (i = 0; i < N; i++)
2085 | if (cond)
2086 | A[i] = expr;
2087
2088 into the following form that does not contain control flow:
2089
2090 | for (i = 0; i < N; i++)
2091 | A[i] = cond ? expr : A[i];
2092
2093 The original CFG looks like this:
2094
2095 | bb_0
2096 | i = 0
2097 | end_bb_0
2098 |
2099 | bb_1
2100 | if (i < N) goto bb_5 else goto bb_2
2101 | end_bb_1
2102 |
2103 | bb_2
2104 | cond = some_computation;
2105 | if (cond) goto bb_3 else goto bb_4
2106 | end_bb_2
2107 |
2108 | bb_3
2109 | A[i] = expr;
2110 | goto bb_4
2111 | end_bb_3
2112 |
2113 | bb_4
2114 | goto bb_1
2115 | end_bb_4
2116
2117 insert_gimplified_predicates inserts the computation of the COND
2118 expression at the beginning of the destination basic block:
2119
2120 | bb_0
2121 | i = 0
2122 | end_bb_0
2123 |
2124 | bb_1
2125 | if (i < N) goto bb_5 else goto bb_2
2126 | end_bb_1
2127 |
2128 | bb_2
2129 | cond = some_computation;
2130 | if (cond) goto bb_3 else goto bb_4
2131 | end_bb_2
2132 |
2133 | bb_3
2134 | cond = some_computation;
2135 | A[i] = expr;
2136 | goto bb_4
2137 | end_bb_3
2138 |
2139 | bb_4
2140 | goto bb_1
2141 | end_bb_4
2142
2143 predicate_mem_writes is then predicating the memory write as follows:
2144
2145 | bb_0
2146 | i = 0
2147 | end_bb_0
2148 |
2149 | bb_1
2150 | if (i < N) goto bb_5 else goto bb_2
2151 | end_bb_1
2152 |
2153 | bb_2
2154 | if (cond) goto bb_3 else goto bb_4
2155 | end_bb_2
2156 |
2157 | bb_3
2158 | cond = some_computation;
2159 | A[i] = cond ? expr : A[i];
2160 | goto bb_4
2161 | end_bb_3
2162 |
2163 | bb_4
2164 | goto bb_1
2165 | end_bb_4
2166
2167 and finally combine_blocks removes the basic block boundaries making
2168 the loop vectorizable:
2169
2170 | bb_0
2171 | i = 0
2172 | if (i < N) goto bb_5 else goto bb_1
2173 | end_bb_0
2174 |
2175 | bb_1
2176 | cond = some_computation;
2177 | A[i] = cond ? expr : A[i];
2178 | if (i < N) goto bb_5 else goto bb_4
2179 | end_bb_1
2180 |
2181 | bb_4
2182 | goto bb_1
2183 | end_bb_4
2184 */
2185
static void
predicate_mem_writes (loop_p loop)
{
  unsigned int i, orig_loop_num_nodes = loop->num_nodes;
  auto_vec<int, 1> vect_sizes;
  auto_vec<tree, 1> vect_masks;

  /* Walk all blocks but the first recorded one; a block whose predicate
     is true needs no predication and is skipped below anyway.  */
  for (i = 1; i < orig_loop_num_nodes; i++)
    {
      gimple_stmt_iterator gsi;
      basic_block bb = ifc_bbs[i];
      tree cond = bb_predicate (bb);
      bool swap;
      gimple *stmt;
      int index;

      /* Unconditionally executed block: nothing to predicate.  */
      if (is_true_predicate (cond))
	continue;

      /* Canonicalize a negated predicate: strip the TRUTH_NOT_EXPR and
	 remember to invert the sense of the conditional/mask below.  */
      swap = false;
      if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
	{
	  swap = true;
	  cond = TREE_OPERAND (cond, 0);
	}

      /* Masks are cached per basic block only.  */
      vect_sizes.truncate (0);
      vect_masks.truncate (0);

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
	{
	  /* Only single (non-call, non-asm) assignments are predicated.  */
	  if (!gimple_assign_single_p (stmt = gsi_stmt (gsi)))
	    ;
	  else if (is_false_predicate (cond)
		   && gimple_vdef (stmt))
	    {
	      /* A store in a never-executed block is dead: remove it.
		 gsi_remove advances GSI, hence the "continue".  */
	      unlink_stmt_vdef (stmt);
	      gsi_remove (&gsi, true);
	      release_defs (stmt);
	      continue;
	    }
	  else if (gimple_plf (stmt, GF_PLF_2))
	    {
	      /* GF_PLF_2 marks loads/stores to be turned into masked
		 internal calls (IFN_MASK_LOAD / IFN_MASK_STORE).  */
	      tree lhs = gimple_assign_lhs (stmt);
	      tree rhs = gimple_assign_rhs1 (stmt);
	      tree ref, addr, ptr, mask;
	      gcall *new_stmt;
	      gimple_seq stmts = NULL;
	      machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
	      /* We checked before setting GF_PLF_2 that an equivalent
		 integer mode exists.  */
	      int bitsize = GET_MODE_BITSIZE (mode).to_constant ();
	      /* An SSA lhs means this is a load; the memory ref is on
		 the other side.  */
	      ref = TREE_CODE (lhs) == SSA_NAME ? rhs : lhs;
	      mark_addressable (ref);
	      addr = force_gimple_operand_gsi (&gsi, build_fold_addr_expr (ref),
					       true, NULL_TREE, true,
					       GSI_SAME_STMT);
	      if (!vect_sizes.is_empty ()
		  && (index = mask_exists (bitsize, vect_sizes)) != -1)
		/* Use created mask.  */
		mask = vect_masks[index];
	      else
		{
		  if (COMPARISON_CLASS_P (cond))
		    mask = gimple_build (&stmts, TREE_CODE (cond),
					 boolean_type_node,
					 TREE_OPERAND (cond, 0),
					 TREE_OPERAND (cond, 1));
		  else
		    mask = cond;

		  if (swap)
		    {
		      /* Invert the mask via XOR with true, honoring the
			 stripped TRUTH_NOT_EXPR.  */
		      tree true_val
			= constant_boolean_node (true, TREE_TYPE (mask));
		      mask = gimple_build (&stmts, BIT_XOR_EXPR,
					   TREE_TYPE (mask), mask, true_val);
		    }
		  gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);

		  /* Save mask and its size for further use.  */
		  vect_sizes.safe_push (bitsize);
		  vect_masks.safe_push (mask);
		}
	      /* Second argument of the IFN carries alias info: pointer
		 type encodes the alias set, value the alignment.  */
	      ptr = build_int_cst (reference_alias_ptr_type (ref),
				   get_object_alignment (ref));
	      /* Copy points-to info if possible.  */
	      if (TREE_CODE (addr) == SSA_NAME && !SSA_NAME_PTR_INFO (addr))
		copy_ref_info (build2 (MEM_REF, TREE_TYPE (ref), addr, ptr),
			       ref);
	      if (TREE_CODE (lhs) == SSA_NAME)
		{
		  new_stmt
		    = gimple_build_call_internal (IFN_MASK_LOAD, 3, addr,
						  ptr, mask);
		  gimple_call_set_lhs (new_stmt, lhs);
		  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
		}
	      else
		{
		  new_stmt
		    = gimple_build_call_internal (IFN_MASK_STORE, 4, addr, ptr,
						  mask, rhs);
		  /* Transfer the virtual def/use chain to the new call.  */
		  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
		  gimple_set_vdef (new_stmt, gimple_vdef (stmt));
		  SSA_NAME_DEF_STMT (gimple_vdef (new_stmt)) = new_stmt;
		}
	      gimple_call_set_nothrow (new_stmt, true);

	      gsi_replace (&gsi, new_stmt, true);
	    }
	  else if (gimple_vdef (stmt))
	    {
	      /* Remaining stores: predicate the RHS with a COND_EXPR,
		 i.e. A[i] = cond ? expr : A[i].  */
	      tree lhs = gimple_assign_lhs (stmt);
	      tree rhs = gimple_assign_rhs1 (stmt);
	      tree type = TREE_TYPE (lhs);

	      lhs = ifc_temp_var (type, unshare_expr (lhs), &gsi);
	      rhs = ifc_temp_var (type, unshare_expr (rhs), &gsi);
	      if (swap)
		std::swap (lhs, rhs);
	      cond = force_gimple_operand_gsi_1 (&gsi, unshare_expr (cond),
						 is_gimple_condexpr, NULL_TREE,
						 true, GSI_SAME_STMT);
	      rhs = fold_build_cond_expr (type, unshare_expr (cond), rhs, lhs);
	      gimple_assign_set_rhs1 (stmt, ifc_temp_var (type, rhs, &gsi));
	      update_stmt (stmt);
	    }
	  gsi_next (&gsi);
	}
    }
}
2318
2319 /* Remove all GIMPLE_CONDs and GIMPLE_LABELs of all the basic blocks
2320 other than the exit and latch of the LOOP. Also resets the
2321 GIMPLE_DEBUG information. */
2322
2323 static void
2324 remove_conditions_and_labels (loop_p loop)
2325 {
2326 gimple_stmt_iterator gsi;
2327 unsigned int i;
2328
2329 for (i = 0; i < loop->num_nodes; i++)
2330 {
2331 basic_block bb = ifc_bbs[i];
2332
2333 if (bb_with_exit_edge_p (loop, bb)
2334 || bb == loop->latch)
2335 continue;
2336
2337 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
2338 switch (gimple_code (gsi_stmt (gsi)))
2339 {
2340 case GIMPLE_COND:
2341 case GIMPLE_LABEL:
2342 gsi_remove (&gsi, true);
2343 break;
2344
2345 case GIMPLE_DEBUG:
2346 /* ??? Should there be conditional GIMPLE_DEBUG_BINDs? */
2347 if (gimple_debug_bind_p (gsi_stmt (gsi)))
2348 {
2349 gimple_debug_bind_reset_value (gsi_stmt (gsi));
2350 update_stmt (gsi_stmt (gsi));
2351 }
2352 gsi_next (&gsi);
2353 break;
2354
2355 default:
2356 gsi_next (&gsi);
2357 }
2358 }
2359 }
2360
2361 /* Combine all the basic blocks from LOOP into one or two super basic
2362 blocks. Replace PHI nodes with conditional modify expressions. */
2363
static void
combine_blocks (struct loop *loop)
{
  basic_block bb, exit_bb, merge_target_bb;
  unsigned int orig_loop_num_nodes = loop->num_nodes;
  unsigned int i;
  edge e;
  edge_iterator ei;

  /* First turn conditional code into straight-line, predicated form.  */
  remove_conditions_and_labels (loop);
  predicate_all_scalar_phis (loop);

  if (any_pred_load_store)
    predicate_mem_writes (loop);

  /* Merge basic blocks: first remove all the edges in the loop,
     except for those from the exit block.  */
  exit_bb = NULL;
  /* Remember which blocks were executed under a predicate so their
     defs can have flow-sensitive SSA info reset below.  */
  bool *predicated = XNEWVEC (bool, orig_loop_num_nodes);
  for (i = 0; i < orig_loop_num_nodes; i++)
    {
      bb = ifc_bbs[i];
      predicated[i] = !is_true_predicate (bb_predicate (bb));
      free_bb_predicate (bb);
      if (bb_with_exit_edge_p (loop, bb))
	{
	  /* A single exit was verified earlier by if-conversion.  */
	  gcc_assert (exit_bb == NULL);
	  exit_bb = bb;
	}
    }
  gcc_assert (exit_bb != loop->latch);

  /* Detach every non-header block from its predecessors, keeping only
     edges coming from the exit block.  */
  for (i = 1; i < orig_loop_num_nodes; i++)
    {
      bb = ifc_bbs[i];

      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei));)
	{
	  if (e->src == exit_bb)
	    ei_next (&ei);
	  else
	    remove_edge (e);
	}
    }

  if (exit_bb != NULL)
    {
      if (exit_bb != loop->header)
	{
	  /* Connect this node to loop header.  */
	  make_single_succ_edge (loop->header, exit_bb, EDGE_FALLTHRU);
	  set_immediate_dominator (CDI_DOMINATORS, exit_bb, loop->header);
	}

      /* Redirect non-exit edges to loop->latch.  */
      FOR_EACH_EDGE (e, ei, exit_bb->succs)
	{
	  if (!loop_exit_edge_p (loop, e))
	    redirect_edge_and_branch (e, loop->latch);
	}
      set_immediate_dominator (CDI_DOMINATORS, loop->latch, exit_bb);
    }
  else
    {
      /* If the loop does not have an exit, reconnect header and latch.  */
      make_edge (loop->header, loop->latch, EDGE_FALLTHRU);
      set_immediate_dominator (CDI_DOMINATORS, loop->latch, loop->header);
    }

  merge_target_bb = loop->header;

  /* Get at the virtual def valid for uses starting at the first block
     we merge into the header.  Without a virtual PHI the loop has the
     same virtual use on all stmts.  */
  gphi *vphi = get_virtual_phi (loop->header);
  tree last_vdef = NULL_TREE;
  if (vphi)
    {
      last_vdef = gimple_phi_result (vphi);
      for (gimple_stmt_iterator gsi = gsi_start_bb (loop->header);
	   ! gsi_end_p (gsi); gsi_next (&gsi))
	if (gimple_vdef (gsi_stmt (gsi)))
	  last_vdef = gimple_vdef (gsi_stmt (gsi));
    }
  /* Move the statements of each remaining block into the header.  */
  for (i = 1; i < orig_loop_num_nodes; i++)
    {
      gimple_stmt_iterator gsi;
      gimple_stmt_iterator last;

      bb = ifc_bbs[i];

      if (bb == exit_bb || bb == loop->latch)
	continue;

      /* We release virtual PHIs late because we have to propagate them
	 out using the current VUSE.  The def might be the one used
	 after the loop.  */
      vphi = get_virtual_phi (bb);
      if (vphi)
	{
	  imm_use_iterator iter;
	  use_operand_p use_p;
	  gimple *use_stmt;
	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, gimple_phi_result (vphi))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, last_vdef);
	    }
	  gsi = gsi_for_stmt (vphi);
	  remove_phi_node (&gsi, true);
	}

      /* Make stmts member of loop->header and clear range info from all stmts
	 in BB which is now no longer executed conditional on a predicate we
	 could have derived it from.  */
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  gimple_set_bb (stmt, merge_target_bb);
	  /* Update virtual operands.  */
	  if (last_vdef)
	    {
	      use_operand_p use_p = ssa_vuse_operand (stmt);
	      if (use_p
		  && USE_FROM_PTR (use_p) != last_vdef)
		SET_USE (use_p, last_vdef);
	      if (gimple_vdef (stmt))
		last_vdef = gimple_vdef (stmt);
	    }
	  if (predicated[i])
	    {
	      ssa_op_iter i;
	      tree op;
	      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
		reset_flow_sensitive_info (op);
	    }
	}

      /* Update stmt list.  */
      last = gsi_last_bb (merge_target_bb);
      gsi_insert_seq_after_without_update (&last, bb_seq (bb), GSI_NEW_STMT);
      set_bb_seq (bb, NULL);

      delete_basic_block (bb);
    }

  /* If possible, merge loop header to the block with the exit edge.
     This reduces the number of basic blocks to two, to please the
     vectorizer that handles only loops with two nodes.  */
  if (exit_bb
      && exit_bb != loop->header)
    {
      /* We release virtual PHIs late because we have to propagate them
	 out using the current VUSE.  The def might be the one used
	 after the loop.  */
      vphi = get_virtual_phi (exit_bb);
      if (vphi)
	{
	  imm_use_iterator iter;
	  use_operand_p use_p;
	  gimple *use_stmt;
	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, gimple_phi_result (vphi))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, last_vdef);
	    }
	  gimple_stmt_iterator gsi = gsi_for_stmt (vphi);
	  remove_phi_node (&gsi, true);
	}

      if (can_merge_blocks_p (loop->header, exit_bb))
	merge_blocks (loop->header, exit_bb);
    }

  free (ifc_bbs);
  ifc_bbs = NULL;
  free (predicated);
}
2542
2543 /* Version LOOP before if-converting it; the original loop
2544 will be if-converted, the new copy of the loop will not,
2545 and the LOOP_VECTORIZED internal call will be guarding which
2546 loop to execute. The vectorizer pass will fold this
2547 internal call into either true or false.
2548
2549 Note that this function intentionally invalidates profile. Both edges
2550 out of LOOP_VECTORIZED must have 100% probability so the profile remains
2551 consistent after the condition is folded in the vectorizer. */
2552
static struct loop *
version_loop_for_if_conversion (struct loop *loop)
{
  basic_block cond_bb;
  tree cond = make_ssa_name (boolean_type_node);
  struct loop *new_loop;
  gimple *g;
  gimple_stmt_iterator gsi;
  unsigned int save_length;

  /* Build cond = IFN_LOOP_VECTORIZED (loop->num, <placeholder>); the
     second argument is patched below once the copy's number is known.  */
  g = gimple_build_call_internal (IFN_LOOP_VECTORIZED, 2,
				  build_int_cst (integer_type_node, loop->num),
				  integer_zero_node);
  gimple_call_set_lhs (g, cond);

  /* Save BB->aux around loop_version as that uses the same field.  */
  save_length = loop->inner ? loop->inner->num_nodes : loop->num_nodes;
  void **saved_preds = XALLOCAVEC (void *, save_length);
  for (unsigned i = 0; i < save_length; i++)
    saved_preds[i] = ifc_bbs[i]->aux;

  initialize_original_copy_tables ();
  /* At this point we invalidate profile consistency until IFN_LOOP_VECTORIZED
     is re-merged in the vectorizer.  */
  new_loop = loop_version (loop, cond, &cond_bb,
			   profile_probability::always (),
			   profile_probability::always (),
			   profile_probability::always (),
			   profile_probability::always (), true);
  free_original_copy_tables ();

  /* Restore the saved BB->aux values.  */
  for (unsigned i = 0; i < save_length; i++)
    ifc_bbs[i]->aux = saved_preds[i];

  /* loop_version may fail; presumably when the loop cannot be copied --
     TODO confirm against its documentation.  */
  if (new_loop == NULL)
    return NULL;

  /* The copy must be left untouched by later if-conversion and
     vectorization.  */
  new_loop->dont_vectorize = true;
  new_loop->force_vectorize = false;
  gsi = gsi_last_bb (cond_bb);
  /* Now the copy's loop number is known: finish and emit the call
     guarding the two versions.  */
  gimple_call_set_arg (g, 1, build_int_cst (integer_type_node, new_loop->num));
  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
  return new_loop;
}
2597
/* Return true when LOOP satisfies the following conditions that will
2599 allow it to be recognized by the vectorizer for outer-loop
2600 vectorization:
2601 - The loop is not the root node of the loop tree.
2602 - The loop has exactly one inner loop.
2603 - The loop has a single exit.
2604 - The loop header has a single successor, which is the inner
2605 loop header.
2606 - Each of the inner and outer loop latches have a single
2607 predecessor.
2608 - The loop exit block has a single predecessor, which is the
2609 inner loop's exit block. */
2610
2611 static bool
2612 versionable_outer_loop_p (struct loop *loop)
2613 {
2614 if (!loop_outer (loop)
2615 || loop->dont_vectorize
2616 || !loop->inner
2617 || loop->inner->next
2618 || !single_exit (loop)
2619 || !single_succ_p (loop->header)
2620 || single_succ (loop->header) != loop->inner->header
2621 || !single_pred_p (loop->latch)
2622 || !single_pred_p (loop->inner->latch))
2623 return false;
2624
2625 basic_block outer_exit = single_pred (loop->latch);
2626 basic_block inner_exit = single_pred (loop->inner->latch);
2627
2628 if (!single_pred_p (outer_exit) || single_pred (outer_exit) != inner_exit)
2629 return false;
2630
2631 if (dump_file)
2632 fprintf (dump_file, "Found vectorizable outer loop for versioning\n");
2633
2634 return true;
2635 }
2636
2637 /* Performs splitting of critical edges. Skip splitting and return false
2638 if LOOP will not be converted because:
2639
2640 - LOOP is not well formed.
2641 - LOOP has PHI with more than MAX_PHI_ARG_NUM arguments.
2642
2643 Last restriction is valid only if AGGRESSIVE_IF_CONV is false. */
2644
static bool
ifcvt_split_critical_edges (struct loop *loop, bool aggressive_if_conv)
{
  basic_block *body;
  basic_block bb;
  unsigned int num = loop->num_nodes;
  unsigned int i;
  gimple *stmt;
  edge e;
  edge_iterator ei;
  auto_vec<edge> critical_edges;

  /* Loop is not well formed.  */
  if (num <= 2 || loop->inner || !single_exit (loop))
    return false;

  body = get_loop_body (loop);
  for (i = 0; i < num; i++)
    {
      bb = body[i];
      /* Outside aggressive mode, give up on blocks whose PHIs have too
	 many arguments to predicate cheaply.  */
      if (!aggressive_if_conv
	  && phi_nodes (bb)
	  && EDGE_COUNT (bb->preds) > MAX_PHI_ARG_NUM)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "BB %d has complicated PHI with more than %u args.\n",
		     bb->index, MAX_PHI_ARG_NUM);

	  free (body);
	  return false;
	}
      /* The latch and the exit block keep their edges untouched.  */
      if (bb == loop->latch || bb_with_exit_edge_p (loop, bb))
	continue;

      stmt = last_stmt (bb);
      /* Skip basic blocks not ending with conditional branch.  */
      if (!stmt || gimple_code (stmt) != GIMPLE_COND)
	continue;

      /* Collect the critical successor edges that stay inside LOOP.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (EDGE_CRITICAL_P (e) && e->dest->loop_father == loop)
	  critical_edges.safe_push (e);
    }
  free (body);

  while (critical_edges.length () > 0)
    {
      e = critical_edges.pop ();
      /* Don't split if bb can be predicated along non-critical edge.  */
      if (EDGE_COUNT (e->dest->preds) > 2 || all_preds_critical_p (e->dest))
	split_edge (e);
    }

  return true;
}
2701
2702 /* Delete redundant statements produced by predication which prevents
2703 loop vectorization. */
2704
static void
ifcvt_local_dce (basic_block bb)
{
  gimple *stmt;
  gimple *stmt1;
  gimple *phi;
  gimple_stmt_iterator gsi;
  auto_vec<gimple *> worklist;
  enum gimple_code code;
  use_operand_p use_p;
  imm_use_iterator imm_iter;

  /* GF_PLF_2 serves as the "live" mark here; seed the worklist with
     statements whose effects are observable outside BB.  */
  worklist.create (64);
  /* Consider all phi as live statements.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      phi = gsi_stmt (gsi);
      gimple_set_plf (phi, GF_PLF_2, true);
      worklist.safe_push (phi);
    }
  /* Consider load/store statements, CALL and COND as live.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      stmt = gsi_stmt (gsi);
      if (gimple_store_p (stmt)
	  || gimple_assign_load_p (stmt)
	  || is_gimple_debug (stmt))
	{
	  gimple_set_plf (stmt, GF_PLF_2, true);
	  worklist.safe_push (stmt);
	  continue;
	}
      code = gimple_code (stmt);
      if (code == GIMPLE_COND || code == GIMPLE_CALL)
	{
	  gimple_set_plf (stmt, GF_PLF_2, true);
	  worklist.safe_push (stmt);
	  continue;
	}
      /* Tentatively dead until proven otherwise.  */
      gimple_set_plf (stmt, GF_PLF_2, false);

      /* An assignment whose result is used in another block is live.  */
      if (code == GIMPLE_ASSIGN)
	{
	  tree lhs = gimple_assign_lhs (stmt);
	  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
	    {
	      stmt1 = USE_STMT (use_p);
	      if (gimple_bb (stmt1) != bb)
		{
		  gimple_set_plf (stmt, GF_PLF_2, true);
		  worklist.safe_push (stmt);
		  break;
		}
	    }
	}
    }
  /* Propagate liveness through arguments of live stmt.  */
  while (worklist.length () > 0)
    {
      ssa_op_iter iter;
      use_operand_p use_p;
      tree use;

      stmt = worklist.pop ();
      FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	{
	  use = USE_FROM_PTR (use_p);
	  if (TREE_CODE (use) != SSA_NAME)
	    continue;
	  stmt1 = SSA_NAME_DEF_STMT (use);
	  /* Defs from other blocks or already-marked stmts need no work.  */
	  if (gimple_bb (stmt1) != bb
	      || gimple_plf (stmt1, GF_PLF_2))
	    continue;
	  gimple_set_plf (stmt1, GF_PLF_2, true);
	  worklist.safe_push (stmt1);
	}
    }
  /* Delete dead statements.  */
  gsi = gsi_start_bb (bb);
  while (!gsi_end_p (gsi))
    {
      stmt = gsi_stmt (gsi);
      if (gimple_plf (stmt, GF_PLF_2))
	{
	  gsi_next (&gsi);
	  continue;
	}
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Delete dead stmt in bb#%d\n", bb->index);
	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	}
      /* gsi_remove advances GSI, so no gsi_next on this path.  */
      gsi_remove (&gsi, true);
      release_defs (stmt);
    }
}
2801
2802 /* If-convert LOOP when it is legal. For the moment this pass has no
2803 profitability analysis. Returns non-zero todo flags when something
2804 changed. */
2805
unsigned int
tree_if_conversion (struct loop *loop)
{
  unsigned int todo = 0;
  bool aggressive_if_conv;
  struct loop *rloop;
  bool need_update_ssa = false;

 again:
  /* Reset the per-loop global state for this (re-)entry.  */
  rloop = NULL;
  ifc_bbs = NULL;
  any_pred_load_store = false;
  any_complicated_phi = false;

  /* Apply more aggressive if-conversion when loop or its outer loop were
     marked with simd pragma.  When that's the case, we try to if-convert
     loop containing PHIs with more than MAX_PHI_ARG_NUM arguments.  */
  aggressive_if_conv = loop->force_vectorize;
  if (!aggressive_if_conv)
    {
      struct loop *outer_loop = loop_outer (loop);
      if (outer_loop && outer_loop->force_vectorize)
	aggressive_if_conv = true;
    }

  if (!ifcvt_split_critical_edges (loop, aggressive_if_conv))
    goto cleanup;

  if (!if_convertible_loop_p (loop)
      || !dbg_cnt (if_conversion_tree))
    goto cleanup;

  /* Predicated stores/complicated PHIs only pay off when the result is
     actually vectorized; bail out when vectorization is off.  */
  if ((any_pred_load_store || any_complicated_phi)
      && ((!flag_tree_loop_vectorize && !loop->force_vectorize)
	  || loop->dont_vectorize))
    goto cleanup;

  /* Since we have no cost model, always version loops unless the user
     specified -ftree-loop-if-convert or unless versioning is required.
     Either version this loop, or if the pattern is right for outer-loop
     vectorization, version the outer loop.  In the latter case we will
     still if-convert the original inner loop.  */
  if (any_pred_load_store
      || any_complicated_phi
      || flag_tree_loop_if_convert != 1)
    {
      struct loop *vloop
	= (versionable_outer_loop_p (loop_outer (loop))
	   ? loop_outer (loop) : loop);
      struct loop *nloop = version_loop_for_if_conversion (vloop);
      if (nloop == NULL)
	goto cleanup;
      need_update_ssa = true;
      if (vloop != loop)
	{
	  /* If versionable_outer_loop_p decided to version the
	     outer loop, version also the inner loop of the non-vectorized
	     loop copy.  So we transform:
	      loop1
		loop2
	     into:
	      if (LOOP_VECTORIZED (1, 3))
		{
		  loop1
		    loop2
		}
	      else
		loop3 (copy of loop1)
		  if (LOOP_VECTORIZED (4, 5))
		    loop4 (copy of loop2)
		  else
		    loop5 (copy of loop4)  */
	  gcc_assert (nloop->inner && nloop->inner->next == NULL);
	  rloop = nloop->inner;
	}
    }

  /* Due to hard to fix issues we end up with immediate uses recorded
     for not yet inserted predicates which will confuse SSA update so
     we delayed this from after versioning to after predicate insertion.  */
  insert_gimplified_predicates (loop);
  if (need_update_ssa)
    update_ssa (TODO_update_ssa);

  /* Now all statements are if-convertible.  Combine all the basic
     blocks into one huge basic block doing the if-conversion
     on-the-fly.  */
  combine_blocks (loop);

  /* Delete dead predicate computations.  */
  ifcvt_local_dce (loop->header);

  todo |= TODO_cleanup_cfg;

 cleanup:
  /* Release per-BB predicates and the block array whether we succeeded
     or bailed out early.  */
  if (ifc_bbs)
    {
      unsigned int i;

      for (i = 0; i < loop->num_nodes; i++)
	free_bb_predicate (ifc_bbs[i]);

      free (ifc_bbs);
      ifc_bbs = NULL;
    }
  /* When an outer loop was versioned, repeat the whole transform for
     the inner loop of the non-vectorized copy.  */
  if (rloop != NULL)
    {
      loop = rloop;
      goto again;
    }

  return todo;
}
2919
2920 /* Tree if-conversion pass management. */
2921
2922 namespace {
2923
/* Pass descriptor for the "ifcvt" GIMPLE pass.  */
const pass_data pass_data_if_conversion =
{
  GIMPLE_PASS, /* type */
  "ifcvt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_LOOP_IFCVT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};
2936
/* Pass wrapper dispatching to tree_if_conversion; see the gate and
   execute definitions below.  */
class pass_if_conversion : public gimple_opt_pass
{
public:
  pass_if_conversion (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_if_conversion, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *);

}; // class pass_if_conversion
2949
2950 bool
2951 pass_if_conversion::gate (function *fun)
2952 {
2953 return (((flag_tree_loop_vectorize || fun->has_force_vectorize_loops)
2954 && flag_tree_loop_if_convert != 0)
2955 || flag_tree_loop_if_convert == 1);
2956 }
2957
unsigned int
pass_if_conversion::execute (function *fun)
{
  struct loop *loop;
  unsigned todo = 0;

  /* Nothing to do when only the root "loop" (the whole function)
     exists.  */
  if (number_of_loops (fun) <= 1)
    return 0;

  /* If-convert each loop that is either forced on the command line or
     is a vectorization candidate.  */
  FOR_EACH_LOOP (loop, 0)
    if (flag_tree_loop_if_convert == 1
	|| ((flag_tree_loop_vectorize || loop->force_vectorize)
	    && !loop->dont_vectorize))
      todo |= tree_if_conversion (loop);

  if (todo)
    {
      /* Loop bodies changed; cached niter estimates and SCEV results
	 are stale.  */
      free_numbers_of_iterations_estimates (fun);
      scev_reset ();
    }

  /* Verify BB->aux (used to hold predicates) was cleared everywhere.  */
  if (flag_checking)
    {
      basic_block bb;
      FOR_EACH_BB_FN (bb, fun)
	gcc_assert (!bb->aux);
    }

  return todo;
}
2988
2989 } // anon namespace
2990
2991 gimple_opt_pass *
2992 make_pass_if_conversion (gcc::context *ctxt)
2993 {
2994 return new pass_if_conversion (ctxt);
2995 }