re PR tree-optimization/70771 (ICE on valid code at -O3 on x86_64-linux-gnu in operat...
[gcc.git] / gcc / tree-if-conv.c
1 /* If-conversion for vectorizer.
2 Copyright (C) 2004-2016 Free Software Foundation, Inc.
3 Contributed by Devang Patel <dpatel@apple.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* This pass implements a tree level if-conversion of loops. Its
22 initial goal is to help the vectorizer to vectorize loops with
23 conditions.
24
25 A short description of if-conversion:
26
27 o Decide if a loop is if-convertible or not.
28 o Walk all loop basic blocks in breadth first order (BFS order).
29 o Remove conditional statements (at the end of basic block)
30 and propagate condition into destination basic blocks'
31 predicate list.
32 o Replace modify expression with conditional modify expression
33 using current basic block's condition.
34 o Merge all basic blocks
35 o Replace phi nodes with conditional modify expr
36 o Merge all basic blocks into header
37
38 Sample transformation:
39
40 INPUT
41 -----
42
43 # i_23 = PHI <0(0), i_18(10)>;
44 <L0>:;
45 j_15 = A[i_23];
46 if (j_15 > 41) goto <L1>; else goto <L17>;
47
48 <L17>:;
49 goto <bb 3> (<L3>);
50
51 <L1>:;
52
53 # iftmp.2_4 = PHI <0(8), 42(2)>;
54 <L3>:;
55 A[i_23] = iftmp.2_4;
56 i_18 = i_23 + 1;
57 if (i_18 <= 15) goto <L19>; else goto <L18>;
58
59 <L19>:;
60 goto <bb 1> (<L0>);
61
62 <L18>:;
63
64 OUTPUT
65 ------
66
67 # i_23 = PHI <0(0), i_18(10)>;
68 <L0>:;
69 j_15 = A[i_23];
70
71 <L3>:;
72 iftmp.2_4 = j_15 > 41 ? 42 : 0;
73 A[i_23] = iftmp.2_4;
74 i_18 = i_23 + 1;
75 if (i_18 <= 15) goto <L19>; else goto <L18>;
76
77 <L19>:;
78 goto <bb 1> (<L0>);
79
80 <L18>:;
81 */
82
83 #include "config.h"
84 #include "system.h"
85 #include "coretypes.h"
86 #include "backend.h"
87 #include "rtl.h"
88 #include "tree.h"
89 #include "gimple.h"
90 #include "cfghooks.h"
91 #include "tree-pass.h"
92 #include "ssa.h"
93 #include "expmed.h"
94 #include "optabs-query.h"
95 #include "gimple-pretty-print.h"
96 #include "alias.h"
97 #include "fold-const.h"
98 #include "stor-layout.h"
99 #include "gimple-fold.h"
100 #include "gimplify.h"
101 #include "gimple-iterator.h"
102 #include "gimplify-me.h"
103 #include "tree-cfg.h"
104 #include "tree-into-ssa.h"
105 #include "tree-ssa.h"
106 #include "cfgloop.h"
107 #include "tree-data-ref.h"
108 #include "tree-scalar-evolution.h"
109 #include "tree-ssa-loop-ivopts.h"
110 #include "tree-ssa-address.h"
111 #include "dbgcnt.h"
112 #include "tree-hash-traits.h"
113 #include "varasm.h"
114 #include "builtins.h"
115 #include "params.h"
116
117 /* Hash for struct innermost_loop_behavior. It depends on the user to
118 free the memory. */
119
120 struct innermost_loop_behavior_hash : nofree_ptr_hash <innermost_loop_behavior>
121 {
122 static inline hashval_t hash (const value_type &);
123 static inline bool equal (const value_type &,
124 const compare_type &);
125 };
126
127 inline hashval_t
128 innermost_loop_behavior_hash::hash (const value_type &e)
129 {
130 hashval_t hash;
131
132 hash = iterative_hash_expr (e->base_address, 0);
133 hash = iterative_hash_expr (e->offset, hash);
134 hash = iterative_hash_expr (e->init, hash);
135 return iterative_hash_expr (e->step, hash);
136 }
137
138 inline bool
139 innermost_loop_behavior_hash::equal (const value_type &e1,
140 const compare_type &e2)
141 {
142 if ((e1->base_address && !e2->base_address)
143 || (!e1->base_address && e2->base_address)
144 || (!e1->offset && e2->offset)
145 || (e1->offset && !e2->offset)
146 || (!e1->init && e2->init)
147 || (e1->init && !e2->init)
148 || (!e1->step && e2->step)
149 || (e1->step && !e2->step))
150 return false;
151
152 if (e1->base_address && e2->base_address
153 && !operand_equal_p (e1->base_address, e2->base_address, 0))
154 return false;
155 if (e1->offset && e2->offset
156 && !operand_equal_p (e1->offset, e2->offset, 0))
157 return false;
158 if (e1->init && e2->init
159 && !operand_equal_p (e1->init, e2->init, 0))
160 return false;
161 if (e1->step && e2->step
162 && !operand_equal_p (e1->step, e2->step, 0))
163 return false;
164
165 return true;
166 }
167
168 /* List of basic blocks in if-conversion-suitable order. */
169 static basic_block *ifc_bbs;
170
171 /* Apply more aggressive (extended) if-conversion if true. */
172 static bool aggressive_if_conv;
173
174 /* Hash table to store <DR's innermost loop behavior, DR> pairs. */
175 static hash_map<innermost_loop_behavior_hash,
176 data_reference_p> *innermost_DR_map;
177
178 /* Hash table to store <base reference, DR> pairs. */
179 static hash_map<tree_operand_hash, data_reference_p> *baseref_DR_map;
180
181 /* Structure used to predicate basic blocks. This is attached to the
182 ->aux field of the BBs in the loop to be if-converted. */
183 struct bb_predicate {
184
185 /* The condition under which this basic block is executed. */
186 tree predicate;
187
188 /* PREDICATE is gimplified, and the sequence of statements is
189 recorded here, in order to avoid the duplication of computations
190 that occur in previous conditions. See PR44483. */
191 gimple_seq predicate_gimplified_stmts;
192 };
193
194 /* Returns true when the basic block BB has a predicate. */
195
196 static inline bool
197 bb_has_predicate (basic_block bb)
198 {
199 return bb->aux != NULL;
200 }
201
202 /* Returns the gimplified predicate for basic block BB. */
203
204 static inline tree
205 bb_predicate (basic_block bb)
206 {
207 return ((struct bb_predicate *) bb->aux)->predicate;
208 }
209
210 /* Sets the gimplified predicate COND for basic block BB. */
211
212 static inline void
213 set_bb_predicate (basic_block bb, tree cond)
214 {
215 gcc_assert ((TREE_CODE (cond) == TRUTH_NOT_EXPR
216 && is_gimple_condexpr (TREE_OPERAND (cond, 0)))
217 || is_gimple_condexpr (cond));
218 ((struct bb_predicate *) bb->aux)->predicate = cond;
219 }
220
221 /* Returns the sequence of statements of the gimplification of the
222 predicate for basic block BB. */
223
224 static inline gimple_seq
225 bb_predicate_gimplified_stmts (basic_block bb)
226 {
227 return ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts;
228 }
229
230 /* Sets the sequence of statements STMTS of the gimplification of the
231 predicate for basic block BB. */
232
233 static inline void
234 set_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
235 {
236 ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts = stmts;
237 }
238
239 /* Adds the sequence of statements STMTS to the sequence of statements
240 of the predicate for basic block BB. */
241
242 static inline void
243 add_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
244 {
245 gimple_seq_add_seq
246 (&(((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts), stmts);
247 }
248
249 /* Initializes to TRUE the predicate of basic block BB. */
250
251 static inline void
252 init_bb_predicate (basic_block bb)
253 {
254 bb->aux = XNEW (struct bb_predicate);
255 set_bb_predicate_gimplified_stmts (bb, NULL);
256 set_bb_predicate (bb, boolean_true_node);
257 }
258
259 /* Release the SSA_NAMEs associated with the predicate of basic block BB,
260 but don't actually free it. */
261
262 static inline void
263 release_bb_predicate (basic_block bb)
264 {
265 gimple_seq stmts = bb_predicate_gimplified_stmts (bb);
266 if (stmts)
267 {
268 gimple_stmt_iterator i;
269
270 for (i = gsi_start (stmts); !gsi_end_p (i); gsi_next (&i))
271 free_stmt_operands (cfun, gsi_stmt (i));
272 set_bb_predicate_gimplified_stmts (bb, NULL);
273 }
274 }
275
276 /* Free the predicate of basic block BB. */
277
278 static inline void
279 free_bb_predicate (basic_block bb)
280 {
281 if (!bb_has_predicate (bb))
282 return;
283
284 release_bb_predicate (bb);
285 free (bb->aux);
286 bb->aux = NULL;
287 }
288
289 /* Reinitialize predicate of BB with the true predicate. */
290
291 static inline void
292 reset_bb_predicate (basic_block bb)
293 {
294 if (!bb_has_predicate (bb))
295 init_bb_predicate (bb);
296 else
297 {
298 release_bb_predicate (bb);
299 set_bb_predicate (bb, boolean_true_node);
300 }
301 }
302
303 /* Returns a new SSA_NAME of type TYPE that is assigned the value of
304 the expression EXPR. Inserts the statement created for this
305 computation before GSI and leaves the iterator GSI at the same
306 statement. */
307
308 static tree
309 ifc_temp_var (tree type, tree expr, gimple_stmt_iterator *gsi)
310 {
311 tree new_name = make_temp_ssa_name (type, NULL, "_ifc_");
312 gimple *stmt = gimple_build_assign (new_name, expr);
313 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
314 return new_name;
315 }
316
317 /* Return true when COND is a false predicate. */
318
319 static inline bool
320 is_false_predicate (tree cond)
321 {
322 return (cond != NULL_TREE
323 && (cond == boolean_false_node
324 || integer_zerop (cond)));
325 }
326
327 /* Return true when COND is a true predicate. */
328
329 static inline bool
330 is_true_predicate (tree cond)
331 {
332 return (cond == NULL_TREE
333 || cond == boolean_true_node
334 || integer_onep (cond));
335 }
336
337 /* Returns true when BB has a predicate that is not trivial: true or
338 NULL_TREE. */
339
340 static inline bool
341 is_predicated (basic_block bb)
342 {
343 return !is_true_predicate (bb_predicate (bb));
344 }
345
346 /* Parses the predicate COND and returns its comparison code and
347 operands OP0 and OP1. */
348
349 static enum tree_code
350 parse_predicate (tree cond, tree *op0, tree *op1)
351 {
352 gimple *s;
353
354 if (TREE_CODE (cond) == SSA_NAME
355 && is_gimple_assign (s = SSA_NAME_DEF_STMT (cond)))
356 {
357 if (TREE_CODE_CLASS (gimple_assign_rhs_code (s)) == tcc_comparison)
358 {
359 *op0 = gimple_assign_rhs1 (s);
360 *op1 = gimple_assign_rhs2 (s);
361 return gimple_assign_rhs_code (s);
362 }
363
364 else if (gimple_assign_rhs_code (s) == TRUTH_NOT_EXPR)
365 {
366 tree op = gimple_assign_rhs1 (s);
367 tree type = TREE_TYPE (op);
368 enum tree_code code = parse_predicate (op, op0, op1);
369
370 return code == ERROR_MARK ? ERROR_MARK
371 : invert_tree_comparison (code, HONOR_NANS (type));
372 }
373
374 return ERROR_MARK;
375 }
376
377 if (COMPARISON_CLASS_P (cond))
378 {
379 *op0 = TREE_OPERAND (cond, 0);
380 *op1 = TREE_OPERAND (cond, 1);
381 return TREE_CODE (cond);
382 }
383
384 return ERROR_MARK;
385 }
386
387 /* Returns the fold of predicate C1 OR C2 at location LOC. */
388
389 static tree
390 fold_or_predicates (location_t loc, tree c1, tree c2)
391 {
392 tree op1a, op1b, op2a, op2b;
393 enum tree_code code1 = parse_predicate (c1, &op1a, &op1b);
394 enum tree_code code2 = parse_predicate (c2, &op2a, &op2b);
395
396 if (code1 != ERROR_MARK && code2 != ERROR_MARK)
397 {
398 tree t = maybe_fold_or_comparisons (code1, op1a, op1b,
399 code2, op2a, op2b);
400 if (t)
401 return t;
402 }
403
404 return fold_build2_loc (loc, TRUTH_OR_EXPR, boolean_type_node, c1, c2);
405 }
406
407 /* Returns true if N is either a constant or a SSA_NAME. */
408
409 static bool
410 constant_or_ssa_name (tree n)
411 {
412 switch (TREE_CODE (n))
413 {
414 case SSA_NAME:
415 case INTEGER_CST:
416 case REAL_CST:
417 case COMPLEX_CST:
418 case VECTOR_CST:
419 return true;
420 default:
421 return false;
422 }
423 }
424
425 /* Returns either a COND_EXPR or the folded expression if the folded
426 expression is a MIN_EXPR, a MAX_EXPR, an ABS_EXPR,
427 a constant or a SSA_NAME. */
428
429 static tree
430 fold_build_cond_expr (tree type, tree cond, tree rhs, tree lhs)
431 {
432 tree rhs1, lhs1, cond_expr;
433
434 /* If COND is comparison r != 0 and r has boolean type, convert COND
435 to SSA_NAME to accept by vect bool pattern. */
436 if (TREE_CODE (cond) == NE_EXPR)
437 {
438 tree op0 = TREE_OPERAND (cond, 0);
439 tree op1 = TREE_OPERAND (cond, 1);
440 if (TREE_CODE (op0) == SSA_NAME
441 && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
442 && (integer_zerop (op1)))
443 cond = op0;
444 }
445 cond_expr = fold_ternary (COND_EXPR, type, cond,
446 rhs, lhs);
447
448 if (cond_expr == NULL_TREE)
449 return build3 (COND_EXPR, type, cond, rhs, lhs);
450
451 STRIP_USELESS_TYPE_CONVERSION (cond_expr);
452
453 if (constant_or_ssa_name (cond_expr))
454 return cond_expr;
455
456 if (TREE_CODE (cond_expr) == ABS_EXPR)
457 {
458 rhs1 = TREE_OPERAND (cond_expr, 1);
459 STRIP_USELESS_TYPE_CONVERSION (rhs1);
460 if (constant_or_ssa_name (rhs1))
461 return build1 (ABS_EXPR, type, rhs1);
462 }
463
464 if (TREE_CODE (cond_expr) == MIN_EXPR
465 || TREE_CODE (cond_expr) == MAX_EXPR)
466 {
467 lhs1 = TREE_OPERAND (cond_expr, 0);
468 STRIP_USELESS_TYPE_CONVERSION (lhs1);
469 rhs1 = TREE_OPERAND (cond_expr, 1);
470 STRIP_USELESS_TYPE_CONVERSION (rhs1);
471 if (constant_or_ssa_name (rhs1)
472 && constant_or_ssa_name (lhs1))
473 return build2 (TREE_CODE (cond_expr), type, lhs1, rhs1);
474 }
475 return build3 (COND_EXPR, type, cond, rhs, lhs);
476 }
477
478 /* Add condition NC to the predicate list of basic block BB. LOOP is
479 the loop to be if-converted. Use predicate of cd-equivalent block
480 for join bb if it exists: we call basic blocks bb1 and bb2
481 cd-equivalent if they are executed under the same condition. */
482
483 static inline void
484 add_to_predicate_list (struct loop *loop, basic_block bb, tree nc)
485 {
486 tree bc, *tp;
487 basic_block dom_bb;
488
489 if (is_true_predicate (nc))
490 return;
491
492 /* If dominance tells us this basic block is always executed,
493 don't record any predicates for it. */
494 if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
495 return;
496
497 dom_bb = get_immediate_dominator (CDI_DOMINATORS, bb);
498 /* We use notion of cd equivalence to get simpler predicate for
499 join block, e.g. if join block has 2 predecessors with predicates
500 p1 & p2 and p1 & !p2, we'd like to get p1 for it instead of
501 p1 & p2 | p1 & !p2. */
502 if (dom_bb != loop->header
503 && get_immediate_dominator (CDI_POST_DOMINATORS, dom_bb) == bb)
504 {
505 gcc_assert (flow_bb_inside_loop_p (loop, dom_bb));
506 bc = bb_predicate (dom_bb);
507 if (!is_true_predicate (bc))
508 set_bb_predicate (bb, bc);
509 else
510 gcc_assert (is_true_predicate (bb_predicate (bb)));
511 if (dump_file && (dump_flags & TDF_DETAILS))
512 fprintf (dump_file, "Use predicate of bb#%d for bb#%d\n",
513 dom_bb->index, bb->index);
514 return;
515 }
516
517 if (!is_predicated (bb))
518 bc = nc;
519 else
520 {
521 bc = bb_predicate (bb);
522 bc = fold_or_predicates (EXPR_LOCATION (bc), nc, bc);
523 if (is_true_predicate (bc))
524 {
525 reset_bb_predicate (bb);
526 return;
527 }
528 }
529
530 /* Allow a TRUTH_NOT_EXPR around the main predicate. */
531 if (TREE_CODE (bc) == TRUTH_NOT_EXPR)
532 tp = &TREE_OPERAND (bc, 0);
533 else
534 tp = &bc;
535 if (!is_gimple_condexpr (*tp))
536 {
537 gimple_seq stmts;
538 *tp = force_gimple_operand_1 (*tp, &stmts, is_gimple_condexpr, NULL_TREE);
539 add_bb_predicate_gimplified_stmts (bb, stmts);
540 }
541 set_bb_predicate (bb, bc);
542 }
543
544 /* Add the condition COND to the previous condition PREV_COND, and add
545 this to the predicate list of the destination of edge E. LOOP is
546 the loop to be if-converted. */
547
548 static void
549 add_to_dst_predicate_list (struct loop *loop, edge e,
550 tree prev_cond, tree cond)
551 {
552 if (!flow_bb_inside_loop_p (loop, e->dest))
553 return;
554
555 if (!is_true_predicate (prev_cond))
556 cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
557 prev_cond, cond);
558
559 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, e->dest))
560 add_to_predicate_list (loop, e->dest, cond);
561 }
562
563 /* Return true if one of the successor edges of BB exits LOOP. */
564
565 static bool
566 bb_with_exit_edge_p (struct loop *loop, basic_block bb)
567 {
568 edge e;
569 edge_iterator ei;
570
571 FOR_EACH_EDGE (e, ei, bb->succs)
572 if (loop_exit_edge_p (loop, e))
573 return true;
574
575 return false;
576 }
577
578 /* Given PHI which has more than two arguments, this function checks if
579 it's if-convertible by degenerating its arguments. Specifically, if
580 below two conditions are satisfied:
581
582 1) Number of PHI arguments with different values equals to 2 and one
583 argument has the only occurrence.
584 2) The edge corresponding to the unique argument isn't critical edge.
585
586 Such PHI can be handled as PHIs have only two arguments. For example,
587 below PHI:
588
589 res = PHI <A_1(e1), A_1(e2), A_2(e3)>;
590
591 can be transformed into:
592
593 res = (predicate of e3) ? A_2 : A_1;
594
595 Return TRUE if it is the case, FALSE otherwise. */
596
597 static bool
598 phi_convertible_by_degenerating_args (gphi *phi)
599 {
600 edge e;
601 tree arg, t1 = NULL, t2 = NULL;
602 unsigned int i, i1 = 0, i2 = 0, n1 = 0, n2 = 0;
603 unsigned int num_args = gimple_phi_num_args (phi);
604
605 gcc_assert (num_args > 2);
606
607 for (i = 0; i < num_args; i++)
608 {
609 arg = gimple_phi_arg_def (phi, i);
610 if (t1 == NULL || operand_equal_p (t1, arg, 0))
611 {
612 n1++;
613 i1 = i;
614 t1 = arg;
615 }
616 else if (t2 == NULL || operand_equal_p (t2, arg, 0))
617 {
618 n2++;
619 i2 = i;
620 t2 = arg;
621 }
622 else
623 return false;
624 }
625
626 if (n1 != 1 && n2 != 1)
627 return false;
628
629 /* Check if the edge corresponding to the unique arg is critical. */
630 e = gimple_phi_arg_edge (phi, (n1 == 1) ? i1 : i2);
631 if (EDGE_COUNT (e->src->succs) > 1)
632 return false;
633
634 return true;
635 }
636
637 /* Return true when PHI is if-convertible. PHI is part of loop LOOP
638 and it belongs to basic block BB.
639
640 PHI is not if-convertible if:
641 - it has more than 2 arguments.
642
643 When the aggressive_if_conv is set, PHI can have more than
644 two arguments. */
645
646 static bool
647 if_convertible_phi_p (struct loop *loop, basic_block bb, gphi *phi)
648 {
649 if (dump_file && (dump_flags & TDF_DETAILS))
650 {
651 fprintf (dump_file, "-------------------------\n");
652 print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
653 }
654
655 if (bb != loop->header)
656 {
657 if (gimple_phi_num_args (phi) > 2
658 && !aggressive_if_conv
659 && !phi_convertible_by_degenerating_args (phi))
660 {
661 if (dump_file && (dump_flags & TDF_DETAILS))
662 fprintf (dump_file, "Phi can't be predicated by single cond.\n");
663 return false;
664 }
665 }
666
667 return true;
668 }
669
670 /* Records the status of a data reference. This struct is attached to
671 each DR->aux field. */
672
673 struct ifc_dr {
674 bool rw_unconditionally;
675 bool w_unconditionally;
676 bool written_at_least_once;
677
678 tree rw_predicate;
679 tree w_predicate;
680 tree base_w_predicate;
681 };
682
683 #define IFC_DR(DR) ((struct ifc_dr *) (DR)->aux)
684 #define DR_BASE_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->written_at_least_once)
685 #define DR_RW_UNCONDITIONALLY(DR) (IFC_DR (DR)->rw_unconditionally)
686 #define DR_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->w_unconditionally)
687
688 /* Iterates over DR's and stores refs, DR and base refs, DR pairs in
689 HASH tables. While storing them in HASH table, it checks if the
690 reference is unconditionally read or written and stores that as a flag
691 information. For base reference it checks if it is written atlest once
692 unconditionally and stores it as flag information along with DR.
693 In other words for every data reference A in STMT there exist other
694 accesses to a data reference with the same base with predicates that
695 add up (OR-up) to the true predicate: this ensures that the data
696 reference A is touched (read or written) on every iteration of the
697 if-converted loop. */
698 static void
699 hash_memrefs_baserefs_and_store_DRs_read_written_info (data_reference_p a)
700 {
701
702 data_reference_p *master_dr, *base_master_dr;
703 tree base_ref = DR_BASE_OBJECT (a);
704 innermost_loop_behavior *innermost = &DR_INNERMOST (a);
705 tree ca = bb_predicate (gimple_bb (DR_STMT (a)));
706 bool exist1, exist2;
707
708 master_dr = &innermost_DR_map->get_or_insert (innermost, &exist1);
709 if (!exist1)
710 *master_dr = a;
711
712 if (DR_IS_WRITE (a))
713 {
714 IFC_DR (*master_dr)->w_predicate
715 = fold_or_predicates (UNKNOWN_LOCATION, ca,
716 IFC_DR (*master_dr)->w_predicate);
717 if (is_true_predicate (IFC_DR (*master_dr)->w_predicate))
718 DR_W_UNCONDITIONALLY (*master_dr) = true;
719 }
720 IFC_DR (*master_dr)->rw_predicate
721 = fold_or_predicates (UNKNOWN_LOCATION, ca,
722 IFC_DR (*master_dr)->rw_predicate);
723 if (is_true_predicate (IFC_DR (*master_dr)->rw_predicate))
724 DR_RW_UNCONDITIONALLY (*master_dr) = true;
725
726 if (DR_IS_WRITE (a))
727 {
728 base_master_dr = &baseref_DR_map->get_or_insert (base_ref, &exist2);
729 if (!exist2)
730 *base_master_dr = a;
731 IFC_DR (*base_master_dr)->base_w_predicate
732 = fold_or_predicates (UNKNOWN_LOCATION, ca,
733 IFC_DR (*base_master_dr)->base_w_predicate);
734 if (is_true_predicate (IFC_DR (*base_master_dr)->base_w_predicate))
735 DR_BASE_W_UNCONDITIONALLY (*base_master_dr) = true;
736 }
737 }
738
739 /* Return true when the memory references of STMT won't trap in the
740 if-converted code. There are two things that we have to check for:
741
742 - writes to memory occur to writable memory: if-conversion of
743 memory writes transforms the conditional memory writes into
744 unconditional writes, i.e. "if (cond) A[i] = foo" is transformed
745 into "A[i] = cond ? foo : A[i]", and as the write to memory may not
746 be executed at all in the original code, it may be a readonly
747 memory. To check that A is not const-qualified, we check that
748 there exists at least an unconditional write to A in the current
749 function.
750
751 - reads or writes to memory are valid memory accesses for every
752 iteration. To check that the memory accesses are correctly formed
753 and that we are allowed to read and write in these locations, we
754 check that the memory accesses to be if-converted occur at every
755 iteration unconditionally.
756
757 Returns true for the memory reference in STMT, same memory reference
758 is read or written unconditionally atleast once and the base memory
759 reference is written unconditionally once. This is to check reference
760 will not write fault. Also retuns true if the memory reference is
761 unconditionally read once then we are conditionally writing to memory
762 which is defined as read and write and is bound to the definition
763 we are seeing. */
764 static bool
765 ifcvt_memrefs_wont_trap (gimple *stmt, vec<data_reference_p> drs)
766 {
767 data_reference_p *master_dr, *base_master_dr;
768 data_reference_p a = drs[gimple_uid (stmt) - 1];
769
770 tree base = DR_BASE_OBJECT (a);
771 innermost_loop_behavior *innermost = &DR_INNERMOST (a);
772
773 gcc_assert (DR_STMT (a) == stmt);
774 gcc_assert (DR_BASE_ADDRESS (a) || DR_OFFSET (a)
775 || DR_INIT (a) || DR_STEP (a));
776
777 master_dr = innermost_DR_map->get (innermost);
778 gcc_assert (master_dr != NULL);
779
780 base_master_dr = baseref_DR_map->get (base);
781
782 /* If a is unconditionally written to it doesn't trap. */
783 if (DR_W_UNCONDITIONALLY (*master_dr))
784 return true;
785
786 /* If a is unconditionally accessed then ... */
787 if (DR_RW_UNCONDITIONALLY (*master_dr))
788 {
789 /* an unconditional read won't trap. */
790 if (DR_IS_READ (a))
791 return true;
792
793 /* an unconditionaly write won't trap if the base is written
794 to unconditionally. */
795 if (base_master_dr
796 && DR_BASE_W_UNCONDITIONALLY (*base_master_dr))
797 return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES);
798 else
799 {
800 /* or the base is know to be not readonly. */
801 tree base_tree = get_base_address (DR_REF (a));
802 if (DECL_P (base_tree)
803 && decl_binds_to_current_def_p (base_tree)
804 && ! TREE_READONLY (base_tree))
805 return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES);
806 }
807 }
808 return false;
809 }
810
811 /* Return true if STMT could be converted into a masked load or store
812 (conditional load or store based on a mask computed from bb predicate). */
813
814 static bool
815 ifcvt_can_use_mask_load_store (gimple *stmt)
816 {
817 tree lhs, ref;
818 machine_mode mode;
819 basic_block bb = gimple_bb (stmt);
820 bool is_load;
821
822 if (!(flag_tree_loop_vectorize || bb->loop_father->force_vectorize)
823 || bb->loop_father->dont_vectorize
824 || !gimple_assign_single_p (stmt)
825 || gimple_has_volatile_ops (stmt))
826 return false;
827
828 /* Check whether this is a load or store. */
829 lhs = gimple_assign_lhs (stmt);
830 if (gimple_store_p (stmt))
831 {
832 if (!is_gimple_val (gimple_assign_rhs1 (stmt)))
833 return false;
834 is_load = false;
835 ref = lhs;
836 }
837 else if (gimple_assign_load_p (stmt))
838 {
839 is_load = true;
840 ref = gimple_assign_rhs1 (stmt);
841 }
842 else
843 return false;
844
845 if (may_be_nonaddressable_p (ref))
846 return false;
847
848 /* Mask should be integer mode of the same size as the load/store
849 mode. */
850 mode = TYPE_MODE (TREE_TYPE (lhs));
851 if (int_mode_for_mode (mode) == BLKmode
852 || VECTOR_MODE_P (mode))
853 return false;
854
855 if (can_vec_mask_load_store_p (mode, VOIDmode, is_load))
856 return true;
857
858 return false;
859 }
860
861 /* Return true when STMT is if-convertible.
862
863 GIMPLE_ASSIGN statement is not if-convertible if,
864 - it is not movable,
865 - it could trap,
866 - LHS is not var decl. */
867
868 static bool
869 if_convertible_gimple_assign_stmt_p (gimple *stmt,
870 vec<data_reference_p> refs,
871 bool *any_mask_load_store)
872 {
873 tree lhs = gimple_assign_lhs (stmt);
874
875 if (dump_file && (dump_flags & TDF_DETAILS))
876 {
877 fprintf (dump_file, "-------------------------\n");
878 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
879 }
880
881 if (!is_gimple_reg_type (TREE_TYPE (lhs)))
882 return false;
883
884 /* Some of these constrains might be too conservative. */
885 if (stmt_ends_bb_p (stmt)
886 || gimple_has_volatile_ops (stmt)
887 || (TREE_CODE (lhs) == SSA_NAME
888 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
889 || gimple_has_side_effects (stmt))
890 {
891 if (dump_file && (dump_flags & TDF_DETAILS))
892 fprintf (dump_file, "stmt not suitable for ifcvt\n");
893 return false;
894 }
895
896 /* tree-into-ssa.c uses GF_PLF_1, so avoid it, because
897 in between if_convertible_loop_p and combine_blocks
898 we can perform loop versioning. */
899 gimple_set_plf (stmt, GF_PLF_2, false);
900
901 if ((! gimple_vuse (stmt)
902 || gimple_could_trap_p_1 (stmt, false, false)
903 || ! ifcvt_memrefs_wont_trap (stmt, refs))
904 && gimple_could_trap_p (stmt))
905 {
906 if (ifcvt_can_use_mask_load_store (stmt))
907 {
908 gimple_set_plf (stmt, GF_PLF_2, true);
909 *any_mask_load_store = true;
910 return true;
911 }
912 if (dump_file && (dump_flags & TDF_DETAILS))
913 fprintf (dump_file, "tree could trap...\n");
914 return false;
915 }
916
917 /* When if-converting stores force versioning, likewise if we
918 ended up generating store data races. */
919 if (gimple_vdef (stmt))
920 *any_mask_load_store = true;
921
922 return true;
923 }
924
925 /* Return true when STMT is if-convertible.
926
927 A statement is if-convertible if:
928 - it is an if-convertible GIMPLE_ASSIGN,
929 - it is a GIMPLE_LABEL or a GIMPLE_COND,
930 - it is builtins call. */
931
932 static bool
933 if_convertible_stmt_p (gimple *stmt, vec<data_reference_p> refs,
934 bool *any_mask_load_store)
935 {
936 switch (gimple_code (stmt))
937 {
938 case GIMPLE_LABEL:
939 case GIMPLE_DEBUG:
940 case GIMPLE_COND:
941 return true;
942
943 case GIMPLE_ASSIGN:
944 return if_convertible_gimple_assign_stmt_p (stmt, refs,
945 any_mask_load_store);
946
947 case GIMPLE_CALL:
948 {
949 tree fndecl = gimple_call_fndecl (stmt);
950 if (fndecl)
951 {
952 int flags = gimple_call_flags (stmt);
953 if ((flags & ECF_CONST)
954 && !(flags & ECF_LOOPING_CONST_OR_PURE)
955 /* We can only vectorize some builtins at the moment,
956 so restrict if-conversion to those. */
957 && DECL_BUILT_IN (fndecl))
958 return true;
959 }
960 return false;
961 }
962
963 default:
964 /* Don't know what to do with 'em so don't do anything. */
965 if (dump_file && (dump_flags & TDF_DETAILS))
966 {
967 fprintf (dump_file, "don't know what to do\n");
968 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
969 }
970 return false;
971 break;
972 }
973
974 return true;
975 }
976
977 /* Assumes that BB has more than 1 predecessors.
978 Returns false if at least one successor is not on critical edge
979 and true otherwise. */
980
981 static inline bool
982 all_preds_critical_p (basic_block bb)
983 {
984 edge e;
985 edge_iterator ei;
986
987 FOR_EACH_EDGE (e, ei, bb->preds)
988 if (EDGE_COUNT (e->src->succs) == 1)
989 return false;
990 return true;
991 }
992
993 /* Returns true if at least one successor in on critical edge. */
994 static inline bool
995 has_pred_critical_p (basic_block bb)
996 {
997 edge e;
998 edge_iterator ei;
999
1000 FOR_EACH_EDGE (e, ei, bb->preds)
1001 if (EDGE_COUNT (e->src->succs) > 1)
1002 return true;
1003 return false;
1004 }
1005
1006 /* Return true when BB is if-convertible. This routine does not check
1007 basic block's statements and phis.
1008
1009 A basic block is not if-convertible if:
1010 - it is non-empty and it is after the exit block (in BFS order),
1011 - it is after the exit block but before the latch,
1012 - its edges are not normal.
1013
1014 Last restriction is valid if aggressive_if_conv is false.
1015
1016 EXIT_BB is the basic block containing the exit of the LOOP. BB is
1017 inside LOOP. */
1018
1019 static bool
1020 if_convertible_bb_p (struct loop *loop, basic_block bb, basic_block exit_bb)
1021 {
1022 edge e;
1023 edge_iterator ei;
1024
1025 if (dump_file && (dump_flags & TDF_DETAILS))
1026 fprintf (dump_file, "----------[%d]-------------\n", bb->index);
1027
1028 if (EDGE_COUNT (bb->succs) > 2)
1029 return false;
1030
1031 if (exit_bb)
1032 {
1033 if (bb != loop->latch)
1034 {
1035 if (dump_file && (dump_flags & TDF_DETAILS))
1036 fprintf (dump_file, "basic block after exit bb but before latch\n");
1037 return false;
1038 }
1039 else if (!empty_block_p (bb))
1040 {
1041 if (dump_file && (dump_flags & TDF_DETAILS))
1042 fprintf (dump_file, "non empty basic block after exit bb\n");
1043 return false;
1044 }
1045 else if (bb == loop->latch
1046 && bb != exit_bb
1047 && !dominated_by_p (CDI_DOMINATORS, bb, exit_bb))
1048 {
1049 if (dump_file && (dump_flags & TDF_DETAILS))
1050 fprintf (dump_file, "latch is not dominated by exit_block\n");
1051 return false;
1052 }
1053 }
1054
1055 /* Be less adventurous and handle only normal edges. */
1056 FOR_EACH_EDGE (e, ei, bb->succs)
1057 if (e->flags & (EDGE_EH | EDGE_ABNORMAL | EDGE_IRREDUCIBLE_LOOP))
1058 {
1059 if (dump_file && (dump_flags & TDF_DETAILS))
1060 fprintf (dump_file, "Difficult to handle edges\n");
1061 return false;
1062 }
1063
1064 /* At least one incoming edge has to be non-critical as otherwise edge
1065 predicates are not equal to basic-block predicates of the edge
1066 source. This check is skipped if aggressive_if_conv is true. */
1067 if (!aggressive_if_conv
1068 && EDGE_COUNT (bb->preds) > 1
1069 && bb != loop->header
1070 && all_preds_critical_p (bb))
1071 {
1072 if (dump_file && (dump_flags & TDF_DETAILS))
1073 fprintf (dump_file, "only critical predecessors\n");
1074 return false;
1075 }
1076
1077 return true;
1078 }
1079
1080 /* Return true when all predecessor blocks of BB are visited. The
1081 VISITED bitmap keeps track of the visited blocks. */
1082
1083 static bool
1084 pred_blocks_visited_p (basic_block bb, bitmap *visited)
1085 {
1086 edge e;
1087 edge_iterator ei;
1088 FOR_EACH_EDGE (e, ei, bb->preds)
1089 if (!bitmap_bit_p (*visited, e->src->index))
1090 return false;
1091
1092 return true;
1093 }
1094
1095 /* Get body of a LOOP in suitable order for if-conversion. It is
1096 caller's responsibility to deallocate basic block list.
1097 If-conversion suitable order is, breadth first sort (BFS) order
1098 with an additional constraint: select a block only if all its
1099 predecessors are already selected. */
1100
1101 static basic_block *
1102 get_loop_body_in_if_conv_order (const struct loop *loop)
1103 {
1104 basic_block *blocks, *blocks_in_bfs_order;
1105 basic_block bb;
1106 bitmap visited;
1107 unsigned int index = 0;
1108 unsigned int visited_count = 0;
1109
1110 gcc_assert (loop->num_nodes);
1111 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1112
1113 blocks = XCNEWVEC (basic_block, loop->num_nodes);
1114 visited = BITMAP_ALLOC (NULL);
1115
1116 blocks_in_bfs_order = get_loop_body_in_bfs_order (loop);
1117
1118 index = 0;
1119 while (index < loop->num_nodes)
1120 {
1121 bb = blocks_in_bfs_order [index];
1122
1123 if (bb->flags & BB_IRREDUCIBLE_LOOP)
1124 {
1125 free (blocks_in_bfs_order);
1126 BITMAP_FREE (visited);
1127 free (blocks);
1128 return NULL;
1129 }
1130
1131 if (!bitmap_bit_p (visited, bb->index))
1132 {
1133 if (pred_blocks_visited_p (bb, &visited)
1134 || bb == loop->header)
1135 {
1136 /* This block is now visited. */
1137 bitmap_set_bit (visited, bb->index);
1138 blocks[visited_count++] = bb;
1139 }
1140 }
1141
1142 index++;
1143
1144 if (index == loop->num_nodes
1145 && visited_count != loop->num_nodes)
1146 /* Not done yet. */
1147 index = 0;
1148 }
1149 free (blocks_in_bfs_order);
1150 BITMAP_FREE (visited);
1151 return blocks;
1152 }
1153
1154 /* Returns true when the analysis of the predicates for all the basic
1155 blocks in LOOP succeeded.
1156
1157 predicate_bbs first allocates the predicates of the basic blocks.
1158 These fields are then initialized with the tree expressions
1159 representing the predicates under which a basic block is executed
1160 in the LOOP. As the loop->header is executed at each iteration, it
1161 has the "true" predicate. Other statements executed under a
1162 condition are predicated with that condition, for example
1163
1164 | if (x)
1165 | S1;
1166 | else
1167 | S2;
1168
1169 S1 will be predicated with "x", and
1170 S2 will be predicated with "!x". */
1171
1172 static void
1173 predicate_bbs (loop_p loop)
1174 {
1175 unsigned int i;
1176
1177 for (i = 0; i < loop->num_nodes; i++)
1178 init_bb_predicate (ifc_bbs[i]);
1179
1180 for (i = 0; i < loop->num_nodes; i++)
1181 {
1182 basic_block bb = ifc_bbs[i];
1183 tree cond;
1184 gimple *stmt;
1185
1186 /* The loop latch and loop exit block are always executed and
1187 have no extra conditions to be processed: skip them. */
1188 if (bb == loop->latch
1189 || bb_with_exit_edge_p (loop, bb))
1190 {
1191 reset_bb_predicate (bb);
1192 continue;
1193 }
1194
1195 cond = bb_predicate (bb);
1196 stmt = last_stmt (bb);
1197 if (stmt && gimple_code (stmt) == GIMPLE_COND)
1198 {
1199 tree c2;
1200 edge true_edge, false_edge;
1201 location_t loc = gimple_location (stmt);
1202 tree c = build2_loc (loc, gimple_cond_code (stmt),
1203 boolean_type_node,
1204 gimple_cond_lhs (stmt),
1205 gimple_cond_rhs (stmt));
1206
1207 /* Add new condition into destination's predicate list. */
1208 extract_true_false_edges_from_block (gimple_bb (stmt),
1209 &true_edge, &false_edge);
1210
1211 /* If C is true, then TRUE_EDGE is taken. */
1212 add_to_dst_predicate_list (loop, true_edge, unshare_expr (cond),
1213 unshare_expr (c));
1214
1215 /* If C is false, then FALSE_EDGE is taken. */
1216 c2 = build1_loc (loc, TRUTH_NOT_EXPR, boolean_type_node,
1217 unshare_expr (c));
1218 add_to_dst_predicate_list (loop, false_edge,
1219 unshare_expr (cond), c2);
1220
1221 cond = NULL_TREE;
1222 }
1223
1224 /* If current bb has only one successor, then consider it as an
1225 unconditional goto. */
1226 if (single_succ_p (bb))
1227 {
1228 basic_block bb_n = single_succ (bb);
1229
1230 /* The successor bb inherits the predicate of its
1231 predecessor. If there is no predicate in the predecessor
1232 bb, then consider the successor bb as always executed. */
1233 if (cond == NULL_TREE)
1234 cond = boolean_true_node;
1235
1236 add_to_predicate_list (loop, bb_n, cond);
1237 }
1238 }
1239
1240 /* The loop header is always executed. */
1241 reset_bb_predicate (loop->header);
1242 gcc_assert (bb_predicate_gimplified_stmts (loop->header) == NULL
1243 && bb_predicate_gimplified_stmts (loop->latch) == NULL);
1244 }
1245
1246 /* Return true when LOOP is if-convertible. This is a helper function
1247 for if_convertible_loop_p. REFS and DDRS are initialized and freed
1248 in if_convertible_loop_p. */
1249
1250 static bool
1251 if_convertible_loop_p_1 (struct loop *loop,
1252 vec<data_reference_p> *refs,
1253 bool *any_mask_load_store)
1254 {
1255 unsigned int i;
1256 basic_block exit_bb = NULL;
1257
1258 if (find_data_references_in_loop (loop, refs) == chrec_dont_know)
1259 return false;
1260
1261 calculate_dominance_info (CDI_DOMINATORS);
1262 calculate_dominance_info (CDI_POST_DOMINATORS);
1263
1264 /* Allow statements that can be handled during if-conversion. */
1265 ifc_bbs = get_loop_body_in_if_conv_order (loop);
1266 if (!ifc_bbs)
1267 {
1268 if (dump_file && (dump_flags & TDF_DETAILS))
1269 fprintf (dump_file, "Irreducible loop\n");
1270 return false;
1271 }
1272
1273 for (i = 0; i < loop->num_nodes; i++)
1274 {
1275 basic_block bb = ifc_bbs[i];
1276
1277 if (!if_convertible_bb_p (loop, bb, exit_bb))
1278 return false;
1279
1280 if (bb_with_exit_edge_p (loop, bb))
1281 exit_bb = bb;
1282 }
1283
1284 for (i = 0; i < loop->num_nodes; i++)
1285 {
1286 basic_block bb = ifc_bbs[i];
1287 gimple_stmt_iterator gsi;
1288
1289 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1290 switch (gimple_code (gsi_stmt (gsi)))
1291 {
1292 case GIMPLE_LABEL:
1293 case GIMPLE_ASSIGN:
1294 case GIMPLE_CALL:
1295 case GIMPLE_DEBUG:
1296 case GIMPLE_COND:
1297 gimple_set_uid (gsi_stmt (gsi), 0);
1298 break;
1299 default:
1300 return false;
1301 }
1302 }
1303
1304 data_reference_p dr;
1305
1306 innermost_DR_map
1307 = new hash_map<innermost_loop_behavior_hash, data_reference_p>;
1308 baseref_DR_map = new hash_map<tree_operand_hash, data_reference_p>;
1309
1310 predicate_bbs (loop);
1311
1312 for (i = 0; refs->iterate (i, &dr); i++)
1313 {
1314 tree ref = DR_REF (dr);
1315
1316 dr->aux = XNEW (struct ifc_dr);
1317 DR_BASE_W_UNCONDITIONALLY (dr) = false;
1318 DR_RW_UNCONDITIONALLY (dr) = false;
1319 DR_W_UNCONDITIONALLY (dr) = false;
1320 IFC_DR (dr)->rw_predicate = boolean_false_node;
1321 IFC_DR (dr)->w_predicate = boolean_false_node;
1322 IFC_DR (dr)->base_w_predicate = boolean_false_node;
1323 if (gimple_uid (DR_STMT (dr)) == 0)
1324 gimple_set_uid (DR_STMT (dr), i + 1);
1325
1326 /* If DR doesn't have innermost loop behavior or it's a compound
1327 memory reference, we synthesize its innermost loop behavior
1328 for hashing. */
1329 if (TREE_CODE (ref) == COMPONENT_REF
1330 || TREE_CODE (ref) == IMAGPART_EXPR
1331 || TREE_CODE (ref) == REALPART_EXPR
1332 || !(DR_BASE_ADDRESS (dr) || DR_OFFSET (dr)
1333 || DR_INIT (dr) || DR_STEP (dr)))
1334 {
1335 while (TREE_CODE (ref) == COMPONENT_REF
1336 || TREE_CODE (ref) == IMAGPART_EXPR
1337 || TREE_CODE (ref) == REALPART_EXPR)
1338 ref = TREE_OPERAND (ref, 0);
1339
1340 DR_BASE_ADDRESS (dr) = ref;
1341 DR_OFFSET (dr) = NULL;
1342 DR_INIT (dr) = NULL;
1343 DR_STEP (dr) = NULL;
1344 DR_ALIGNED_TO (dr) = NULL;
1345 }
1346 hash_memrefs_baserefs_and_store_DRs_read_written_info (dr);
1347 }
1348
1349 for (i = 0; i < loop->num_nodes; i++)
1350 {
1351 basic_block bb = ifc_bbs[i];
1352 gimple_stmt_iterator itr;
1353
1354 /* Check the if-convertibility of statements in predicated BBs. */
1355 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
1356 for (itr = gsi_start_bb (bb); !gsi_end_p (itr); gsi_next (&itr))
1357 if (!if_convertible_stmt_p (gsi_stmt (itr), *refs,
1358 any_mask_load_store))
1359 return false;
1360 }
1361
1362 for (i = 0; i < loop->num_nodes; i++)
1363 free_bb_predicate (ifc_bbs[i]);
1364
1365 /* Checking PHIs needs to be done after stmts, as the fact whether there
1366 are any masked loads or stores affects the tests. */
1367 for (i = 0; i < loop->num_nodes; i++)
1368 {
1369 basic_block bb = ifc_bbs[i];
1370 gphi_iterator itr;
1371
1372 for (itr = gsi_start_phis (bb); !gsi_end_p (itr); gsi_next (&itr))
1373 if (!if_convertible_phi_p (loop, bb, itr.phi ()))
1374 return false;
1375 }
1376
1377 if (dump_file)
1378 fprintf (dump_file, "Applying if-conversion\n");
1379
1380 return true;
1381 }
1382
1383 /* Return true when LOOP is if-convertible.
1384 LOOP is if-convertible if:
1385 - it is innermost,
1386 - it has two or more basic blocks,
1387 - it has only one exit,
1388 - loop header is not the exit edge,
1389 - if its basic blocks and phi nodes are if convertible. */
1390
1391 static bool
1392 if_convertible_loop_p (struct loop *loop, bool *any_mask_load_store)
1393 {
1394 edge e;
1395 edge_iterator ei;
1396 bool res = false;
1397 vec<data_reference_p> refs;
1398
1399 /* Handle only innermost loop. */
1400 if (!loop || loop->inner)
1401 {
1402 if (dump_file && (dump_flags & TDF_DETAILS))
1403 fprintf (dump_file, "not innermost loop\n");
1404 return false;
1405 }
1406
1407 /* If only one block, no need for if-conversion. */
1408 if (loop->num_nodes <= 2)
1409 {
1410 if (dump_file && (dump_flags & TDF_DETAILS))
1411 fprintf (dump_file, "less than 2 basic blocks\n");
1412 return false;
1413 }
1414
1415 /* More than one loop exit is too much to handle. */
1416 if (!single_exit (loop))
1417 {
1418 if (dump_file && (dump_flags & TDF_DETAILS))
1419 fprintf (dump_file, "multiple exits\n");
1420 return false;
1421 }
1422
1423 /* If one of the loop header's edge is an exit edge then do not
1424 apply if-conversion. */
1425 FOR_EACH_EDGE (e, ei, loop->header->succs)
1426 if (loop_exit_edge_p (loop, e))
1427 return false;
1428
1429 refs.create (5);
1430 res = if_convertible_loop_p_1 (loop, &refs, any_mask_load_store);
1431
1432 data_reference_p dr;
1433 unsigned int i;
1434 for (i = 0; refs.iterate (i, &dr); i++)
1435 free (dr->aux);
1436
1437 free_data_refs (refs);
1438
1439 delete innermost_DR_map;
1440 innermost_DR_map = NULL;
1441
1442 delete baseref_DR_map;
1443 baseref_DR_map = NULL;
1444
1445 return res;
1446 }
1447
1448 /* Returns true if def-stmt for phi argument ARG is simple increment/decrement
1449 which is in predicated basic block.
1450 In fact, the following PHI pattern is searching:
1451 loop-header:
1452 reduc_1 = PHI <..., reduc_2>
1453 ...
1454 if (...)
1455 reduc_3 = ...
1456 reduc_2 = PHI <reduc_1, reduc_3>
1457
1458 ARG_0 and ARG_1 are correspondent PHI arguments.
1459 REDUC, OP0 and OP1 contain reduction stmt and its operands.
1460 EXTENDED is true if PHI has > 2 arguments. */
1461
1462 static bool
1463 is_cond_scalar_reduction (gimple *phi, gimple **reduc, tree arg_0, tree arg_1,
1464 tree *op0, tree *op1, bool extended)
1465 {
1466 tree lhs, r_op1, r_op2;
1467 gimple *stmt;
1468 gimple *header_phi = NULL;
1469 enum tree_code reduction_op;
1470 basic_block bb = gimple_bb (phi);
1471 struct loop *loop = bb->loop_father;
1472 edge latch_e = loop_latch_edge (loop);
1473 imm_use_iterator imm_iter;
1474 use_operand_p use_p;
1475 edge e;
1476 edge_iterator ei;
1477 bool result = false;
1478 if (TREE_CODE (arg_0) != SSA_NAME || TREE_CODE (arg_1) != SSA_NAME)
1479 return false;
1480
1481 if (!extended && gimple_code (SSA_NAME_DEF_STMT (arg_0)) == GIMPLE_PHI)
1482 {
1483 lhs = arg_1;
1484 header_phi = SSA_NAME_DEF_STMT (arg_0);
1485 stmt = SSA_NAME_DEF_STMT (arg_1);
1486 }
1487 else if (gimple_code (SSA_NAME_DEF_STMT (arg_1)) == GIMPLE_PHI)
1488 {
1489 lhs = arg_0;
1490 header_phi = SSA_NAME_DEF_STMT (arg_1);
1491 stmt = SSA_NAME_DEF_STMT (arg_0);
1492 }
1493 else
1494 return false;
1495 if (gimple_bb (header_phi) != loop->header)
1496 return false;
1497
1498 if (PHI_ARG_DEF_FROM_EDGE (header_phi, latch_e) != PHI_RESULT (phi))
1499 return false;
1500
1501 if (gimple_code (stmt) != GIMPLE_ASSIGN
1502 || gimple_has_volatile_ops (stmt))
1503 return false;
1504
1505 if (!flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
1506 return false;
1507
1508 if (!is_predicated (gimple_bb (stmt)))
1509 return false;
1510
1511 /* Check that stmt-block is predecessor of phi-block. */
1512 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
1513 if (e->dest == bb)
1514 {
1515 result = true;
1516 break;
1517 }
1518 if (!result)
1519 return false;
1520
1521 if (!has_single_use (lhs))
1522 return false;
1523
1524 reduction_op = gimple_assign_rhs_code (stmt);
1525 if (reduction_op != PLUS_EXPR && reduction_op != MINUS_EXPR)
1526 return false;
1527 r_op1 = gimple_assign_rhs1 (stmt);
1528 r_op2 = gimple_assign_rhs2 (stmt);
1529
1530 /* Make R_OP1 to hold reduction variable. */
1531 if (r_op2 == PHI_RESULT (header_phi)
1532 && reduction_op == PLUS_EXPR)
1533 std::swap (r_op1, r_op2);
1534 else if (r_op1 != PHI_RESULT (header_phi))
1535 return false;
1536
1537 /* Check that R_OP1 is used in reduction stmt or in PHI only. */
1538 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, r_op1)
1539 {
1540 gimple *use_stmt = USE_STMT (use_p);
1541 if (is_gimple_debug (use_stmt))
1542 continue;
1543 if (use_stmt == stmt)
1544 continue;
1545 if (gimple_code (use_stmt) != GIMPLE_PHI)
1546 return false;
1547 }
1548
1549 *op0 = r_op1; *op1 = r_op2;
1550 *reduc = stmt;
1551 return true;
1552 }
1553
1554 /* Converts conditional scalar reduction into unconditional form, e.g.
1555 bb_4
1556 if (_5 != 0) goto bb_5 else goto bb_6
1557 end_bb_4
1558 bb_5
1559 res_6 = res_13 + 1;
1560 end_bb_5
1561 bb_6
1562 # res_2 = PHI <res_13(4), res_6(5)>
1563 end_bb_6
1564
1565 will be converted into sequence
1566 _ifc__1 = _5 != 0 ? 1 : 0;
1567 res_2 = res_13 + _ifc__1;
1568 Argument SWAP tells that arguments of conditional expression should be
1569 swapped.
1570 Returns rhs of resulting PHI assignment. */
1571
1572 static tree
1573 convert_scalar_cond_reduction (gimple *reduc, gimple_stmt_iterator *gsi,
1574 tree cond, tree op0, tree op1, bool swap)
1575 {
1576 gimple_stmt_iterator stmt_it;
1577 gimple *new_assign;
1578 tree rhs;
1579 tree rhs1 = gimple_assign_rhs1 (reduc);
1580 tree tmp = make_temp_ssa_name (TREE_TYPE (rhs1), NULL, "_ifc_");
1581 tree c;
1582 tree zero = build_zero_cst (TREE_TYPE (rhs1));
1583
1584 if (dump_file && (dump_flags & TDF_DETAILS))
1585 {
1586 fprintf (dump_file, "Found cond scalar reduction.\n");
1587 print_gimple_stmt (dump_file, reduc, 0, TDF_SLIM);
1588 }
1589
1590 /* Build cond expression using COND and constant operand
1591 of reduction rhs. */
1592 c = fold_build_cond_expr (TREE_TYPE (rhs1),
1593 unshare_expr (cond),
1594 swap ? zero : op1,
1595 swap ? op1 : zero);
1596
1597 /* Create assignment stmt and insert it at GSI. */
1598 new_assign = gimple_build_assign (tmp, c);
1599 gsi_insert_before (gsi, new_assign, GSI_SAME_STMT);
1600 /* Build rhs for unconditional increment/decrement. */
1601 rhs = fold_build2 (gimple_assign_rhs_code (reduc),
1602 TREE_TYPE (rhs1), op0, tmp);
1603
1604 /* Delete original reduction stmt. */
1605 stmt_it = gsi_for_stmt (reduc);
1606 gsi_remove (&stmt_it, true);
1607 release_defs (reduc);
1608 return rhs;
1609 }
1610
1611 /* Produce condition for all occurrences of ARG in PHI node. */
1612
1613 static tree
1614 gen_phi_arg_condition (gphi *phi, vec<int> *occur,
1615 gimple_stmt_iterator *gsi)
1616 {
1617 int len;
1618 int i;
1619 tree cond = NULL_TREE;
1620 tree c;
1621 edge e;
1622
1623 len = occur->length ();
1624 gcc_assert (len > 0);
1625 for (i = 0; i < len; i++)
1626 {
1627 e = gimple_phi_arg_edge (phi, (*occur)[i]);
1628 c = bb_predicate (e->src);
1629 if (is_true_predicate (c))
1630 continue;
1631 c = force_gimple_operand_gsi_1 (gsi, unshare_expr (c),
1632 is_gimple_condexpr, NULL_TREE,
1633 true, GSI_SAME_STMT);
1634 if (cond != NULL_TREE)
1635 {
1636 /* Must build OR expression. */
1637 cond = fold_or_predicates (EXPR_LOCATION (c), c, cond);
1638 cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
1639 is_gimple_condexpr, NULL_TREE,
1640 true, GSI_SAME_STMT);
1641 }
1642 else
1643 cond = c;
1644 }
1645 gcc_assert (cond != NULL_TREE);
1646 return cond;
1647 }
1648
1649 /* Replace a scalar PHI node with a COND_EXPR using COND as condition.
1650 This routine can handle PHI nodes with more than two arguments.
1651
1652 For example,
1653 S1: A = PHI <x1(1), x2(5)>
1654 is converted into,
1655 S2: A = cond ? x1 : x2;
1656
1657 The generated code is inserted at GSI that points to the top of
1658 basic block's statement list.
1659 If PHI node has more than two arguments a chain of conditional
1660 expression is produced. */
1661
1662
1663 static void
1664 predicate_scalar_phi (gphi *phi, gimple_stmt_iterator *gsi)
1665 {
1666 gimple *new_stmt = NULL, *reduc;
1667 tree rhs, res, arg0, arg1, op0, op1, scev;
1668 tree cond;
1669 unsigned int index0;
1670 unsigned int max, args_len;
1671 edge e;
1672 basic_block bb;
1673 unsigned int i;
1674
1675 res = gimple_phi_result (phi);
1676 if (virtual_operand_p (res))
1677 return;
1678
1679 if ((rhs = degenerate_phi_result (phi))
1680 || ((scev = analyze_scalar_evolution (gimple_bb (phi)->loop_father,
1681 res))
1682 && !chrec_contains_undetermined (scev)
1683 && scev != res
1684 && (rhs = gimple_phi_arg_def (phi, 0))))
1685 {
1686 if (dump_file && (dump_flags & TDF_DETAILS))
1687 {
1688 fprintf (dump_file, "Degenerate phi!\n");
1689 print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
1690 }
1691 new_stmt = gimple_build_assign (res, rhs);
1692 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1693 update_stmt (new_stmt);
1694 return;
1695 }
1696
1697 bb = gimple_bb (phi);
1698 if (EDGE_COUNT (bb->preds) == 2)
1699 {
1700 /* Predicate ordinary PHI node with 2 arguments. */
1701 edge first_edge, second_edge;
1702 basic_block true_bb;
1703 first_edge = EDGE_PRED (bb, 0);
1704 second_edge = EDGE_PRED (bb, 1);
1705 cond = bb_predicate (first_edge->src);
1706 if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
1707 std::swap (first_edge, second_edge);
1708 if (EDGE_COUNT (first_edge->src->succs) > 1)
1709 {
1710 cond = bb_predicate (second_edge->src);
1711 if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
1712 cond = TREE_OPERAND (cond, 0);
1713 else
1714 first_edge = second_edge;
1715 }
1716 else
1717 cond = bb_predicate (first_edge->src);
1718 /* Gimplify the condition to a valid cond-expr conditonal operand. */
1719 cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
1720 is_gimple_condexpr, NULL_TREE,
1721 true, GSI_SAME_STMT);
1722 true_bb = first_edge->src;
1723 if (EDGE_PRED (bb, 1)->src == true_bb)
1724 {
1725 arg0 = gimple_phi_arg_def (phi, 1);
1726 arg1 = gimple_phi_arg_def (phi, 0);
1727 }
1728 else
1729 {
1730 arg0 = gimple_phi_arg_def (phi, 0);
1731 arg1 = gimple_phi_arg_def (phi, 1);
1732 }
1733 if (is_cond_scalar_reduction (phi, &reduc, arg0, arg1,
1734 &op0, &op1, false))
1735 /* Convert reduction stmt into vectorizable form. */
1736 rhs = convert_scalar_cond_reduction (reduc, gsi, cond, op0, op1,
1737 true_bb != gimple_bb (reduc));
1738 else
1739 /* Build new RHS using selected condition and arguments. */
1740 rhs = fold_build_cond_expr (TREE_TYPE (res), unshare_expr (cond),
1741 arg0, arg1);
1742 new_stmt = gimple_build_assign (res, rhs);
1743 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1744 update_stmt (new_stmt);
1745
1746 if (dump_file && (dump_flags & TDF_DETAILS))
1747 {
1748 fprintf (dump_file, "new phi replacement stmt\n");
1749 print_gimple_stmt (dump_file, new_stmt, 0, TDF_SLIM);
1750 }
1751 return;
1752 }
1753
1754 /* Create hashmap for PHI node which contain vector of argument indexes
1755 having the same value. */
1756 bool swap = false;
1757 hash_map<tree_operand_hash, auto_vec<int> > phi_arg_map;
1758 unsigned int num_args = gimple_phi_num_args (phi);
1759 int max_ind = -1;
1760 /* Vector of different PHI argument values. */
1761 auto_vec<tree> args (num_args);
1762
1763 /* Compute phi_arg_map. */
1764 for (i = 0; i < num_args; i++)
1765 {
1766 tree arg;
1767
1768 arg = gimple_phi_arg_def (phi, i);
1769 if (!phi_arg_map.get (arg))
1770 args.quick_push (arg);
1771 phi_arg_map.get_or_insert (arg).safe_push (i);
1772 }
1773
1774 /* Determine element with max number of occurrences. */
1775 max_ind = -1;
1776 max = 1;
1777 args_len = args.length ();
1778 for (i = 0; i < args_len; i++)
1779 {
1780 unsigned int len;
1781 if ((len = phi_arg_map.get (args[i])->length ()) > max)
1782 {
1783 max_ind = (int) i;
1784 max = len;
1785 }
1786 }
1787
1788 /* Put element with max number of occurences to the end of ARGS. */
1789 if (max_ind != -1 && max_ind +1 != (int) args_len)
1790 std::swap (args[args_len - 1], args[max_ind]);
1791
1792 /* Handle one special case when number of arguments with different values
1793 is equal 2 and one argument has the only occurrence. Such PHI can be
1794 handled as if would have only 2 arguments. */
1795 if (args_len == 2 && phi_arg_map.get (args[0])->length () == 1)
1796 {
1797 vec<int> *indexes;
1798 indexes = phi_arg_map.get (args[0]);
1799 index0 = (*indexes)[0];
1800 arg0 = args[0];
1801 arg1 = args[1];
1802 e = gimple_phi_arg_edge (phi, index0);
1803 cond = bb_predicate (e->src);
1804 if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
1805 {
1806 swap = true;
1807 cond = TREE_OPERAND (cond, 0);
1808 }
1809 /* Gimplify the condition to a valid cond-expr conditonal operand. */
1810 cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
1811 is_gimple_condexpr, NULL_TREE,
1812 true, GSI_SAME_STMT);
1813 if (!(is_cond_scalar_reduction (phi, &reduc, arg0 , arg1,
1814 &op0, &op1, true)))
1815 rhs = fold_build_cond_expr (TREE_TYPE (res), unshare_expr (cond),
1816 swap? arg1 : arg0,
1817 swap? arg0 : arg1);
1818 else
1819 /* Convert reduction stmt into vectorizable form. */
1820 rhs = convert_scalar_cond_reduction (reduc, gsi, cond, op0, op1,
1821 swap);
1822 new_stmt = gimple_build_assign (res, rhs);
1823 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1824 update_stmt (new_stmt);
1825 }
1826 else
1827 {
1828 /* Common case. */
1829 vec<int> *indexes;
1830 tree type = TREE_TYPE (gimple_phi_result (phi));
1831 tree lhs;
1832 arg1 = args[1];
1833 for (i = 0; i < args_len; i++)
1834 {
1835 arg0 = args[i];
1836 indexes = phi_arg_map.get (args[i]);
1837 if (i != args_len - 1)
1838 lhs = make_temp_ssa_name (type, NULL, "_ifc_");
1839 else
1840 lhs = res;
1841 cond = gen_phi_arg_condition (phi, indexes, gsi);
1842 rhs = fold_build_cond_expr (type, unshare_expr (cond),
1843 arg0, arg1);
1844 new_stmt = gimple_build_assign (lhs, rhs);
1845 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1846 update_stmt (new_stmt);
1847 arg1 = lhs;
1848 }
1849 }
1850
1851 if (dump_file && (dump_flags & TDF_DETAILS))
1852 {
1853 fprintf (dump_file, "new extended phi replacement stmt\n");
1854 print_gimple_stmt (dump_file, new_stmt, 0, TDF_SLIM);
1855 }
1856 }
1857
1858 /* Replaces in LOOP all the scalar phi nodes other than those in the
1859 LOOP->header block with conditional modify expressions. */
1860
1861 static void
1862 predicate_all_scalar_phis (struct loop *loop)
1863 {
1864 basic_block bb;
1865 unsigned int orig_loop_num_nodes = loop->num_nodes;
1866 unsigned int i;
1867
1868 for (i = 1; i < orig_loop_num_nodes; i++)
1869 {
1870 gphi *phi;
1871 gimple_stmt_iterator gsi;
1872 gphi_iterator phi_gsi;
1873 bb = ifc_bbs[i];
1874
1875 if (bb == loop->header)
1876 continue;
1877
1878 phi_gsi = gsi_start_phis (bb);
1879 if (gsi_end_p (phi_gsi))
1880 continue;
1881
1882 gsi = gsi_after_labels (bb);
1883 while (!gsi_end_p (phi_gsi))
1884 {
1885 phi = phi_gsi.phi ();
1886 predicate_scalar_phi (phi, &gsi);
1887 release_phi_node (phi);
1888 gsi_next (&phi_gsi);
1889 }
1890
1891 set_phi_nodes (bb, NULL);
1892 }
1893 }
1894
1895 /* Insert in each basic block of LOOP the statements produced by the
1896 gimplification of the predicates. */
1897
1898 static void
1899 insert_gimplified_predicates (loop_p loop, bool any_mask_load_store)
1900 {
1901 unsigned int i;
1902
1903 for (i = 0; i < loop->num_nodes; i++)
1904 {
1905 basic_block bb = ifc_bbs[i];
1906 gimple_seq stmts;
1907 if (!is_predicated (bb))
1908 gcc_assert (bb_predicate_gimplified_stmts (bb) == NULL);
1909 if (!is_predicated (bb))
1910 {
1911 /* Do not insert statements for a basic block that is not
1912 predicated. Also make sure that the predicate of the
1913 basic block is set to true. */
1914 reset_bb_predicate (bb);
1915 continue;
1916 }
1917
1918 stmts = bb_predicate_gimplified_stmts (bb);
1919 if (stmts)
1920 {
1921 if (any_mask_load_store)
1922 {
1923 /* Insert the predicate of the BB just after the label,
1924 as the if-conversion of memory writes will use this
1925 predicate. */
1926 gimple_stmt_iterator gsi = gsi_after_labels (bb);
1927 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
1928 }
1929 else
1930 {
1931 /* Insert the predicate of the BB at the end of the BB
1932 as this would reduce the register pressure: the only
1933 use of this predicate will be in successor BBs. */
1934 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1935
1936 if (gsi_end_p (gsi)
1937 || stmt_ends_bb_p (gsi_stmt (gsi)))
1938 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
1939 else
1940 gsi_insert_seq_after (&gsi, stmts, GSI_SAME_STMT);
1941 }
1942
1943 /* Once the sequence is code generated, set it to NULL. */
1944 set_bb_predicate_gimplified_stmts (bb, NULL);
1945 }
1946 }
1947 }
1948
1949 /* Helper function for predicate_mem_writes. Returns index of existent
1950 mask if it was created for given SIZE and -1 otherwise. */
1951
1952 static int
1953 mask_exists (int size, vec<int> vec)
1954 {
1955 unsigned int ix;
1956 int v;
1957 FOR_EACH_VEC_ELT (vec, ix, v)
1958 if (v == size)
1959 return (int) ix;
1960 return -1;
1961 }
1962
1963 /* Predicate each write to memory in LOOP.
1964
1965 This function transforms control flow constructs containing memory
1966 writes of the form:
1967
1968 | for (i = 0; i < N; i++)
1969 | if (cond)
1970 | A[i] = expr;
1971
1972 into the following form that does not contain control flow:
1973
1974 | for (i = 0; i < N; i++)
1975 | A[i] = cond ? expr : A[i];
1976
1977 The original CFG looks like this:
1978
1979 | bb_0
1980 | i = 0
1981 | end_bb_0
1982 |
1983 | bb_1
1984 | if (i < N) goto bb_5 else goto bb_2
1985 | end_bb_1
1986 |
1987 | bb_2
1988 | cond = some_computation;
1989 | if (cond) goto bb_3 else goto bb_4
1990 | end_bb_2
1991 |
1992 | bb_3
1993 | A[i] = expr;
1994 | goto bb_4
1995 | end_bb_3
1996 |
1997 | bb_4
1998 | goto bb_1
1999 | end_bb_4
2000
2001 insert_gimplified_predicates inserts the computation of the COND
2002 expression at the beginning of the destination basic block:
2003
2004 | bb_0
2005 | i = 0
2006 | end_bb_0
2007 |
2008 | bb_1
2009 | if (i < N) goto bb_5 else goto bb_2
2010 | end_bb_1
2011 |
2012 | bb_2
2013 | cond = some_computation;
2014 | if (cond) goto bb_3 else goto bb_4
2015 | end_bb_2
2016 |
2017 | bb_3
2018 | cond = some_computation;
2019 | A[i] = expr;
2020 | goto bb_4
2021 | end_bb_3
2022 |
2023 | bb_4
2024 | goto bb_1
2025 | end_bb_4
2026
2027 predicate_mem_writes is then predicating the memory write as follows:
2028
2029 | bb_0
2030 | i = 0
2031 | end_bb_0
2032 |
2033 | bb_1
2034 | if (i < N) goto bb_5 else goto bb_2
2035 | end_bb_1
2036 |
2037 | bb_2
2038 | if (cond) goto bb_3 else goto bb_4
2039 | end_bb_2
2040 |
2041 | bb_3
2042 | cond = some_computation;
2043 | A[i] = cond ? expr : A[i];
2044 | goto bb_4
2045 | end_bb_3
2046 |
2047 | bb_4
2048 | goto bb_1
2049 | end_bb_4
2050
2051 and finally combine_blocks removes the basic block boundaries making
2052 the loop vectorizable:
2053
2054 | bb_0
2055 | i = 0
2056 | if (i < N) goto bb_5 else goto bb_1
2057 | end_bb_0
2058 |
2059 | bb_1
2060 | cond = some_computation;
2061 | A[i] = cond ? expr : A[i];
2062 | if (i < N) goto bb_5 else goto bb_4
2063 | end_bb_1
2064 |
2065 | bb_4
2066 | goto bb_1
2067 | end_bb_4
2068 */
2069
2070 static void
2071 predicate_mem_writes (loop_p loop)
2072 {
2073 unsigned int i, orig_loop_num_nodes = loop->num_nodes;
2074 auto_vec<int, 1> vect_sizes;
2075 auto_vec<tree, 1> vect_masks;
2076
2077 for (i = 1; i < orig_loop_num_nodes; i++)
2078 {
2079 gimple_stmt_iterator gsi;
2080 basic_block bb = ifc_bbs[i];
2081 tree cond = bb_predicate (bb);
2082 bool swap;
2083 gimple *stmt;
2084 int index;
2085
2086 if (is_true_predicate (cond) || is_false_predicate (cond))
2087 continue;
2088
2089 swap = false;
2090 if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
2091 {
2092 swap = true;
2093 cond = TREE_OPERAND (cond, 0);
2094 }
2095
2096 vect_sizes.truncate (0);
2097 vect_masks.truncate (0);
2098
2099 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2100 if (!gimple_assign_single_p (stmt = gsi_stmt (gsi)))
2101 continue;
2102 else if (gimple_plf (stmt, GF_PLF_2))
2103 {
2104 tree lhs = gimple_assign_lhs (stmt);
2105 tree rhs = gimple_assign_rhs1 (stmt);
2106 tree ref, addr, ptr, mask;
2107 gimple *new_stmt;
2108 gimple_seq stmts = NULL;
2109 int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (lhs)));
2110 ref = TREE_CODE (lhs) == SSA_NAME ? rhs : lhs;
2111 mark_addressable (ref);
2112 addr = force_gimple_operand_gsi (&gsi, build_fold_addr_expr (ref),
2113 true, NULL_TREE, true,
2114 GSI_SAME_STMT);
2115 if (!vect_sizes.is_empty ()
2116 && (index = mask_exists (bitsize, vect_sizes)) != -1)
2117 /* Use created mask. */
2118 mask = vect_masks[index];
2119 else
2120 {
2121 if (COMPARISON_CLASS_P (cond))
2122 mask = gimple_build (&stmts, TREE_CODE (cond),
2123 boolean_type_node,
2124 TREE_OPERAND (cond, 0),
2125 TREE_OPERAND (cond, 1));
2126 else
2127 {
2128 gcc_assert (TREE_CODE (cond) == SSA_NAME);
2129 mask = cond;
2130 }
2131
2132 if (swap)
2133 {
2134 tree true_val
2135 = constant_boolean_node (true, TREE_TYPE (mask));
2136 mask = gimple_build (&stmts, BIT_XOR_EXPR,
2137 TREE_TYPE (mask), mask, true_val);
2138 }
2139 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
2140
2141 mask = ifc_temp_var (TREE_TYPE (mask), mask, &gsi);
2142 /* Save mask and its size for further use. */
2143 vect_sizes.safe_push (bitsize);
2144 vect_masks.safe_push (mask);
2145 }
2146 ptr = build_int_cst (reference_alias_ptr_type (ref),
2147 get_object_alignment (ref));
2148 /* Copy points-to info if possible. */
2149 if (TREE_CODE (addr) == SSA_NAME && !SSA_NAME_PTR_INFO (addr))
2150 copy_ref_info (build2 (MEM_REF, TREE_TYPE (ref), addr, ptr),
2151 ref);
2152 if (TREE_CODE (lhs) == SSA_NAME)
2153 {
2154 new_stmt
2155 = gimple_build_call_internal (IFN_MASK_LOAD, 3, addr,
2156 ptr, mask);
2157 gimple_call_set_lhs (new_stmt, lhs);
2158 }
2159 else
2160 new_stmt
2161 = gimple_build_call_internal (IFN_MASK_STORE, 4, addr, ptr,
2162 mask, rhs);
2163 gsi_replace (&gsi, new_stmt, true);
2164 }
2165 else if (gimple_vdef (stmt))
2166 {
2167 tree lhs = gimple_assign_lhs (stmt);
2168 tree rhs = gimple_assign_rhs1 (stmt);
2169 tree type = TREE_TYPE (lhs);
2170
2171 lhs = ifc_temp_var (type, unshare_expr (lhs), &gsi);
2172 rhs = ifc_temp_var (type, unshare_expr (rhs), &gsi);
2173 if (swap)
2174 std::swap (lhs, rhs);
2175 cond = force_gimple_operand_gsi_1 (&gsi, unshare_expr (cond),
2176 is_gimple_condexpr, NULL_TREE,
2177 true, GSI_SAME_STMT);
2178 rhs = fold_build_cond_expr (type, unshare_expr (cond), rhs, lhs);
2179 gimple_assign_set_rhs1 (stmt, ifc_temp_var (type, rhs, &gsi));
2180 update_stmt (stmt);
2181 }
2182 }
2183 }
2184
2185 /* Remove all GIMPLE_CONDs and GIMPLE_LABELs of all the basic blocks
2186 other than the exit and latch of the LOOP. Also resets the
2187 GIMPLE_DEBUG information. */
2188
2189 static void
2190 remove_conditions_and_labels (loop_p loop)
2191 {
2192 gimple_stmt_iterator gsi;
2193 unsigned int i;
2194
2195 for (i = 0; i < loop->num_nodes; i++)
2196 {
2197 basic_block bb = ifc_bbs[i];
2198
2199 if (bb_with_exit_edge_p (loop, bb)
2200 || bb == loop->latch)
2201 continue;
2202
2203 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
2204 switch (gimple_code (gsi_stmt (gsi)))
2205 {
2206 case GIMPLE_COND:
2207 case GIMPLE_LABEL:
2208 gsi_remove (&gsi, true);
2209 break;
2210
2211 case GIMPLE_DEBUG:
2212 /* ??? Should there be conditional GIMPLE_DEBUG_BINDs? */
2213 if (gimple_debug_bind_p (gsi_stmt (gsi)))
2214 {
2215 gimple_debug_bind_reset_value (gsi_stmt (gsi));
2216 update_stmt (gsi_stmt (gsi));
2217 }
2218 gsi_next (&gsi);
2219 break;
2220
2221 default:
2222 gsi_next (&gsi);
2223 }
2224 }
2225 }
2226
2227 /* Combine all the basic blocks from LOOP into one or two super basic
2228 blocks. Replace PHI nodes with conditional modify expressions. */
2229
2230 static void
2231 combine_blocks (struct loop *loop, bool any_mask_load_store)
2232 {
2233 basic_block bb, exit_bb, merge_target_bb;
2234 unsigned int orig_loop_num_nodes = loop->num_nodes;
2235 unsigned int i;
2236 edge e;
2237 edge_iterator ei;
2238
2239 predicate_bbs (loop);
2240 remove_conditions_and_labels (loop);
2241 insert_gimplified_predicates (loop, any_mask_load_store);
2242 predicate_all_scalar_phis (loop);
2243
2244 if (any_mask_load_store)
2245 predicate_mem_writes (loop);
2246
2247 /* Merge basic blocks: first remove all the edges in the loop,
2248 except for those from the exit block. */
2249 exit_bb = NULL;
2250 bool *predicated = XNEWVEC (bool, orig_loop_num_nodes);
2251 for (i = 0; i < orig_loop_num_nodes; i++)
2252 {
2253 bb = ifc_bbs[i];
2254 predicated[i] = !is_true_predicate (bb_predicate (bb));
2255 free_bb_predicate (bb);
2256 if (bb_with_exit_edge_p (loop, bb))
2257 {
2258 gcc_assert (exit_bb == NULL);
2259 exit_bb = bb;
2260 }
2261 }
2262 gcc_assert (exit_bb != loop->latch);
2263
2264 for (i = 1; i < orig_loop_num_nodes; i++)
2265 {
2266 bb = ifc_bbs[i];
2267
2268 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei));)
2269 {
2270 if (e->src == exit_bb)
2271 ei_next (&ei);
2272 else
2273 remove_edge (e);
2274 }
2275 }
2276
2277 if (exit_bb != NULL)
2278 {
2279 if (exit_bb != loop->header)
2280 {
2281 /* Connect this node to loop header. */
2282 make_edge (loop->header, exit_bb, EDGE_FALLTHRU);
2283 set_immediate_dominator (CDI_DOMINATORS, exit_bb, loop->header);
2284 }
2285
2286 /* Redirect non-exit edges to loop->latch. */
2287 FOR_EACH_EDGE (e, ei, exit_bb->succs)
2288 {
2289 if (!loop_exit_edge_p (loop, e))
2290 redirect_edge_and_branch (e, loop->latch);
2291 }
2292 set_immediate_dominator (CDI_DOMINATORS, loop->latch, exit_bb);
2293 }
2294 else
2295 {
2296 /* If the loop does not have an exit, reconnect header and latch. */
2297 make_edge (loop->header, loop->latch, EDGE_FALLTHRU);
2298 set_immediate_dominator (CDI_DOMINATORS, loop->latch, loop->header);
2299 }
2300
2301 merge_target_bb = loop->header;
2302 for (i = 1; i < orig_loop_num_nodes; i++)
2303 {
2304 gimple_stmt_iterator gsi;
2305 gimple_stmt_iterator last;
2306
2307 bb = ifc_bbs[i];
2308
2309 if (bb == exit_bb || bb == loop->latch)
2310 continue;
2311
2312 /* Make stmts member of loop->header and clear range info from all stmts
2313 in BB which is now no longer executed conditional on a predicate we
2314 could have derived it from. */
2315 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2316 {
2317 gimple *stmt = gsi_stmt (gsi);
2318 gimple_set_bb (stmt, merge_target_bb);
2319 if (predicated[i])
2320 {
2321 ssa_op_iter i;
2322 tree op;
2323 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
2324 reset_flow_sensitive_info (op);
2325 }
2326 }
2327
2328 /* Update stmt list. */
2329 last = gsi_last_bb (merge_target_bb);
2330 gsi_insert_seq_after (&last, bb_seq (bb), GSI_NEW_STMT);
2331 set_bb_seq (bb, NULL);
2332
2333 delete_basic_block (bb);
2334 }
2335
2336 /* If possible, merge loop header to the block with the exit edge.
2337 This reduces the number of basic blocks to two, to please the
2338 vectorizer that handles only loops with two nodes. */
2339 if (exit_bb
2340 && exit_bb != loop->header
2341 && can_merge_blocks_p (loop->header, exit_bb))
2342 merge_blocks (loop->header, exit_bb);
2343
2344 free (ifc_bbs);
2345 ifc_bbs = NULL;
2346 free (predicated);
2347 }
2348
2349 /* Version LOOP before if-converting it; the original loop
2350 will be if-converted, the new copy of the loop will not,
2351 and the LOOP_VECTORIZED internal call will be guarding which
2352 loop to execute. The vectorizer pass will fold this
2353 internal call into either true or false. */
2354
2355 static bool
2356 version_loop_for_if_conversion (struct loop *loop)
2357 {
2358 basic_block cond_bb;
2359 tree cond = make_ssa_name (boolean_type_node);
2360 struct loop *new_loop;
2361 gimple *g;
2362 gimple_stmt_iterator gsi;
2363
2364 g = gimple_build_call_internal (IFN_LOOP_VECTORIZED, 2,
2365 build_int_cst (integer_type_node, loop->num),
2366 integer_zero_node);
2367 gimple_call_set_lhs (g, cond);
2368
2369 initialize_original_copy_tables ();
2370 new_loop = loop_version (loop, cond, &cond_bb,
2371 REG_BR_PROB_BASE, REG_BR_PROB_BASE,
2372 REG_BR_PROB_BASE, true);
2373 free_original_copy_tables ();
2374 if (new_loop == NULL)
2375 return false;
2376 new_loop->dont_vectorize = true;
2377 new_loop->force_vectorize = false;
2378 gsi = gsi_last_bb (cond_bb);
2379 gimple_call_set_arg (g, 1, build_int_cst (integer_type_node, new_loop->num));
2380 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2381 update_ssa (TODO_update_ssa);
2382 return true;
2383 }
2384
2385 /* Performs splitting of critical edges if aggressive_if_conv is true.
2386 Returns false if loop won't be if converted and true otherwise. */
2387
2388 static bool
2389 ifcvt_split_critical_edges (struct loop *loop)
2390 {
2391 basic_block *body;
2392 basic_block bb;
2393 unsigned int num = loop->num_nodes;
2394 unsigned int i;
2395 gimple *stmt;
2396 edge e;
2397 edge_iterator ei;
2398
2399 if (num <= 2)
2400 return false;
2401 if (loop->inner)
2402 return false;
2403 if (!single_exit (loop))
2404 return false;
2405
2406 body = get_loop_body (loop);
2407 for (i = 0; i < num; i++)
2408 {
2409 bb = body[i];
2410 if (bb == loop->latch
2411 || bb_with_exit_edge_p (loop, bb))
2412 continue;
2413 stmt = last_stmt (bb);
2414 /* Skip basic blocks not ending with conditional branch. */
2415 if (!(stmt && gimple_code (stmt) == GIMPLE_COND))
2416 continue;
2417 FOR_EACH_EDGE (e, ei, bb->succs)
2418 if (EDGE_CRITICAL_P (e) && e->dest->loop_father == loop)
2419 split_edge (e);
2420 }
2421 free (body);
2422 return true;
2423 }
2424
2425 /* Assumes that lhs of DEF_STMT have multiple uses.
2426 Delete one use by (1) creation of copy DEF_STMT with
2427 unique lhs; (2) change original use of lhs in one
2428 use statement with newly created lhs. */
2429
2430 static void
2431 ifcvt_split_def_stmt (gimple *def_stmt, gimple *use_stmt)
2432 {
2433 tree var;
2434 tree lhs;
2435 gimple *copy_stmt;
2436 gimple_stmt_iterator gsi;
2437 use_operand_p use_p;
2438 imm_use_iterator imm_iter;
2439
2440 var = gimple_assign_lhs (def_stmt);
2441 copy_stmt = gimple_copy (def_stmt);
2442 lhs = make_temp_ssa_name (TREE_TYPE (var), NULL, "_ifc_");
2443 gimple_assign_set_lhs (copy_stmt, lhs);
2444 SSA_NAME_DEF_STMT (lhs) = copy_stmt;
2445 /* Insert copy of DEF_STMT. */
2446 gsi = gsi_for_stmt (def_stmt);
2447 gsi_insert_after (&gsi, copy_stmt, GSI_SAME_STMT);
2448 /* Change use of var to lhs in use_stmt. */
2449 if (dump_file && (dump_flags & TDF_DETAILS))
2450 {
2451 fprintf (dump_file, "Change use of var ");
2452 print_generic_expr (dump_file, var, TDF_SLIM);
2453 fprintf (dump_file, " to ");
2454 print_generic_expr (dump_file, lhs, TDF_SLIM);
2455 fprintf (dump_file, "\n");
2456 }
2457 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, var)
2458 {
2459 if (USE_STMT (use_p) != use_stmt)
2460 continue;
2461 SET_USE (use_p, lhs);
2462 break;
2463 }
2464 }
2465
2466 /* Traverse bool pattern recursively starting from VAR.
2467 Save its def and use statements to defuse_list if VAR does
2468 not have single use. */
2469
2470 static void
2471 ifcvt_walk_pattern_tree (tree var, vec<gimple *> *defuse_list,
2472 gimple *use_stmt)
2473 {
2474 tree rhs1, rhs2;
2475 enum tree_code code;
2476 gimple *def_stmt;
2477
2478 def_stmt = SSA_NAME_DEF_STMT (var);
2479 if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
2480 return;
2481 if (!has_single_use (var))
2482 {
2483 /* Put def and use stmts into defuse_list. */
2484 defuse_list->safe_push (def_stmt);
2485 defuse_list->safe_push (use_stmt);
2486 if (dump_file && (dump_flags & TDF_DETAILS))
2487 {
2488 fprintf (dump_file, "Multiple lhs uses in stmt\n");
2489 print_gimple_stmt (dump_file, def_stmt, 0, TDF_SLIM);
2490 }
2491 }
2492 rhs1 = gimple_assign_rhs1 (def_stmt);
2493 code = gimple_assign_rhs_code (def_stmt);
2494 switch (code)
2495 {
2496 case SSA_NAME:
2497 ifcvt_walk_pattern_tree (rhs1, defuse_list, def_stmt);
2498 break;
2499 CASE_CONVERT:
2500 if ((TYPE_PRECISION (TREE_TYPE (rhs1)) != 1
2501 || !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
2502 && TREE_CODE (TREE_TYPE (rhs1)) != BOOLEAN_TYPE)
2503 break;
2504 ifcvt_walk_pattern_tree (rhs1, defuse_list, def_stmt);
2505 break;
2506 case BIT_NOT_EXPR:
2507 ifcvt_walk_pattern_tree (rhs1, defuse_list, def_stmt);
2508 break;
2509 case BIT_AND_EXPR:
2510 case BIT_IOR_EXPR:
2511 case BIT_XOR_EXPR:
2512 ifcvt_walk_pattern_tree (rhs1, defuse_list, def_stmt);
2513 rhs2 = gimple_assign_rhs2 (def_stmt);
2514 ifcvt_walk_pattern_tree (rhs2, defuse_list, def_stmt);
2515 break;
2516 default:
2517 break;
2518 }
2519 return;
2520 }
2521
2522 /* Returns true if STMT can be a root of bool pattern applied
2523 by vectorizer. */
2524
2525 static bool
2526 stmt_is_root_of_bool_pattern (gimple *stmt)
2527 {
2528 enum tree_code code;
2529 tree lhs, rhs;
2530
2531 code = gimple_assign_rhs_code (stmt);
2532 if (CONVERT_EXPR_CODE_P (code))
2533 {
2534 lhs = gimple_assign_lhs (stmt);
2535 rhs = gimple_assign_rhs1 (stmt);
2536 if (TREE_CODE (TREE_TYPE (rhs)) != BOOLEAN_TYPE)
2537 return false;
2538 if (TREE_CODE (TREE_TYPE (lhs)) == BOOLEAN_TYPE)
2539 return false;
2540 return true;
2541 }
2542 else if (code == COND_EXPR)
2543 {
2544 rhs = gimple_assign_rhs1 (stmt);
2545 if (TREE_CODE (rhs) != SSA_NAME)
2546 return false;
2547 return true;
2548 }
2549 return false;
2550 }
2551
2552 /* Traverse all statements in BB which correspond to loop header to
2553 find out all statements which can start bool pattern applied by
2554 vectorizer and convert multiple uses in it to conform pattern
2555 restrictions. Such case can occur if the same predicate is used both
2556 for phi node conversion and load/store mask. */
2557
2558 static void
2559 ifcvt_repair_bool_pattern (basic_block bb)
2560 {
2561 tree rhs;
2562 gimple *stmt;
2563 gimple_stmt_iterator gsi;
2564 vec<gimple *> defuse_list = vNULL;
2565 vec<gimple *> pattern_roots = vNULL;
2566 bool repeat = true;
2567 int niter = 0;
2568 unsigned int ix;
2569
2570 /* Collect all root pattern statements. */
2571 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2572 {
2573 stmt = gsi_stmt (gsi);
2574 if (gimple_code (stmt) != GIMPLE_ASSIGN)
2575 continue;
2576 if (!stmt_is_root_of_bool_pattern (stmt))
2577 continue;
2578 pattern_roots.safe_push (stmt);
2579 }
2580
2581 if (pattern_roots.is_empty ())
2582 return;
2583
2584 /* Split all statements with multiple uses iteratively since splitting
2585 may create new multiple uses. */
2586 while (repeat)
2587 {
2588 repeat = false;
2589 niter++;
2590 FOR_EACH_VEC_ELT (pattern_roots, ix, stmt)
2591 {
2592 rhs = gimple_assign_rhs1 (stmt);
2593 ifcvt_walk_pattern_tree (rhs, &defuse_list, stmt);
2594 while (defuse_list.length () > 0)
2595 {
2596 repeat = true;
2597 gimple *def_stmt, *use_stmt;
2598 use_stmt = defuse_list.pop ();
2599 def_stmt = defuse_list.pop ();
2600 ifcvt_split_def_stmt (def_stmt, use_stmt);
2601 }
2602
2603 }
2604 }
2605 if (dump_file && (dump_flags & TDF_DETAILS))
2606 fprintf (dump_file, "Repair bool pattern takes %d iterations. \n",
2607 niter);
2608 }
2609
2610 /* Delete redundant statements produced by predication which prevents
2611 loop vectorization. */
2612
2613 static void
2614 ifcvt_local_dce (basic_block bb)
2615 {
2616 gimple *stmt;
2617 gimple *stmt1;
2618 gimple *phi;
2619 gimple_stmt_iterator gsi;
2620 auto_vec<gimple *> worklist;
2621 enum gimple_code code;
2622 use_operand_p use_p;
2623 imm_use_iterator imm_iter;
2624
2625 worklist.create (64);
2626 /* Consider all phi as live statements. */
2627 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2628 {
2629 phi = gsi_stmt (gsi);
2630 gimple_set_plf (phi, GF_PLF_2, true);
2631 worklist.safe_push (phi);
2632 }
2633 /* Consider load/store statements, CALL and COND as live. */
2634 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2635 {
2636 stmt = gsi_stmt (gsi);
2637 if (gimple_store_p (stmt)
2638 || gimple_assign_load_p (stmt)
2639 || is_gimple_debug (stmt))
2640 {
2641 gimple_set_plf (stmt, GF_PLF_2, true);
2642 worklist.safe_push (stmt);
2643 continue;
2644 }
2645 code = gimple_code (stmt);
2646 if (code == GIMPLE_COND || code == GIMPLE_CALL)
2647 {
2648 gimple_set_plf (stmt, GF_PLF_2, true);
2649 worklist.safe_push (stmt);
2650 continue;
2651 }
2652 gimple_set_plf (stmt, GF_PLF_2, false);
2653
2654 if (code == GIMPLE_ASSIGN)
2655 {
2656 tree lhs = gimple_assign_lhs (stmt);
2657 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
2658 {
2659 stmt1 = USE_STMT (use_p);
2660 if (gimple_bb (stmt1) != bb)
2661 {
2662 gimple_set_plf (stmt, GF_PLF_2, true);
2663 worklist.safe_push (stmt);
2664 break;
2665 }
2666 }
2667 }
2668 }
2669 /* Propagate liveness through arguments of live stmt. */
2670 while (worklist.length () > 0)
2671 {
2672 ssa_op_iter iter;
2673 use_operand_p use_p;
2674 tree use;
2675
2676 stmt = worklist.pop ();
2677 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
2678 {
2679 use = USE_FROM_PTR (use_p);
2680 if (TREE_CODE (use) != SSA_NAME)
2681 continue;
2682 stmt1 = SSA_NAME_DEF_STMT (use);
2683 if (gimple_bb (stmt1) != bb
2684 || gimple_plf (stmt1, GF_PLF_2))
2685 continue;
2686 gimple_set_plf (stmt1, GF_PLF_2, true);
2687 worklist.safe_push (stmt1);
2688 }
2689 }
2690 /* Delete dead statements. */
2691 gsi = gsi_start_bb (bb);
2692 while (!gsi_end_p (gsi))
2693 {
2694 stmt = gsi_stmt (gsi);
2695 if (gimple_plf (stmt, GF_PLF_2))
2696 {
2697 gsi_next (&gsi);
2698 continue;
2699 }
2700 if (dump_file && (dump_flags & TDF_DETAILS))
2701 {
2702 fprintf (dump_file, "Delete dead stmt in bb#%d\n", bb->index);
2703 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2704 }
2705 gsi_remove (&gsi, true);
2706 release_defs (stmt);
2707 }
2708 }
2709
2710 /* If-convert LOOP when it is legal. For the moment this pass has no
2711 profitability analysis. Returns non-zero todo flags when something
2712 changed. */
2713
2714 static unsigned int
2715 tree_if_conversion (struct loop *loop)
2716 {
2717 unsigned int todo = 0;
2718 ifc_bbs = NULL;
2719 bool any_mask_load_store = false;
2720
2721 /* Set up aggressive if-conversion for loops marked with simd pragma. */
2722 aggressive_if_conv = loop->force_vectorize;
2723 /* Check either outer loop was marked with simd pragma. */
2724 if (!aggressive_if_conv)
2725 {
2726 struct loop *outer_loop = loop_outer (loop);
2727 if (outer_loop && outer_loop->force_vectorize)
2728 aggressive_if_conv = true;
2729 }
2730
2731 if (aggressive_if_conv)
2732 if (!ifcvt_split_critical_edges (loop))
2733 goto cleanup;
2734
2735 if (!if_convertible_loop_p (loop, &any_mask_load_store)
2736 || !dbg_cnt (if_conversion_tree))
2737 goto cleanup;
2738
2739 if (any_mask_load_store
2740 && ((!flag_tree_loop_vectorize && !loop->force_vectorize)
2741 || loop->dont_vectorize))
2742 goto cleanup;
2743
2744 if (any_mask_load_store && !version_loop_for_if_conversion (loop))
2745 goto cleanup;
2746
2747 /* Now all statements are if-convertible. Combine all the basic
2748 blocks into one huge basic block doing the if-conversion
2749 on-the-fly. */
2750 combine_blocks (loop, any_mask_load_store);
2751
2752 /* Delete dead predicate computations and repair tree correspondent
2753 to bool pattern to delete multiple uses of predicates. */
2754 if (aggressive_if_conv)
2755 {
2756 ifcvt_local_dce (loop->header);
2757 ifcvt_repair_bool_pattern (loop->header);
2758 }
2759
2760 todo |= TODO_cleanup_cfg;
2761 mark_virtual_operands_for_renaming (cfun);
2762 todo |= TODO_update_ssa_only_virtuals;
2763
2764 cleanup:
2765 if (ifc_bbs)
2766 {
2767 unsigned int i;
2768
2769 for (i = 0; i < loop->num_nodes; i++)
2770 free_bb_predicate (ifc_bbs[i]);
2771
2772 free (ifc_bbs);
2773 ifc_bbs = NULL;
2774 }
2775 free_dominance_info (CDI_POST_DOMINATORS);
2776
2777 return todo;
2778 }
2779
2780 /* Tree if-conversion pass management. */
2781
2782 namespace {
2783
2784 const pass_data pass_data_if_conversion =
2785 {
2786 GIMPLE_PASS, /* type */
2787 "ifcvt", /* name */
2788 OPTGROUP_NONE, /* optinfo_flags */
2789 TV_NONE, /* tv_id */
2790 ( PROP_cfg | PROP_ssa ), /* properties_required */
2791 0, /* properties_provided */
2792 0, /* properties_destroyed */
2793 0, /* todo_flags_start */
2794 0, /* todo_flags_finish */
2795 };
2796
2797 class pass_if_conversion : public gimple_opt_pass
2798 {
2799 public:
2800 pass_if_conversion (gcc::context *ctxt)
2801 : gimple_opt_pass (pass_data_if_conversion, ctxt)
2802 {}
2803
2804 /* opt_pass methods: */
2805 virtual bool gate (function *);
2806 virtual unsigned int execute (function *);
2807
2808 }; // class pass_if_conversion
2809
2810 bool
2811 pass_if_conversion::gate (function *fun)
2812 {
2813 return (((flag_tree_loop_vectorize || fun->has_force_vectorize_loops)
2814 && flag_tree_loop_if_convert != 0)
2815 || flag_tree_loop_if_convert == 1
2816 || flag_tree_loop_if_convert_stores == 1);
2817 }
2818
2819 unsigned int
2820 pass_if_conversion::execute (function *fun)
2821 {
2822 struct loop *loop;
2823 unsigned todo = 0;
2824
2825 if (number_of_loops (fun) <= 1)
2826 return 0;
2827
2828 FOR_EACH_LOOP (loop, 0)
2829 if (flag_tree_loop_if_convert == 1
2830 || flag_tree_loop_if_convert_stores == 1
2831 || ((flag_tree_loop_vectorize || loop->force_vectorize)
2832 && !loop->dont_vectorize))
2833 todo |= tree_if_conversion (loop);
2834
2835 if (flag_checking)
2836 {
2837 basic_block bb;
2838 FOR_EACH_BB_FN (bb, fun)
2839 gcc_assert (!bb->aux);
2840 }
2841
2842 return todo;
2843 }
2844
2845 } // anon namespace
2846
2847 gimple_opt_pass *
2848 make_pass_if_conversion (gcc::context *ctxt)
2849 {
2850 return new pass_if_conversion (ctxt);
2851 }