/* Loop Vectorization
   Copyright (C) 2003-2013 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com> and
   Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "stor-layout.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-pass.h"
#include "diagnostic-core.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
/* Loop Vectorization Pass.

   This pass tries to vectorize loops.

   For example, the vectorizer transforms the following simple loop:

        short a[N]; short b[N]; short c[N]; int i;

        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   as if it was manually vectorized by rewriting the source code into:

        typedef int __attribute__((mode(V8HI))) v8hi;
        short a[N]; short b[N]; short c[N]; int i;
        v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
        v8hi va, vb, vc;

        for (i=0; i<N/8; i++){
          vb = pb[i];
          vc = pc[i];
          va = vb + vc;
          pa[i] = va;
        }
   The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   had successfully passed the analysis phase.

   Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMES), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFS whose base is an array DECL
   (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
   accesses are required to have a simple (consecutive) access pattern.
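   (An illustrative aside, not text from the original comment: an access
   like a[i] that advances by one element per iteration is the simple
   consecutive pattern referred to above, whereas a strided access such
   as a[2*i] or an indirect access like a[b[i]] does not qualify under
   this description.)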
   Analysis phase:
   ===============
   The driver for the analysis phase is vect_analyze_loop().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

   During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.
   Transformation phase:
   =====================
   The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer will be used for the vectorization of
   following stmts which use the def of stmt S.  Stmt S is removed if it
   writes to memory; otherwise, we rely on dead code elimination for
   removing it.
   For example, say stmt S1 was vectorized into stmt VS1:

   VS1: vb = px[i];
   S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

   VS1: vb = px[i];
   S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   VS2: va = vb;
   S2:  a = b;          STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
   Operands that are not SSA_NAMEs are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.
   Target modeling:
   =================
   Currently the only target specific information that is used is the
   size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
   Targets that can support different sizes of vectors will, for now, need
   to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".  More
   flexibility will be added in the future.
   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported, the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g., optab_handler (add_optab, V8HImode)).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.
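   (An illustrative sketch of such a check, not code from this file:

        if (optab_handler (add_optab, V8HImode) == CODE_FOR_nothing)
          return false;   /- no target support; stmt not vectorizable -/

   i.e. the optab lookup either yields an insn code the backend can emit,
   or CODE_FOR_nothing, which makes the stmt unvectorizable.)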
   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/
static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
/* Function vect_determine_vectorization_factor

   Determine the vectorization factor (VF).  VF is the number of data
   elements that are operated upon in parallel in a single iteration of the
   vectorized loop.  For example, when vectorizing a loop that operates on
   4-byte elements, on a target with vector size (VS) of 16 bytes, the VF is
   set to 4, since 4 elements can fit in a single vector register.

   We currently support vectorization of loops in which all types operated
   upon are of the same size.  Therefore this function currently sets VF
   according to the size of the types operated upon, and fails if there are
   multiple sizes in the loop.

   VF is also the factor by which the loop iterations are strip-mined, e.g.:
   original loop:
        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   vectorized loop:
        for (i=0; i<N; i+=VF){
          a[i:VF] = b[i:VF] + c[i:VF];
        }
*/
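/* Worked example (illustrative, not from the original sources): with
   16-byte vectors, a loop over "short" (2-byte) elements gets VF = 8,
   a loop over "int" (4-byte) elements gets VF = 4, and a loop mixing
   both element sizes is rejected by this function.  */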
static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  tree scalar_type;
  gimple phi;
  tree vectype;
  unsigned int nunits;
  stmt_vec_info stmt_info;
  int i;
  HOST_WIDE_INT dummy;
  gimple stmt, pattern_stmt = NULL;
  gimple_seq pattern_def_seq = NULL;
  gimple_stmt_iterator pattern_def_si = gsi_none ();
  bool analyze_pattern_stmt = false;
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_determine_vectorization_factor ===\n");
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (phi);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
              dump_printf (MSG_NOTE, "\n");
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
              scalar_type = TREE_TYPE (PHI_RESULT (phi));

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "get vectype for scalar type: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
                  dump_printf (MSG_NOTE, "\n");
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "not vectorized: unsupported "
                                       "data-type ");
                      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                         scalar_type);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
                  dump_printf (MSG_NOTE, "\n");
                }

              nunits = TYPE_VECTOR_SUBPARTS (vectype);
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n",
                                 nunits);

              if (!vectorization_factor
                  || (nunits > vectorization_factor))
                vectorization_factor = nunits;
            }
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si) || analyze_pattern_stmt;)
        {
          tree vf_vectype;

          if (analyze_pattern_stmt)
            stmt = pattern_stmt;
          else
            stmt = gsi_stmt (si);

          stmt_info = vinfo_for_stmt (stmt);

          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "==> examining statement: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
              dump_printf (MSG_NOTE, "\n");
            }

          gcc_assert (stmt_info);

          /* Skip stmts which do not need to be vectorized.  */
          if ((!STMT_VINFO_RELEVANT_P (stmt_info)
               && !STMT_VINFO_LIVE_P (stmt_info))
              || gimple_clobber_p (stmt))
            {
              if (STMT_VINFO_IN_PATTERN_P (stmt_info)
                  && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
                  && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
                      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
                {
                  stmt = pattern_stmt;
                  stmt_info = vinfo_for_stmt (pattern_stmt);
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_NOTE, vect_location,
                                       "==> examining pattern statement: ");
                      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
                      dump_printf (MSG_NOTE, "\n");
                    }
                }
              else
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
                  gsi_next (&si);
                  continue;
                }
            }
          else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
                   && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
                   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
                       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
            analyze_pattern_stmt = true;

          /* If a pattern statement has def stmts, analyze them too.  */
          if (is_pattern_stmt_p (stmt_info))
            {
              if (pattern_def_seq == NULL)
                {
                  pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
                  pattern_def_si = gsi_start (pattern_def_seq);
                }
              else if (!gsi_end_p (pattern_def_si))
                gsi_next (&pattern_def_si);
              if (pattern_def_seq != NULL)
                {
                  gimple pattern_def_stmt = NULL;
                  stmt_vec_info pattern_def_stmt_info = NULL;

                  while (!gsi_end_p (pattern_def_si))
                    {
                      pattern_def_stmt = gsi_stmt (pattern_def_si);
                      pattern_def_stmt_info
                        = vinfo_for_stmt (pattern_def_stmt);
                      if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
                          || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
                        break;
                      gsi_next (&pattern_def_si);
                    }

                  if (!gsi_end_p (pattern_def_si))
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_NOTE, vect_location,
                                           "==> examining pattern def stmt: ");
                          dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
                                            pattern_def_stmt, 0);
                          dump_printf (MSG_NOTE, "\n");
                        }

                      stmt = pattern_def_stmt;
                      stmt_info = pattern_def_stmt_info;
                    }
                  else
                    {
                      pattern_def_si = gsi_none ();
                      analyze_pattern_stmt = false;
                    }
                }
              else
                analyze_pattern_stmt = false;
            }
          if (gimple_get_lhs (stmt) == NULL_TREE)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: irregular stmt.");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
                                    0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }
          if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: vector stmt in loop:");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }
          if (STMT_VINFO_VECTYPE (stmt_info))
            {
              /* The only case when a vectype had been already set is for stmts
                 that contain a dataref, or for "pattern-stmts" (stmts
                 generated by the vectorizer to represent/replace a certain
                 idiom).  */
              gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
                          || is_pattern_stmt_p (stmt_info)
                          || !gsi_end_p (pattern_def_si));
              vectype = STMT_VINFO_VECTYPE (stmt_info);
            }
          else
            {
              gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
              scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "get vectype for scalar type: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
                  dump_printf (MSG_NOTE, "\n");
                }
              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "not vectorized: unsupported "
                                       "data-type ");
                      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                         scalar_type);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }

              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
                  dump_printf (MSG_NOTE, "\n");
                }
            }
          /* The vectorization factor is according to the smallest
             scalar type (or the largest vector size, but we only
             support one vector size per loop).  */
          scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
                                                       &dummy);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "get vectype for scalar type: ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
              dump_printf (MSG_NOTE, "\n");
            }
          vf_vectype = get_vectype_for_scalar_type (scalar_type);
          if (!vf_vectype)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: unsupported data-type ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                     scalar_type);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }

          if ((GET_MODE_SIZE (TYPE_MODE (vectype))
               != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: different sized vector "
                                   "types in statement, ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                     vectype);
                  dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                     vf_vectype);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }

          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, vf_vectype);
              dump_printf (MSG_NOTE, "\n");
            }

          nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n", nunits);
          if (!vectorization_factor
              || (nunits > vectorization_factor))
            vectorization_factor = nunits;

          if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
            {
              pattern_def_seq = NULL;
              gsi_next (&si);
            }
        }
    }
  /* TODO: Analyze cost.  Decide if worth while to vectorize.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = %d\n",
                     vectorization_factor);
  if (vectorization_factor <= 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unsupported data-type\n");
      return false;
    }
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  return true;
}
/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution.  */
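/* Illustrative note (not from the original sources): for an IV such as
   "for (i = 0; i < n; i++)" the scalar evolution analyzer reports the
   access function as the chrec {0, +, 1}_1, i.e. initial value 0 and
   step 1 in loop number 1; the step extracted below is that "+ 1" part,
   and the init is the "0" part.  */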
static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
                             tree * step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
  basic_block bb;

  /* When there is no evolution in this loop, the evolution function
     is not "simple".  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;

  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn,
                                                           loop_nb));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "step: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
      dump_printf (MSG_NOTE, ",  init: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
      dump_printf (MSG_NOTE, "\n");
    }

  *init = init_expr;
  *step = step_expr;

  if (TREE_CODE (step_expr) != INTEGER_CST
      && (TREE_CODE (step_expr) != SSA_NAME
          || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
              && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
          || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
              && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
                  || !flag_associative_math)))
      && (TREE_CODE (step_expr) != REAL_CST
          || !flag_associative_math))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "step unknown.\n");
      return false;
    }

  return true;
}
/* Function vect_analyze_scalar_cycles_1.

   Examine the cross iteration def-use cycles of scalar variables
   in LOOP.  LOOP_VINFO represents the loop that is now being
   considered for vectorization (can be LOOP, or an outer-loop
   enclosing LOOP).  */
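/* Illustrative note (not from the original sources): a typical induction
   cycle at the loop header looks like

       i_1 = PHI <0 (preheader), i_2 (latch)>
       ...
       i_2 = i_1 + 1;

   while a reduction cycle looks like

       sum_1 = PHI <0 (preheader), sum_2 (latch)>
       ...
       sum_2 = sum_1 + a_3;

   The analysis below classifies each loop-header PHI as one of these
   (or as an unknown cycle).  */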
static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
  basic_block bb = loop->header;
  tree init, step;
  stack_vec<gimple, 64> worklist;
  gimple_stmt_iterator gsi;
  bool double_reduc;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_scalar_cycles ===\n");
  /* First - identify all inductions.  Reduction detection assumes that all the
     inductions have been identified, therefore, this order must not be
     changed.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree access_fn = NULL;
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
          dump_printf (MSG_NOTE, "\n");
        }

      /* Skip virtual phi's.  The data dependences that are associated with
         virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
      if (virtual_operand_p (def))
        continue;

      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;

      /* Analyze the evolution function.  */
      access_fn = analyze_scalar_evolution (loop, def);
      if (access_fn)
        {
          STRIP_NOPS (access_fn);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "Access function of PHI: ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
              dump_printf (MSG_NOTE, "\n");
            }
          STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
            = evolution_part_in_loop_num (access_fn, loop->num);
        }

      if (!access_fn
          || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
          || (LOOP_VINFO_LOOP (loop_vinfo) != loop
              && TREE_CODE (step) != INTEGER_CST))
        {
          worklist.safe_push (phi);
          continue;
        }

      gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
    }
  /* Second - identify all reductions and nested cycles.  */
  while (worklist.length () > 0)
    {
      gimple phi = worklist.pop ();
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
      gimple reduc_stmt;
      bool nested_cycle;

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
          dump_printf (MSG_NOTE, "\n");
        }

      gcc_assert (!virtual_operand_p (def)
                  && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);

      nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
      reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, !nested_cycle,
                                                &double_reduc);
      if (reduc_stmt)
        {
          if (double_reduc)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "Detected double reduction.\n");

              STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
              STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                    vect_double_reduction_def;
            }
          else
            {
              if (nested_cycle)
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "Detected vectorizable nested cycle.\n");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                             vect_nested_cycle;
                }
              else
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "Detected reduction.\n");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                           vect_reduction_def;
                  /* Store the reduction cycles for possible vectorization in
                     loop-aware SLP.  */
                  LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt);
                }
            }
        }
      else
        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Unknown def-use cycle pattern.\n");
    }
}
/* Function vect_analyze_scalar_cycles.

   Examine the cross iteration def-use cycles of scalar variables, by
   analyzing the loop-header PHIs of scalar variables.  Classify each
   cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also for its
   inner-loop, if it exists.
   Examples for scalar cycles:

   Example1: reduction:

              loop1:
              for (i=0; i<N; i++)
                 sum += a[i];

   Example2: induction:

              loop2:
              for (i=0; i<N; i++)
                 a[i] = i;  */
static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  vect_analyze_scalar_cycles_1 (loop_vinfo, loop);

  /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
     Reductions in such inner-loop therefore have different properties than
     the reductions in the nest that gets vectorized:
     1. When vectorized, they are executed in the same order as in the original
        scalar loop, so we can't change the order of computation when
        vectorizing them.
     2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
        current checks are too strict.  */

  if (loop->inner)
    vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}
/* Function vect_get_loop_niters.

   Determine how many iterations the loop is executed and place it
   in NUMBER_OF_ITERATIONS.

   Return the loop exit condition.  */
static gimple
vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
{
  tree niters;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== get_loop_niters ===\n");

  niters = number_of_latch_executions (loop);
  /* We want the number of loop header executions which is the number
     of latch executions plus one.
     ??? For UINT_MAX latch executions this number overflows to zero
     for loops like do { n++; } while (n != 0);  */
  if (niters && !chrec_contains_undetermined (niters))
    niters = fold_build2 (PLUS_EXPR, TREE_TYPE (niters), niters,
                          build_int_cst (TREE_TYPE (niters), 1));
  *number_of_iterations = niters;

  return get_loop_exit_condition (loop);
}
/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.  */

static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *)data;
  if (flow_bb_inside_loop_p (loop, bb))
    return true;
  return false;
}
/* Function new_loop_vec_info.

   Create and initialize a new loop_vec_info struct for LOOP, as well as
   stmt_vec_info structs for all the stmts in LOOP.  */
static loop_vec_info
new_loop_vec_info (struct loop *loop)
{
  loop_vec_info res;
  basic_block *bbs;
  gimple_stmt_iterator si;
  unsigned int i, nbbs;

  res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
  LOOP_VINFO_LOOP (res) = loop;

  bbs = get_loop_body (loop);

  /* Create/Update stmt_info for all stmts in the loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      /* BBs in a nested inner-loop will have been already processed (because
         we will have called vect_analyze_loop_form for any nested inner-loop).
         Therefore, for stmts in an inner-loop we just want to update the
         STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
         loop_info of the outer-loop we are currently considering to vectorize
         (instead of the loop_info of the inner-loop).
         For stmts in other BBs we need to create a stmt_info from scratch.  */
      if (bb->loop_father != loop)
        {
          /* Inner-loop bb.  */
          gcc_assert (loop->inner && bb->loop_father == loop->inner);
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (phi);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
        }
      else
        {
          /* bb in current nest.  */
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              gimple_set_uid (phi, 0);
              set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL));
            }

          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              gimple_set_uid (stmt, 0);
              set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL));
            }
        }
    }
  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the same
     as reversed postorder traversal, so we are safe.  */

  free (bbs);
  bbs = XCNEWVEC (basic_block, loop->num_nodes);
  nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
                             bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);
  LOOP_VINFO_BBS (res) = bbs;
  LOOP_VINFO_NITERS (res) = NULL;
  LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
  LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
  LOOP_VINFO_VECTORIZABLE_P (res) = 0;
  LOOP_VINFO_PEELING_FOR_ALIGNMENT (res) = 0;
  LOOP_VINFO_VECT_FACTOR (res) = 0;
  LOOP_VINFO_LOOP_NEST (res).create (3);
  LOOP_VINFO_DATAREFS (res).create (10);
  LOOP_VINFO_DDRS (res).create (10 * 10);
  LOOP_VINFO_UNALIGNED_DR (res) = NULL;
  LOOP_VINFO_MAY_MISALIGN_STMTS (res).create (
             PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
  LOOP_VINFO_MAY_ALIAS_DDRS (res).create (
             PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
  LOOP_VINFO_GROUPED_STORES (res).create (10);
  LOOP_VINFO_REDUCTIONS (res).create (10);
  LOOP_VINFO_REDUCTION_CHAINS (res).create (10);
  LOOP_VINFO_SLP_INSTANCES (res).create (10);
  LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;
  LOOP_VINFO_TARGET_COST_DATA (res) = init_cost (loop);
  LOOP_VINFO_PEELING_FOR_GAPS (res) = false;
  LOOP_VINFO_PEELING_FOR_NITER (res) = false;
  LOOP_VINFO_OPERANDS_SWAPPED (res) = false;

  return res;
}
/* Function destroy_loop_vec_info.

   Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the loop.  */
void
destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
{
  struct loop *loop;
  basic_block *bbs;
  int nbbs;
  gimple_stmt_iterator si;
  int j;
  vec<slp_instance> slp_instances;
  slp_instance instance;
  bool swapped;

  if (!loop_vinfo)
    return;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  bbs = LOOP_VINFO_BBS (loop_vinfo);
  nbbs = clean_stmts ? loop->num_nodes : 0;
  swapped = LOOP_VINFO_OPERANDS_SWAPPED (loop_vinfo);

  for (j = 0; j < nbbs; j++)
    {
      basic_block bb = bbs[j];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        free_stmt_vec_info (gsi_stmt (si));

      for (si = gsi_start_bb (bb); !gsi_end_p (si); )
        {
          gimple stmt = gsi_stmt (si);
          /* We may have broken canonical form by moving a constant
             into RHS1 of a commutative op.  Fix such occurrences.  */
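          /* Illustrative note (not from the original sources): canonical
             GIMPLE puts the constant operand second, e.g. "x_1 + 5", so a
             stmt left as "5 + x_1" by the reduction-detection swap must be
             swapped back before later passes see it.  */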
          if (swapped && is_gimple_assign (stmt))
            {
              enum tree_code code = gimple_assign_rhs_code (stmt);

              if ((code == PLUS_EXPR
                   || code == POINTER_PLUS_EXPR
                   || code == MULT_EXPR)
                  && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
                swap_ssa_operands (stmt,
                                   gimple_assign_rhs1_ptr (stmt),
                                   gimple_assign_rhs2_ptr (stmt));
            }

          /* Free stmt_vec_info.  */
          free_stmt_vec_info (stmt);
          gsi_next (&si);
        }
    }
  free (LOOP_VINFO_BBS (loop_vinfo));
  vect_destroy_datarefs (loop_vinfo, NULL);
  free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
  LOOP_VINFO_LOOP_NEST (loop_vinfo).release ();
  LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).release ();
  LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).release ();
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  FOR_EACH_VEC_ELT (slp_instances, j, instance)
    vect_free_slp_instance (instance);

  LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
  LOOP_VINFO_GROUPED_STORES (loop_vinfo).release ();
  LOOP_VINFO_REDUCTIONS (loop_vinfo).release ();
  LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).release ();

  if (LOOP_VINFO_PEELING_HTAB (loop_vinfo).is_created ())
    LOOP_VINFO_PEELING_HTAB (loop_vinfo).dispose ();

  destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));

  free (loop_vinfo);
  loop->aux = NULL;
}
/* Function vect_analyze_loop_1.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  This is a subset of the analyses applied in
   vect_analyze_loop, to be applied on an inner-loop nested in the loop
   that is now considered for (outer-loop) vectorization.  */
static loop_vec_info
vect_analyze_loop_1 (struct loop *loop)
{
  loop_vec_info loop_vinfo;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "===== analyze_loop_nest_1 =====\n");

  /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.).  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad inner-loop form.\n");
      return NULL;
    }

  return loop_vinfo;
}
/* Function vect_analyze_loop_form.

   Verify that certain CFG restrictions hold, including:
   - the loop has a pre-header
   - the loop has a single entry and exit
   - the loop exit condition is simple enough, and the number of iterations
     can be analyzed (a countable loop).  */
loop_vec_info
vect_analyze_loop_form (struct loop *loop)
{
  loop_vec_info loop_vinfo;
  gimple loop_cond;
  tree number_of_iterations = NULL;
  loop_vec_info inner_loop_vinfo = NULL;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_loop_form ===\n");

  /* Different restrictions apply when we are considering an inner-most loop,
     vs. an outer (nested) loop.
     (FORNOW. May want to relax some of these restrictions in the future).  */

  if (!loop->inner)
    {
      /* Inner-most loop.  We currently require that the number of BBs is
         exactly 2 (the header and latch).  Vectorizable inner-most loops
         look like this:

                        (pre-header)
                           |
                          header <--------+
                           | |            |
                           | +--> latch --+
                           |
                        (exit-bb)  */

      if (loop->num_nodes != 2)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: control flow in loop.\n");
          return NULL;
        }

      if (empty_block_p (loop->header))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: empty loop.\n");
          return NULL;
        }
    }
  else
    {
      struct loop *innerloop = loop->inner;
      edge entryedge;
      /* Nested loop.  We currently require that the loop is doubly-nested,
         contains a single inner loop, and the number of BBs is exactly 5.
         Vectorizable outer-loops look like this:

                        (pre-header)
                           |
                          header <---+
                           |         |
                          inner-loop |
                           |         |
                          tail ------+
                           |
                        (exit-bb)

         The inner-loop has the properties expected of inner-most loops
         as described above.  */

      if ((loop->inner)->inner || (loop->inner)->next)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: multiple nested loops.\n");
          return NULL;
        }

      /* Analyze the inner-loop.  */
      inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
      if (!inner_loop_vinfo)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: Bad inner loop.\n");
          return NULL;
        }

      if (!expr_invariant_in_loop_p (loop,
                                     LOOP_VINFO_NITERS (inner_loop_vinfo)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: inner-loop count not"
                             " invariant.\n");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (loop->num_nodes != 5)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: control flow in loop.\n");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }
      gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
      entryedge = EDGE_PRED (innerloop->header, 0);
      if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
        entryedge = EDGE_PRED (innerloop->header, 1);

      if (entryedge->src != loop->header
          || !single_exit (innerloop)
          || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: unsupported outerloop form.\n");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Considering outer-loop vectorization.\n");
    }
  if (!single_exit (loop)
      || EDGE_COUNT (loop->header->preds) != 2)
    {
      if (dump_enabled_p ())
        {
          if (!single_exit (loop))
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: multiple exits.\n");
          else if (EDGE_COUNT (loop->header->preds) != 2)
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: too many incoming edges.\n");
        }
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }
  /* We assume that the loop exit condition is at the end of the loop. i.e,
     that the loop is represented as a do-while (with a proper if-guard
     before the loop if needed), where the loop header contains all the
     executable statements, and the latch is empty.  */
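  /* Illustrative note (not from the original sources): in this shape a
     source loop "do { body; } while (cond);" has all of "body" and the
     test of "cond" in the loop header, and the latch block contains
     nothing but the back edge to the header.  */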
  if (!empty_block_p (loop->latch)
      || !gimple_seq_empty_p (phi_nodes (loop->latch)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: latch block not empty.\n");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }
  /* Make sure there exists a single-predecessor exit bb:  */
  if (!single_pred_p (single_exit (loop)->dest))
    {
      edge e = single_exit (loop);
      if (!(e->flags & EDGE_ABNORMAL))
        {
          split_loop_exit_edge (e);
          if (dump_enabled_p ())
            dump_printf (MSG_NOTE, "split exit edge.\n");
        }
      else
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: abnormal loop exit edge.\n");
          if (inner_loop_vinfo)
            destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }
    }
  loop_cond = vect_get_loop_niters (loop, &number_of_iterations);
  if (!loop_cond)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: complicated exit condition.\n");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!number_of_iterations
      || chrec_contains_undetermined (number_of_iterations))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: number of iterations cannot be "
                         "computed.\n");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (integer_zerop (number_of_iterations))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: number of iterations = 0.\n");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }
  loop_vinfo = new_loop_vec_info (loop);
  LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "Symbolic number of iterations is ");
          dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
          dump_printf (MSG_NOTE, "\n");
        }
    }

  STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;

  /* CHECKME: May want to keep it around in the future.  */
  if (inner_loop_vinfo)
    destroy_loop_vec_info (inner_loop_vinfo, false);

  gcc_assert (!loop->aux);
  loop->aux = loop_vinfo;
  return loop_vinfo;
}
/* Function vect_analyze_loop_operations.

   Scan the loop stmts and make sure they are all vectorizable.  */
static bool
vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  int i;
  gimple phi;
  stmt_vec_info stmt_info;
  bool need_to_vectorize = false;
  int min_profitable_iters;
  int min_scalar_loop_bound;
  unsigned int th;
  bool only_slp_in_loop = true, ok;
  HOST_WIDE_INT max_niter;
  HOST_WIDE_INT estimated_niter;
  int min_profitable_estimate;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_loop_operations ===\n");

  gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
  vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  if (slp)
    {
      /* If all the stmts in the loop can be SLPed, we perform only SLP, and
         vectorization factor of the loop is the unrolling factor required by
         the SLP instances.  If that unrolling factor is 1, we say, that we
         perform pure SLP on loop - cross iteration parallelism is not
         exploited.  */
      for (i = 0; i < nbbs; i++)
        {
          basic_block bb = bbs[i];
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
              gcc_assert (stmt_info);
              if ((STMT_VINFO_RELEVANT_P (stmt_info)
                   || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
                  && !PURE_SLP_STMT (stmt_info))
                /* STMT needs both SLP and loop-based vectorization.  */
                only_slp_in_loop = false;
            }
        }

      if (only_slp_in_loop)
        vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
      else
        vectorization_factor = least_common_multiple (vectorization_factor,
                                LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));

      LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Updating vectorization factor to %d\n",
                         vectorization_factor);
    }
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          ok = true;

          stmt_info = vinfo_for_stmt (phi);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
              dump_printf (MSG_NOTE, "\n");
            }
          /* Inner-loop loop-closed exit phi in outer-loop vectorization
             (i.e., a phi in the tail of the outer-loop).  */
          if (! is_loop_header_bb_p (bb))
            {
              /* FORNOW: we currently don't support the case that these phis
                 are not used in the outerloop (unless it is double reduction,
                 i.e., this phi is vect_reduction_def), cause this case
                 requires to actually do something here.  */
              if ((!STMT_VINFO_RELEVANT_P (stmt_info)
                   || STMT_VINFO_LIVE_P (stmt_info))
                  && STMT_VINFO_DEF_TYPE (stmt_info)
                     != vect_double_reduction_def)
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "Unsupported loop-closed phi in "
                                     "outer-loop.\n");
                  return false;
                }

              /* If PHI is used in the outer loop, we check that its operand
                 is defined in the inner loop.  */
              if (STMT_VINFO_RELEVANT_P (stmt_info))
                {
                  tree phi_op;
                  gimple op_def_stmt;

                  if (gimple_phi_num_args (phi) != 1)
                    return false;

                  phi_op = PHI_ARG_DEF (phi, 0);
                  if (TREE_CODE (phi_op) != SSA_NAME)
                    return false;

                  op_def_stmt = SSA_NAME_DEF_STMT (phi_op);
                  if (gimple_nop_p (op_def_stmt)
                      || !flow_bb_inside_loop_p (loop, gimple_bb (op_def_stmt))
                      || !vinfo_for_stmt (op_def_stmt))
                    return false;

                  if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
                        != vect_used_in_outer
                      && STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
                           != vect_used_in_outer_by_reduction)
                    return false;
                }

              continue;
            }
          gcc_assert (stmt_info);

          if (STMT_VINFO_LIVE_P (stmt_info))
            {
              /* FORNOW: not yet supported.  */
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "not vectorized: value used after loop.\n");
              return false;
            }

          if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
              && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
            {
              /* A scalar-dependence cycle that we don't support.  */
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "not vectorized: scalar dependence cycle.\n");
              return false;
            }

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              need_to_vectorize = true;
              if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
                ok = vectorizable_induction (phi, NULL, NULL);
            }

          if (!ok)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: relevant phi not "
                                   "supported: ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          if (!gimple_clobber_p (stmt)
              && !vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
            return false;
        }
    } /* bbs */
  /* All operations in the loop are either irrelevant (deal with loop
     control, or dead), or only used outside the loop and can be moved
     out of the loop (e.g. invariants, inductions).  The loop can be
     optimized away by scalar optimizations.  We're better off not
     touching this loop.  */
  if (!need_to_vectorize)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "All the computation can be taken out of the loop.\n");
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: redundant loop. no profit to "
                         "vectorize.\n");
      return false;
    }
  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vectorization_factor = %d, niters = "
                     HOST_WIDE_INT_PRINT_DEC "\n", vectorization_factor,
                     LOOP_VINFO_INT_NITERS (loop_vinfo));

  if ((LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
       && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
      || ((max_niter = max_stmt_executions_int (loop)) != -1
          && (unsigned HOST_WIDE_INT) max_niter < vectorization_factor))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: iteration count too small.\n");
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: iteration count smaller than "
                         "vectorization factor.\n");
      return false;
    }
  /* Analyze cost.  Decide if worth while to vectorize.  */

  /* Once VF is set, SLP costs should be updated since the number of created
     vector stmts depends on VF.  */
  vect_update_slp_costs_according_to_vf (loop_vinfo);

  vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
                                      &min_profitable_estimate);
  LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters;

  if (min_profitable_iters < 0)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: vectorization not profitable.\n");
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: vector version will never be "
                         "profitable.\n");
      return false;
    }
  min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
                            * vectorization_factor) - 1);

  /* Use the cost model only if it is more conservative than user specified
     threshold.  */

  th = (unsigned) min_scalar_loop_bound;
  if (min_profitable_iters
      && (!min_scalar_loop_bound
          || min_profitable_iters > min_scalar_loop_bound))
    th = (unsigned) min_profitable_iters;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: vectorization not profitable.\n");
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "not vectorized: iteration count smaller than user "
                         "specified loop bound parameter or minimum profitable "
                         "iterations (whichever is more conservative).\n");
      return false;
    }
  if ((estimated_niter = estimated_stmt_executions_int (loop)) != -1
      && ((unsigned HOST_WIDE_INT) estimated_niter
          <= MAX (th, (unsigned) min_profitable_estimate)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: estimated iteration count too "
                         "small.\n");
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "not vectorized: estimated iteration count smaller "
                         "than specified loop bound parameter or minimum "
                         "profitable iterations (whichever is more "
                         "conservative).\n");
      return false;
    }

  return true;
}
/* Function vect_analyze_loop_2.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */
static bool
vect_analyze_loop_2 (loop_vec_info loop_vinfo)
{
  bool ok, slp = false;
  int max_vf = MAX_VECTORIZATION_FACTOR;
  int min_vf = 2;
  /* Find all data references in the loop (which correspond to vdefs/vuses)
     and analyze their evolution in the loop.  Also adjust the minimal
     vectorization factor according to the loads and stores.

     FORNOW: Handle only simple, array references, whose
     alignment can be forced, and aligned pointer-references.  */

  ok = vect_analyze_data_refs (loop_vinfo, NULL, &min_vf);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data references.\n");
      return false;
    }

  /* Analyze the access patterns of the data-refs in the loop (consecutive,
     complex, etc.). FORNOW: Only handle consecutive access pattern.  */

  ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data access.\n");
      return false;
    }

  /* Classify all cross-iteration scalar data-flow cycles.
     Cross-iteration cycles caused by virtual phis are analyzed separately.  */

  vect_analyze_scalar_cycles (loop_vinfo);

  vect_pattern_recog (loop_vinfo, NULL);

  /* Data-flow analysis to detect stmts that do not need to be vectorized.  */

  ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "unexpected pattern.\n");
      return false;
    }
  /* Analyze data dependences between the data-refs in the loop
     and adjust the maximum vectorization factor according to
     the dependences.
     FORNOW: fail at the first data dependence that we encounter.  */

  ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
  if (!ok
      || max_vf < min_vf)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data dependence.\n");
      return false;
    }

  ok = vect_determine_vectorization_factor (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "can't determine vectorization factor.\n");
      return false;
    }
  if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data dependence.\n");
      return false;
    }
  /* Analyze the alignment of the data-refs in the loop.
     Fail if a data reference is found that cannot be vectorized.  */

  ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data alignment.\n");
      return false;
    }

  /* Prune the list of ddrs to be tested at run-time by versioning for alias.
     It is important to call pruning after vect_analyze_data_ref_accesses,
     since we use grouping information gathered by interleaving analysis.  */
  ok = vect_prune_runtime_alias_test_list (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "too long list of versioning for alias "
                         "run-time tests.\n");
      return false;
    }

  /* This pass will decide on using loop versioning and/or loop peeling in
     order to enhance the alignment of data references in the loop.  */

  ok = vect_enhance_data_refs_alignment (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data alignment.\n");
      return false;
    }
  /* Check the SLP opportunities in the loop, analyze and build SLP trees.  */
  ok = vect_analyze_slp (loop_vinfo, NULL);
  if (ok)
    {
      /* Decide which possible SLP instances to SLP.  */
      slp = vect_make_slp_decision (loop_vinfo);

      /* Find stmts that need to be both vectorized and SLPed.  */
      vect_detect_hybrid_slp (loop_vinfo);
    }
  else
    return false;

  /* Scan all the operations in the loop and make sure they are
     vectorizable.  */

  ok = vect_analyze_loop_operations (loop_vinfo, slp);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad operation or unsupported loop bound.\n");
      return false;
    }
  /* Decide whether we need to create an epilogue loop to handle
     remaining scalar iterations.  */
  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
    {
      if (ctz_hwi (LOOP_VINFO_INT_NITERS (loop_vinfo)
                   - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo))
          < exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
        LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
    }
  else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
           || (tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
               < (unsigned) exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))))
    LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;

  /* If an epilogue loop is required make sure we can create one.  */
  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
      || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
      if (!vect_can_advance_ivs_p (loop_vinfo)
          || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
                                           single_exit (LOOP_VINFO_LOOP
                                                        (loop_vinfo))))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: can't create required "
                             "epilog loop\n");
          return false;
        }
    }

  return true;
}
/* Function vect_analyze_loop.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */
loop_vec_info
vect_analyze_loop (struct loop *loop)
{
  loop_vec_info loop_vinfo;
  unsigned int vector_sizes;

  /* Autodetect first vector size we try.  */
  current_vector_size = 0;
  vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "===== analyze_loop_nest =====\n");

  if (loop_outer (loop)
      && loop_vec_info_for_loop (loop_outer (loop))
      && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "outer-loop already vectorized.\n");
      return NULL;
    }

  while (1)
    {
      /* Check the CFG characteristics of the loop (nesting, entry/exit).  */
      loop_vinfo = vect_analyze_loop_form (loop);
      if (!loop_vinfo)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "bad loop form.\n");
          return NULL;
        }

      if (vect_analyze_loop_2 (loop_vinfo))
        {
          LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;

          return loop_vinfo;
        }

      destroy_loop_vec_info (loop_vinfo, true);

      vector_sizes &= ~current_vector_size;
      if (vector_sizes == 0
          || current_vector_size == 0)
        return NULL;

      /* Try the next biggest vector size.  */
      current_vector_size = 1 << floor_log2 (vector_sizes);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "***** Re-trying analysis with "
                         "vector size %d\n", current_vector_size);
    }
}
/* Function reduction_code_for_scalar_code

   Input:
   CODE - tree_code of a reduction operation.

   Output:
   REDUC_CODE - the corresponding tree-code to be used to reduce the
      vector of partial results into a single scalar result (which
      will also reside in a vector) or ERROR_MARK if the operation is
      a supported reduction operation, but does not have such a tree-code.

   Return FALSE if CODE currently cannot be vectorized as a reduction.  */
static bool
reduction_code_for_scalar_code (enum tree_code code,
                                enum tree_code *reduc_code)
{
  switch (code)
    {
      case MAX_EXPR:
        *reduc_code = REDUC_MAX_EXPR;
        return true;

      case MIN_EXPR:
        *reduc_code = REDUC_MIN_EXPR;
        return true;

      case PLUS_EXPR:
        *reduc_code = REDUC_PLUS_EXPR;
        return true;

      case MULT_EXPR:
      case MINUS_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
      case BIT_AND_EXPR:
        *reduc_code = ERROR_MARK;
        return true;

      default:
        return false;
    }
}
/* Error reporting helper for vect_is_simple_reduction below.  GIMPLE statement
   STMT is printed with a message MSG.  */

static void
report_vect_op (int msg_type, gimple stmt, const char *msg)
{
  dump_printf_loc (msg_type, vect_location, "%s", msg);
  dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
  dump_printf (msg_type, "\n");
}
/* Detect SLP reduction of the form:

   #a1 = phi <a5, a0>
   a2 = operation (a1)
   a3 = operation (a2)
   a4 = operation (a3)
   a5 = operation (a4)

   #a = phi <a5>

   PHI is the reduction phi node (#a1 = phi <a5, a0> above)
   FIRST_STMT is the first reduction stmt in the chain
   (a2 = operation (a1)).

   Return TRUE if a reduction chain was detected.  */
static bool
vect_is_slp_reduction (loop_vec_info loop_info, gimple phi, gimple first_stmt)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  enum tree_code code;
  gimple current_stmt = NULL, loop_use_stmt = NULL, first, next_stmt;
  stmt_vec_info use_stmt_info, current_stmt_info;
  tree lhs;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  int nloop_uses, size = 0, n_out_of_loop_uses;
  bool found = false;

  if (loop != vect_loop)
    return false;

  lhs = PHI_RESULT (phi);
  code = gimple_assign_rhs_code (first_stmt);
  while (1)
    {
      nloop_uses = 0;
      n_out_of_loop_uses = 0;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
        {
          gimple use_stmt = USE_STMT (use_p);
          if (is_gimple_debug (use_stmt))
            continue;

          use_stmt = USE_STMT (use_p);

          /* Check if we got back to the reduction phi.  */
          if (use_stmt == phi)
            {
              loop_use_stmt = use_stmt;
              found = true;
              break;
            }

          if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
            {
              if (vinfo_for_stmt (use_stmt)
                  && !STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
                {
                  loop_use_stmt = use_stmt;
                  nloop_uses++;
                }
            }
          else
            n_out_of_loop_uses++;

          /* There can be either a single use in the loop or two uses in
             phi nodes.  */
          if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
            return false;
        }
      if (found)
        break;

      /* We reached a statement with no loop uses.  */
      if (nloop_uses == 0)
        return false;

      /* This is a loop exit phi, and we haven't reached the reduction phi.  */
      if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
        return false;

      if (!is_gimple_assign (loop_use_stmt)
          || code != gimple_assign_rhs_code (loop_use_stmt)
          || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
        return false;

      /* Insert USE_STMT into reduction chain.  */
      use_stmt_info = vinfo_for_stmt (loop_use_stmt);
      if (current_stmt)
        {
          current_stmt_info = vinfo_for_stmt (current_stmt);
          GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
          GROUP_FIRST_ELEMENT (use_stmt_info)
            = GROUP_FIRST_ELEMENT (current_stmt_info);
        }
      else
        GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;

      lhs = gimple_assign_lhs (loop_use_stmt);
      current_stmt = loop_use_stmt;
      size++;
    }

  if (!found || loop_use_stmt != phi || size < 2)
    return false;
  /* Swap the operands, if needed, to make the reduction operand be the second
     operand.  */
  lhs = PHI_RESULT (phi);
  next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
  while (next_stmt)
    {
      if (gimple_assign_rhs2 (next_stmt) == lhs)
        {
          tree op = gimple_assign_rhs1 (next_stmt);
          gimple def_stmt = NULL;

          if (TREE_CODE (op) == SSA_NAME)
            def_stmt = SSA_NAME_DEF_STMT (op);

          /* Check that the other def is either defined in the loop
             ("vect_internal_def"), or it's an induction (defined by a
             loop-header phi-node).  */
          if (def_stmt
              && gimple_bb (def_stmt)
              && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
              && (is_gimple_assign (def_stmt)
                  || is_gimple_call (def_stmt)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                       == vect_induction_def
                  || (gimple_code (def_stmt) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                           == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
            {
              lhs = gimple_assign_lhs (next_stmt);
              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
              continue;
            }

          return false;
        }
      else
        {
          tree op = gimple_assign_rhs2 (next_stmt);
          gimple def_stmt = NULL;

          if (TREE_CODE (op) == SSA_NAME)
            def_stmt = SSA_NAME_DEF_STMT (op);

          /* Check that the other def is either defined in the loop
             ("vect_internal_def"), or it's an induction (defined by a
             loop-header phi-node).  */
          if (def_stmt
              && gimple_bb (def_stmt)
              && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
              && (is_gimple_assign (def_stmt)
                  || is_gimple_call (def_stmt)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                       == vect_induction_def
                  || (gimple_code (def_stmt) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                           == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
                  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
                  dump_printf (MSG_NOTE, "\n");
                }

              swap_ssa_operands (next_stmt,
                                 gimple_assign_rhs1_ptr (next_stmt),
                                 gimple_assign_rhs2_ptr (next_stmt));
              update_stmt (next_stmt);

              if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
                LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
            }
          else
            return false;
        }

      lhs = gimple_assign_lhs (next_stmt);
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }
2086 /* Save the chain for further analysis in SLP detection. */
2087 first
= GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt
));
2088 LOOP_VINFO_REDUCTION_CHAINS (loop_info
).safe_push (first
);
2089 GROUP_SIZE (vinfo_for_stmt (first
)) = size
;
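
/* Illustrative sketch (an assumed example, not taken from any testcase):
   the walk above matches an SLP reduction chain such as

       sum_1 = phi <sum_0, sum_4>
       sum_2 = sum_1 + a[i];
       sum_3 = sum_2 + b[i];
       sum_4 = sum_3 + c[i];

   where every statement uses the same reduction code (here PLUS_EXPR) and
   has a single in-loop use, and the chain closes back at the reduction
   phi; SIZE would be 3 and the chain is recorded for SLP analysis.  */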

/* Function vect_is_simple_reduction_1

   (1) Detect a cross-iteration def-use cycle that represents a simple
   reduction computation.  We look for the following pattern:

   loop_header:
     a1 = phi < a0, a2 >
     a3 = ...
     a2 = operation (a3, a1)

   or

   a3 = ...
   loop_header:
     a1 = phi < a0, a2 >
     a2 = operation (a3, a1)

   such that:
   1. operation is commutative and associative and it is safe to
      change the order of the computation (if CHECK_REDUCTION is true)
   2. no uses for a2 in the loop (a2 is used out of the loop)
   3. no uses of a1 in the loop besides the reduction operation
   4. no uses of a1 outside the loop.

   Conditions 1,4 are tested here.
   Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.

   (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
   nested cycles, if CHECK_REDUCTION is false.

   (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
   reductions:

     a1 = phi < a0, a2 >
     inner loop (def of a3)
     a2 = phi < a3 >

   If MODIFY is true it tries also to rework the code in-place to enable
   detection of more reduction patterns.  For the time being we rewrite
   "res -= RHS" into "res += -RHS" when it seems worthwhile.  */
static gimple
vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
                            bool check_reduction, bool *double_reduc,
                            bool modify)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  edge latch_e = loop_latch_edge (loop);
  tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
  gimple def_stmt, def1 = NULL, def2 = NULL;
  enum tree_code orig_code, code;
  tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
  tree type;
  int nloop_uses;
  tree name;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  bool phi_def;

  *double_reduc = false;

  /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization,
     otherwise, we assume outer loop vectorization.  */
  gcc_assert ((check_reduction && loop == vect_loop)
              || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));

  name = PHI_RESULT (phi);
  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
        continue;

      if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "intermediate value used outside loop.\n");

          return NULL;
        }

      if (vinfo_for_stmt (use_stmt)
          && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
        nloop_uses++;
      if (nloop_uses > 1)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "reduction used in loop.\n");
          return NULL;
        }
    }

  if (TREE_CODE (loop_arg) != SSA_NAME)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "reduction: not ssa_name: ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }
      return NULL;
    }

  def_stmt = SSA_NAME_DEF_STMT (loop_arg);
  if (!def_stmt)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "reduction: no def_stmt.\n");
      return NULL;
    }

  if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
    {
      if (dump_enabled_p ())
        {
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
          dump_printf (MSG_NOTE, "\n");
        }
      return NULL;
    }

  if (is_gimple_assign (def_stmt))
    {
      name = gimple_assign_lhs (def_stmt);
      phi_def = false;
    }
  else
    {
      name = PHI_RESULT (def_stmt);
      phi_def = true;
    }

  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
        continue;
      if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
          && vinfo_for_stmt (use_stmt)
          && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
        nloop_uses++;
      if (nloop_uses > 1)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "reduction used in loop.\n");
          return NULL;
        }
    }

  /* If DEF_STMT is a phi node itself, we expect it to have a single argument
     defined in the inner loop.  */
  if (phi_def)
    {
      op1 = PHI_ARG_DEF (def_stmt, 0);

      if (gimple_phi_num_args (def_stmt) != 1
          || TREE_CODE (op1) != SSA_NAME)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "unsupported phi node definition.\n");

          return NULL;
        }

      def1 = SSA_NAME_DEF_STMT (op1);
      if (flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
          && loop->inner
          && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
          && is_gimple_assign (def1))
        {
          if (dump_enabled_p ())
            report_vect_op (MSG_NOTE, def_stmt,
                            "detected double reduction: ");

          *double_reduc = true;
          return def_stmt;
        }

      return NULL;
    }

  code = orig_code = gimple_assign_rhs_code (def_stmt);

  /* We can handle "res -= x[i]", which is non-associative by
     simply rewriting this into "res += -x[i]".  Avoid changing
     gimple instruction for the first simple tests and only do this
     if we're allowed to change code at all.  */
  if (code == MINUS_EXPR
      && modify
      && (op1 = gimple_assign_rhs1 (def_stmt))
      && TREE_CODE (op1) == SSA_NAME
      && SSA_NAME_DEF_STMT (op1) == phi)
    code = PLUS_EXPR;

  if (check_reduction
      && (!commutative_tree_code (code) || !associative_tree_code (code)))
    {
      if (dump_enabled_p ())
        report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                        "reduction: not commutative/associative: ");
      return NULL;
    }

  if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
    {
      if (code != COND_EXPR)
        {
          if (dump_enabled_p ())
            report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                            "reduction: not binary operation: ");

          return NULL;
        }

      op3 = gimple_assign_rhs1 (def_stmt);
      if (COMPARISON_CLASS_P (op3))
        {
          op4 = TREE_OPERAND (op3, 1);
          op3 = TREE_OPERAND (op3, 0);
        }

      op1 = gimple_assign_rhs2 (def_stmt);
      op2 = gimple_assign_rhs3 (def_stmt);

      if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
        {
          if (dump_enabled_p ())
            report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                            "reduction: uses not ssa_names: ");

          return NULL;
        }
    }
  else
    {
      op1 = gimple_assign_rhs1 (def_stmt);
      op2 = gimple_assign_rhs2 (def_stmt);

      if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
        {
          if (dump_enabled_p ())
            report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                            "reduction: uses not ssa_names: ");

          return NULL;
        }
    }

  type = TREE_TYPE (gimple_assign_lhs (def_stmt));
  if ((TREE_CODE (op1) == SSA_NAME
       && !types_compatible_p (type, TREE_TYPE (op1)))
      || (TREE_CODE (op2) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op2)))
      || (op3 && TREE_CODE (op3) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op3)))
      || (op4 && TREE_CODE (op4) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op4))))
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "reduction: multiple types: operation type: ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
          dump_printf (MSG_NOTE, ", operands types: ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM,
                             TREE_TYPE (op1));
          dump_printf (MSG_NOTE, ",");
          dump_generic_expr (MSG_NOTE, TDF_SLIM,
                             TREE_TYPE (op2));
          if (op3)
            {
              dump_printf (MSG_NOTE, ",");
              dump_generic_expr (MSG_NOTE, TDF_SLIM,
                                 TREE_TYPE (op3));
            }

          if (op4)
            {
              dump_printf (MSG_NOTE, ",");
              dump_generic_expr (MSG_NOTE, TDF_SLIM,
                                 TREE_TYPE (op4));
            }
          dump_printf (MSG_NOTE, "\n");
        }

      return NULL;
    }

  /* Check that it's ok to change the order of the computation.
     Generally, when vectorizing a reduction we change the order of the
     computation.  This may change the behavior of the program in some
     cases, so we need to check that this is ok.  One exception is when
     vectorizing an outer-loop: the inner-loop is executed sequentially,
     and therefore vectorizing reductions in the inner-loop during
     outer-loop vectorization is safe.  */

  /* CHECKME: check for !flag_finite_math_only too?  */
  if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
      && check_reduction)
    {
      /* Changing the order of operations changes the semantics.  */
      if (dump_enabled_p ())
        report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                        "reduction: unsafe fp math optimization: ");
      return NULL;
    }
  else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
           && check_reduction)
    {
      /* Changing the order of operations changes the semantics.  */
      if (dump_enabled_p ())
        report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                        "reduction: unsafe int math optimization: ");
      return NULL;
    }
  else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction)
    {
      /* Changing the order of operations changes the semantics.  */
      if (dump_enabled_p ())
        report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                        "reduction: unsafe fixed-point math optimization: ");
      return NULL;
    }

  /* If we detected "res -= x[i]" earlier, rewrite it into
     "res += -x[i]" now.  If this turns out to be useless reassoc
     will clean it up again.  */
  if (orig_code == MINUS_EXPR)
    {
      tree rhs = gimple_assign_rhs2 (def_stmt);
      tree negrhs = make_ssa_name (TREE_TYPE (rhs), NULL);
      gimple negate_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, negrhs,
                                                         rhs, NULL);
      gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
      set_vinfo_for_stmt (negate_stmt, new_stmt_vec_info (negate_stmt,
                                                          loop_info, NULL));
      gsi_insert_before (&gsi, negate_stmt, GSI_NEW_STMT);
      gimple_assign_set_rhs2 (def_stmt, negrhs);
      gimple_assign_set_rhs_code (def_stmt, PLUS_EXPR);
      update_stmt (def_stmt);
    }

  /* Reduction is safe.  We're dealing with one of the following:
     1) integer arithmetic and no trapv
     2) floating point arithmetic, and special flags permit this optimization
     3) nested cycle (i.e., outer loop vectorization).  */
  if (TREE_CODE (op1) == SSA_NAME)
    def1 = SSA_NAME_DEF_STMT (op1);

  if (TREE_CODE (op2) == SSA_NAME)
    def2 = SSA_NAME_DEF_STMT (op2);

  if (code != COND_EXPR
      && ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2))))
    {
      if (dump_enabled_p ())
        report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
      return NULL;
    }

  /* Check that one def is the reduction def, defined by PHI,
     the other def is either defined in the loop ("vect_internal_def"),
     or it's an induction (defined by a loop-header phi-node).  */

  if (def2 && def2 == phi
      && (code == COND_EXPR
          || !def1 || gimple_nop_p (def1)
          || !flow_bb_inside_loop_p (loop, gimple_bb (def1))
          || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
              && (is_gimple_assign (def1)
                  || is_gimple_call (def1)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
                       == vect_induction_def
                  || (gimple_code (def1) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
                           == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def1)))))))
    {
      if (dump_enabled_p ())
        report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
      return def_stmt;
    }

  if (def1 && def1 == phi
      && (code == COND_EXPR
          || !def2 || gimple_nop_p (def2)
          || !flow_bb_inside_loop_p (loop, gimple_bb (def2))
          || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
              && (is_gimple_assign (def2)
                  || is_gimple_call (def2)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
                       == vect_induction_def
                  || (gimple_code (def2) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
                           == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def2)))))))
    {
      if (check_reduction)
        {
          /* Swap operands (just for simplicity - so that the rest of the code
             can assume that the reduction variable is always the last (second)
             argument).  */
          if (dump_enabled_p ())
            report_vect_op (MSG_NOTE, def_stmt,
                            "detected reduction: need to swap operands: ");

          swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
                             gimple_assign_rhs2_ptr (def_stmt));

          if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
            LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
        }
      else
        {
          if (dump_enabled_p ())
            report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
        }

      return def_stmt;
    }

  /* Try to find SLP reduction chain.  */
  if (check_reduction && vect_is_slp_reduction (loop_info, phi, def_stmt))
    {
      if (dump_enabled_p ())
        report_vect_op (MSG_NOTE, def_stmt,
                        "reduction: detected reduction chain: ");

      return def_stmt;
    }

  if (dump_enabled_p ())
    report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                    "reduction: unknown pattern: ");

  return NULL;
}

/* Wrapper around vect_is_simple_reduction_1, that won't modify code
   in-place.  Arguments as there.  */

static gimple
vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
                          bool check_reduction, bool *double_reduc)
{
  return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
                                     double_reduc, false);
}

/* Wrapper around vect_is_simple_reduction_1, which will modify code
   in-place if it enables detection of more reductions.  Arguments
   as there.  */

gimple
vect_force_simple_reduction (loop_vec_info loop_info, gimple phi,
                             bool check_reduction, bool *double_reduc)
{
  return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
                                     double_reduc, true);
}
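
/* Illustrative sketch of the MODIFY path (an assumed example): given

       for (i = 0; i < n; i++)
         res -= x[i];

   the in-loop statement "res_2 = res_1 - x[i]" is not associative as
   written, but vect_force_simple_reduction allows
   vect_is_simple_reduction_1 to rewrite it to

       neg = -x[i];
       res_2 = res_1 + neg;

   so the cycle matches the commutative/associative PLUS_EXPR pattern.  */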

/* Calculate the cost of one scalar iteration of the loop.  */
static int
vect_get_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes, factor, scalar_single_iter_cost = 0;
  int innerloop_iters, i, stmt_cost;

  /* Count statements in scalar loop.  Using this as scalar cost for a single
     iteration for now.

     TODO: Add outer loop support.

     TODO: Consider assigning different costs to different scalar
     statements.  */

  /* FORNOW.  */
  innerloop_iters = 1;
  if (loop->inner)
    innerloop_iters = 50; /* FIXME */

  for (i = 0; i < nbbs; i++)
    {
      gimple_stmt_iterator si;
      basic_block bb = bbs[i];

      if (bb->loop_father == loop->inner)
        factor = innerloop_iters;
      else
        factor = 1;

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
            continue;

          /* Skip stmts that are not vectorized inside the loop.  */
          if (stmt_info
              && !STMT_VINFO_RELEVANT_P (stmt_info)
              && (!STMT_VINFO_LIVE_P (stmt_info)
                  || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
              && !STMT_VINFO_IN_PATTERN_P (stmt_info))
            continue;

          if (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt)))
            {
              if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
                stmt_cost = vect_get_stmt_cost (scalar_load);
              else
                stmt_cost = vect_get_stmt_cost (scalar_store);
            }
          else
            stmt_cost = vect_get_stmt_cost (scalar_stmt);

          scalar_single_iter_cost += stmt_cost * factor;
        }
    }
  return scalar_single_iter_cost;
}
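
/* Worked example (illustrative, assuming a target cost hook where each of
   these kinds costs 1): for a single-bb loop body containing one load
   (scalar_load), one add (scalar_stmt) and one store (scalar_store), with
   no inner loop (so factor == 1), the function returns 1 + 1 + 1 = 3.  */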

/* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times.  */
int
vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
                             int *peel_iters_epilogue,
                             int scalar_single_iter_cost,
                             stmt_vector_for_cost *prologue_cost_vec,
                             stmt_vector_for_cost *epilogue_cost_vec)
{
  int retval = 0;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      *peel_iters_epilogue = vf/2;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "cost model: epilogue peel iters set to vf/2 "
                         "because loop iterations are unknown.\n");

      /* If peeled iterations are known but number of scalar loop
         iterations are unknown, count a taken branch per peeled loop.  */
      retval = record_stmt_cost (prologue_cost_vec, 2, cond_branch_taken,
                                 NULL, 0, vect_prologue);
    }
  else
    {
      int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
      peel_iters_prologue = niters < peel_iters_prologue ?
                            niters : peel_iters_prologue;
      *peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
      /* If we need to peel for gaps, but no peeling is required, we have to
         peel VF iterations.  */
      if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
        *peel_iters_epilogue = vf;
    }

  if (peel_iters_prologue)
    retval += record_stmt_cost (prologue_cost_vec,
                                peel_iters_prologue * scalar_single_iter_cost,
                                scalar_stmt, NULL, 0, vect_prologue);
  if (*peel_iters_epilogue)
    retval += record_stmt_cost (epilogue_cost_vec,
                                *peel_iters_epilogue * scalar_single_iter_cost,
                                scalar_stmt, NULL, 0, vect_epilogue);
  return retval;
}
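
/* Worked example (illustrative numbers): with NITERS = 103 known, VF = 4
   and PEEL_ITERS_PROLOGUE = 2, the epilogue gets (103 - 2) % 4 = 1
   iteration; with SCALAR_SINGLE_ITER_COST = 3 this records 2 * 3 = 6
   scalar_stmt units in *PROLOGUE_COST_VEC and 1 * 3 = 3 scalar_stmt units
   in *EPILOGUE_COST_VEC.  */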

/* Function vect_estimate_min_profitable_iters

   Return the number of iterations required for the vector version of the
   loop to be profitable relative to the cost of the scalar version of the
   loop.  */

static void
vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
                                    int *ret_min_profitable_niters,
                                    int *ret_min_profitable_estimate)
{
  int min_profitable_iters;
  int min_profitable_estimate;
  int peel_iters_prologue;
  int peel_iters_epilogue;
  unsigned vec_inside_cost = 0;
  int vec_outside_cost = 0;
  unsigned vec_prologue_cost = 0;
  unsigned vec_epilogue_cost = 0;
  int scalar_single_iter_cost = 0;
  int scalar_outside_cost = 0;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
  void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);

  /* Cost model disabled.  */
  if (unlimited_cost_model ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
      *ret_min_profitable_niters = 0;
      *ret_min_profitable_estimate = 0;
      return;
    }

  /* Requires loop versioning tests to handle misalignment.  */
  if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
    {
      /* FIXME: Make cost depend on complexity of individual check.  */
      unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
      (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
                            vect_prologue);
      dump_printf (MSG_NOTE,
                   "cost model: Adding cost of checks for loop "
                   "versioning to treat misalignment.\n");
    }

  /* Requires loop versioning with alias checks.  */
  if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    {
      /* FIXME: Make cost depend on complexity of individual check.  */
      unsigned len = LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).length ();
      (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
                            vect_prologue);
      dump_printf (MSG_NOTE,
                   "cost model: Adding cost of checks for loop "
                   "versioning aliasing.\n");
    }

  if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
      || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
                          vect_prologue);

  /* Count statements in scalar loop.  Using this as scalar cost for a single
     iteration for now.

     TODO: Add outer loop support.

     TODO: Consider assigning different costs to different scalar
     statements.  */

  scalar_single_iter_cost = vect_get_single_scalar_iteration_cost (loop_vinfo);

  /* Add additional cost for the peeled instructions in prologue and epilogue
     loop.

     FORNOW: If we don't know the value of peel_iters for prologue or epilogue
     at compile-time - we assume it's vf/2 (the worst would be vf-1).

     TODO: Build an expression that represents peel_iters for prologue and
     epilogue to be used in a run-time test.  */

  if (npeel < 0)
    {
      peel_iters_prologue = vf/2;
      dump_printf (MSG_NOTE, "cost model: "
                   "prologue peel iters set to vf/2.\n");

      /* If peeling for alignment is unknown, loop bound of main loop becomes
         unknown.  */
      peel_iters_epilogue = vf/2;
      dump_printf (MSG_NOTE, "cost model: "
                   "epilogue peel iters set to vf/2 because "
                   "peeling for alignment is unknown.\n");

      /* If peeled iterations are unknown, count a taken branch and a not taken
         branch per peeled loop.  Even if scalar loop iterations are known,
         vector iterations are not known since peeled prologue iterations are
         not known.  Hence guards remain the same.  */
      (void) add_stmt_cost (target_cost_data, 2, cond_branch_taken,
                            NULL, 0, vect_prologue);
      (void) add_stmt_cost (target_cost_data, 2, cond_branch_not_taken,
                            NULL, 0, vect_prologue);
      /* FORNOW: Don't attempt to pass individual scalar instructions to
         the model; just assume linear cost for scalar iterations.  */
      (void) add_stmt_cost (target_cost_data,
                            peel_iters_prologue * scalar_single_iter_cost,
                            scalar_stmt, NULL, 0, vect_prologue);
      (void) add_stmt_cost (target_cost_data,
                            peel_iters_epilogue * scalar_single_iter_cost,
                            scalar_stmt, NULL, 0, vect_epilogue);
    }
  else
    {
      stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
      stmt_info_for_cost *si;
      int j;
      void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);

      prologue_cost_vec.create (2);
      epilogue_cost_vec.create (2);
      peel_iters_prologue = npeel;

      (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
                                          &peel_iters_epilogue,
                                          scalar_single_iter_cost,
                                          &prologue_cost_vec,
                                          &epilogue_cost_vec);

      FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
        {
          struct _stmt_vec_info *stmt_info
            = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
          (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                                si->misalign, vect_prologue);
        }

      FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
        {
          struct _stmt_vec_info *stmt_info
            = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
          (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                                si->misalign, vect_epilogue);
        }

      prologue_cost_vec.release ();
      epilogue_cost_vec.release ();
    }

  /* FORNOW: The scalar outside cost is incremented in one of the
     following ways:

     1. The vectorizer checks for alignment and aliasing and generates
     a condition that allows dynamic vectorization.  A cost model
     check is ANDED with the versioning condition.  Hence scalar code
     path now has the added cost of the versioning check.

       if (cost > th & versioning_check)
         jmp to vector code

     Hence run-time scalar is incremented by not-taken branch cost.

     2. The vectorizer then checks if a prologue is required.  If the
     cost model check was not done before during versioning, it has to
     be done before the prologue check.

       if (cost <= th)
         prologue = scalar_iters
       if (prologue == 0)
         jmp to vector code
       else
         execute prologue
       if (prologue == num_iters)
         go to exit

     Hence the run-time scalar cost is incremented by a taken branch,
     plus a not-taken branch, plus a taken branch cost.

     3. The vectorizer then checks if an epilogue is required.  If the
     cost model check was not done before during prologue check, it
     has to be done with the epilogue check.

       if (prologue == 0)
         jmp to vector code
       else
         execute prologue
       if (prologue == num_iters)
         go to exit
       vector code:
         if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
           jmp to epilogue

     Hence the run-time scalar cost should be incremented by 2 taken
     branches.

     TODO: The back end may reorder the BBS's differently and reverse
     conditions/branch directions.  Change the estimates below to
     something more reasonable.  */

  /* If the number of iterations is known and we do not do versioning, we can
     decide whether to vectorize at compile time.  Hence the scalar version
     does not carry cost model guard costs.  */
  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
      || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    {
      /* Cost model check occurs at versioning.  */
      if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
          || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
        scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
      else
        {
          /* Cost model check occurs at prologue generation.  */
          if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
            scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
              + vect_get_stmt_cost (cond_branch_not_taken);
          /* Cost model check occurs at epilogue generation.  */
          else
            scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
        }
    }

  /* Complete the target-specific cost calculations.  */
  finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
               &vec_inside_cost, &vec_epilogue_cost);

  vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);

  /* Calculate number of iterations required to make the vector version
     profitable, relative to the loop bodies only.  The following condition
     must hold true:
     SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
     where
     SIC = scalar iteration cost, VIC = vector iteration cost,
     VOC = vector outside cost, VF = vectorization factor,
     PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
     SOC = scalar outside cost for run time cost model check.  */
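
  /* For illustration, this is just the algebra behind the expression
     computed below: multiplying the condition through by VF gives

       SIC * VF * niters + SOC * VF
         > VIC * (niters - PL_ITERS - EP_ITERS) + VOC * VF

     and collecting the niters terms gives

       niters * (SIC * VF - VIC)
         > (VOC - SOC) * VF - VIC * PL_ITERS - VIC * EP_ITERS

     which is the quotient below; the conditional increment that follows
     rounds the result up.  */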
  if ((scalar_single_iter_cost * vf) > (int) vec_inside_cost)
    {
      if (vec_outside_cost <= 0)
        min_profitable_iters = 1;
      else
        {
          min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
                                  - vec_inside_cost * peel_iters_prologue
                                  - vec_inside_cost * peel_iters_epilogue)
                                 / ((scalar_single_iter_cost * vf)
                                    - vec_inside_cost);

          if ((scalar_single_iter_cost * vf * min_profitable_iters)
              <= (((int) vec_inside_cost * min_profitable_iters)
                  + (((int) vec_outside_cost - scalar_outside_cost) * vf)))
            min_profitable_iters++;
        }
    }
  /* vector version will never be profitable.  */
  else
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "cost model: the vector iteration cost = %d "
                         "divided by the scalar iteration cost = %d "
                         "is greater or equal to the vectorization factor = %d"
                         ".\n",
                         vec_inside_cost, scalar_single_iter_cost, vf);
      *ret_min_profitable_niters = -1;
      *ret_min_profitable_estimate = -1;
      return;
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
      dump_printf (MSG_NOTE, "  Vector inside of loop cost: %d\n",
                   vec_inside_cost);
      dump_printf (MSG_NOTE, "  Vector prologue cost: %d\n",
                   vec_prologue_cost);
      dump_printf (MSG_NOTE, "  Vector epilogue cost: %d\n",
                   vec_epilogue_cost);
      dump_printf (MSG_NOTE, "  Scalar iteration cost: %d\n",
                   scalar_single_iter_cost);
      dump_printf (MSG_NOTE, "  Scalar outside cost: %d\n",
                   scalar_outside_cost);
      dump_printf (MSG_NOTE, "  Vector outside cost: %d\n",
                   vec_outside_cost);
      dump_printf (MSG_NOTE, "  prologue iterations: %d\n",
                   peel_iters_prologue);
      dump_printf (MSG_NOTE, "  epilogue iterations: %d\n",
                   peel_iters_epilogue);
      dump_printf (MSG_NOTE,
                   "  Calculated minimum iters for profitability: %d\n",
                   min_profitable_iters);
      dump_printf (MSG_NOTE, "\n");
    }

  min_profitable_iters =
        min_profitable_iters < vf ? vf : min_profitable_iters;

  /* Because the condition we create is:
     if (niters <= min_profitable_iters)
       then skip the vectorized loop.  */
  min_profitable_iters--;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "  Runtime profitability threshold = %d\n",
                     min_profitable_iters);

  *ret_min_profitable_niters = min_profitable_iters;

  /* Calculate number of iterations required to make the vector version
     profitable, relative to the loop bodies only.

     Non-vectorized variant is SIC * niters and it must win over vector
     variant on the expected loop trip count.  The following condition must
     hold true:
     SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC  */

  if (vec_outside_cost <= 0)
    min_profitable_estimate = 1;
  else
    {
      min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost) * vf
                                 - vec_inside_cost * peel_iters_prologue
                                 - vec_inside_cost * peel_iters_epilogue)
                                / ((scalar_single_iter_cost * vf)
                                   - vec_inside_cost);
    }
  min_profitable_estimate--;
  min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "  Static estimate profitability threshold = %d\n",
                     min_profitable_estimate);

  *ret_min_profitable_estimate = min_profitable_estimate;
}
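
/* Worked example (illustrative numbers): with SIC = 4, VIC = 6, VF = 4,
   VOC = 20, SOC = 0 and no peeling, the quotient above gives
   (20 * 4) / (4 * 4 - 6) = 8; the equality check bumps it to 9 because
   4 * 4 * 8 = 128 equals 6 * 8 + 20 * 4, the MAX against VF leaves 9, and
   the final decrement yields a runtime threshold of 8, i.e. the vector
   loop is entered only when niters > 8.  */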

/* TODO: Close dependency between vect_model_*_cost and vectorizable_*
   functions.  Design better to avoid maintenance issues.  */

/* Function vect_model_reduction_cost.

   Models cost for a reduction operation, including the vector ops
   generated within the strip-mine loop, the initial definition before
   the loop, and the epilogue code that must be generated.  */

static bool
vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
                           int ncopies)
{
  int prologue_cost = 0, epilogue_cost = 0;
  enum tree_code code;
  optab optab;
  tree vectype;
  gimple stmt, orig_stmt;
  tree reduction_op;
  enum machine_mode mode;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);

  /* Cost of reduction op inside loop.  */
  unsigned inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
                                        stmt_info, 0, vect_body);
  stmt = STMT_VINFO_STMT (stmt_info);

  switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
    {
    case GIMPLE_SINGLE_RHS:
      gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
      reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
      break;
    case GIMPLE_UNARY_RHS:
      reduction_op = gimple_assign_rhs1 (stmt);
      break;
    case GIMPLE_BINARY_RHS:
      reduction_op = gimple_assign_rhs2 (stmt);
      break;
    case GIMPLE_TERNARY_RHS:
      reduction_op = gimple_assign_rhs3 (stmt);
      break;
    default:
      gcc_unreachable ();
    }

  vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "unsupported data-type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                             TREE_TYPE (reduction_op));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }
      return false;
    }

  mode = TYPE_MODE (vectype);
  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

  if (!orig_stmt)
    orig_stmt = STMT_VINFO_STMT (stmt_info);

  code = gimple_assign_rhs_code (orig_stmt);

  /* Add in cost for initial definition.  */
  prologue_cost += add_stmt_cost (target_cost_data, 1, scalar_to_vec,
                                  stmt_info, 0, vect_prologue);

  /* Determine cost of epilogue code.

     We have a reduction operator that will reduce the vector in one statement.
     Also requires scalar extract.  */

  if (!nested_in_vect_loop_p (loop, orig_stmt))
    {
      if (reduc_code != ERROR_MARK)
        {
          epilogue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
                                          stmt_info, 0, vect_epilogue);
          epilogue_cost += add_stmt_cost (target_cost_data, 1, vec_to_scalar,
                                          stmt_info, 0, vect_epilogue);
        }
      else
        {
          int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
          tree bitsize =
            TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
          int element_bitsize = tree_to_uhwi (bitsize);
          int nelements = vec_size_in_bits / element_bitsize;

          optab = optab_for_tree_code (code, vectype, optab_default);

          /* We have a whole vector shift available.  */
          if (VECTOR_MODE_P (mode)
              && optab_handler (optab, mode) != CODE_FOR_nothing
              && optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
            {
              /* Final reduction via vector shifts and the reduction operator.
                 Also requires scalar extract.  */
              epilogue_cost += add_stmt_cost (target_cost_data,
                                              exact_log2 (nelements) * 2,
                                              vector_stmt, stmt_info, 0,
                                              vect_epilogue);
              epilogue_cost += add_stmt_cost (target_cost_data, 1,
                                              vec_to_scalar, stmt_info, 0,
                                              vect_epilogue);
            }
          else
            /* Use extracts and reduction op for final reduction.  For N
               elements, we have N extracts and N-1 reduction ops.  */
            epilogue_cost += add_stmt_cost (target_cost_data,
                                            nelements + nelements - 1,
                                            vector_stmt, stmt_info, 0,
                                            vect_epilogue);
        }
    }

  if (dump_enabled_p ())
    dump_printf (MSG_NOTE,
                 "vect_model_reduction_cost: inside_cost = %d, "
                 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
                 prologue_cost, epilogue_cost);

  return true;
}
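
/* Worked example (illustrative): for a 128-bit vector of 32-bit ints with
   no target reduc_code available, nelements = 128 / 32 = 4.  If whole
   vector shifts are supported the epilogue costs exact_log2 (4) * 2 = 4
   vector_stmt units plus one vec_to_scalar; otherwise it costs
   4 + 4 - 1 = 7 vector_stmt units for the extract/reduce sequence.  */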

/* Function vect_model_induction_cost.

   Models cost for induction operations.  */

static void
vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
{
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  unsigned inside_cost, prologue_cost;

  /* loop cost for vec_loop.  */
  inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
                               stmt_info, 0, vect_body);

  /* prologue cost for vec_init and vec_step.  */
  prologue_cost = add_stmt_cost (target_cost_data, 2, scalar_to_vec,
                                 stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_induction_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
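
/* For instance (illustrative): with VF = 8 and a 4-element vectype the
   induction is unrolled NCOPIES = 2 times, so this models 2 vector_stmt
   units inside the loop plus 2 scalar_to_vec prologue broadcasts (one for
   vec_init and one for vec_step).  */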

/* Function get_initial_def_for_induction

   Input:
   STMT - a stmt that performs an induction operation in the loop.
   IV_PHI - the initial value of the induction variable

   Output:
   Return a vector variable, initialized with the first VF values of
   the induction variable.  E.g., for an iv with IV_PHI='X' and
   evolution S, for a vector of 4 units, we want to return:
   [X, X + S, X + 2*S, X + 3*S].  */

static tree
get_initial_def_for_induction (gimple iv_phi)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree vectype;
  int nunits;
  edge pe = loop_preheader_edge (loop);
  struct loop *iv_loop;
  basic_block new_bb;
  tree new_vec, vec_init, vec_step, t;
  tree new_var;
  tree new_name;
  tree access_fn;
  bool ok;
  gimple init_stmt, induction_phi, new_stmt;
  tree induc_def, vec_def, vec_dest;
  tree init_expr, step_expr;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  int i;
  int ncopies;
  tree expr;
  stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
  bool nested_in_vect_loop = false;
  gimple_seq stmts = NULL;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  gimple exit_phi;
  edge latch_e;
  tree loop_arg;
  gimple_stmt_iterator si;
  basic_block bb = gimple_bb (iv_phi);
  tree stepvectype;
  tree resvectype;

  /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop?  */
  if (nested_in_vect_loop_p (loop, iv_phi))
    {
      nested_in_vect_loop = true;
      iv_loop = loop->inner;
    }
  else
    iv_loop = loop;
  gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);

  latch_e = loop_latch_edge (iv_loop);
  loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);

  access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi));
  gcc_assert (access_fn);
  STRIP_NOPS (access_fn);
  ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn,
                                    &init_expr, &step_expr);
  gcc_assert (ok);
  pe = loop_preheader_edge (iv_loop);

  vectype = get_vectype_for_scalar_type (TREE_TYPE (init_expr));
  resvectype = get_vectype_for_scalar_type (TREE_TYPE (PHI_RESULT (iv_phi)));
  gcc_assert (vectype);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  ncopies = vf / nunits;

  gcc_assert (phi_info);
  gcc_assert (ncopies >= 1);

  /* Find the first insertion point in the BB.  */
  si = gsi_after_labels (bb);

  /* Create the vector that holds the initial_value of the induction.  */
  if (nested_in_vect_loop)
    {
      /* iv_loop is nested in the loop to be vectorized.  init_expr had already
         been created during vectorization of previous stmts.  We obtain it
         from the STMT_VINFO_VEC_STMT of the defining stmt.  */
      tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi,
                                           loop_preheader_edge (iv_loop));
      vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL);
      /* If the initial value is not of proper type, convert it.  */
      if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
        {
          new_stmt = gimple_build_assign_with_ops
              (VIEW_CONVERT_EXPR,
               vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_"),
               build1 (VIEW_CONVERT_EXPR, vectype, vec_init), NULL_TREE);
          vec_init = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt);
          gimple_assign_set_lhs (new_stmt, vec_init);
          new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
                                                 new_stmt);
          gcc_assert (!new_bb);
          set_vinfo_for_stmt (new_stmt,
                              new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
        }
    }
  else
    {
      vec<constructor_elt, va_gc> *v;

      /* iv_loop is the loop to be vectorized.  Create:
         vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr)  */
      new_var = vect_get_new_vect_var (TREE_TYPE (vectype),
                                       vect_scalar_var, "var_");
      new_name = force_gimple_operand (fold_convert (TREE_TYPE (vectype),
                                                     init_expr),
                                       &stmts, false, new_var);
      if (stmts)
        {
          new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
          gcc_assert (!new_bb);
        }

      vec_alloc (v, nunits);
      bool constant_p = is_gimple_min_invariant (new_name);
      CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
      for (i = 1; i < nunits; i++)
        {
          /* Create: new_name_i = new_name + step_expr  */
          new_name = fold_build2 (PLUS_EXPR, TREE_TYPE (new_name),
                                  new_name, step_expr);
          if (!is_gimple_min_invariant (new_name))
            {
              init_stmt = gimple_build_assign (new_var, new_name);
              new_name = make_ssa_name (new_var, init_stmt);
              gimple_assign_set_lhs (init_stmt, new_name);
              new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
              gcc_assert (!new_bb);
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "created new init_stmt: ");
                  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, init_stmt, 0);
                  dump_printf (MSG_NOTE, "\n");
                }
              constant_p = false;
            }
          CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
        }
      /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1]  */
      if (constant_p)
        new_vec = build_vector_from_ctor (vectype, v);
      else
        new_vec = build_constructor (vectype, v);
      vec_init = vect_init_vector (iv_phi, new_vec, vectype, NULL);
    }


  /* Create the vector that holds the step of the induction.  */
  if (nested_in_vect_loop)
    /* iv_loop is nested in the loop to be vectorized.  Generate:
       vec_step = [S, S, S, S]  */
    new_name = step_expr;
  else
    {
      /* iv_loop is the loop to be vectorized.  Generate:
         vec_step = [VF*S, VF*S, VF*S, VF*S]  */
      if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
        {
          expr = build_int_cst (integer_type_node, vf);
          expr = fold_convert (TREE_TYPE (step_expr), expr);
        }
      else
        expr = build_int_cst (TREE_TYPE (step_expr), vf);
      new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
                              expr, step_expr);
      if (TREE_CODE (step_expr) == SSA_NAME)
        new_name = vect_init_vector (iv_phi, new_name,
                                     TREE_TYPE (step_expr), NULL);
    }

  t = unshare_expr (new_name);
  gcc_assert (CONSTANT_CLASS_P (new_name)
              || TREE_CODE (new_name) == SSA_NAME);
  stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
  gcc_assert (stepvectype);
  new_vec = build_vector_from_val (stepvectype, t);
  vec_step = vect_init_vector (iv_phi, new_vec, stepvectype, NULL);


  /* Create the following def-use cycle:
     loop prolog:
         vec_init = ...
         vec_step = ...
     loop:
         vec_iv = PHI <vec_init, vec_loop>
         ...
         STMT
         ...
         vec_loop = vec_iv + vec_step;  */

  /* Create the induction-phi that defines the induction-operand.  */
  vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
  induction_phi = create_phi_node (vec_dest, iv_loop->header);
  set_vinfo_for_stmt (induction_phi,
                      new_stmt_vec_info (induction_phi, loop_vinfo, NULL));
  induc_def = PHI_RESULT (induction_phi);

  /* Create the iv update inside the loop  */
  new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
                                           induc_def, vec_step);
  vec_def = make_ssa_name (vec_dest, new_stmt);
  gimple_assign_set_lhs (new_stmt, vec_def);
  gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
  set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
                                                   NULL));

  /* Set the arguments of the phi node:  */
  add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
  add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
               UNKNOWN_LOCATION);


  /* In case that vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.  */

  if (ncopies > 1)
    {
      stmt_vec_info prev_stmt_vinfo;
      /* FORNOW.  This restriction should be relaxed.  */
      gcc_assert (!nested_in_vect_loop);

      /* Create the vector that holds the step of the induction.  */
      if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
        {
          expr = build_int_cst (integer_type_node, nunits);
          expr = fold_convert (TREE_TYPE (step_expr), expr);
        }
      else
        expr = build_int_cst (TREE_TYPE (step_expr), nunits);
      new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
                              expr, step_expr);
      if (TREE_CODE (step_expr) == SSA_NAME)
        new_name = vect_init_vector (iv_phi, new_name,
                                     TREE_TYPE (step_expr), NULL);
      t = unshare_expr (new_name);
      gcc_assert (CONSTANT_CLASS_P (new_name)
                  || TREE_CODE (new_name) == SSA_NAME);
      new_vec = build_vector_from_val (stepvectype, t);
      vec_step = vect_init_vector (iv_phi, new_vec, stepvectype, NULL);

      vec_def = induc_def;
      prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
      for (i = 1; i < ncopies; i++)
        {
          /* vec_i = vec_prev + vec_step  */
          new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
                                                   vec_def, vec_step);
          vec_def = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, vec_def);

          gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
          if (!useless_type_conversion_p (resvectype, vectype))
            {
              new_stmt = gimple_build_assign_with_ops
                  (VIEW_CONVERT_EXPR,
                   vect_get_new_vect_var (resvectype, vect_simple_var,
                                          "vec_iv_"),
                   build1 (VIEW_CONVERT_EXPR, resvectype,
                           gimple_assign_lhs (new_stmt)), NULL_TREE);
              gimple_assign_set_lhs (new_stmt,
                                     make_ssa_name
                                       (gimple_assign_lhs (new_stmt), new_stmt));
              gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
            }
          set_vinfo_for_stmt (new_stmt,
                              new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
          STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
          prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
        }
    }

  if (nested_in_vect_loop)
    {
      /* Find the loop-closed exit-phi of the induction, and record
         the final vector of induction results:  */
      exit_phi = NULL;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
        {
          if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (USE_STMT (use_p))))
            {
              exit_phi = USE_STMT (use_p);
              break;
            }
        }
      if (exit_phi)
        {
          stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
          /* FORNOW.  Currently not supporting the case that an inner-loop induction
             is not used in the outer-loop (i.e. only outside the outer-loop).  */
          gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
                      && !STMT_VINFO_LIVE_P (stmt_vinfo));

          STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "vector of inductions after inner-loop:");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
              dump_printf (MSG_NOTE, "\n");
            }
        }
    }


  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "transform induction: created def-use cycle: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
      dump_printf (MSG_NOTE, "\n");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
                        SSA_NAME_DEF_STMT (vec_def), 0);
      dump_printf (MSG_NOTE, "\n");
    }

  STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
  if (!useless_type_conversion_p (resvectype, vectype))
    {
      new_stmt = gimple_build_assign_with_ops
          (VIEW_CONVERT_EXPR,
           vect_get_new_vect_var (resvectype, vect_simple_var, "vec_iv_"),
           build1 (VIEW_CONVERT_EXPR, resvectype, induc_def), NULL_TREE);
      induc_def = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt);
      gimple_assign_set_lhs (new_stmt, induc_def);
      si = gsi_after_labels (bb);
      gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
      set_vinfo_for_stmt (new_stmt,
                          new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
      STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_stmt))
        = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (induction_phi));
    }

  return induc_def;
}
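
/* Illustrative example (assumed values): for an int iv with X = 0, S = 1,
   a 4-element vectype and VF = 8 (so ncopies = 2), the code above
   generates roughly

       vec_init = {0, 1, 2, 3}
       loop:
         vec_iv_1 = PHI <vec_init, vec_loop>
         vec_iv_2 = vec_iv_1 + {4, 4, 4, 4}   // nunits*S between copies
         ...
         vec_loop = vec_iv_1 + {8, 8, 8, 8}   // VF*S per iteration

   so the two copies together cover the first VF values of the iv.  */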

/* Function get_initial_def_for_reduction

   Input:
   STMT - a stmt that performs a reduction operation in the loop.
   INIT_VAL - the initial value of the reduction variable

   Output:
   ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
        of the reduction (used for adjusting the epilog - see below).
   Return a vector variable, initialized according to the operation that STMT
        performs.  This vector will be used as the initial value of the
        vector of partial results.

   Option1 (adjust in epilog): Initialize the vector as follows:
     add/bit or/xor:    [0,0,...,0,0]
     mult/bit and:      [1,1,...,1,1]
     min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
   and when necessary (e.g. add/mult case) let the caller know
   that it needs to adjust the result by init_val.

   Option2: Initialize the vector as follows:
     add/bit or/xor:    [init_val,0,0,...,0]
     mult/bit and:      [init_val,1,1,...,1]
     min/max/cond_expr: [init_val,init_val,...,init_val]
   and no adjustments are needed.

   For example, for the following code:

   s = init_val;
   for (i=0;i<n;i++)
     s = s + a[i];

   STMT is 's = s + a[i]', and the reduction variable is 's'.
   For a vector of 4 units, we want to return either [0,0,0,init_val],
   or [0,0,0,0] and let the caller know that it needs to adjust
   the result at the end by 'init_val'.

   FORNOW, we are using the 'adjust in epilog' scheme, because this way the
   initialization vector is simpler (same element in all entries), if
   ADJUSTMENT_DEF is not NULL, and Option2 otherwise.

   A cost model should help decide between these two schemes.  */

static tree
get_initial_def_for_reduction (gimple stmt, tree init_val,
                               tree *adjustment_def)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree scalar_type = TREE_TYPE (init_val);
  tree vectype = get_vectype_for_scalar_type (scalar_type);
  int nunits;
  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree def_for_init;
  tree init_def;
  tree *elts;
  int i;
  bool nested_in_vect_loop = false;
  tree init_value;
  REAL_VALUE_TYPE real_init_val = dconst0;
  int int_init_val = 0;
  gimple def_stmt = NULL;

  gcc_assert (vectype);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
              || SCALAR_FLOAT_TYPE_P (scalar_type));

  if (nested_in_vect_loop_p (loop, stmt))
    nested_in_vect_loop = true;
  else
    gcc_assert (loop == (gimple_bb (stmt))->loop_father);

  /* In case of double reduction we only create a vector variable to be put
     in the reduction phi node.  The actual statement creation is done in
     vect_create_epilog_for_reduction.  */
  if (adjustment_def && nested_in_vect_loop
      && TREE_CODE (init_val) == SSA_NAME
      && (def_stmt = SSA_NAME_DEF_STMT (init_val))
      && gimple_code (def_stmt) == GIMPLE_PHI
      && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
      && vinfo_for_stmt (def_stmt)
      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
           == vect_double_reduction_def)
    {
      *adjustment_def = NULL;
      return vect_create_destination_var (init_val, vectype);
    }

  if (TREE_CONSTANT (init_val))
    {
      if (SCALAR_FLOAT_TYPE_P (scalar_type))
        init_value = build_real (scalar_type, TREE_REAL_CST (init_val));
      else
        init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val));
    }
  else
    init_value = init_val;

  switch (code)
    {
      case WIDEN_SUM_EXPR:
      case DOT_PROD_EXPR:
      case PLUS_EXPR:
      case MINUS_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
      case MULT_EXPR:
      case BIT_AND_EXPR:
        /* ADJUSTMENT_DEF is NULL when called from
           vect_create_epilog_for_reduction to vectorize double reduction.  */
        if (adjustment_def)
          {
            if (nested_in_vect_loop)
              *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt,
                                                              NULL);
            else
              *adjustment_def = init_val;
          }

        if (code == MULT_EXPR)
          {
            real_init_val = dconst1;
            int_init_val = 1;
          }

        if (code == BIT_AND_EXPR)
          int_init_val = -1;

        if (SCALAR_FLOAT_TYPE_P (scalar_type))
          def_for_init = build_real (scalar_type, real_init_val);
        else
          def_for_init = build_int_cst (scalar_type, int_init_val);

        /* Create a vector of '0' or '1' except the first element.  */
        elts = XALLOCAVEC (tree, nunits);
        for (i = nunits - 2; i >= 0; --i)
          elts[i + 1] = def_for_init;

        /* Option1: the first element is '0' or '1' as well.  */
        if (adjustment_def)
          {
            elts[0] = def_for_init;
            init_def = build_vector (vectype, elts);
            break;
          }

        /* Option2: the first element is INIT_VAL.  */
        elts[0] = init_val;
        if (TREE_CONSTANT (init_val))
          init_def = build_vector (vectype, elts);
        else
          {
            vec<constructor_elt, va_gc> *v;
            vec_alloc (v, nunits);
            CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init_val);
            for (i = 1; i < nunits; ++i)
              CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[i]);
            init_def = build_constructor (vectype, v);
          }

        break;

      case MIN_EXPR:
      case MAX_EXPR:
      case COND_EXPR:
        if (adjustment_def)
          {
            *adjustment_def = NULL_TREE;
            init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
            break;
          }

        init_def = build_vector_from_val (vectype, init_value);
        break;

      default:
        gcc_unreachable ();
    }

  return init_def;
}
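
/* Illustrative example (assumed values): for "s = s + a[i]" with
   init_val = 5 and a 4-element int vectype, Option1 returns {0, 0, 0, 0}
   and sets *ADJUSTMENT_DEF to 5, while Option2 returns {5, 0, 0, 0} with
   no adjustment; a MULT_EXPR reduction would instead use {1, 1, 1, 1}
   (adjusting by 5) or {5, 1, 1, 1}.  */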
3711 /* Function vect_create_epilog_for_reduction
3713 Create code at the loop-epilog to finalize the result of a reduction
3716 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
3717 reduction statements.
3718 STMT is the scalar reduction stmt that is being vectorized.
3719 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
3720 number of elements that we can fit in a vectype (nunits). In this case
3721 we have to generate more than one vector stmt - i.e - we need to "unroll"
3722 the vector stmt by a factor VF/nunits. For more details see documentation
3723 in vectorizable_operation.
3724 REDUC_CODE is the tree-code for the epilog reduction.
3725 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
3727 REDUC_INDEX is the index of the operand in the right hand side of the
3728 statement that is defined by REDUCTION_PHI.
3729 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
3730 SLP_NODE is an SLP node containing a group of reduction statements. The
3731 first one in this group is STMT.
3734 1. Creates the reduction def-use cycles: sets the arguments for
3736 The loop-entry argument is the vectorized initial-value of the reduction.
3737 The loop-latch argument is taken from VECT_DEFS - the vector of partial
3739 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
3740 by applying the operation specified by REDUC_CODE if available, or by
3741 other means (whole-vector shifts or a scalar loop).
3742 The function also creates a new phi node at the loop exit to preserve
3743 loop-closed form, as illustrated below.
3745 The flow at the entry to this function:
3748 vec_def = phi <null, null> # REDUCTION_PHI
3749 VECT_DEF = vector_stmt # vectorized form of STMT
3750 s_loop = scalar_stmt # (scalar) STMT
3752 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3756 The above is transformed by this function into:
3759 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3760 VECT_DEF = vector_stmt # vectorized form of STMT
3761 s_loop = scalar_stmt # (scalar) STMT
3763 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3764 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3765 v_out2 = reduce <v_out1>
3766 s_out3 = extract_field <v_out2, 0>
3767 s_out4 = adjust_result <s_out3>
3773 vect_create_epilog_for_reduction (vec
<tree
> vect_defs
, gimple stmt
,
3774 int ncopies
, enum tree_code reduc_code
,
3775 vec
<gimple
> reduction_phis
,
3776 int reduc_index
, bool double_reduc
,
3779 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
3780 stmt_vec_info prev_phi_info
;
3782 enum machine_mode mode
;
3783 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
3784 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
), *outer_loop
= NULL
;
3785 basic_block exit_bb
;
3788 gimple new_phi
= NULL
, phi
;
3789 gimple_stmt_iterator exit_gsi
;
3791 tree new_temp
= NULL_TREE
, new_dest
, new_name
, new_scalar_dest
;
3792 gimple epilog_stmt
= NULL
;
3793 enum tree_code code
= gimple_assign_rhs_code (stmt
);
3795 tree bitsize
, bitpos
;
3796 tree adjustment_def
= NULL
;
3797 tree vec_initial_def
= NULL
;
3798 tree reduction_op
, expr
, def
;
3799 tree orig_name
, scalar_result
;
3800 imm_use_iterator imm_iter
, phi_imm_iter
;
3801 use_operand_p use_p
, phi_use_p
;
3802 bool extract_scalar_result
= false;
3803 gimple use_stmt
, orig_stmt
, reduction_phi
= NULL
;
3804 bool nested_in_vect_loop
= false;
3805 auto_vec
<gimple
> new_phis
;
3806 auto_vec
<gimple
> inner_phis
;
3807 enum vect_def_type dt
= vect_unknown_def_type
;
3809 auto_vec
<tree
> scalar_results
;
3810 unsigned int group_size
= 1, k
, ratio
;
3811 auto_vec
<tree
> vec_initial_defs
;
3812 auto_vec
<gimple
> phis
;
3813 bool slp_reduc
= false;
3814 tree new_phi_result
;
3815 gimple inner_phi
= NULL
;
3818 group_size
= SLP_TREE_SCALAR_STMTS (slp_node
).length ();
3820 if (nested_in_vect_loop_p (loop
, stmt
))
3824 nested_in_vect_loop
= true;
3825 gcc_assert (!slp_node
);
3828 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt
)))
3830 case GIMPLE_SINGLE_RHS
:
3831 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt
))
3833 reduction_op
= TREE_OPERAND (gimple_assign_rhs1 (stmt
), reduc_index
);
3835 case GIMPLE_UNARY_RHS
:
3836 reduction_op
= gimple_assign_rhs1 (stmt
);
3838 case GIMPLE_BINARY_RHS
:
3839 reduction_op
= reduc_index
?
3840 gimple_assign_rhs2 (stmt
) : gimple_assign_rhs1 (stmt
);
3842 case GIMPLE_TERNARY_RHS
:
3843 reduction_op
= gimple_op (stmt
, reduc_index
+ 1);
3849 vectype
= get_vectype_for_scalar_type (TREE_TYPE (reduction_op
));
3850 gcc_assert (vectype
);
3851 mode
= TYPE_MODE (vectype
);
3853 /* 1. Create the reduction def-use cycle:
3854 Set the arguments of REDUCTION_PHIS, i.e., transform
3857 vec_def = phi <null, null> # REDUCTION_PHI
3858 VECT_DEF = vector_stmt # vectorized form of STMT
3864 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3865 VECT_DEF = vector_stmt # vectorized form of STMT
3868 (in case of SLP, do it for all the phis). */
3870 /* Get the loop-entry arguments. */
3872 vect_get_vec_defs (reduction_op
, NULL_TREE
, stmt
, &vec_initial_defs
,
3873 NULL
, slp_node
, reduc_index
);
3876 vec_initial_defs
.create (1);
3877 /* For the case of reduction, vect_get_vec_def_for_operand returns
3878 the scalar def before the loop, that defines the initial value
3879 of the reduction variable. */
3880 vec_initial_def
= vect_get_vec_def_for_operand (reduction_op
, stmt
,
3882 vec_initial_defs
.quick_push (vec_initial_def
);
3885 /* Set phi nodes arguments. */
3886 FOR_EACH_VEC_ELT (reduction_phis
, i
, phi
)
3888 tree vec_init_def
= vec_initial_defs
[i
];
3889 tree def
= vect_defs
[i
];
3890 for (j
= 0; j
< ncopies
; j
++)
3892 /* Set the loop-entry arg of the reduction-phi. */
3893 add_phi_arg (phi
, vec_init_def
, loop_preheader_edge (loop
),
3896 /* Set the loop-latch arg for the reduction-phi. */
3898 def
= vect_get_vec_def_for_stmt_copy (vect_unknown_def_type
, def
);
3900 add_phi_arg (phi
, def
, loop_latch_edge (loop
), UNKNOWN_LOCATION
);
3902 if (dump_enabled_p ())
3904 dump_printf_loc (MSG_NOTE
, vect_location
,
3905 "transform reduction: created def-use cycle: ");
3906 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, phi
, 0);
3907 dump_printf (MSG_NOTE
, "\n");
3908 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, SSA_NAME_DEF_STMT (def
), 0);
3909 dump_printf (MSG_NOTE
, "\n");
3912 phi
= STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi
));
  /* 2. Create epilog code.
        The reduction epilog code operates across the elements of the vector
        of partial results computed by the vectorized loop.
        The reduction epilog code consists of:

        step 1: compute the scalar result in a vector (v_out2)
        step 2: extract the scalar result (s_out3) from the vector (v_out2)
        step 3: adjust the scalar result (s_out3) if needed.

        Step 1 can be accomplished using one of the following three schemes:
          (scheme 1) using reduc_code, if available.
          (scheme 2) using whole-vector shifts, if available.
          (scheme 3) using a scalar loop. In this case steps 1+2 above are
                     combined.

        The overall epilog code looks like this:

          s_out0 = phi <s_loop>                 # original EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>              # step 1
          s_out3 = extract_field <v_out2, 0>    # step 2
          s_out4 = adjust_result <s_out3>       # step 3

        (step 3 is optional, and steps 1 and 2 may be combined).
        Lastly, the uses of s_out0 are replaced by s_out4.  */
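  /* Hedged illustration (not part of the original comment): for an int sum
     reduction over V4SI with partial results v_out1 = {a,b,c,d}:
       v_out2 = reduce <v_out1>            -> {a+b+c+d, ...}     # step 1
       s_out3 = extract_field <v_out2, 0>  -> a+b+c+d            # step 2
       s_out4 = adjust_result <s_out3>     -> s_out3 + init_val  # step 3  */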
  /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
         v_out1 = phi <VECT_DEF>
         Store them in NEW_PHIS.  */

  exit_bb = single_exit (loop)->dest;
  prev_phi_info = NULL;
  new_phis.create (vect_defs.length ());
  FOR_EACH_VEC_ELT (vect_defs, i, def)
    {
      for (j = 0; j < ncopies; j++)
        {
          tree new_def = copy_ssa_name (def, NULL);
          phi = create_phi_node (new_def, exit_bb);
          set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL));
          if (j == 0)
            new_phis.quick_push (phi);
          else
            {
              def = vect_get_vec_def_for_stmt_copy (dt, def);
              STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
            }

          SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
          prev_phi_info = vinfo_for_stmt (phi);
        }
    }
  /* The epilogue is created for the outer-loop, i.e., for the loop being
     vectorized.  Create exit phis for the outer loop.  */
  if (double_reduc)
    {
      loop = outer_loop;
      exit_bb = single_exit (loop)->dest;
      inner_phis.create (vect_defs.length ());
      FOR_EACH_VEC_ELT (new_phis, i, phi)
        {
          tree new_result = copy_ssa_name (PHI_RESULT (phi), NULL);
          gimple outer_phi = create_phi_node (new_result, exit_bb);
          SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
                           PHI_RESULT (phi));
          set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
                                                            loop_vinfo, NULL));
          inner_phis.quick_push (phi);
          new_phis[i] = outer_phi;
          prev_phi_info = vinfo_for_stmt (outer_phi);
          while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)))
            {
              phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
              new_result = copy_ssa_name (PHI_RESULT (phi), NULL);
              outer_phi = create_phi_node (new_result, exit_bb);
              SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
                               PHI_RESULT (phi));
              set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
                                                                loop_vinfo,
                                                                NULL));
              STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi;
              prev_phi_info = vinfo_for_stmt (outer_phi);
            }
        }
    }

  exit_gsi = gsi_after_labels (exit_bb);
  /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
         (i.e. when reduc_code is not available) and in the final adjustment
         code (if needed).  Also get the original scalar reduction variable as
         defined in the loop.  In case STMT is a "pattern-stmt" (i.e. - it
         represents a reduction pattern), the tree-code and scalar-def are
         taken from the original stmt that the pattern-stmt (STMT) replaces.
         Otherwise (it is a regular reduction) - the tree-code and scalar-def
         are taken from STMT.  */

  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!orig_stmt)
    {
      /* Regular reduction  */
      orig_stmt = stmt;
    }
  else
    {
      /* Reduction pattern  */
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
      gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
    }

  code = gimple_assign_rhs_code (orig_stmt);
  /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
     partial results are added and not subtracted.  */
  if (code == MINUS_EXPR)
    code = PLUS_EXPR;

  scalar_dest = gimple_assign_lhs (orig_stmt);
  scalar_type = TREE_TYPE (scalar_dest);
  scalar_results.create (group_size);
  new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
  bitsize = TYPE_SIZE (scalar_type);
  /* In case this is a reduction in an inner-loop while vectorizing an outer
     loop - we don't need to extract a single scalar result at the end of the
     inner-loop (unless it is double reduction, i.e., the use of reduction is
     outside the outer-loop).  The final vector of partial results will be
     used in the vectorized outer-loop, or reduced to a scalar result at the
     end of the outer-loop.  */
  if (nested_in_vect_loop && !double_reduc)
    goto vect_finalize_reduction;
  /* SLP reduction without reduction chain, e.g.,
     # a1 = phi <a2, a0>
     # b1 = phi <b2, b0>
     a2 = operation (a1)
     b2 = operation (b1)  */
  slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));

  /* In case of reduction chain, e.g.,
     # a1 = phi <a3, a0>
     a2 = operation (a1)
     a3 = operation (a2),

     we may end up with more than one vector result.  Here we reduce them to
     one vector.  */
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      tree first_vect = PHI_RESULT (new_phis[0]);
      tree tmp;
      gimple new_vec_stmt = NULL;

      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      for (k = 1; k < new_phis.length (); k++)
        {
          gimple next_phi = new_phis[k];
          tree second_vect = PHI_RESULT (next_phi);

          tmp = build2 (code, vectype, first_vect, second_vect);
          new_vec_stmt = gimple_build_assign (vec_dest, tmp);
          first_vect = make_ssa_name (vec_dest, new_vec_stmt);
          gimple_assign_set_lhs (new_vec_stmt, first_vect);
          gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
        }

      new_phi_result = first_vect;
      if (new_vec_stmt)
        {
          new_phis.truncate (0);
          new_phis.safe_push (new_vec_stmt);
        }
    }
  else
    new_phi_result = PHI_RESULT (new_phis[0]);
  /* 2.3 Create the reduction code, using one of the three schemes described
         above.  In SLP we simply need to extract all the elements from the
         vector (without reducing them), so we use scalar shifts.  */
  if (reduc_code != ERROR_MARK && !slp_reduc)
    {
      tree tmp;

      /*** Case 1:  Create:
           v_out2 = reduc_expr <v_out1>  */

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Reduce using direct vector reduction.\n");

      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      tmp = build1 (reduc_code, vectype, new_phi_result);
      epilog_stmt = gimple_build_assign (vec_dest, tmp);
      new_temp = make_ssa_name (vec_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

      extract_scalar_result = true;
    }
  else
    {
      enum tree_code shift_code = ERROR_MARK;
      bool have_whole_vector_shift = true;
      int bit_offset;
      int element_bitsize = tree_to_uhwi (bitsize);
      int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
      tree vec_temp;

      if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
        shift_code = VEC_RSHIFT_EXPR;
      else
        have_whole_vector_shift = false;

      /* Regardless of whether we have a whole vector shift, if we're
         emulating the operation via tree-vect-generic, we don't want
         to use it.  Only the first round of the reduction is likely
         to still be profitable via emulation.  */
      /* ??? It might be better to emit a reduction tree code here, so that
         tree-vect-generic can expand the first round via bit tricks.  */
      if (!VECTOR_MODE_P (mode))
        have_whole_vector_shift = false;
      else
        {
          optab optab = optab_for_tree_code (code, vectype, optab_default);
          if (optab_handler (optab, mode) == CODE_FOR_nothing)
            have_whole_vector_shift = false;
        }
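      /* Sketch (illustration only, not from the original sources): with a
         whole-vector shift on V4SI partial results v = {a,b,c,d}, the loop
         below conceptually halves the live part each round:
           v' = vec_shift <v, 64 bits>;  v = v + v';   # {a+c, b+d, _, _}
           v' = vec_shift <v, 32 bits>;  v = v + v';   # {a+b+c+d, _, _, _}
         leaving the scalar result in element 0.  */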
      if (have_whole_vector_shift && !slp_reduc)
        {
          /*** Case 2: Create:
             for (offset = VS/2; offset >= element_size; offset/=2)
               {
                 Create:  va' = vec_shift <va, offset>
                 Create:  va = vop <va, va'>
               }  */

          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "Reduce using vector shifts\n");

          vec_dest = vect_create_destination_var (scalar_dest, vectype);
          new_temp = new_phi_result;
          for (bit_offset = vec_size_in_bits/2;
               bit_offset >= element_bitsize;
               bit_offset /= 2)
            {
              tree bitpos = size_int (bit_offset);

              epilog_stmt = gimple_build_assign_with_ops (shift_code,
                                                          vec_dest, new_temp,
                                                          bitpos);
              new_name = make_ssa_name (vec_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_name);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

              epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
                                                          new_name, new_temp);
              new_temp = make_ssa_name (vec_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_temp);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
            }

          extract_scalar_result = true;
        }
      else
        {
          tree rhs;

          /*** Case 3: Create:
             s = extract_field <v_out2, 0>
             for (offset = element_size;
                  offset < vector_size;
                  offset += element_size;)
               {
                 Create:  s' = extract_field <v_out2, offset>
                 Create:  s = op <s, s'>  // For non SLP cases
               }  */

          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "Reduce using scalar code.\n");

          vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
          FOR_EACH_VEC_ELT (new_phis, i, new_phi)
            {
              if (gimple_code (new_phi) == GIMPLE_PHI)
                vec_temp = PHI_RESULT (new_phi);
              else
                vec_temp = gimple_assign_lhs (new_phi);
              rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
                            bitsize_zero_node);
              epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
              new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_temp);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

              /* In SLP we don't need to apply reduction operation, so we just
                 collect s' values in SCALAR_RESULTS.  */
              if (slp_reduc)
                scalar_results.safe_push (new_temp);

              for (bit_offset = element_bitsize;
                   bit_offset < vec_size_in_bits;
                   bit_offset += element_bitsize)
                {
                  tree bitpos = bitsize_int (bit_offset);
                  tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
                                     bitsize, bitpos);

                  epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
                  new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
                  gimple_assign_set_lhs (epilog_stmt, new_name);
                  gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

                  if (slp_reduc)
                    {
                      /* In SLP we don't need to apply reduction operation, so
                         we just collect s' values in SCALAR_RESULTS.  */
                      new_temp = new_name;
                      scalar_results.safe_push (new_name);
                    }
                  else
                    {
                      epilog_stmt = gimple_build_assign_with_ops (code,
                                                                  new_scalar_dest,
                                                                  new_name,
                                                                  new_temp);
                      new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
                      gimple_assign_set_lhs (epilog_stmt, new_temp);
                      gsi_insert_before (&exit_gsi, epilog_stmt,
                                         GSI_SAME_STMT);
                    }
                }
            }
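          /* Illustration (not from the original sources): for a V4SI int sum
             with v_out2 = {a,b,c,d} and element_bitsize == 32, the non-SLP
             path above emits
               s  = BIT_FIELD_REF <v, 32, 0>;
               s' = BIT_FIELD_REF <v, 32, 32>;  s = s + s';
               s' = BIT_FIELD_REF <v, 32, 64>;  s = s + s';
               s' = BIT_FIELD_REF <v, 32, 96>;  s = s + s';  */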
          /* The only case where we need to reduce scalar results in SLP, is
             unrolling.  If the size of SCALAR_RESULTS is greater than
             GROUP_SIZE, we reduce them combining elements modulo
             GROUP_SIZE.  */
          if (slp_reduc)
            {
              tree res, first_res, new_res;
              gimple new_stmt;

              /* Reduce multiple scalar results in case of SLP unrolling.  */
              for (j = group_size; scalar_results.iterate (j, &res);
                   j++)
                {
                  first_res = scalar_results[j % group_size];
                  new_stmt = gimple_build_assign_with_ops (code,
                                                           new_scalar_dest,
                                                           first_res, res);
                  new_res = make_ssa_name (new_scalar_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_res);
                  gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
                  scalar_results[j % group_size] = new_res;
                }
            }
          else
            /* Not SLP - we have one scalar to keep in SCALAR_RESULTS.  */
            scalar_results.safe_push (new_temp);

          extract_scalar_result = false;
        }
    }
  /* 2.4  Extract the final scalar result.  Create:
          s_out3 = extract_field <v_out2, bitpos>  */

  if (extract_scalar_result)
    {
      tree rhs;

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "extract scalar result\n");

      if (BYTES_BIG_ENDIAN)
        bitpos = size_binop (MULT_EXPR,
                             bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
                             TYPE_SIZE (scalar_type));
      else
        bitpos = bitsize_zero_node;

      rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
      epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
      new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
      scalar_results.safe_push (new_temp);
    }
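  /* Hedged example (not in the original code): on a big-endian target with
     V8HI the scalar result lives at bit position (8 - 1) * 16 == 112, which
     is what the MULT_EXPR above computes; on little-endian it is bit 0.  */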
vect_finalize_reduction:

  if (double_reduc)
    loop = loop->inner;

  /* 2.5 Adjust the final result by the initial value of the reduction
         variable.  (When such adjustment is not needed, then
         'adjustment_def' is zero).  For example, if code is PLUS we create:
         new_temp = loop_exit_def + adjustment_def  */

  if (adjustment_def)
    {
      gcc_assert (!slp_reduc);
      if (nested_in_vect_loop)
        {
          new_phi = new_phis[0];
          gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
          expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
          new_dest = vect_create_destination_var (scalar_dest, vectype);
        }
      else
        {
          new_temp = scalar_results[0];
          gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
          expr = build2 (code, scalar_type, new_temp, adjustment_def);
          new_dest = vect_create_destination_var (scalar_dest, scalar_type);
        }

      epilog_stmt = gimple_build_assign (new_dest, expr);
      new_temp = make_ssa_name (new_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
      if (nested_in_vect_loop)
        {
          set_vinfo_for_stmt (epilog_stmt,
                              new_stmt_vec_info (epilog_stmt, loop_vinfo,
                                                 NULL));
          STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
                STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));

          if (!double_reduc)
            scalar_results.quick_push (new_temp);
          else
            scalar_results[0] = new_temp;
        }
      else
        scalar_results[0] = new_temp;

      new_phis[0] = epilog_stmt;
    }
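  /* E.g. (illustration only): for a sum reduction with initial value s0
     whose vector phi may start from {0,...,0}, ADJUSTMENT_DEF is s0 and the
     stmt created above is  s_out4 = s_out3 + s0.  */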
  /* 2.6  Handle the loop-exit phis.  Replace the uses of scalar loop-exit
          phis with new adjusted scalar results, i.e., replace use <s_out0>
          with use <s_out4>.

     Transform:
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out0>
          use <s_out0>

     into:

        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out4>
          use <s_out4>  */

  /* In SLP reduction chain we reduce vector results into one vector if
     necessary, hence we set here GROUP_SIZE to 1.  SCALAR_DEST is the LHS of
     the last stmt in the reduction chain, since we are looking for the loop
     exit phi node.  */
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      scalar_dest = gimple_assign_lhs (
                        SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1]);
      group_size = 1;
    }

  /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
     case that GROUP_SIZE is greater than vectorization factor).  Therefore,
     we need to match SCALAR_RESULTS with corresponding statements.  The first
     (GROUP_SIZE / number of new vector stmts) scalar results correspond to
     the first vector stmt, etc.
     (RATIO is equal to (GROUP_SIZE / number of new vector stmts)).  */
  if (group_size > new_phis.length ())
    {
      ratio = group_size / new_phis.length ();
      gcc_assert (!(group_size % new_phis.length ()));
    }
  else
    ratio = 1;
  for (k = 0; k < group_size; k++)
    {
      if (k % ratio == 0)
        {
          epilog_stmt = new_phis[k / ratio];
          reduction_phi = reduction_phis[k / ratio];
          if (double_reduc)
            inner_phi = inner_phis[k / ratio];
        }

      if (slp_reduc)
        {
          gimple current_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[k];

          orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
          /* SLP statements can't participate in patterns.  */
          gcc_assert (!orig_stmt);
          scalar_dest = gimple_assign_lhs (current_stmt);
        }

      phis.create (3);
      /* Find the loop-closed-use at the loop exit of the original scalar
         result.  (The reduction result is expected to have two immediate
         uses - one at the latch block, and one at the loop exit).  */
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
        if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))
            && !is_gimple_debug (USE_STMT (use_p)))
          phis.safe_push (USE_STMT (use_p));

      /* While we expect to have found an exit_phi because of loop-closed-ssa
         form we can end up without one if the scalar cycle is dead.  */

      FOR_EACH_VEC_ELT (phis, i, exit_phi)
        {
          if (outer_loop)
            {
              stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);

              /* FORNOW. Currently not supporting the case that an inner-loop
                 reduction is not used in the outer-loop (but only outside the
                 outer-loop), unless it is double reduction.  */
              gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
                           && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
                          || double_reduc);

              STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
              if (!double_reduc
                  || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
                      != vect_double_reduction_def)
                continue;
              /* Handle double reduction:

                 stmt1: s1 = phi <s0, s2>  - double reduction phi (outer loop)
                 stmt2:   s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
                 stmt3:   s4 = use (s3)     - (regular) reduc stmt (inner loop)
                 stmt4: s2 = phi <s4>      - double reduction stmt (outer loop)

                 At that point the regular reduction (stmt2 and stmt3) is
                 already vectorized, as well as the exit phi node, stmt4.
                 Here we vectorize the phi node of double reduction, stmt1,
                 and update all relevant statements.  */

              /* Go through all the uses of s2 to find double reduction phi
                 node, i.e., stmt1 above.  */
              orig_name = PHI_RESULT (exit_phi);
              FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
                {
                  stmt_vec_info use_stmt_vinfo;
                  stmt_vec_info new_phi_vinfo;
                  tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
                  basic_block bb = gimple_bb (use_stmt);
                  gimple use;

                  /* Check that USE_STMT is really double reduction phi
                     node.  */
                  if (gimple_code (use_stmt) != GIMPLE_PHI
                      || gimple_phi_num_args (use_stmt) != 2
                      || bb->loop_father != outer_loop)
                    continue;
                  use_stmt_vinfo = vinfo_for_stmt (use_stmt);
                  if (!use_stmt_vinfo
                      || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
                          != vect_double_reduction_def)
                    continue;

                  /* Create vector phi node for double reduction:
                     vs1 = phi <vs0, vs2>
                     vs1 was created previously in this function by a call to
                       vect_get_vec_def_for_operand and is stored in
                       vec_initial_def;
                     vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
                     vs0 is created here.  */

                  /* Create vector phi node.  */
                  vect_phi = create_phi_node (vec_initial_def, bb);
                  new_phi_vinfo = new_stmt_vec_info (vect_phi,
                                    loop_vec_info_for_loop (outer_loop), NULL);
                  set_vinfo_for_stmt (vect_phi, new_phi_vinfo);

                  /* Create vs0 - initial def of the double reduction phi.  */
                  preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
                                             loop_preheader_edge (outer_loop));
                  init_def = get_initial_def_for_reduction (stmt,
                                                            preheader_arg,
                                                            NULL);
                  vect_phi_init = vect_init_vector (use_stmt, init_def,
                                                    vectype, NULL);

                  /* Update phi node arguments with vs0 and vs2.  */
                  add_phi_arg (vect_phi, vect_phi_init,
                               loop_preheader_edge (outer_loop),
                               UNKNOWN_LOCATION);
                  add_phi_arg (vect_phi, PHI_RESULT (inner_phi),
                               loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_NOTE, vect_location,
                                       "created double reduction phi node: ");
                      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
                      dump_printf (MSG_NOTE, "\n");
                    }

                  vect_phi_res = PHI_RESULT (vect_phi);

                  /* Replace the use, i.e., set the correct vs1 in the regular
                     reduction phi node.  FORNOW, NCOPIES is always 1, so the
                     loop is redundant.  */
                  use = reduction_phi;
                  for (j = 0; j < ncopies; j++)
                    {
                      edge pr_edge = loop_preheader_edge (loop);
                      SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
                      use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
                    }
                }
            }
        }

      phis.release ();
      if (nested_in_vect_loop)
        {
          if (double_reduc)
            loop = outer_loop;
          else
            continue;
        }
      phis.create (3);
      /* Find the loop-closed-use at the loop exit of the original scalar
         result.  (The reduction result is expected to have two immediate
         uses, one at the latch block, and one at the loop exit).  For double
         reductions we are looking for exit phis of the outer loop.  */
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
        {
          if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
            {
              if (!is_gimple_debug (USE_STMT (use_p)))
                phis.safe_push (USE_STMT (use_p));
            }
          else
            {
              if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
                {
                  tree phi_res = PHI_RESULT (USE_STMT (use_p));

                  FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
                    {
                      if (!flow_bb_inside_loop_p (loop,
                                             gimple_bb (USE_STMT (phi_use_p)))
                          && !is_gimple_debug (USE_STMT (phi_use_p)))
                        phis.safe_push (USE_STMT (phi_use_p));
                    }
                }
            }
        }

      FOR_EACH_VEC_ELT (phis, i, exit_phi)
        {
          /* Replace the uses:  */
          orig_name = PHI_RESULT (exit_phi);
          scalar_result = scalar_results[k];
          FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
            FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
              SET_USE (use_p, scalar_result);
        }

      phis.release ();
    }
}
/* Function vectorizable_reduction.

   Check if STMT performs a reduction operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.

   This function also handles reduction idioms (patterns) that have been
   recognized in advance during vect_pattern_recog.  In this case, STMT may be
   of this form:
     X = pattern_expr (arg0, arg1, ..., X)
   and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
   sequence that had been detected and replaced by the pattern-stmt (STMT).

   In some cases of reduction patterns, the type of the reduction variable X
   is different than the type of the other arguments of STMT.
   In such cases, the vectype that is used when transforming STMT into a
   vector stmt is different than the vectype that is used to determine the
   vectorization factor, because it consists of a different number of elements
   than the actual number of elements that are being operated upon in
   parallel.

   For example, consider an accumulation of shorts into an int accumulator.
   On some targets it's possible to vectorize this pattern operating on 8
   shorts at a time (hence, the vectype for purposes of determining the
   vectorization factor should be V8HI); on the other hand, the vectype that
   is used to create the vector form is actually V4SI (the type of the
   result).

   Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
   indicates what is the actual level of parallelism (V8HI in the example), so
   that the right vectorization factor would be derived.  This vectype
   corresponds to the type of arguments to the reduction stmt, and should
   *NOT* be used to create the vectorized stmt.  The right vectype for the
   vectorized stmt is obtained from the type of the result X:
      get_vectype_for_scalar_type (TREE_TYPE (X))

   This means that, contrary to "regular" reductions (or "regular" stmts in
   general), the following equation:
      STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
   does *NOT* necessarily hold for reduction patterns.  */
bool
vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
                        gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
  tree vectype_in = NULL_TREE;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  enum tree_code code, orig_code, epilog_reduc_code;
  enum machine_mode vec_mode;
  int op_type;
  optab optab, reduc_optab;
  tree new_temp = NULL_TREE;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;
  gimple new_phi = NULL;
  tree scalar_type;
  bool is_simple_use;
  gimple orig_stmt;
  stmt_vec_info orig_stmt_info;
  tree expr = NULL_TREE;
  int i;
  int ncopies;
  int epilog_copies;
  stmt_vec_info prev_stmt_info, prev_phi_info;
  bool single_defuse_cycle = false;
  tree reduc_def = NULL_TREE;
  gimple new_stmt = NULL;
  int j;
  tree ops[3];
  bool nested_cycle = false, found_nested_cycle_def = false;
  gimple reduc_def_stmt = NULL;
  /* The default is that the reduction variable is the last in statement.  */
  int reduc_index = 2;
  bool double_reduc = false, dummy;
  basic_block def_bb;
  struct loop * def_stmt_loop, *outer_loop = NULL;
  tree def_arg;
  gimple def_arg_stmt;
  auto_vec<tree> vec_oprnds0;
  auto_vec<tree> vec_oprnds1;
  auto_vec<tree> vect_defs;
  auto_vec<gimple> phis;
  int vec_num;
  tree def0, def1, tem, op0, op1 = NULL_TREE;
  /* In case of reduction chain we switch to the first stmt in the chain, but
     we don't update STMT_INFO, since only the last stmt is marked as
     reduction and has reduction properties.  */
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (nested_in_vect_loop_p (loop, stmt))
    {
      outer_loop = loop;
      loop = loop->inner;
      nested_cycle = true;
    }

  /* 1. Is vectorizable reduction?  */
  /* Not supportable if the reduction variable is used in the loop, unless
     it's a reduction chain.  */
  if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
      && !GROUP_FIRST_ELEMENT (stmt_info))
    return false;

  /* Reductions that are not used even in an enclosing outer-loop,
     are expected to be "live" (used out of the loop).  */
  if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
      && !STMT_VINFO_LIVE_P (stmt_info))
    return false;

  /* Make sure it was already recognized as a reduction computation.  */
  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
      && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
    return false;

  /* 2. Has this been recognized as a reduction pattern?

     Check if STMT represents a pattern that has been recognized
     in earlier analysis stages.  For stmts that represent a pattern,
     the STMT_VINFO_RELATED_STMT field records the last stmt in
     the original sequence that constitutes the pattern.  */

  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (orig_stmt)
    {
      orig_stmt_info = vinfo_for_stmt (orig_stmt);
      gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
      gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
    }
  /* 3. Check the operands of the operation.  The first operands are defined
        inside the loop body.  The last operand is the reduction variable,
        which is defined by the loop-header-phi.  */

  gcc_assert (is_gimple_assign (stmt));

  /* Flatten RHS.  */
  switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
    {
    case GIMPLE_SINGLE_RHS:
      op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
      if (op_type == ternary_op)
        {
          tree rhs = gimple_assign_rhs1 (stmt);
          ops[0] = TREE_OPERAND (rhs, 0);
          ops[1] = TREE_OPERAND (rhs, 1);
          ops[2] = TREE_OPERAND (rhs, 2);
          code = TREE_CODE (rhs);
        }
      else
        return false;
      break;

    case GIMPLE_BINARY_RHS:
      code = gimple_assign_rhs_code (stmt);
      op_type = TREE_CODE_LENGTH (code);
      gcc_assert (op_type == binary_op);
      ops[0] = gimple_assign_rhs1 (stmt);
      ops[1] = gimple_assign_rhs2 (stmt);
      break;

    case GIMPLE_TERNARY_RHS:
      code = gimple_assign_rhs_code (stmt);
      op_type = TREE_CODE_LENGTH (code);
      gcc_assert (op_type == ternary_op);
      ops[0] = gimple_assign_rhs1 (stmt);
      ops[1] = gimple_assign_rhs2 (stmt);
      ops[2] = gimple_assign_rhs3 (stmt);
      break;

    case GIMPLE_UNARY_RHS:
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == COND_EXPR && slp_node)
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  scalar_type = TREE_TYPE (scalar_dest);
  if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
      && !SCALAR_FLOAT_TYPE_P (scalar_type))
    return false;

  /* Do not try to vectorize bit-precision reductions.  */
  if ((TYPE_PRECISION (scalar_type)
       != GET_MODE_PRECISION (TYPE_MODE (scalar_type))))
    return false;
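  /* For instance (illustration only): a reduction into a 12-bit bit-field,
     where TYPE_PRECISION is 12 but the underlying mode (HImode) has 16 bits,
     is rejected by the check above.  */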
  /* All uses but the last are expected to be defined in the loop.
     The last use is the reduction variable.  In case of nested cycle this
     assumption is not true: we use reduc_index to record the index of the
     reduction variable.  */
  for (i = 0; i < op_type - 1; i++)
    {
      /* The condition of COND_EXPR is checked in
         vectorizable_condition().  */
      if (i == 0 && code == COND_EXPR)
        continue;

      is_simple_use = vect_is_simple_use_1 (ops[i], stmt, loop_vinfo, NULL,
                                            &def_stmt, &def, &dt, &tem);
      if (!vectype_in)
        vectype_in = tem;
      gcc_assert (is_simple_use);

      if (dt != vect_internal_def
          && dt != vect_external_def
          && dt != vect_constant_def
          && dt != vect_induction_def
          && !(dt == vect_nested_cycle && nested_cycle))
        return false;

      if (dt == vect_nested_cycle)
        {
          found_nested_cycle_def = true;
          reduc_def_stmt = def_stmt;
          reduc_index = i;
        }
    }

  is_simple_use = vect_is_simple_use_1 (ops[i], stmt, loop_vinfo, NULL,
                                        &def_stmt, &def, &dt, &tem);
  if (!vectype_in)
    vectype_in = tem;
  gcc_assert (is_simple_use);
  if (!(dt == vect_reduction_def
        || dt == vect_nested_cycle
        || ((dt == vect_internal_def || dt == vect_external_def
             || dt == vect_constant_def || dt == vect_induction_def)
            && nested_cycle && found_nested_cycle_def)))
    {
      /* For pattern recognized stmts, orig_stmt might be a reduction,
         but some helper statements for the pattern might not, or
         might be COND_EXPRs with reduction uses in the condition.  */
      gcc_assert (orig_stmt);
      return false;
    }

  if (!found_nested_cycle_def)
    reduc_def_stmt = def_stmt;

  gcc_assert (gimple_code (reduc_def_stmt) == GIMPLE_PHI);
  if (orig_stmt)
    gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo,
                                                       reduc_def_stmt,
                                                       !nested_cycle,
                                                       &dummy));
  else
    {
      gimple tmp = vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
                                             !nested_cycle, &dummy);
      /* We changed STMT to be the first stmt in reduction chain, hence we
         check that in this case the first element in the chain is STMT.  */
      gcc_assert (stmt == tmp
                  || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt);
    }
  if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
    return false;

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
               / TYPE_VECTOR_SUBPARTS (vectype_in));

  gcc_assert (ncopies >= 1);

  vec_mode = TYPE_MODE (vectype_in);

  if (code == COND_EXPR)
    {
      if (!vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0, NULL))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "unsupported condition in reduction\n");
          return false;
        }
    }
  else
    {
      /* 4. Supportable by target?  */

      if (code == LSHIFT_EXPR || code == RSHIFT_EXPR
          || code == LROTATE_EXPR || code == RROTATE_EXPR)
        {
          /* Shifts and rotates are only supported by vectorizable_shifts,
             not vectorizable_reduction.  */
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "unsupported shift or rotation.\n");
          return false;
        }

      /* 4.1. check support for the operation in the loop  */
      optab = optab_for_tree_code (code, vectype_in, optab_default);
      if (!optab)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "no optab.\n");
          return false;
        }

      if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
        {
          if (dump_enabled_p ())
            dump_printf (MSG_NOTE, "op not supported by target.\n");

          if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
              || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
                 < vect_min_worthwhile_factor (code))
            return false;

          if (dump_enabled_p ())
            dump_printf (MSG_NOTE, "proceeding using word mode.\n");
        }

      /* Worthwhile without SIMD support?  */
      if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
          && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
             < vect_min_worthwhile_factor (code))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not worthwhile without SIMD support.\n");
          return false;
        }
    }
  /* 4.2. Check support for the epilog operation.

          If STMT represents a reduction pattern, then the type of the
          reduction variable may be different than the type of the rest
          of the arguments.  For example, consider the case of accumulation
          of shorts into an int accumulator; The original code:
                        S1: int_a = (int) short_a;
          orig_stmt->   S2: int_acc = plus <int_a ,int_acc>;

          was replaced with:
                        STMT: int_acc = widen_sum <short_a, int_acc>

          This means that:
          1. The tree-code that is used to create the vector operation in the
             epilog code (that reduces the partial results) is not the
             tree-code of STMT, but is rather the tree-code of the original
             stmt from the pattern that STMT is replacing.  I.e, in the
             example above we want to use 'widen_sum' in the loop, but 'plus'
             in the epilog.
          2. The type (mode) we use to check available target support
             for the vector operation to be created in the *epilog*, is
             determined by the type of the reduction variable (in the example
             above we'd check this: optab_handler (plus_optab,
             vect_int_mode])).
             However the type (mode) we use to check available target support
             for the vector operation to be created *inside the loop*, is
             determined by the type of the other arguments to STMT (in the
             example we'd check this: optab_handler (widen_sum_optab,
             vect_short_mode)).

          This is contrary to "regular" reductions, in which the types of all
          the arguments are the same as the type of the reduction variable.
          For "regular" reductions we can therefore use the same vector type
          (and also the same tree-code) when generating the epilog code and
          when generating the code inside the loop.  */

  if (orig_stmt)
    {
      /* This is a reduction pattern: get the vectype from the type of the
         reduction variable, and get the tree-code from orig_stmt.  */
      orig_code = gimple_assign_rhs_code (orig_stmt);
      gcc_assert (vectype_out);
      vec_mode = TYPE_MODE (vectype_out);
    }
  else
    {
      /* Regular reduction: use the same vectype and tree-code as used for
         the vector code inside the loop can be used for the epilog code.  */
      orig_code = code;
    }
  if (nested_cycle)
    {
      def_bb = gimple_bb (reduc_def_stmt);
      def_stmt_loop = def_bb->loop_father;
      def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
                                       loop_preheader_edge (def_stmt_loop));
      if (TREE_CODE (def_arg) == SSA_NAME
          && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
          && gimple_code (def_arg_stmt) == GIMPLE_PHI
          && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
          && vinfo_for_stmt (def_arg_stmt)
          && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
              == vect_double_reduction_def)
        double_reduc = true;
    }
  epilog_reduc_code = ERROR_MARK;
  if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
    {
      reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out,
                                         optab_default);
      if (!reduc_optab)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "no optab for reduction.\n");

          epilog_reduc_code = ERROR_MARK;
        }

      if (reduc_optab
          && optab_handler (reduc_optab, vec_mode) == CODE_FOR_nothing)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "reduc op not supported by target.\n");

          epilog_reduc_code = ERROR_MARK;
        }
    }
  else
    {
      if (!nested_cycle || double_reduc)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "no reduc code for scalar code.\n");

          return false;
        }
    }

  if (double_reduc && ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types in double reduction\n");

      return false;
    }
  /* In case of widening multiplication by a constant, we update the type
     of the constant to be the type of the other operand.  We check that the
     constant fits the type in the pattern recognition pass.  */
  if (code == DOT_PROD_EXPR
      && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
    {
      if (TREE_CODE (ops[0]) == INTEGER_CST)
        ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
      else if (TREE_CODE (ops[1]) == INTEGER_CST)
        ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
      else
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "invalid types in dot-prod\n");

          return false;
        }
    }
  if (!vec_stmt) /* transformation not required.  */
    {
      if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
        return false;
      STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");

  /* FORNOW: Multiple types are not supported for condition.  */
  if (code == COND_EXPR)
    gcc_assert (ncopies == 1);
  /* Create the destination vector  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.  */

  /* If the reduction is used in an outer loop we need to generate
     VF intermediate results, like so (e.g. for ncopies=2):
        r0 = phi (init, r0)
        r1 = phi (init, r1)
        r0 = x0 + r0;
        r1 = x1 + r1;
    (i.e. we generate VF results in 2 registers).
    In this case we have a separate def-use cycle for each copy, and therefore
    for each copy we get the vector def for the reduction variable from the
    respective phi node created for this copy.

    Otherwise (the reduction is unused in the loop nest), we can combine
    together intermediate results, like so (e.g. for ncopies=2):
        r = phi (init, r)
        r = x0 + r;
        r = x1 + r;
   (i.e. we generate VF/2 results in a single register).
   In this case for each copy we get the vector def for the reduction variable
   from the vectorized reduction operation generated in the previous
   iteration.  */

  if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope)
    {
      single_defuse_cycle = true;
      epilog_copies = 1;
    }
  else
    epilog_copies = ncopies;

  prev_stmt_info = NULL;
  prev_phi_info = NULL;
  if (slp_node)
    {
      vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
      gcc_assert (TYPE_VECTOR_SUBPARTS (vectype_out)
                  == TYPE_VECTOR_SUBPARTS (vectype_in));
    }
  else
    {
      vec_num = 1;
      vec_oprnds0.create (1);
      if (op_type == ternary_op)
        vec_oprnds1.create (1);
    }
  phis.create (vec_num);
  vect_defs.create (vec_num);
  if (!slp_node)
    vect_defs.quick_push (NULL_TREE);
  for (j = 0; j < ncopies; j++)
    {
      if (j == 0 || !single_defuse_cycle)
        {
          for (i = 0; i < vec_num; i++)
            {
              /* Create the reduction-phi that defines the reduction
                 operand.  */
              new_phi = create_phi_node (vec_dest, loop->header);
              set_vinfo_for_stmt (new_phi,
                                  new_stmt_vec_info (new_phi, loop_vinfo,
                                                     NULL));
              if (j == 0 || slp_node)
                phis.quick_push (new_phi);
            }
        }

      if (code == COND_EXPR)
        {
          gcc_assert (!slp_node);
          vectorizable_condition (stmt, gsi, vec_stmt,
                                  PHI_RESULT (phis[0]),
                                  reduc_index, NULL);
          /* Multiple types are not supported for condition.  */
          break;
        }
      /* Handle uses.  */
      if (j == 0)
        {
          op0 = ops[!reduc_index];
          if (op_type == ternary_op)
            {
              if (reduc_index == 0)
                op1 = ops[2];
              else
                op1 = ops[1];
            }

          if (slp_node)
            vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
                               slp_node, -1);
          else
            {
              loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
                                                            stmt, NULL);
              vec_oprnds0.quick_push (loop_vec_def0);
              if (op_type == ternary_op)
                {
                  loop_vec_def1 = vect_get_vec_def_for_operand (op1, stmt,
                                                                NULL);
                  vec_oprnds1.quick_push (loop_vec_def1);
                }
            }
        }
      else
        {
          if (!slp_node)
            {
              enum vect_def_type dt;
              gimple dummy_stmt;
              tree dummy;

              vect_is_simple_use (ops[!reduc_index], stmt, loop_vinfo, NULL,
                                  &dummy_stmt, &dummy, &dt);
              loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt,
                                                              loop_vec_def0);
              vec_oprnds0[0] = loop_vec_def0;
              if (op_type == ternary_op)
                {
                  vect_is_simple_use (op1, stmt, loop_vinfo, NULL, &dummy_stmt,
                                      &dummy, &dt);
                  loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt,
                                                                loop_vec_def1);
                  vec_oprnds1[0] = loop_vec_def1;
                }
            }

          if (single_defuse_cycle)
            reduc_def = gimple_assign_lhs (new_stmt);

          STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
        }
      FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
        {
          if (slp_node)
            reduc_def = PHI_RESULT (phis[i]);
          else
            {
              if (!single_defuse_cycle || j == 0)
                reduc_def = PHI_RESULT (new_phi);
            }

          def1 = ((op_type == ternary_op)
                  ? vec_oprnds1[i] : NULL);
          if (op_type == binary_op)
            {
              if (reduc_index == 0)
                expr = build2 (code, vectype_out, reduc_def, def0);
              else
                expr = build2 (code, vectype_out, def0, reduc_def);
            }
          else
            {
              if (reduc_index == 0)
                expr = build3 (code, vectype_out, reduc_def, def0, def1);
              else
                {
                  if (reduc_index == 1)
                    expr = build3 (code, vectype_out, def0, reduc_def, def1);
                  else
                    expr = build3 (code, vectype_out, def0, def1, reduc_def);
                }
            }

          new_stmt = gimple_build_assign (vec_dest, expr);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (slp_node)
            {
              SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
              vect_defs.quick_push (new_temp);
            }
          else
            vect_defs[0] = new_temp;
        }
      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
      prev_phi_info = vinfo_for_stmt (new_phi);
    }

  /* Finalize the reduction-phi (set its arguments) and create the
     epilog reduction code.  */
  if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
    {
      new_temp = gimple_assign_lhs (*vec_stmt);
      vect_defs[0] = new_temp;
    }

  vect_create_epilog_for_reduction (vect_defs, stmt, epilog_copies,
                                    epilog_reduc_code, phis, reduc_index,
                                    double_reduc, slp_node);

  return true;
}
/* Function vect_min_worthwhile_factor.

   For a loop where we could vectorize the operation indicated by CODE,
   return the minimum vectorization factor that makes it worthwhile
   to use generic vectors.  */
int
vect_min_worthwhile_factor (enum tree_code code)
{
  switch (code)
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case NEGATE_EXPR:
      return 4;

    case ABS_EXPR:
    case BIT_AND_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case BIT_NOT_EXPR:
      return 2;

    default:
      return INT_MAX;
    }
}
/* Function vectorizable_induction

   Check if PHI performs an induction computation that can be vectorized.
   If VEC_STMT is also passed, vectorize the induction PHI: create a
   vectorized phi to replace it, put it in VEC_STMT, and add it to the same
   basic block.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

bool
vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
                        gimple *vec_stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (phi);
  tree vec_def;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  /* FORNOW. These restrictions should be relaxed.  */
  if (nested_in_vect_loop_p (loop, phi))
    {
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      gimple exit_phi;
      edge latch_e;
      tree loop_arg;

      if (ncopies > 1)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "multiple types in nested loop.\n");
          return false;
        }

      exit_phi = NULL;
      latch_e = loop_latch_edge (loop->inner);
      loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
        {
          if (!flow_bb_inside_loop_p (loop->inner,
                                      gimple_bb (USE_STMT (use_p))))
            {
              exit_phi = USE_STMT (use_p);
              break;
            }
        }
      if (exit_phi)
        {
          stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
          if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
                && !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "inner-loop induction only used outside "
                                 "of the outer vectorized loop.\n");
              return false;
            }
        }
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);

  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_induction ===\n");
      vect_model_induction_cost (stmt_info, ncopies);
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");

  vec_def = get_initial_def_for_induction (phi);
  *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
  return true;
}
/* Function vectorizable_live_operation.

   STMT computes a value that is used outside the loop.  Check if
   it can be supported.  */

bool
vectorizable_live_operation (gimple stmt,
                             gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
                             gimple *vec_stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  int i;
  int op_type;
  tree op;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;
  enum tree_code code;
  enum gimple_rhs_class rhs_class;

  gcc_assert (STMT_VINFO_LIVE_P (stmt_info));

  if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
    return false;

  if (!is_gimple_assign (stmt))
    {
      if (gimple_call_internal_p (stmt)
          && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
          && gimple_call_lhs (stmt)
          && loop->simduid
          && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
          && loop->simduid
             == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
        {
          edge e = single_exit (loop);
          basic_block merge_bb = e->dest;
          imm_use_iterator imm_iter;
          use_operand_p use_p;
          tree lhs = gimple_call_lhs (stmt);

          FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
            {
              gimple use_stmt = USE_STMT (use_p);
              if (gimple_code (use_stmt) == GIMPLE_PHI
                  && gimple_bb (use_stmt) == merge_bb)
                {
                  if (vec_stmt)
                    {
                      tree vfm1
                        = build_int_cst (unsigned_type_node,
                                         loop_vinfo->vectorization_factor - 1);
                      SET_PHI_ARG_DEF (use_stmt, e->dest_idx, vfm1);
                    }
                  return true;
                }
            }
        }

      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  /* FORNOW. CHECKME. */
  if (nested_in_vect_loop_p (loop, stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);
  op_type = TREE_CODE_LENGTH (code);
  rhs_class = get_gimple_rhs_class (code);
  gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
  gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);

  /* FORNOW: support only if all uses are invariant.  This means
     that the scalar operations can remain in place, unvectorized.
     The original last scalar value that they compute will be used.  */

  for (i = 0; i < op_type; i++)
    {
      if (rhs_class == GIMPLE_SINGLE_RHS)
        op = TREE_OPERAND (gimple_op (stmt, 1), i);
      else
        op = gimple_op (stmt, i + 1);
      if (op
          && !vect_is_simple_use (op, stmt, loop_vinfo, NULL, &def_stmt, &def,
                                  &dt))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }

      if (dt != vect_external_def && dt != vect_constant_def)
        return false;
    }

  /* No transformation is required for the cases we currently support.  */
  return true;
}
/* Kill any debug uses outside LOOP of SSA names defined in STMT.  */

static void
vect_loop_kill_debug_uses (struct loop *loop, gimple stmt)
{
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  def_operand_p def_p;
  gimple ustmt;

  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb;

          if (!is_gimple_debug (ustmt))
            continue;

          bb = gimple_bb (ustmt);

          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (gimple_debug_bind_p (ustmt))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "killing debug use\n");

                  gimple_debug_bind_reset_value (ustmt);
                  update_stmt (ustmt);
                }
              else
                gcc_unreachable ();
            }
        }
    }
}
/* This function builds ni_name = number of iterations.  Statements
   are emitted on the loop preheader edge.  */

static tree
vect_build_loop_niters (loop_vec_info loop_vinfo)
{
  tree ni = unshare_expr (LOOP_VINFO_NITERS (loop_vinfo));
  if (TREE_CODE (ni) == INTEGER_CST)
    return ni;
  else
    {
      tree ni_name, var;
      gimple_seq stmts = NULL;
      edge pe = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));

      var = create_tmp_var (TREE_TYPE (ni), "niters");
      ni_name = force_gimple_operand (ni, &stmts, false, var);
      if (stmts)
        gsi_insert_seq_on_edge_immediate (pe, stmts);

      return ni_name;
    }
}
/* This function generates the following statements:

   ni_name = number of iterations loop executes
   ratio = ni_name / vf
   ratio_mult_vf_name = ratio * vf

   and places them on the loop preheader edge.  */

static void
vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo,
                                 tree ni_name,
                                 tree *ratio_mult_vf_name_ptr,
                                 tree *ratio_name_ptr)
{
  tree ni_minus_gap_name;
  tree var;
  tree ratio_name;
  tree ratio_mult_vf_name;
  tree ni = LOOP_VINFO_NITERS (loop_vinfo);
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  edge pe = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
  tree log_vf;

  log_vf = build_int_cst (TREE_TYPE (ni), exact_log2 (vf));

  /* If epilogue loop is required because of data accesses with gaps, we
     subtract one iteration from the total number of iterations here for
     correct calculation of RATIO.  */
  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
    {
      ni_minus_gap_name = fold_build2 (MINUS_EXPR, TREE_TYPE (ni_name),
                                       ni_name,
                                       build_one_cst (TREE_TYPE (ni_name)));
      if (!is_gimple_val (ni_minus_gap_name))
        {
          var = create_tmp_var (TREE_TYPE (ni), "ni_gap");
          gimple_seq stmts = NULL;
          ni_minus_gap_name = force_gimple_operand (ni_minus_gap_name, &stmts,
                                                    true, var);
          gsi_insert_seq_on_edge_immediate (pe, stmts);
        }
    }
  else
    ni_minus_gap_name = ni_name;

  /* Create: ratio = ni >> log2(vf) */

  ratio_name = fold_build2 (RSHIFT_EXPR, TREE_TYPE (ni_minus_gap_name),
                            ni_minus_gap_name, log_vf);
  if (!is_gimple_val (ratio_name))
    {
      var = create_tmp_var (TREE_TYPE (ni), "bnd");
      gimple_seq stmts = NULL;
      ratio_name = force_gimple_operand (ratio_name, &stmts, true, var);
      gsi_insert_seq_on_edge_immediate (pe, stmts);
    }
  *ratio_name_ptr = ratio_name;

  /* Create: ratio_mult_vf = ratio << log2 (vf).  */

  if (ratio_mult_vf_name_ptr)
    {
      ratio_mult_vf_name = fold_build2 (LSHIFT_EXPR, TREE_TYPE (ratio_name),
                                        ratio_name, log_vf);
      if (!is_gimple_val (ratio_mult_vf_name))
        {
          var = create_tmp_var (TREE_TYPE (ni), "ratio_mult_vf");
          gimple_seq stmts = NULL;
          ratio_mult_vf_name = force_gimple_operand (ratio_mult_vf_name,
                                                     &stmts, true, var);
          gsi_insert_seq_on_edge_immediate (pe, stmts);
        }
      *ratio_mult_vf_name_ptr = ratio_mult_vf_name;
    }

  return;
}
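/* Worked example (illustration only, not from the original sources): with
   VF == 4 and a symbolic iteration count n, the statements emitted on the
   preheader edge are equivalent to
     bnd = n >> 2;              ratio
     ratio_mult_vf = bnd << 2;  iterations covered by the vector loop
   so n - ratio_mult_vf (== n & 3) iterations remain for the epilogue.  */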
/* Function vect_transform_loop.

   The analysis phase has determined that the loop is vectorizable.
   Vectorize the loop - create vectorized stmts to replace the scalar
   stmts in the loop, and update the loop exit condition.  */

void
vect_transform_loop (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  int i;
  tree ratio = NULL;
  int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  bool grouped_store;
  bool slp_scheduled = false;
  unsigned int nunits;
  gimple stmt, pattern_stmt;
  gimple_seq pattern_def_seq = NULL;
  gimple_stmt_iterator pattern_def_si = gsi_none ();
  bool transform_pattern_stmt = false;
  bool check_profitability = false;
  int th;
  /* Record number of iterations before we started tampering with
     the profile.  */
  gcov_type expected_iterations = expected_loop_iterations_unbounded (loop);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vec_transform_loop ===\n");

  /* If the profile is imprecise, we have a chance to fix it up.  */
  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    expected_iterations = LOOP_VINFO_INT_NITERS (loop_vinfo);

  /* Use the more conservative vectorization threshold.  If the number
     of iterations is constant assume the cost check has been performed
     by our caller.  If the threshold makes all loops profitable that
     run at least the vectorization factor number of times checking
     is pointless, too.  */
  th = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
         * LOOP_VINFO_VECT_FACTOR (loop_vinfo)) - 1);
  th = MAX (th, LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo));
  if (th >= LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1
      && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Profitability threshold is %d loop iterations.\n",
                         th);
      check_profitability = true;
    }
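  /* For instance (illustration only): with PARAM_MIN_VECT_LOOP_BOUND == 0
     and VF == 4 the first formula yields th == -1, so th is taken from the
     cost model's minimum profitable iteration count; the runtime
     profitability check is then folded into the versioning or peeling
     code emitted below rather than generated separately.  */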
  /* Version the loop first, if required, so the profitability check
     comes first.  */

  if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
      || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    {
      vect_loop_versioning (loop_vinfo, th, check_profitability);
      check_profitability = false;
    }

  tree ni_name = vect_build_loop_niters (loop_vinfo);
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = ni_name;

  /* Peel the loop if there are data refs with unknown alignment.
     Only one data ref with an unknown store is allowed.  */

  if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo))
    {
      vect_do_peeling_for_alignment (loop_vinfo, ni_name,
                                     th, check_profitability);
      check_profitability = false;
      /* The above adjusts LOOP_VINFO_NITERS, so cause ni_name to
         be re-computed.  */
      ni_name = NULL_TREE;
    }
  /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
     compile time constant), or it is a constant that doesn't divide by the
     vectorization factor, then an epilog loop needs to be created.
     We therefore duplicate the loop: the original loop will be vectorized,
     and will compute the first (n/VF) iterations.  The second copy of the
     loop will remain scalar and will compute the remaining (n%VF) iterations.
     (VF is the vectorization factor).  */

  if (LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo)
      || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
    {
      tree ratio_mult_vf;
      if (!ni_name)
        ni_name = vect_build_loop_niters (loop_vinfo);
      vect_generate_tmps_on_preheader (loop_vinfo, ni_name, &ratio_mult_vf,
                                       &ratio);
      vect_do_peeling_for_loop_bound (loop_vinfo, ni_name, ratio_mult_vf,
                                      th, check_profitability);
    }
  else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
                LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);
  else
    {
      if (!ni_name)
        ni_name = vect_build_loop_niters (loop_vinfo);
      vect_generate_tmps_on_preheader (loop_vinfo, ni_name, NULL, &ratio);
    }
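  /* Concrete instance (illustration only): for n == 103 and VF == 4 the
     vectorized loop executes ratio == 25 iterations covering
     ratio_mult_vf == 100 scalar iterations, and the scalar epilogue loop
     created above runs the remaining 3.  */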
  /* 1) Make sure the loop header has exactly two entries
     2) Make sure we have a preheader basic block.  */

  gcc_assert (EDGE_COUNT (loop->header->preds) == 2);

  split_edge (loop_preheader_edge (loop));

  /* FORNOW: the vectorizer supports only loops whose body consists
     of one basic block (header + empty latch).  When the vectorizer
     supports more involved loop forms, the order in which the BBs are
     traversed needs to be reconsidered.  */

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      stmt_vec_info stmt_info;
      gimple phi;

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "------>vectorizing phi: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
              dump_printf (MSG_NOTE, "\n");
            }
          stmt_info = vinfo_for_stmt (phi);
          if (!stmt_info)
            continue;

          if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
            vect_loop_kill_debug_uses (loop, phi);

          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            continue;

          if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
                != (unsigned HOST_WIDE_INT) vectorization_factor)
              && dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");

          if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
              vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
            }
        }
5836 pattern_stmt
= NULL
;
5837 for (si
= gsi_start_bb (bb
); !gsi_end_p (si
) || transform_pattern_stmt
;)
5841 if (transform_pattern_stmt
)
5842 stmt
= pattern_stmt
;
5845 stmt
= gsi_stmt (si
);
5846 /* During vectorization remove existing clobber stmts. */
5847 if (gimple_clobber_p (stmt
))
5849 unlink_stmt_vdef (stmt
);
5850 gsi_remove (&si
, true);
5851 release_defs (stmt
);
5856 if (dump_enabled_p ())
5858 dump_printf_loc (MSG_NOTE
, vect_location
,
5859 "------>vectorizing statement: ");
5860 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
5861 dump_printf (MSG_NOTE
, "\n");
5864 stmt_info
= vinfo_for_stmt (stmt
);
5866 /* vector stmts created in the outer-loop during vectorization of
5867 stmts in an inner-loop may not have a stmt_info, and do not
5868 need to be vectorized. */
5875 if (MAY_HAVE_DEBUG_STMTS
&& !STMT_VINFO_LIVE_P (stmt_info
))
5876 vect_loop_kill_debug_uses (loop
, stmt
);
5878 if (!STMT_VINFO_RELEVANT_P (stmt_info
)
5879 && !STMT_VINFO_LIVE_P (stmt_info
))
5881 if (STMT_VINFO_IN_PATTERN_P (stmt_info
)
5882 && (pattern_stmt
= STMT_VINFO_RELATED_STMT (stmt_info
))
5883 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt
))
5884 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt
))))
5886 stmt
= pattern_stmt
;
5887 stmt_info
= vinfo_for_stmt (stmt
);
5895 else if (STMT_VINFO_IN_PATTERN_P (stmt_info
)
5896 && (pattern_stmt
= STMT_VINFO_RELATED_STMT (stmt_info
))
5897 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt
))
5898 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt
))))
5899 transform_pattern_stmt
= true;
          /* If pattern statement has def stmts, vectorize them too.  */
          if (is_pattern_stmt_p (stmt_info))
            {
              if (pattern_def_seq == NULL)
                {
                  pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
                  pattern_def_si = gsi_start (pattern_def_seq);
                }
              else if (!gsi_end_p (pattern_def_si))
                gsi_next (&pattern_def_si);

              if (pattern_def_seq != NULL)
                {
                  gimple pattern_def_stmt = NULL;
                  stmt_vec_info pattern_def_stmt_info = NULL;

                  while (!gsi_end_p (pattern_def_si))
                    {
                      pattern_def_stmt = gsi_stmt (pattern_def_si);
                      pattern_def_stmt_info
                        = vinfo_for_stmt (pattern_def_stmt);
                      if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
                          || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
                        break;
                      gsi_next (&pattern_def_si);
                    }

                  if (!gsi_end_p (pattern_def_si))
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_NOTE, vect_location,
                                           "==> vectorizing pattern def "
                                           "stmt: ");
                          dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
                                            pattern_def_stmt, 0);
                          dump_printf (MSG_NOTE, "\n");
                        }

                      stmt = pattern_def_stmt;
                      stmt_info = pattern_def_stmt_info;
                    }
                  else
                    {
                      pattern_def_si = gsi_none ();
                      transform_pattern_stmt = false;
                    }
                }
              else
                transform_pattern_stmt = false;
            }
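          /* Some patterns need auxiliary statements (kept in
             STMT_VINFO_PATTERN_DEF_SEQ), e.g. an intermediate type
             conversion feeding the main pattern stmt; the walk above
             vectorizes each relevant def stmt before the pattern stmt
             itself.  */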
          gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
          nunits = (unsigned int) TYPE_VECTOR_SUBPARTS
                                    (STMT_VINFO_VECTYPE (stmt_info));
          if (!STMT_SLP_TYPE (stmt_info)
              && nunits != (unsigned int) vectorization_factor
              && dump_enabled_p ())
            /* For SLP VF is set according to unrolling factor, and not to
               vector size, hence for SLP this print is not valid.  */
            dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
          /* SLP.  Schedule all the SLP instances when the first SLP stmt
             is reached.  */
          if (STMT_SLP_TYPE (stmt_info))
            {
              if (!slp_scheduled)
                {
                  slp_scheduled = true;

                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "=== scheduling SLP instances ===\n");

                  vect_schedule_slp (loop_vinfo, NULL);
                }

              /* Hybrid SLP stmts must be vectorized in addition to SLP.  */
              if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
                {
                  if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
                    {
                      pattern_def_seq = NULL;
                      gsi_next (&si);
                    }
                  continue;
                }
            }
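          /* "Pure" SLP stmts are fully handled by vect_schedule_slp and
             skipped above; "hybrid" stmts participate both in an SLP
             instance and in the loop-based vectorization of the
             surrounding code, so they fall through to the loop-based
             transformation below.  */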
          /* -------- vectorize statement ------------ */
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "transform statement.\n");

          grouped_store = false;
          is_store
            = vect_transform_stmt (stmt, &si, &grouped_store, NULL, NULL);
          if (is_store)
            {
              if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
                {
                  /* Interleaving.  If IS_STORE is TRUE, the vectorization
                     of the interleaving chain was completed - free all the
                     stores in the chain.  */
                  gsi_next (&si);
                  vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info));
                }
              else
                {
                  /* Free the attached stmt_vec_info and remove the stmt.  */
                  gimple store = gsi_stmt (si);
                  free_stmt_vec_info (store);
                  unlink_stmt_vdef (store);
                  gsi_remove (&si, true);
                  release_defs (store);
                }
            }

          if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
            {
              pattern_def_seq = NULL;
              gsi_next (&si);
            }
        }                       /* stmts in BB */
    }                           /* BBs in loop */
  slpeel_make_loop_iterate_ntimes (loop, ratio);

  /* Reduce loop iterations by the vectorization factor.  */
  scale_loop_profile (loop, GCOV_COMPUTE_SCALE (1, vectorization_factor),
                      expected_iterations / vectorization_factor);
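  /* E.g. a loop previously expected to iterate 100 times with VF 4 now
     executes its body roughly 25 times, so execution counts inside the
     loop are scaled down by the vectorization factor.  */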
  loop->nb_iterations_upper_bound
    = loop->nb_iterations_upper_bound.udiv
        (double_int::from_uhwi (vectorization_factor), FLOOR_DIV_EXPR);
  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
      && loop->nb_iterations_upper_bound != double_int_zero)
    loop->nb_iterations_upper_bound
      = loop->nb_iterations_upper_bound - double_int_one;
  if (loop->any_estimate)
    {
      loop->nb_iterations_estimate
        = loop->nb_iterations_estimate.udiv
            (double_int::from_uhwi (vectorization_factor), FLOOR_DIV_EXPR);
      if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
          && loop->nb_iterations_estimate != double_int_zero)
        loop->nb_iterations_estimate
          = loop->nb_iterations_estimate - double_int_one;
    }
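  /* Worked example: with an upper bound of 100 scalar iterations and VF 4
     the vector loop runs at most floor (100 / 4) = 25 times; when peeling
     for gaps applies, the last iterations are always executed by the
     scalar epilogue, so one further vector iteration is shaved off.  */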
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "LOOP VECTORIZED\n");
      if (loop->inner)
        dump_printf_loc (MSG_NOTE, vect_location,
                         "OUTER LOOP VECTORIZED\n");
      dump_printf (MSG_NOTE, "\n");
    }