gcc/tree-vect-loop.c
1 /* Loop Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com> and
5 Ira Rosen <irar@il.ibm.com>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "basic-block.h"
30 #include "tree-pretty-print.h"
31 #include "gimple-pretty-print.h"
32 #include "tree-flow.h"
33 #include "tree-dump.h"
34 #include "cfgloop.h"
35 #include "cfglayout.h"
36 #include "expr.h"
37 #include "recog.h"
38 #include "optabs.h"
39 #include "params.h"
40 #include "diagnostic-core.h"
41 #include "tree-chrec.h"
42 #include "tree-scalar-evolution.h"
43 #include "tree-vectorizer.h"
44 #include "target.h"
45
46 /* Loop Vectorization Pass.
47
48 This pass tries to vectorize loops.
49
50 For example, the vectorizer transforms the following simple loop:
51
52 short a[N]; short b[N]; short c[N]; int i;
53
54 for (i=0; i<N; i++){
55 a[i] = b[i] + c[i];
56 }
57
  58     as if it were manually vectorized by rewriting the source code into:
59
60 typedef int __attribute__((mode(V8HI))) v8hi;
61 short a[N]; short b[N]; short c[N]; int i;
62 v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
63 v8hi va, vb, vc;
64
65 for (i=0; i<N/8; i++){
66 vb = pb[i];
67 vc = pc[i];
68 va = vb + vc;
69 pa[i] = va;
70 }
71
72 The main entry to this pass is vectorize_loops(), in which
73 the vectorizer applies a set of analyses on a given set of loops,
74 followed by the actual vectorization transformation for the loops that
75 had successfully passed the analysis phase.
76 Throughout this pass we make a distinction between two types of
77 data: scalars (which are represented by SSA_NAMES), and memory references
78 ("data-refs"). These two types of data require different handling both
79 during analysis and transformation. The types of data-refs that the
  80        vectorizer currently supports are ARRAY_REFS whose base is an array DECL
81 (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
82 accesses are required to have a simple (consecutive) access pattern.
83
84 Analysis phase:
85 ===============
86 The driver for the analysis phase is vect_analyze_loop().
87 It applies a set of analyses, some of which rely on the scalar evolution
88 analyzer (scev) developed by Sebastian Pop.
89
90 During the analysis phase the vectorizer records some information
91 per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
92 loop, as well as general information about the loop as a whole, which is
93 recorded in a "loop_vec_info" struct attached to each loop.
94
95 Transformation phase:
96 =====================
97 The loop transformation phase scans all the stmts in the loop, and
98 creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
99 the loop that needs to be vectorized. It inserts the vector code sequence
100 just before the scalar stmt S, and records a pointer to the vector code
101 in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
102 attached to S). This pointer will be used for the vectorization of following
103 stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
104 otherwise, we rely on dead code elimination for removing it.
105
106 For example, say stmt S1 was vectorized into stmt VS1:
107
108 VS1: vb = px[i];
109 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
110 S2: a = b;
111
112 To vectorize stmt S2, the vectorizer first finds the stmt that defines
113 the operand 'b' (S1), and gets the relevant vector def 'vb' from the
114 vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
115 resulting sequence would be:
116
117 VS1: vb = px[i];
118 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
119 VS2: va = vb;
120 S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
121
 122        Operands that are not SSA_NAMEs are data-refs that appear in
123 load/store operations (like 'x[i]' in S1), and are handled differently.
124
125 Target modeling:
126 =================
127 Currently the only target specific information that is used is the
128 size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
 129     Targets that can support different sizes of vectors will, for now, need
 130     to specify a single value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".  More
131 flexibility will be added in the future.
132
 133        Since we only vectorize operations whose vector form can be
134 expressed using existing tree codes, to verify that an operation is
135 supported, the vectorizer checks the relevant optab at the relevant
136 machine_mode (e.g, optab_handler (add_optab, V8HImode)). If
137 the value found is CODE_FOR_nothing, then there's no target support, and
138 we can't vectorize the stmt.
139
140 For additional information on this project see:
141 http://gcc.gnu.org/projects/tree-ssa/vectorization.html
142 */
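
/* As a rough sketch of the optab check described above (illustrative only,
   not code from this pass; V8HImode and the helper name are just example
   choices), target support for a vector addition could be probed like so:

     static bool
     example_target_supports_v8hi_add_p (void)
     {
       return optab_handler (add_optab, V8HImode) != CODE_FOR_nothing;
     }
*/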
143
144 /* Function vect_determine_vectorization_factor
145
146 Determine the vectorization factor (VF). VF is the number of data elements
147 that are operated upon in parallel in a single iteration of the vectorized
 148    loop.  For example, when vectorizing a loop that operates on 4-byte elements,
 149    on a target with a vector size (VS) of 16 bytes, the VF is set to 4, since 4
150 elements can fit in a single vector register.
151
152 We currently support vectorization of loops in which all types operated upon
153 are of the same size. Therefore this function currently sets VF according to
154 the size of the types operated upon, and fails if there are multiple sizes
155 in the loop.
156
157 VF is also the factor by which the loop iterations are strip-mined, e.g.:
158 original loop:
159 for (i=0; i<N; i++){
160 a[i] = b[i] + c[i];
161 }
162
163 vectorized loop:
164 for (i=0; i<N; i+=VF){
165 a[i:VF] = b[i:VF] + c[i:VF];
166 }
167 */
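
/* As a simplified, illustrative sketch (not part of this file): when all
   types in the loop have one uniform size, the computation described above
   boils down to dividing the vector size by the element size.

     static unsigned int
     example_vectorization_factor (unsigned int vector_size_bytes,
				   unsigned int element_size_bytes)
     {
       /* E.g. 16-byte vectors and 4-byte elements give a VF of 4.  */
       return vector_size_bytes / element_size_bytes;
     }
*/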
168
169 static bool
170 vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
171 {
172 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
173 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
174 int nbbs = loop->num_nodes;
175 gimple_stmt_iterator si;
176 unsigned int vectorization_factor = 0;
177 tree scalar_type;
178 gimple phi;
179 tree vectype;
180 unsigned int nunits;
181 stmt_vec_info stmt_info;
182 int i;
183 HOST_WIDE_INT dummy;
184 gimple stmt, pattern_stmt = NULL;
185 bool analyze_pattern_stmt = false;
186
187 if (vect_print_dump_info (REPORT_DETAILS))
188 fprintf (vect_dump, "=== vect_determine_vectorization_factor ===");
189
190 for (i = 0; i < nbbs; i++)
191 {
192 basic_block bb = bbs[i];
193
194 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
195 {
196 phi = gsi_stmt (si);
197 stmt_info = vinfo_for_stmt (phi);
198 if (vect_print_dump_info (REPORT_DETAILS))
199 {
200 fprintf (vect_dump, "==> examining phi: ");
201 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
202 }
203
204 gcc_assert (stmt_info);
205
206 if (STMT_VINFO_RELEVANT_P (stmt_info))
207 {
208 gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
209 scalar_type = TREE_TYPE (PHI_RESULT (phi));
210
211 if (vect_print_dump_info (REPORT_DETAILS))
212 {
213 fprintf (vect_dump, "get vectype for scalar type: ");
214 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
215 }
216
217 vectype = get_vectype_for_scalar_type (scalar_type);
218 if (!vectype)
219 {
220 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
221 {
222 fprintf (vect_dump,
223 "not vectorized: unsupported data-type ");
224 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
225 }
226 return false;
227 }
228 STMT_VINFO_VECTYPE (stmt_info) = vectype;
229
230 if (vect_print_dump_info (REPORT_DETAILS))
231 {
232 fprintf (vect_dump, "vectype: ");
233 print_generic_expr (vect_dump, vectype, TDF_SLIM);
234 }
235
236 nunits = TYPE_VECTOR_SUBPARTS (vectype);
237 if (vect_print_dump_info (REPORT_DETAILS))
238 fprintf (vect_dump, "nunits = %d", nunits);
239
240 if (!vectorization_factor
241 || (nunits > vectorization_factor))
242 vectorization_factor = nunits;
243 }
244 }
245
246 for (si = gsi_start_bb (bb); !gsi_end_p (si) || analyze_pattern_stmt;)
247 {
248 tree vf_vectype;
249
250 if (analyze_pattern_stmt)
251 {
252 stmt = pattern_stmt;
253 analyze_pattern_stmt = false;
254 }
255 else
256 stmt = gsi_stmt (si);
257
258 stmt_info = vinfo_for_stmt (stmt);
259
260 if (vect_print_dump_info (REPORT_DETAILS))
261 {
262 fprintf (vect_dump, "==> examining statement: ");
263 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
264 }
265
266 gcc_assert (stmt_info);
267
268 /* Skip stmts which do not need to be vectorized. */
269 if (!STMT_VINFO_RELEVANT_P (stmt_info)
270 && !STMT_VINFO_LIVE_P (stmt_info))
271 {
272 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
273 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
274 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
275 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
276 {
277 stmt = pattern_stmt;
278 stmt_info = vinfo_for_stmt (pattern_stmt);
279 if (vect_print_dump_info (REPORT_DETAILS))
280 {
281 fprintf (vect_dump, "==> examining pattern statement: ");
282 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
283 }
284 }
285 else
286 {
287 if (vect_print_dump_info (REPORT_DETAILS))
288 fprintf (vect_dump, "skip.");
289 gsi_next (&si);
290 continue;
291 }
292 }
293 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
294 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
295 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
296 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
297 analyze_pattern_stmt = true;
298
299 if (gimple_get_lhs (stmt) == NULL_TREE)
300 {
301 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
302 {
303 fprintf (vect_dump, "not vectorized: irregular stmt.");
304 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
305 }
306 return false;
307 }
308
309 if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
310 {
311 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
312 {
313 fprintf (vect_dump, "not vectorized: vector stmt in loop:");
314 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
315 }
316 return false;
317 }
318
319 if (STMT_VINFO_VECTYPE (stmt_info))
320 {
 321 		/* The only case in which a vectype has already been set is for stmts
322 that contain a dataref, or for "pattern-stmts" (stmts generated
323 by the vectorizer to represent/replace a certain idiom). */
324 gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
325 || is_pattern_stmt_p (stmt_info));
326 vectype = STMT_VINFO_VECTYPE (stmt_info);
327 }
328 else
329 {
330 gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
331 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
332 if (vect_print_dump_info (REPORT_DETAILS))
333 {
334 fprintf (vect_dump, "get vectype for scalar type: ");
335 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
336 }
337 vectype = get_vectype_for_scalar_type (scalar_type);
338 if (!vectype)
339 {
340 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
341 {
342 fprintf (vect_dump,
343 "not vectorized: unsupported data-type ");
344 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
345 }
346 return false;
347 }
348
349 STMT_VINFO_VECTYPE (stmt_info) = vectype;
350 }
351
 352 	  /* The vectorization factor is determined by the smallest
353 scalar type (or the largest vector size, but we only
354 support one vector size per loop). */
355 scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
356 &dummy);
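	  /* Illustrative example (a hypothetical statement, not target
	     specific): for a widening statement such as
	     "int_x = (int) short_y" the smallest scalar type is 'short',
	     so with 16-byte vectors the type computed below has 8 units
	     even though the vectype of the statement's result has only 4,
	     and the vectorization factor is raised accordingly.  */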
357 if (vect_print_dump_info (REPORT_DETAILS))
358 {
359 fprintf (vect_dump, "get vectype for scalar type: ");
360 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
361 }
362 vf_vectype = get_vectype_for_scalar_type (scalar_type);
363 if (!vf_vectype)
364 {
365 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
366 {
367 fprintf (vect_dump,
368 "not vectorized: unsupported data-type ");
369 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
370 }
371 return false;
372 }
373
374 if ((GET_MODE_SIZE (TYPE_MODE (vectype))
375 != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
376 {
377 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
378 {
379 fprintf (vect_dump,
380 "not vectorized: different sized vector "
381 "types in statement, ");
382 print_generic_expr (vect_dump, vectype, TDF_SLIM);
383 fprintf (vect_dump, " and ");
384 print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
385 }
386 return false;
387 }
388
389 if (vect_print_dump_info (REPORT_DETAILS))
390 {
391 fprintf (vect_dump, "vectype: ");
392 print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
393 }
394
395 nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
396 if (vect_print_dump_info (REPORT_DETAILS))
397 fprintf (vect_dump, "nunits = %d", nunits);
398
399 if (!vectorization_factor
400 || (nunits > vectorization_factor))
401 vectorization_factor = nunits;
402
403 if (!analyze_pattern_stmt)
404 gsi_next (&si);
405 }
406 }
407
408 /* TODO: Analyze cost. Decide if worth while to vectorize. */
409 if (vect_print_dump_info (REPORT_DETAILS))
410 fprintf (vect_dump, "vectorization factor = %d", vectorization_factor);
411 if (vectorization_factor <= 1)
412 {
413 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
414 fprintf (vect_dump, "not vectorized: unsupported data-type");
415 return false;
416 }
417 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
418
419 return true;
420 }
421
422
423 /* Function vect_is_simple_iv_evolution.
424
 425    FORNOW: A simple evolution of an induction variable in the loop is
 426    considered a polynomial evolution with a constant step.  */
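
/* For example (illustrative only), given

     for (i = 0; i < n; i += 3)
       a[i] = 0;

   the scalar evolution of 'i' is the chrec {0, +, 3}: an initial value of 0
   and a constant step of 3, which this predicate accepts.  */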
427
428 static bool
429 vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
430 tree * step)
431 {
432 tree init_expr;
433 tree step_expr;
434 tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
435
436 /* When there is no evolution in this loop, the evolution function
437 is not "simple". */
438 if (evolution_part == NULL_TREE)
439 return false;
440
441 /* When the evolution is a polynomial of degree >= 2
442 the evolution function is not "simple". */
443 if (tree_is_chrec (evolution_part))
444 return false;
445
446 step_expr = evolution_part;
447 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
448
449 if (vect_print_dump_info (REPORT_DETAILS))
450 {
451 fprintf (vect_dump, "step: ");
452 print_generic_expr (vect_dump, step_expr, TDF_SLIM);
453 fprintf (vect_dump, ", init: ");
454 print_generic_expr (vect_dump, init_expr, TDF_SLIM);
455 }
456
457 *init = init_expr;
458 *step = step_expr;
459
460 if (TREE_CODE (step_expr) != INTEGER_CST)
461 {
462 if (vect_print_dump_info (REPORT_DETAILS))
463 fprintf (vect_dump, "step unknown.");
464 return false;
465 }
466
467 return true;
468 }
469
470 /* Function vect_analyze_scalar_cycles_1.
471
472 Examine the cross iteration def-use cycles of scalar variables
473 in LOOP. LOOP_VINFO represents the loop that is now being
474 considered for vectorization (can be LOOP, or an outer-loop
475 enclosing LOOP). */
476
477 static void
478 vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
479 {
480 basic_block bb = loop->header;
481 tree dumy;
482 VEC(gimple,heap) *worklist = VEC_alloc (gimple, heap, 64);
483 gimple_stmt_iterator gsi;
484 bool double_reduc;
485
486 if (vect_print_dump_info (REPORT_DETAILS))
487 fprintf (vect_dump, "=== vect_analyze_scalar_cycles ===");
488
489 /* First - identify all inductions. Reduction detection assumes that all the
490 inductions have been identified, therefore, this order must not be
491 changed. */
492 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
493 {
494 gimple phi = gsi_stmt (gsi);
495 tree access_fn = NULL;
496 tree def = PHI_RESULT (phi);
497 stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
498
499 if (vect_print_dump_info (REPORT_DETAILS))
500 {
501 fprintf (vect_dump, "Analyze phi: ");
502 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
503 }
504
505 /* Skip virtual phi's. The data dependences that are associated with
506 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
507 if (!is_gimple_reg (SSA_NAME_VAR (def)))
508 continue;
509
510 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;
511
512 /* Analyze the evolution function. */
513 access_fn = analyze_scalar_evolution (loop, def);
514 if (access_fn)
515 STRIP_NOPS (access_fn);
516 if (access_fn && vect_print_dump_info (REPORT_DETAILS))
517 {
518 fprintf (vect_dump, "Access function of PHI: ");
519 print_generic_expr (vect_dump, access_fn, TDF_SLIM);
520 }
521
522 if (!access_fn
523 || !vect_is_simple_iv_evolution (loop->num, access_fn, &dumy, &dumy))
524 {
525 VEC_safe_push (gimple, heap, worklist, phi);
526 continue;
527 }
528
529 if (vect_print_dump_info (REPORT_DETAILS))
530 fprintf (vect_dump, "Detected induction.");
531 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
532 }
533
534
535 /* Second - identify all reductions and nested cycles. */
536 while (VEC_length (gimple, worklist) > 0)
537 {
538 gimple phi = VEC_pop (gimple, worklist);
539 tree def = PHI_RESULT (phi);
540 stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
541 gimple reduc_stmt;
542 bool nested_cycle;
543
544 if (vect_print_dump_info (REPORT_DETAILS))
545 {
546 fprintf (vect_dump, "Analyze phi: ");
547 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
548 }
549
550 gcc_assert (is_gimple_reg (SSA_NAME_VAR (def)));
551 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
552
553 nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
554 reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, !nested_cycle,
555 &double_reduc);
556 if (reduc_stmt)
557 {
558 if (double_reduc)
559 {
560 if (vect_print_dump_info (REPORT_DETAILS))
561 fprintf (vect_dump, "Detected double reduction.");
562
563 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
564 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
565 vect_double_reduction_def;
566 }
567 else
568 {
569 if (nested_cycle)
570 {
571 if (vect_print_dump_info (REPORT_DETAILS))
572 fprintf (vect_dump, "Detected vectorizable nested cycle.");
573
574 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
575 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
576 vect_nested_cycle;
577 }
578 else
579 {
580 if (vect_print_dump_info (REPORT_DETAILS))
581 fprintf (vect_dump, "Detected reduction.");
582
583 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
584 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
585 vect_reduction_def;
586 /* Store the reduction cycles for possible vectorization in
587 loop-aware SLP. */
588 VEC_safe_push (gimple, heap,
589 LOOP_VINFO_REDUCTIONS (loop_vinfo),
590 reduc_stmt);
591 }
592 }
593 }
594 else
595 if (vect_print_dump_info (REPORT_DETAILS))
596 fprintf (vect_dump, "Unknown def-use cycle pattern.");
597 }
598
599 VEC_free (gimple, heap, worklist);
600 }
601
602
603 /* Function vect_analyze_scalar_cycles.
604
605 Examine the cross iteration def-use cycles of scalar variables, by
606 analyzing the loop-header PHIs of scalar variables. Classify each
607 cycle as one of the following: invariant, induction, reduction, unknown.
 608    We do that for the loop represented by LOOP_VINFO, and also for its
 609    inner-loop, if it exists.
610 Examples for scalar cycles:
611
612 Example1: reduction:
613
614 loop1:
615 for (i=0; i<N; i++)
616 sum += a[i];
617
618 Example2: induction:
619
620 loop2:
621 for (i=0; i<N; i++)
622 a[i] = i; */
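
/* When the outer-loop of a nest is analyzed, reduction phis may further be
   classified as nested cycles or double reductions (see
   vect_analyze_scalar_cycles_1 above); an illustrative sketch of the
   double-reduction case is an accumulation spanning the whole nest:

   Example3: double reduction:

   loop3:
   for (i=0; i<N; i++)
     for (j=0; j<M; j++)
       sum += a[i][j]; */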
623
624 static void
625 vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
626 {
627 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
628
629 vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
630
631 /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
 632      Reductions in such an inner-loop therefore have different properties than
633 the reductions in the nest that gets vectorized:
634 1. When vectorized, they are executed in the same order as in the original
635 scalar loop, so we can't change the order of computation when
636 vectorizing them.
637 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
638 current checks are too strict. */
639
640 if (loop->inner)
641 vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
642 }
643
644 /* Function vect_get_loop_niters.
645
 646    Determine how many iterations the loop executes.
647 If an expression that represents the number of iterations
648 can be constructed, place it in NUMBER_OF_ITERATIONS.
649 Return the loop exit condition. */
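
/* For example (illustrative), for a loop such as

     for (i = 0; i < n; i++)
       a[i] = b[i];

   the expression placed in NUMBER_OF_ITERATIONS is equivalent to 'n', and
   the statement returned is the GIMPLE_COND controlling the loop exit.  */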
650
651 static gimple
652 vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
653 {
654 tree niters;
655
656 if (vect_print_dump_info (REPORT_DETAILS))
657 fprintf (vect_dump, "=== get_loop_niters ===");
658
659 niters = number_of_exit_cond_executions (loop);
660
661 if (niters != NULL_TREE
662 && niters != chrec_dont_know)
663 {
664 *number_of_iterations = niters;
665
666 if (vect_print_dump_info (REPORT_DETAILS))
667 {
668 fprintf (vect_dump, "==> get_loop_niters:" );
669 print_generic_expr (vect_dump, *number_of_iterations, TDF_SLIM);
670 }
671 }
672
673 return get_loop_exit_condition (loop);
674 }
675
676
677 /* Function bb_in_loop_p
678
679 Used as predicate for dfs order traversal of the loop bbs. */
680
681 static bool
682 bb_in_loop_p (const_basic_block bb, const void *data)
683 {
684 const struct loop *const loop = (const struct loop *)data;
685 if (flow_bb_inside_loop_p (loop, bb))
686 return true;
687 return false;
688 }
689
690
691 /* Function new_loop_vec_info.
692
693 Create and initialize a new loop_vec_info struct for LOOP, as well as
694 stmt_vec_info structs for all the stmts in LOOP. */
695
696 static loop_vec_info
697 new_loop_vec_info (struct loop *loop)
698 {
699 loop_vec_info res;
700 basic_block *bbs;
701 gimple_stmt_iterator si;
702 unsigned int i, nbbs;
703
704 res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
705 LOOP_VINFO_LOOP (res) = loop;
706
707 bbs = get_loop_body (loop);
708
709 /* Create/Update stmt_info for all stmts in the loop. */
710 for (i = 0; i < loop->num_nodes; i++)
711 {
712 basic_block bb = bbs[i];
713
 714        /* BBs in a nested inner-loop will already have been processed (because
715 we will have called vect_analyze_loop_form for any nested inner-loop).
716 Therefore, for stmts in an inner-loop we just want to update the
717 STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
718 loop_info of the outer-loop we are currently considering to vectorize
719 (instead of the loop_info of the inner-loop).
720 For stmts in other BBs we need to create a stmt_info from scratch. */
721 if (bb->loop_father != loop)
722 {
723 /* Inner-loop bb. */
724 gcc_assert (loop->inner && bb->loop_father == loop->inner);
725 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
726 {
727 gimple phi = gsi_stmt (si);
728 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
729 loop_vec_info inner_loop_vinfo =
730 STMT_VINFO_LOOP_VINFO (stmt_info);
731 gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
732 STMT_VINFO_LOOP_VINFO (stmt_info) = res;
733 }
734 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
735 {
736 gimple stmt = gsi_stmt (si);
737 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
738 loop_vec_info inner_loop_vinfo =
739 STMT_VINFO_LOOP_VINFO (stmt_info);
740 gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
741 STMT_VINFO_LOOP_VINFO (stmt_info) = res;
742 }
743 }
744 else
745 {
746 /* bb in current nest. */
747 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
748 {
749 gimple phi = gsi_stmt (si);
750 gimple_set_uid (phi, 0);
751 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL));
752 }
753
754 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
755 {
756 gimple stmt = gsi_stmt (si);
757 gimple_set_uid (stmt, 0);
758 set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL));
759 }
760 }
761 }
762
763 /* CHECKME: We want to visit all BBs before their successors (except for
764 latch blocks, for which this assertion wouldn't hold). In the simple
 765      case of the loop forms we allow, a dfs order of the BBs would be the same
766 as reversed postorder traversal, so we are safe. */
767
768 free (bbs);
769 bbs = XCNEWVEC (basic_block, loop->num_nodes);
770 nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
771 bbs, loop->num_nodes, loop);
772 gcc_assert (nbbs == loop->num_nodes);
773
774 LOOP_VINFO_BBS (res) = bbs;
775 LOOP_VINFO_NITERS (res) = NULL;
776 LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
777 LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
778 LOOP_VINFO_VECTORIZABLE_P (res) = 0;
779 LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
780 LOOP_VINFO_VECT_FACTOR (res) = 0;
781 LOOP_VINFO_LOOP_NEST (res) = VEC_alloc (loop_p, heap, 3);
782 LOOP_VINFO_DATAREFS (res) = VEC_alloc (data_reference_p, heap, 10);
783 LOOP_VINFO_DDRS (res) = VEC_alloc (ddr_p, heap, 10 * 10);
784 LOOP_VINFO_UNALIGNED_DR (res) = NULL;
785 LOOP_VINFO_MAY_MISALIGN_STMTS (res) =
786 VEC_alloc (gimple, heap,
787 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
788 LOOP_VINFO_MAY_ALIAS_DDRS (res) =
789 VEC_alloc (ddr_p, heap,
790 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
791 LOOP_VINFO_STRIDED_STORES (res) = VEC_alloc (gimple, heap, 10);
792 LOOP_VINFO_REDUCTIONS (res) = VEC_alloc (gimple, heap, 10);
793 LOOP_VINFO_REDUCTION_CHAINS (res) = VEC_alloc (gimple, heap, 10);
794 LOOP_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 10);
795 LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;
796 LOOP_VINFO_PEELING_HTAB (res) = NULL;
797 LOOP_VINFO_PEELING_FOR_GAPS (res) = false;
798
799 return res;
800 }
801
802
803 /* Function destroy_loop_vec_info.
804
805 Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the
806 stmts in the loop. */
807
808 void
809 destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
810 {
811 struct loop *loop;
812 basic_block *bbs;
813 int nbbs;
814 gimple_stmt_iterator si;
815 int j;
816 VEC (slp_instance, heap) *slp_instances;
817 slp_instance instance;
818
819 if (!loop_vinfo)
820 return;
821
822 loop = LOOP_VINFO_LOOP (loop_vinfo);
823
824 bbs = LOOP_VINFO_BBS (loop_vinfo);
825 nbbs = loop->num_nodes;
826
827 if (!clean_stmts)
828 {
829 free (LOOP_VINFO_BBS (loop_vinfo));
830 free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
831 free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
832 VEC_free (loop_p, heap, LOOP_VINFO_LOOP_NEST (loop_vinfo));
833 VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
834 VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
835
836 free (loop_vinfo);
837 loop->aux = NULL;
838 return;
839 }
840
841 for (j = 0; j < nbbs; j++)
842 {
843 basic_block bb = bbs[j];
844 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
845 free_stmt_vec_info (gsi_stmt (si));
846
847 for (si = gsi_start_bb (bb); !gsi_end_p (si); )
848 {
849 gimple stmt = gsi_stmt (si);
850 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
851
852 if (stmt_info)
853 {
854 /* Check if this statement has a related "pattern stmt"
855 (introduced by the vectorizer during the pattern recognition
856 pass). Free pattern's stmt_vec_info. */
857 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
858 && vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info)))
859 free_stmt_vec_info (STMT_VINFO_RELATED_STMT (stmt_info));
860
861 /* Free stmt_vec_info. */
862 free_stmt_vec_info (stmt);
863 }
864
865 gsi_next (&si);
866 }
867 }
868
869 free (LOOP_VINFO_BBS (loop_vinfo));
870 free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
871 free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
872 VEC_free (loop_p, heap, LOOP_VINFO_LOOP_NEST (loop_vinfo));
873 VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
874 VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
875 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
876 FOR_EACH_VEC_ELT (slp_instance, slp_instances, j, instance)
877 vect_free_slp_instance (instance);
878
879 VEC_free (slp_instance, heap, LOOP_VINFO_SLP_INSTANCES (loop_vinfo));
880 VEC_free (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo));
881 VEC_free (gimple, heap, LOOP_VINFO_REDUCTIONS (loop_vinfo));
882 VEC_free (gimple, heap, LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo));
883
884 if (LOOP_VINFO_PEELING_HTAB (loop_vinfo))
885 htab_delete (LOOP_VINFO_PEELING_HTAB (loop_vinfo));
886
887 free (loop_vinfo);
888 loop->aux = NULL;
889 }
890
891
892 /* Function vect_analyze_loop_1.
893
894 Apply a set of analyses on LOOP, and create a loop_vec_info struct
895 for it. The different analyses will record information in the
896 loop_vec_info struct. This is a subset of the analyses applied in
897 vect_analyze_loop, to be applied on an inner-loop nested in the loop
898 that is now considered for (outer-loop) vectorization. */
899
900 static loop_vec_info
901 vect_analyze_loop_1 (struct loop *loop)
902 {
903 loop_vec_info loop_vinfo;
904
905 if (vect_print_dump_info (REPORT_DETAILS))
906 fprintf (vect_dump, "===== analyze_loop_nest_1 =====");
907
 908   /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.).  */
909
910 loop_vinfo = vect_analyze_loop_form (loop);
911 if (!loop_vinfo)
912 {
913 if (vect_print_dump_info (REPORT_DETAILS))
914 fprintf (vect_dump, "bad inner-loop form.");
915 return NULL;
916 }
917
918 return loop_vinfo;
919 }
920
921
922 /* Function vect_analyze_loop_form.
923
924 Verify that certain CFG restrictions hold, including:
925 - the loop has a pre-header
926 - the loop has a single entry and exit
927 - the loop exit condition is simple enough, and the number of iterations
928 can be analyzed (a countable loop). */
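
/* For instance (illustrative), a loop with an early exit such as

     for (i = 0; i < n; i++)
       {
	 if (a[i] == key)
	   break;
	 b[i] = 0;
       }

   has two exits and is rejected by these checks, whereas single-exit
   countable loops of the forms sketched below are accepted.  */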
929
930 loop_vec_info
931 vect_analyze_loop_form (struct loop *loop)
932 {
933 loop_vec_info loop_vinfo;
934 gimple loop_cond;
935 tree number_of_iterations = NULL;
936 loop_vec_info inner_loop_vinfo = NULL;
937
938 if (vect_print_dump_info (REPORT_DETAILS))
939 fprintf (vect_dump, "=== vect_analyze_loop_form ===");
940
941 /* Different restrictions apply when we are considering an inner-most loop,
942 vs. an outer (nested) loop.
943 (FORNOW. May want to relax some of these restrictions in the future). */
944
945 if (!loop->inner)
946 {
947 /* Inner-most loop. We currently require that the number of BBs is
948 exactly 2 (the header and latch). Vectorizable inner-most loops
949 look like this:
950
951 (pre-header)
952 |
953 header <--------+
954 | | |
955 | +--> latch --+
956 |
957 (exit-bb) */
958
959 if (loop->num_nodes != 2)
960 {
961 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
962 fprintf (vect_dump, "not vectorized: control flow in loop.");
963 return NULL;
964 }
965
966 if (empty_block_p (loop->header))
967 {
968 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
969 fprintf (vect_dump, "not vectorized: empty loop.");
970 return NULL;
971 }
972 }
973 else
974 {
975 struct loop *innerloop = loop->inner;
976 edge entryedge;
977
978 /* Nested loop. We currently require that the loop is doubly-nested,
979 contains a single inner loop, and the number of BBs is exactly 5.
980 Vectorizable outer-loops look like this:
981
982 (pre-header)
983 |
984 header <---+
985 | |
986 inner-loop |
987 | |
988 tail ------+
989 |
990 (exit-bb)
991
992 The inner-loop has the properties expected of inner-most loops
993 as described above. */
994
995 if ((loop->inner)->inner || (loop->inner)->next)
996 {
997 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
998 fprintf (vect_dump, "not vectorized: multiple nested loops.");
999 return NULL;
1000 }
1001
1002 /* Analyze the inner-loop. */
1003 inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
1004 if (!inner_loop_vinfo)
1005 {
1006 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1007 fprintf (vect_dump, "not vectorized: Bad inner loop.");
1008 return NULL;
1009 }
1010
1011 if (!expr_invariant_in_loop_p (loop,
1012 LOOP_VINFO_NITERS (inner_loop_vinfo)))
1013 {
1014 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1015 fprintf (vect_dump,
1016 "not vectorized: inner-loop count not invariant.");
1017 destroy_loop_vec_info (inner_loop_vinfo, true);
1018 return NULL;
1019 }
1020
1021 if (loop->num_nodes != 5)
1022 {
1023 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1024 fprintf (vect_dump, "not vectorized: control flow in loop.");
1025 destroy_loop_vec_info (inner_loop_vinfo, true);
1026 return NULL;
1027 }
1028
1029 gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
1030 entryedge = EDGE_PRED (innerloop->header, 0);
1031 if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
1032 entryedge = EDGE_PRED (innerloop->header, 1);
1033
1034 if (entryedge->src != loop->header
1035 || !single_exit (innerloop)
1036 || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
1037 {
1038 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1039 fprintf (vect_dump, "not vectorized: unsupported outerloop form.");
1040 destroy_loop_vec_info (inner_loop_vinfo, true);
1041 return NULL;
1042 }
1043
1044 if (vect_print_dump_info (REPORT_DETAILS))
1045 fprintf (vect_dump, "Considering outer-loop vectorization.");
1046 }
1047
1048 if (!single_exit (loop)
1049 || EDGE_COUNT (loop->header->preds) != 2)
1050 {
1051 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1052 {
1053 if (!single_exit (loop))
1054 fprintf (vect_dump, "not vectorized: multiple exits.");
1055 else if (EDGE_COUNT (loop->header->preds) != 2)
1056 fprintf (vect_dump, "not vectorized: too many incoming edges.");
1057 }
1058 if (inner_loop_vinfo)
1059 destroy_loop_vec_info (inner_loop_vinfo, true);
1060 return NULL;
1061 }
1062
1063   /* We assume that the loop exit condition is at the end of the loop, i.e.,
1064 that the loop is represented as a do-while (with a proper if-guard
1065 before the loop if needed), where the loop header contains all the
1066 executable statements, and the latch is empty. */
1067 if (!empty_block_p (loop->latch)
1068 || !gimple_seq_empty_p (phi_nodes (loop->latch)))
1069 {
1070 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1071 fprintf (vect_dump, "not vectorized: unexpected loop form.");
1072 if (inner_loop_vinfo)
1073 destroy_loop_vec_info (inner_loop_vinfo, true);
1074 return NULL;
1075 }
1076
1077 /* Make sure there exists a single-predecessor exit bb: */
1078 if (!single_pred_p (single_exit (loop)->dest))
1079 {
1080 edge e = single_exit (loop);
1081 if (!(e->flags & EDGE_ABNORMAL))
1082 {
1083 split_loop_exit_edge (e);
1084 if (vect_print_dump_info (REPORT_DETAILS))
1085 fprintf (vect_dump, "split exit edge.");
1086 }
1087 else
1088 {
1089 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1090 fprintf (vect_dump, "not vectorized: abnormal loop exit edge.");
1091 if (inner_loop_vinfo)
1092 destroy_loop_vec_info (inner_loop_vinfo, true);
1093 return NULL;
1094 }
1095 }
1096
1097 loop_cond = vect_get_loop_niters (loop, &number_of_iterations);
1098 if (!loop_cond)
1099 {
1100 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1101 fprintf (vect_dump, "not vectorized: complicated exit condition.");
1102 if (inner_loop_vinfo)
1103 destroy_loop_vec_info (inner_loop_vinfo, true);
1104 return NULL;
1105 }
1106
1107 if (!number_of_iterations)
1108 {
1109 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1110 fprintf (vect_dump,
1111 "not vectorized: number of iterations cannot be computed.");
1112 if (inner_loop_vinfo)
1113 destroy_loop_vec_info (inner_loop_vinfo, true);
1114 return NULL;
1115 }
1116
1117 if (chrec_contains_undetermined (number_of_iterations))
1118 {
1119 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1120 fprintf (vect_dump, "Infinite number of iterations.");
1121 if (inner_loop_vinfo)
1122 destroy_loop_vec_info (inner_loop_vinfo, true);
1123 return NULL;
1124 }
1125
1126 if (!NITERS_KNOWN_P (number_of_iterations))
1127 {
1128 if (vect_print_dump_info (REPORT_DETAILS))
1129 {
1130 fprintf (vect_dump, "Symbolic number of iterations is ");
1131 print_generic_expr (vect_dump, number_of_iterations, TDF_DETAILS);
1132 }
1133 }
1134 else if (TREE_INT_CST_LOW (number_of_iterations) == 0)
1135 {
1136 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1137 fprintf (vect_dump, "not vectorized: number of iterations = 0.");
1138 if (inner_loop_vinfo)
1139 destroy_loop_vec_info (inner_loop_vinfo, false);
1140 return NULL;
1141 }
1142
1143 loop_vinfo = new_loop_vec_info (loop);
1144 LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
1145 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
1146
1147 STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;
1148
1149   /* CHECKME: May want to keep it around in the future.  */
1150 if (inner_loop_vinfo)
1151 destroy_loop_vec_info (inner_loop_vinfo, false);
1152
1153 gcc_assert (!loop->aux);
1154 loop->aux = loop_vinfo;
1155 return loop_vinfo;
1156 }
1157
1158
1159 /* Get cost by calling cost target builtin. */
1160
1161 static inline int
1162 vect_get_cost (enum vect_cost_for_stmt type_of_cost)
1163 {
1164 tree dummy_type = NULL;
1165 int dummy = 0;
1166
1167 return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
1168 dummy_type, dummy);
1169 }
1170
1171
1172 /* Function vect_analyze_loop_operations.
1173
1174 Scan the loop stmts and make sure they are all vectorizable. */
1175
1176 static bool
1177 vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
1178 {
1179 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1180 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1181 int nbbs = loop->num_nodes;
1182 gimple_stmt_iterator si;
1183 unsigned int vectorization_factor = 0;
1184 int i;
1185 gimple phi;
1186 stmt_vec_info stmt_info;
1187 bool need_to_vectorize = false;
1188 int min_profitable_iters;
1189 int min_scalar_loop_bound;
1190 unsigned int th;
1191 bool only_slp_in_loop = true, ok;
1192
1193 if (vect_print_dump_info (REPORT_DETAILS))
1194 fprintf (vect_dump, "=== vect_analyze_loop_operations ===");
1195
1196 gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
1197 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1198 if (slp)
1199 {
1200      /* If all the stmts in the loop can be SLPed, we perform only SLP, and
1201         the vectorization factor of the loop is the unrolling factor required by
1202         the SLP instances.  If that unrolling factor is 1, we say that we
1203         perform pure SLP on the loop; cross-iteration parallelism is not
1204         exploited.  */
1205 for (i = 0; i < nbbs; i++)
1206 {
1207 basic_block bb = bbs[i];
1208 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1209 {
1210 gimple stmt = gsi_stmt (si);
1211 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1212 gcc_assert (stmt_info);
1213 if ((STMT_VINFO_RELEVANT_P (stmt_info)
1214 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1215 && !PURE_SLP_STMT (stmt_info))
1216 /* STMT needs both SLP and loop-based vectorization. */
1217 only_slp_in_loop = false;
1218 }
1219 }
1220
1221 if (only_slp_in_loop)
1222 vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
1223 else
1224 vectorization_factor = least_common_multiple (vectorization_factor,
1225 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
1226
1227 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
1228 if (vect_print_dump_info (REPORT_DETAILS))
1229 fprintf (vect_dump, "Updating vectorization factor to %d ",
1230 vectorization_factor);
1231 }
1232
1233 for (i = 0; i < nbbs; i++)
1234 {
1235 basic_block bb = bbs[i];
1236
1237 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
1238 {
1239 phi = gsi_stmt (si);
1240 ok = true;
1241
1242 stmt_info = vinfo_for_stmt (phi);
1243 if (vect_print_dump_info (REPORT_DETAILS))
1244 {
1245 fprintf (vect_dump, "examining phi: ");
1246 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
1247 }
1248
1249 /* Inner-loop loop-closed exit phi in outer-loop vectorization
1250 (i.e., a phi in the tail of the outer-loop). */
1251 if (! is_loop_header_bb_p (bb))
1252 {
1253 /* FORNOW: we currently don't support the case that these phis
1254                are not used in the outer-loop (unless it is a double reduction,
1255                i.e., this phi is vect_reduction_def), because this case
1256                requires actually doing something here.  */
1257 if ((!STMT_VINFO_RELEVANT_P (stmt_info)
1258 || STMT_VINFO_LIVE_P (stmt_info))
1259 && STMT_VINFO_DEF_TYPE (stmt_info)
1260 != vect_double_reduction_def)
1261 {
1262 if (vect_print_dump_info (REPORT_DETAILS))
1263 fprintf (vect_dump,
1264 "Unsupported loop-closed phi in outer-loop.");
1265 return false;
1266 }
1267
1268 /* If PHI is used in the outer loop, we check that its operand
1269 is defined in the inner loop. */
1270 if (STMT_VINFO_RELEVANT_P (stmt_info))
1271 {
1272 tree phi_op;
1273 gimple op_def_stmt;
1274
1275 if (gimple_phi_num_args (phi) != 1)
1276 return false;
1277
1278 phi_op = PHI_ARG_DEF (phi, 0);
1279 if (TREE_CODE (phi_op) != SSA_NAME)
1280 return false;
1281
1282 op_def_stmt = SSA_NAME_DEF_STMT (phi_op);
1283 if (!op_def_stmt || !vinfo_for_stmt (op_def_stmt))
1284 return false;
1285
1286 if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
1287 != vect_used_in_outer
1288 && STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
1289 != vect_used_in_outer_by_reduction)
1290 return false;
1291 }
1292
1293 continue;
1294 }
1295
1296 gcc_assert (stmt_info);
1297
1298 if (STMT_VINFO_LIVE_P (stmt_info))
1299 {
1300 /* FORNOW: not yet supported. */
1301 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1302 fprintf (vect_dump, "not vectorized: value used after loop.");
1303 return false;
1304 }
1305
1306 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
1307 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
1308 {
1309 /* A scalar-dependence cycle that we don't support. */
1310 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1311 fprintf (vect_dump, "not vectorized: scalar dependence cycle.");
1312 return false;
1313 }
1314
1315 if (STMT_VINFO_RELEVANT_P (stmt_info))
1316 {
1317 need_to_vectorize = true;
1318 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
1319 ok = vectorizable_induction (phi, NULL, NULL);
1320 }
1321
1322 if (!ok)
1323 {
1324 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1325 {
1326 fprintf (vect_dump,
1327 "not vectorized: relevant phi not supported: ");
1328 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
1329 }
1330 return false;
1331 }
1332 }
1333
1334 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1335 {
1336 gimple stmt = gsi_stmt (si);
1337 if (!vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
1338 return false;
1339 }
1340 } /* bbs */
1341
1342 /* All operations in the loop are either irrelevant (deal with loop
1343 control, or dead), or only used outside the loop and can be moved
1344 out of the loop (e.g. invariants, inductions). The loop can be
1345 optimized away by scalar optimizations. We're better off not
1346 touching this loop. */
1347 if (!need_to_vectorize)
1348 {
1349 if (vect_print_dump_info (REPORT_DETAILS))
1350 fprintf (vect_dump,
1351 "All the computation can be taken out of the loop.");
1352 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1353 fprintf (vect_dump,
1354 "not vectorized: redundant loop. no profit to vectorize.");
1355 return false;
1356 }
1357
1358 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1359 && vect_print_dump_info (REPORT_DETAILS))
1360 fprintf (vect_dump,
1361 "vectorization_factor = %d, niters = " HOST_WIDE_INT_PRINT_DEC,
1362 vectorization_factor, LOOP_VINFO_INT_NITERS (loop_vinfo));
1363
1364 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1365 && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
1366 {
1367 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1368 fprintf (vect_dump, "not vectorized: iteration count too small.");
1369 if (vect_print_dump_info (REPORT_DETAILS))
1370 fprintf (vect_dump,"not vectorized: iteration count smaller than "
1371 "vectorization factor.");
1372 return false;
1373 }
1374
1375 /* Analyze cost. Decide if worth while to vectorize. */
1376
1377 /* Once VF is set, SLP costs should be updated since the number of created
1378 vector stmts depends on VF. */
1379 vect_update_slp_costs_according_to_vf (loop_vinfo);
1380
1381 min_profitable_iters = vect_estimate_min_profitable_iters (loop_vinfo);
1382 LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters;
1383
1384 if (min_profitable_iters < 0)
1385 {
1386 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1387 fprintf (vect_dump, "not vectorized: vectorization not profitable.");
1388 if (vect_print_dump_info (REPORT_DETAILS))
1389 fprintf (vect_dump, "not vectorized: vector version will never be "
1390 "profitable.");
1391 return false;
1392 }
1393
1394 min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
1395 * vectorization_factor) - 1);
1396
1397   /* Use the cost model only if it is more conservative than the user-specified
1398 threshold. */
1399
1400 th = (unsigned) min_scalar_loop_bound;
1401 if (min_profitable_iters
1402 && (!min_scalar_loop_bound
1403 || min_profitable_iters > min_scalar_loop_bound))
1404 th = (unsigned) min_profitable_iters;
1405
1406 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1407 && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
1408 {
1409 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1410 fprintf (vect_dump, "not vectorized: vectorization not "
1411 "profitable.");
1412 if (vect_print_dump_info (REPORT_DETAILS))
1413 fprintf (vect_dump, "not vectorized: iteration count smaller than "
1414 "user specified loop bound parameter or minimum "
1415 "profitable iterations (whichever is more conservative).");
1416 return false;
1417 }
1418
1419 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1420 || LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0
1421 || LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
1422 {
1423 if (vect_print_dump_info (REPORT_DETAILS))
1424 fprintf (vect_dump, "epilog loop required.");
1425 if (!vect_can_advance_ivs_p (loop_vinfo))
1426 {
1427 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1428 fprintf (vect_dump,
1429 "not vectorized: can't create epilog loop 1.");
1430 return false;
1431 }
1432 if (!slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
1433 {
1434 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1435 fprintf (vect_dump,
1436 "not vectorized: can't create epilog loop 2.");
1437 return false;
1438 }
1439 }
1440
1441 return true;
1442 }
1443
1444
1445 /* Function vect_analyze_loop_2.
1446
1447 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1448 for it. The different analyses will record information in the
1449 loop_vec_info struct. */
1450 static bool
1451 vect_analyze_loop_2 (loop_vec_info loop_vinfo)
1452 {
1453 bool ok, dummy, slp = false;
1454 int max_vf = MAX_VECTORIZATION_FACTOR;
1455 int min_vf = 2;
1456
1457 /* Find all data references in the loop (which correspond to vdefs/vuses)
1458 and analyze their evolution in the loop. Also adjust the minimal
1459 vectorization factor according to the loads and stores.
1460
1461      FORNOW: Handle only simple array references whose
1462      alignment can be forced, and aligned pointer-references.  */
1463
1464 ok = vect_analyze_data_refs (loop_vinfo, NULL, &min_vf);
1465 if (!ok)
1466 {
1467 if (vect_print_dump_info (REPORT_DETAILS))
1468 fprintf (vect_dump, "bad data references.");
1469 return false;
1470 }
1471
1472 /* Classify all cross-iteration scalar data-flow cycles.
1473 Cross-iteration cycles caused by virtual phis are analyzed separately. */
1474
1475 vect_analyze_scalar_cycles (loop_vinfo);
1476
1477 vect_pattern_recog (loop_vinfo);
1478
1479 /* Data-flow analysis to detect stmts that do not need to be vectorized. */
1480
1481 ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
1482 if (!ok)
1483 {
1484 if (vect_print_dump_info (REPORT_DETAILS))
1485 fprintf (vect_dump, "unexpected pattern.");
1486 return false;
1487 }
1488
1489 /* Analyze data dependences between the data-refs in the loop
1490 and adjust the maximum vectorization factor according to
1491 the dependences.
1492 FORNOW: fail at the first data dependence that we encounter. */
1493
1494 ok = vect_analyze_data_ref_dependences (loop_vinfo, NULL, &max_vf, &dummy);
1495 if (!ok
1496 || max_vf < min_vf)
1497 {
1498 if (vect_print_dump_info (REPORT_DETAILS))
1499 fprintf (vect_dump, "bad data dependence.");
1500 return false;
1501 }
1502
1503 ok = vect_determine_vectorization_factor (loop_vinfo);
1504 if (!ok)
1505 {
1506 if (vect_print_dump_info (REPORT_DETAILS))
1507 fprintf (vect_dump, "can't determine vectorization factor.");
1508 return false;
1509 }
1510 if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
1511 {
1512 if (vect_print_dump_info (REPORT_DETAILS))
1513 fprintf (vect_dump, "bad data dependence.");
1514 return false;
1515 }
1516
1517 /* Analyze the alignment of the data-refs in the loop.
1518 Fail if a data reference is found that cannot be vectorized. */
1519
1520 ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL);
1521 if (!ok)
1522 {
1523 if (vect_print_dump_info (REPORT_DETAILS))
1524 fprintf (vect_dump, "bad data alignment.");
1525 return false;
1526 }
1527
1528 /* Analyze the access patterns of the data-refs in the loop (consecutive,
1529 complex, etc.). FORNOW: Only handle consecutive access pattern. */
1530
1531 ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
1532 if (!ok)
1533 {
1534 if (vect_print_dump_info (REPORT_DETAILS))
1535 fprintf (vect_dump, "bad data access.");
1536 return false;
1537 }
1538
1539 /* Prune the list of ddrs to be tested at run-time by versioning for alias.
1540 It is important to call pruning after vect_analyze_data_ref_accesses,
1541 since we use grouping information gathered by interleaving analysis. */
1542 ok = vect_prune_runtime_alias_test_list (loop_vinfo);
1543 if (!ok)
1544 {
1545 if (vect_print_dump_info (REPORT_DETAILS))
1546 fprintf (vect_dump, "too long list of versioning for alias "
1547 "run-time tests.");
1548 return false;
1549 }
1550
1551 /* This pass will decide on using loop versioning and/or loop peeling in
1552 order to enhance the alignment of data references in the loop. */
1553
1554 ok = vect_enhance_data_refs_alignment (loop_vinfo);
1555 if (!ok)
1556 {
1557 if (vect_print_dump_info (REPORT_DETAILS))
1558 fprintf (vect_dump, "bad data alignment.");
1559 return false;
1560 }
1561
1562 /* Check the SLP opportunities in the loop, analyze and build SLP trees. */
1563 ok = vect_analyze_slp (loop_vinfo, NULL);
1564 if (ok)
1565 {
1566 /* Decide which possible SLP instances to SLP. */
1567 slp = vect_make_slp_decision (loop_vinfo);
1568
1569 /* Find stmts that need to be both vectorized and SLPed. */
1570 vect_detect_hybrid_slp (loop_vinfo);
1571 }
1572 else
1573 return false;
1574
1575 /* Scan all the operations in the loop and make sure they are
1576 vectorizable. */
1577
1578 ok = vect_analyze_loop_operations (loop_vinfo, slp);
1579 if (!ok)
1580 {
1581 if (vect_print_dump_info (REPORT_DETAILS))
1582 fprintf (vect_dump, "bad operation or unsupported loop bound.");
1583 return false;
1584 }
1585
1586 return true;
1587 }
1588
1589 /* Function vect_analyze_loop.
1590
1591 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1592 for it. The different analyses will record information in the
1593 loop_vec_info struct. */
1594 loop_vec_info
1595 vect_analyze_loop (struct loop *loop)
1596 {
1597 loop_vec_info loop_vinfo;
1598 unsigned int vector_sizes;
1599
1600 /* Autodetect first vector size we try. */
1601 current_vector_size = 0;
1602 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
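  /* VECTOR_SIZES is a bit mask of the vector sizes (in bytes) the target
     can autovectorize for; e.g. a target supporting 32-byte and 16-byte
     vectors might return 32 | 16 (illustrative values).  The loop below
     retries the analysis with the remaining sizes, largest first, whenever
     the current size fails.  */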
1603
1604 if (vect_print_dump_info (REPORT_DETAILS))
1605 fprintf (vect_dump, "===== analyze_loop_nest =====");
1606
1607 if (loop_outer (loop)
1608 && loop_vec_info_for_loop (loop_outer (loop))
1609 && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
1610 {
1611 if (vect_print_dump_info (REPORT_DETAILS))
1612 fprintf (vect_dump, "outer-loop already vectorized.");
1613 return NULL;
1614 }
1615
1616 while (1)
1617 {
1618 /* Check the CFG characteristics of the loop (nesting, entry/exit). */
1619 loop_vinfo = vect_analyze_loop_form (loop);
1620 if (!loop_vinfo)
1621 {
1622 if (vect_print_dump_info (REPORT_DETAILS))
1623 fprintf (vect_dump, "bad loop form.");
1624 return NULL;
1625 }
1626
1627 if (vect_analyze_loop_2 (loop_vinfo))
1628 {
1629 LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
1630
1631 return loop_vinfo;
1632 }
1633
1634 destroy_loop_vec_info (loop_vinfo, true);
1635
1636 vector_sizes &= ~current_vector_size;
1637 if (vector_sizes == 0
1638 || current_vector_size == 0)
1639 return NULL;
1640
1641 /* Try the next biggest vector size. */
1642 current_vector_size = 1 << floor_log2 (vector_sizes);
1643 if (vect_print_dump_info (REPORT_DETAILS))
1644 fprintf (vect_dump, "***** Re-trying analysis with "
1645 "vector size %d\n", current_vector_size);
1646 }
1647 }
1648
1649
1650 /* Function reduction_code_for_scalar_code
1651
1652 Input:
1653 CODE - tree_code of a reduction operations.
1654
1655 Output:
1656 REDUC_CODE - the corresponding tree-code to be used to reduce the
1657 vector of partial results into a single scalar result (which
1658 will also reside in a vector) or ERROR_MARK if the operation is
1659          a supported reduction operation, but does not have such a tree-code.
1660
1661 Return FALSE if CODE currently cannot be vectorized as reduction. */
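
/* For example, for CODE == PLUS_EXPR the switch below sets *REDUC_CODE to
   REDUC_PLUS_EXPR, whereas for CODE == MULT_EXPR there is no such
   whole-vector reduction tree-code, so *REDUC_CODE becomes ERROR_MARK but
   the function still returns true.  */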
1662
1663 static bool
1664 reduction_code_for_scalar_code (enum tree_code code,
1665 enum tree_code *reduc_code)
1666 {
1667 switch (code)
1668 {
1669 case MAX_EXPR:
1670 *reduc_code = REDUC_MAX_EXPR;
1671 return true;
1672
1673 case MIN_EXPR:
1674 *reduc_code = REDUC_MIN_EXPR;
1675 return true;
1676
1677 case PLUS_EXPR:
1678 *reduc_code = REDUC_PLUS_EXPR;
1679 return true;
1680
1681 case MULT_EXPR:
1682 case MINUS_EXPR:
1683 case BIT_IOR_EXPR:
1684 case BIT_XOR_EXPR:
1685 case BIT_AND_EXPR:
1686 *reduc_code = ERROR_MARK;
1687 return true;
1688
1689 default:
1690 return false;
1691 }
1692 }
1693
1694
1695 /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
1696 STMT is printed with a message MSG. */
1697
1698 static void
1699 report_vect_op (gimple stmt, const char *msg)
1700 {
1701 fprintf (vect_dump, "%s", msg);
1702 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
1703 }
1704
1705
1706 /* Detect SLP reduction of the form:
1707
1708 #a1 = phi <a5, a0>
1709 a2 = operation (a1)
1710 a3 = operation (a2)
1711 a4 = operation (a3)
1712 a5 = operation (a4)
1713
1714 #a = phi <a5>
1715
1716 PHI is the reduction phi node (#a1 = phi <a5, a0> above)
1717 FIRST_STMT is the first reduction stmt in the chain
1718 (a2 = operation (a1)).
1719
1720 Return TRUE if a reduction chain was detected. */
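
/* At the source level such a chain typically comes from a manually unrolled
   reduction, e.g. (illustrative):

     for (i = 0; i < n; i += 4)
       {
	 sum += a[i];
	 sum += a[i + 1];
	 sum += a[i + 2];
	 sum += a[i + 3];
       }
*/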
1721
1722 static bool
1723 vect_is_slp_reduction (loop_vec_info loop_info, gimple phi, gimple first_stmt)
1724 {
1725 struct loop *loop = (gimple_bb (phi))->loop_father;
1726 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
1727 enum tree_code code;
1728 gimple current_stmt = NULL, loop_use_stmt = NULL, first, next_stmt;
1729 stmt_vec_info use_stmt_info, current_stmt_info;
1730 tree lhs;
1731 imm_use_iterator imm_iter;
1732 use_operand_p use_p;
1733 int nloop_uses, size = 0;
1734 bool found = false;
1735
1736 if (loop != vect_loop)
1737 return false;
1738
1739 lhs = PHI_RESULT (phi);
1740 code = gimple_assign_rhs_code (first_stmt);
1741 while (1)
1742 {
1743 nloop_uses = 0;
1744 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
1745 {
1746 gimple use_stmt = USE_STMT (use_p);
1747 if (is_gimple_debug (use_stmt))
1748 continue;
1749
1750 use_stmt = USE_STMT (use_p);
1751
1752 /* Check if we got back to the reduction phi. */
1753 if (use_stmt == phi)
1754 {
1755 loop_use_stmt = use_stmt;
1756 found = true;
1757 break;
1758 }
1759
1760 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
1761 && vinfo_for_stmt (use_stmt)
1762 && !STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
1763 {
1764 loop_use_stmt = use_stmt;
1765 nloop_uses++;
1766 }
1767
1768 if (nloop_uses > 1)
1769 return false;
1770 }
1771
1772 if (found)
1773 break;
1774
1775 /* We reached a statement with no loop uses. */
1776 if (nloop_uses == 0)
1777 return false;
1778
1779 /* This is a loop exit phi, and we haven't reached the reduction phi. */
1780 if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
1781 return false;
1782
1783 if (!is_gimple_assign (loop_use_stmt)
1784 || code != gimple_assign_rhs_code (loop_use_stmt)
1785 || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
1786 return false;
1787
1788 /* Insert USE_STMT into reduction chain. */
1789 use_stmt_info = vinfo_for_stmt (loop_use_stmt);
1790 if (current_stmt)
1791 {
1792 current_stmt_info = vinfo_for_stmt (current_stmt);
1793 GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
1794 GROUP_FIRST_ELEMENT (use_stmt_info)
1795 = GROUP_FIRST_ELEMENT (current_stmt_info);
1796 }
1797 else
1798 GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;
1799
1800 lhs = gimple_assign_lhs (loop_use_stmt);
1801 current_stmt = loop_use_stmt;
1802 size++;
1803 }
1804
1805 if (!found || loop_use_stmt != phi || size < 2)
1806 return false;
1807
1808 /* Swap the operands, if needed, to make the reduction operand be the second
1809 operand. */
1810 lhs = PHI_RESULT (phi);
1811 next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
1812 while (next_stmt)
1813 {
1814 if (gimple_assign_rhs2 (next_stmt) == lhs)
1815 {
1816 tree op = gimple_assign_rhs1 (next_stmt);
1817 gimple def_stmt = NULL;
1818
1819 if (TREE_CODE (op) == SSA_NAME)
1820 def_stmt = SSA_NAME_DEF_STMT (op);
1821
1822 /* Check that the other def is either defined in the loop
1823 ("vect_internal_def"), or it's an induction (defined by a
1824 loop-header phi-node). */
1825 if (def_stmt
1826 && gimple_bb (def_stmt)
1827 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
1828 && (is_gimple_assign (def_stmt)
1829 || is_gimple_call (def_stmt)
1830 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
1831 == vect_induction_def
1832 || (gimple_code (def_stmt) == GIMPLE_PHI
1833 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
1834 == vect_internal_def
1835 && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
1836 {
1837 lhs = gimple_assign_lhs (next_stmt);
1838 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
1839 continue;
1840 }
1841
1842 return false;
1843 }
1844 else
1845 {
1846 tree op = gimple_assign_rhs2 (next_stmt);
1847 gimple def_stmt = NULL;
1848
1849 if (TREE_CODE (op) == SSA_NAME)
1850 def_stmt = SSA_NAME_DEF_STMT (op);
1851
1852 /* Check that the other def is either defined in the loop
1853 ("vect_internal_def"), or it's an induction (defined by a
1854 loop-header phi-node). */
1855 if (def_stmt
1856 && gimple_bb (def_stmt)
1857 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
1858 && (is_gimple_assign (def_stmt)
1859 || is_gimple_call (def_stmt)
1860 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
1861 == vect_induction_def
1862 || (gimple_code (def_stmt) == GIMPLE_PHI
1863 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
1864 == vect_internal_def
1865 && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
1866 {
1867 if (vect_print_dump_info (REPORT_DETAILS))
1868 {
1869 fprintf (vect_dump, "swapping oprnds: ");
1870 print_gimple_stmt (vect_dump, next_stmt, 0, TDF_SLIM);
1871 }
1872
1873 swap_tree_operands (next_stmt,
1874 gimple_assign_rhs1_ptr (next_stmt),
1875 gimple_assign_rhs2_ptr (next_stmt));
1876 mark_symbols_for_renaming (next_stmt);
1877 }
1878 else
1879 return false;
1880 }
1881
1882 lhs = gimple_assign_lhs (next_stmt);
1883 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
1884 }
1885
1886 /* Save the chain for further analysis in SLP detection. */
1887 first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
1888 VEC_safe_push (gimple, heap, LOOP_VINFO_REDUCTION_CHAINS (loop_info), first);
1889 GROUP_SIZE (vinfo_for_stmt (first)) = size;
1890
1891 return true;
1892 }
1893
1894
1895 /* Function vect_is_simple_reduction_1
1896
1897 (1) Detect a cross-iteration def-use cycle that represents a simple
1898 reduction computation. We look for the following pattern:
1899
1900 loop_header:
1901 a1 = phi < a0, a2 >
1902 a3 = ...
1903 a2 = operation (a3, a1)
1904
1905 such that:
1906 1. operation is commutative and associative and it is safe to
1907 change the order of the computation (if CHECK_REDUCTION is true)
1908 2. no uses for a2 in the loop (a2 is used out of the loop)
1909 3. no uses of a1 in the loop besides the reduction operation
1910 4. no uses of a1 outside the loop.
1911
1912 Conditions 1,4 are tested here.
1913 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
1914
1915 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
1916 nested cycles, if CHECK_REDUCTION is false.
1917
1918 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
1919 reductions:
1920
1921 a1 = phi < a0, a2 >
1922 inner loop (def of a3)
1923 a2 = phi < a3 >
1924
1925 If MODIFY is true, it also tries to rework the code in-place to enable
1926 detection of more reduction patterns. For the time being we rewrite
1927 "res -= RHS" into "rhs += -RHS" when it seems worthwhile.
1928 */
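/* Illustrative example (not from the original sources).  The classic
   summation below matches pattern (1), with PLUS_EXPR as the operation:

       sum = 0.0;
       for (i = 0; i < n; i++)
         sum += a[i];

   With MODIFY, "sum -= a[i]" is handled as well, by first treating it as
   "sum += -a[i]" for the commutativity/associativity checks and, once the
   order-of-computation checks pass, actually rewriting the statement that
   way.  */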
1929
1930 static gimple
1931 vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
1932 bool check_reduction, bool *double_reduc,
1933 bool modify)
1934 {
1935 struct loop *loop = (gimple_bb (phi))->loop_father;
1936 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
1937 edge latch_e = loop_latch_edge (loop);
1938 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
1939 gimple def_stmt, def1 = NULL, def2 = NULL;
1940 enum tree_code orig_code, code;
1941 tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
1942 tree type;
1943 int nloop_uses;
1944 tree name;
1945 imm_use_iterator imm_iter;
1946 use_operand_p use_p;
1947 bool phi_def;
1948
1949 *double_reduc = false;
1950
1951 /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization,
1952 otherwise, we assume outer loop vectorization. */
1953 gcc_assert ((check_reduction && loop == vect_loop)
1954 || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));
1955
1956 name = PHI_RESULT (phi);
1957 nloop_uses = 0;
1958 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
1959 {
1960 gimple use_stmt = USE_STMT (use_p);
1961 if (is_gimple_debug (use_stmt))
1962 continue;
1963
1964 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
1965 {
1966 if (vect_print_dump_info (REPORT_DETAILS))
1967 fprintf (vect_dump, "intermediate value used outside loop.");
1968
1969 return NULL;
1970 }
1971
1972 if (vinfo_for_stmt (use_stmt)
1973 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
1974 nloop_uses++;
1975 if (nloop_uses > 1)
1976 {
1977 if (vect_print_dump_info (REPORT_DETAILS))
1978 fprintf (vect_dump, "reduction used in loop.");
1979 return NULL;
1980 }
1981 }
1982
1983 if (TREE_CODE (loop_arg) != SSA_NAME)
1984 {
1985 if (vect_print_dump_info (REPORT_DETAILS))
1986 {
1987 fprintf (vect_dump, "reduction: not ssa_name: ");
1988 print_generic_expr (vect_dump, loop_arg, TDF_SLIM);
1989 }
1990 return NULL;
1991 }
1992
1993 def_stmt = SSA_NAME_DEF_STMT (loop_arg);
1994 if (!def_stmt)
1995 {
1996 if (vect_print_dump_info (REPORT_DETAILS))
1997 fprintf (vect_dump, "reduction: no def_stmt.");
1998 return NULL;
1999 }
2000
2001 if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
2002 {
2003 if (vect_print_dump_info (REPORT_DETAILS))
2004 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
2005 return NULL;
2006 }
2007
2008 if (is_gimple_assign (def_stmt))
2009 {
2010 name = gimple_assign_lhs (def_stmt);
2011 phi_def = false;
2012 }
2013 else
2014 {
2015 name = PHI_RESULT (def_stmt);
2016 phi_def = true;
2017 }
2018
2019 nloop_uses = 0;
2020 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2021 {
2022 gimple use_stmt = USE_STMT (use_p);
2023 if (is_gimple_debug (use_stmt))
2024 continue;
2025 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
2026 && vinfo_for_stmt (use_stmt)
2027 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
2028 nloop_uses++;
2029 if (nloop_uses > 1)
2030 {
2031 if (vect_print_dump_info (REPORT_DETAILS))
2032 fprintf (vect_dump, "reduction used in loop.");
2033 return NULL;
2034 }
2035 }
2036
2037 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
2038 defined in the inner loop. */
2039 if (phi_def)
2040 {
2041 op1 = PHI_ARG_DEF (def_stmt, 0);
2042
2043 if (gimple_phi_num_args (def_stmt) != 1
2044 || TREE_CODE (op1) != SSA_NAME)
2045 {
2046 if (vect_print_dump_info (REPORT_DETAILS))
2047 fprintf (vect_dump, "unsupported phi node definition.");
2048
2049 return NULL;
2050 }
2051
2052 def1 = SSA_NAME_DEF_STMT (op1);
2053 if (flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2054 && loop->inner
2055 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
2056 && is_gimple_assign (def1))
2057 {
2058 if (vect_print_dump_info (REPORT_DETAILS))
2059 report_vect_op (def_stmt, "detected double reduction: ");
2060
2061 *double_reduc = true;
2062 return def_stmt;
2063 }
2064
2065 return NULL;
2066 }
2067
2068 code = orig_code = gimple_assign_rhs_code (def_stmt);
2069
2070 /* We can handle "res -= x[i]", which is non-associative, by
2071 simply rewriting it into "res += -x[i]". Avoid changing the
2072 gimple instruction during the first simple tests and only do this
2073 if we're allowed to change the code at all. */
2074 if (code == MINUS_EXPR
2075 && modify
2076 && (op1 = gimple_assign_rhs1 (def_stmt))
2077 && TREE_CODE (op1) == SSA_NAME
2078 && SSA_NAME_DEF_STMT (op1) == phi)
2079 code = PLUS_EXPR;
2080
2081 if (check_reduction
2082 && (!commutative_tree_code (code) || !associative_tree_code (code)))
2083 {
2084 if (vect_print_dump_info (REPORT_DETAILS))
2085 report_vect_op (def_stmt, "reduction: not commutative/associative: ");
2086 return NULL;
2087 }
2088
2089 if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
2090 {
2091 if (code != COND_EXPR)
2092 {
2093 if (vect_print_dump_info (REPORT_DETAILS))
2094 report_vect_op (def_stmt, "reduction: not binary operation: ");
2095
2096 return NULL;
2097 }
2098
2099 op3 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
2100 if (COMPARISON_CLASS_P (op3))
2101 {
2102 op4 = TREE_OPERAND (op3, 1);
2103 op3 = TREE_OPERAND (op3, 0);
2104 }
2105
2106 op1 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 1);
2107 op2 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 2);
2108
2109 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
2110 {
2111 if (vect_print_dump_info (REPORT_DETAILS))
2112 report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
2113
2114 return NULL;
2115 }
2116 }
2117 else
2118 {
2119 op1 = gimple_assign_rhs1 (def_stmt);
2120 op2 = gimple_assign_rhs2 (def_stmt);
2121
2122 if (TREE_CODE (op1) != SSA_NAME || TREE_CODE (op2) != SSA_NAME)
2123 {
2124 if (vect_print_dump_info (REPORT_DETAILS))
2125 report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
2126
2127 return NULL;
2128 }
2129 }
2130
2131 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
2132 if ((TREE_CODE (op1) == SSA_NAME
2133 && !types_compatible_p (type,TREE_TYPE (op1)))
2134 || (TREE_CODE (op2) == SSA_NAME
2135 && !types_compatible_p (type, TREE_TYPE (op2)))
2136 || (op3 && TREE_CODE (op3) == SSA_NAME
2137 && !types_compatible_p (type, TREE_TYPE (op3)))
2138 || (op4 && TREE_CODE (op4) == SSA_NAME
2139 && !types_compatible_p (type, TREE_TYPE (op4))))
2140 {
2141 if (vect_print_dump_info (REPORT_DETAILS))
2142 {
2143 fprintf (vect_dump, "reduction: multiple types: operation type: ");
2144 print_generic_expr (vect_dump, type, TDF_SLIM);
2145 fprintf (vect_dump, ", operand types: ");
2146 print_generic_expr (vect_dump, TREE_TYPE (op1), TDF_SLIM);
2147 fprintf (vect_dump, ",");
2148 print_generic_expr (vect_dump, TREE_TYPE (op2), TDF_SLIM);
2149 if (op3)
2150 {
2151 fprintf (vect_dump, ",");
2152 print_generic_expr (vect_dump, TREE_TYPE (op3), TDF_SLIM);
2153 }
2154
2155 if (op4)
2156 {
2157 fprintf (vect_dump, ",");
2158 print_generic_expr (vect_dump, TREE_TYPE (op4), TDF_SLIM);
2159 }
2160 }
2161
2162 return NULL;
2163 }
2164
2165 /* Check that it's ok to change the order of the computation.
2166 Generally, when vectorizing a reduction we change the order of the
2167 computation. This may change the behavior of the program in some
2168 cases, so we need to check that this is ok. One exception is when
2169 vectorizing an outer-loop: the inner-loop is executed sequentially,
2170 and therefore vectorizing reductions in the inner-loop during
2171 outer-loop vectorization is safe. */
2172
2173 /* CHECKME: check for !flag_finite_math_only too? */
2174 if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
2175 && check_reduction)
2176 {
2177 /* Changing the order of operations changes the semantics. */
2178 if (vect_print_dump_info (REPORT_DETAILS))
2179 report_vect_op (def_stmt, "reduction: unsafe fp math optimization: ");
2180 return NULL;
2181 }
2182 else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
2183 && check_reduction)
2184 {
2185 /* Changing the order of operations changes the semantics. */
2186 if (vect_print_dump_info (REPORT_DETAILS))
2187 report_vect_op (def_stmt, "reduction: unsafe int math optimization: ");
2188 return NULL;
2189 }
2190 else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction)
2191 {
2192 /* Changing the order of operations changes the semantics. */
2193 if (vect_print_dump_info (REPORT_DETAILS))
2194 report_vect_op (def_stmt,
2195 "reduction: unsafe fixed-point math optimization: ");
2196 return NULL;
2197 }
2198
2199 /* If we detected "res -= x[i]" earlier, rewrite it into
2200 "res += -x[i]" now. If this turns out to be useless reassoc
2201 will clean it up again. */
2202 if (orig_code == MINUS_EXPR)
2203 {
2204 tree rhs = gimple_assign_rhs2 (def_stmt);
2205 tree negrhs = make_ssa_name (SSA_NAME_VAR (rhs), NULL);
2206 gimple negate_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, negrhs,
2207 rhs, NULL);
2208 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
2209 set_vinfo_for_stmt (negate_stmt, new_stmt_vec_info (negate_stmt,
2210 loop_info, NULL));
2211 gsi_insert_before (&gsi, negate_stmt, GSI_NEW_STMT);
2212 gimple_assign_set_rhs2 (def_stmt, negrhs);
2213 gimple_assign_set_rhs_code (def_stmt, PLUS_EXPR);
2214 update_stmt (def_stmt);
2215 }
2216
2217 /* Reduction is safe. We're dealing with one of the following:
2218 1) integer arithmetic and no trapv
2219 2) floating point arithmetic, and special flags permit this optimization
2220 3) nested cycle (i.e., outer loop vectorization). */
2221 if (TREE_CODE (op1) == SSA_NAME)
2222 def1 = SSA_NAME_DEF_STMT (op1);
2223
2224 if (TREE_CODE (op2) == SSA_NAME)
2225 def2 = SSA_NAME_DEF_STMT (op2);
2226
2227 if (code != COND_EXPR
2228 && (!def1 || !def2 || gimple_nop_p (def1) || gimple_nop_p (def2)))
2229 {
2230 if (vect_print_dump_info (REPORT_DETAILS))
2231 report_vect_op (def_stmt, "reduction: no defs for operands: ");
2232 return NULL;
2233 }
2234
2235 /* Check that one def is the reduction def, defined by PHI,
2236 the other def is either defined in the loop ("vect_internal_def"),
2237 or it's an induction (defined by a loop-header phi-node). */
2238
2239 if (def2 && def2 == phi
2240 && (code == COND_EXPR
2241 || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
2242 && (is_gimple_assign (def1)
2243 || is_gimple_call (def1)
2244 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
2245 == vect_induction_def
2246 || (gimple_code (def1) == GIMPLE_PHI
2247 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
2248 == vect_internal_def
2249 && !is_loop_header_bb_p (gimple_bb (def1)))))))
2250 {
2251 if (vect_print_dump_info (REPORT_DETAILS))
2252 report_vect_op (def_stmt, "detected reduction: ");
2253 return def_stmt;
2254 }
2255
2256 if (def1 && def1 == phi
2257 && (code == COND_EXPR
2258 || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
2259 && (is_gimple_assign (def2)
2260 || is_gimple_call (def2)
2261 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
2262 == vect_induction_def
2263 || (gimple_code (def2) == GIMPLE_PHI
2264 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
2265 == vect_internal_def
2266 && !is_loop_header_bb_p (gimple_bb (def2)))))))
2267 {
2268 if (check_reduction)
2269 {
2270 /* Swap operands (just for simplicity - so that the rest of the code
2271 can assume that the reduction variable is always the last (second)
2272 argument). */
2273 if (vect_print_dump_info (REPORT_DETAILS))
2274 report_vect_op (def_stmt,
2275 "detected reduction: need to swap operands: ");
2276
2277 swap_tree_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
2278 gimple_assign_rhs2_ptr (def_stmt));
2279 }
2280 else
2281 {
2282 if (vect_print_dump_info (REPORT_DETAILS))
2283 report_vect_op (def_stmt, "detected reduction: ");
2284 }
2285
2286 return def_stmt;
2287 }
2288
2289 /* Try to find SLP reduction chain. */
2290 if (check_reduction && vect_is_slp_reduction (loop_info, phi, def_stmt))
2291 {
2292 if (vect_print_dump_info (REPORT_DETAILS))
2293 report_vect_op (def_stmt, "reduction: detected reduction chain: ");
2294
2295 return def_stmt;
2296 }
2297
2298 if (vect_print_dump_info (REPORT_DETAILS))
2299 report_vect_op (def_stmt, "reduction: unknown pattern: ");
2300
2301 return NULL;
2302 }
2303
2304 /* Wrapper around vect_is_simple_reduction_1 that won't modify code
2305 in-place. Takes the same arguments. */
2306
2307 static gimple
2308 vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
2309 bool check_reduction, bool *double_reduc)
2310 {
2311 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
2312 double_reduc, false);
2313 }
2314
2315 /* Wrapper around vect_is_simple_reduction_1, which will modify code
2316 in-place if it enables detection of more reductions. Takes the same
2317 arguments. */
2318
2319 gimple
2320 vect_force_simple_reduction (loop_vec_info loop_info, gimple phi,
2321 bool check_reduction, bool *double_reduc)
2322 {
2323 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
2324 double_reduc, true);
2325 }
2326
2327 /* Calculate the cost of one scalar iteration of the loop. */
2328 int
2329 vect_get_single_scalar_iteraion_cost (loop_vec_info loop_vinfo)
2330 {
2331 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2332 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
2333 int nbbs = loop->num_nodes, factor, scalar_single_iter_cost = 0;
2334 int innerloop_iters, i, stmt_cost;
2335
2336 /* Count statements in scalar loop. Using this as scalar cost for a single
2337 iteration for now.
2338
2339 TODO: Add outer loop support.
2340
2341 TODO: Consider assigning different costs to different scalar
2342 statements. */
2343
2344 /* FORNOW. */
2345 innerloop_iters = 1;
2346 if (loop->inner)
2347 innerloop_iters = 50; /* FIXME */
2348
2349 for (i = 0; i < nbbs; i++)
2350 {
2351 gimple_stmt_iterator si;
2352 basic_block bb = bbs[i];
2353
2354 if (bb->loop_father == loop->inner)
2355 factor = innerloop_iters;
2356 else
2357 factor = 1;
2358
2359 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2360 {
2361 gimple stmt = gsi_stmt (si);
2362 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2363
2364 if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
2365 continue;
2366
2367 /* Skip stmts that are not vectorized inside the loop. */
2368 if (stmt_info
2369 && !STMT_VINFO_RELEVANT_P (stmt_info)
2370 && (!STMT_VINFO_LIVE_P (stmt_info)
2371 || STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def))
2372 continue;
2373
2374 if (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt)))
2375 {
2376 if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
2377 stmt_cost = vect_get_cost (scalar_load);
2378 else
2379 stmt_cost = vect_get_cost (scalar_store);
2380 }
2381 else
2382 stmt_cost = vect_get_cost (scalar_stmt);
2383
2384 scalar_single_iter_cost += stmt_cost * factor;
2385 }
2386 }
2387 return scalar_single_iter_cost;
2388 }
2389
2390 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
2391 int
2392 vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
2393 int *peel_iters_epilogue,
2394 int scalar_single_iter_cost)
2395 {
2396 int peel_guard_costs = 0;
2397 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2398
2399 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
2400 {
2401 *peel_iters_epilogue = vf/2;
2402 if (vect_print_dump_info (REPORT_COST))
2403 fprintf (vect_dump, "cost model: "
2404 "epilogue peel iters set to vf/2 because "
2405 "loop iterations are unknown .");
2406
2407 /* If peeled iterations are known but the number of scalar loop
2408 iterations is unknown, count a taken branch per peeled loop. */
2409 peel_guard_costs = 2 * vect_get_cost (cond_branch_taken);
2410 }
2411 else
2412 {
2413 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
2414 peel_iters_prologue = niters < peel_iters_prologue ?
2415 niters : peel_iters_prologue;
2416 *peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
2417 /* If we need to peel for gaps, but no epilogue peeling would otherwise
2418 be required, we have to peel VF iterations. */
2419 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
2420 *peel_iters_epilogue = vf;
2421 }
2422
2423 return (peel_iters_prologue * scalar_single_iter_cost)
2424 + (*peel_iters_epilogue * scalar_single_iter_cost)
2425 + peel_guard_costs;
2426 }
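/* Worked example for vect_get_known_peeling_cost (illustrative numbers only,
   not taken from any target's cost tables): with VF = 4, NITERS = 103,
   PEEL_ITERS_PROLOGUE = 1 and SCALAR_SINGLE_ITER_COST = 3, the epilogue gets
   (103 - 1) % 4 = 2 iterations, so the returned cost is (1 + 2) * 3 = 9
   (no guard cost is added in this known-NITERS case).  When NITERS is
   unknown, the epilogue estimate falls back to VF/2 and two taken-branch
   guards are added instead.  */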
2427
2428 /* Function vect_estimate_min_profitable_iters
2429
2430 Return the number of iterations required for the vector version of the
2431 loop to be profitable relative to the cost of the scalar version of the
2432 loop.
2433
2434 TODO: Take profile info into account before making vectorization
2435 decisions, if available. */
2436
2437 int
2438 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
2439 {
2440 int i;
2441 int min_profitable_iters;
2442 int peel_iters_prologue;
2443 int peel_iters_epilogue;
2444 int vec_inside_cost = 0;
2445 int vec_outside_cost = 0;
2446 int scalar_single_iter_cost = 0;
2447 int scalar_outside_cost = 0;
2448 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2449 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2450 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
2451 int nbbs = loop->num_nodes;
2452 int npeel = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
2453 int peel_guard_costs = 0;
2454 int innerloop_iters = 0, factor;
2455 VEC (slp_instance, heap) *slp_instances;
2456 slp_instance instance;
2457
2458 /* Cost model disabled. */
2459 if (!flag_vect_cost_model)
2460 {
2461 if (vect_print_dump_info (REPORT_COST))
2462 fprintf (vect_dump, "cost model disabled.");
2463 return 0;
2464 }
2465
2466 /* Requires loop versioning tests to handle misalignment. */
2467 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
2468 {
2469 /* FIXME: Make cost depend on complexity of individual check. */
2470 vec_outside_cost +=
2471 VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
2472 if (vect_print_dump_info (REPORT_COST))
2473 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
2474 "versioning to treat misalignment.\n");
2475 }
2476
2477 /* Requires loop versioning with alias checks. */
2478 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2479 {
2480 /* FIXME: Make cost depend on complexity of individual check. */
2481 vec_outside_cost +=
2482 VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
2483 if (vect_print_dump_info (REPORT_COST))
2484 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
2485 "versioning aliasing.\n");
2486 }
2487
2488 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2489 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2490 vec_outside_cost += vect_get_cost (cond_branch_taken);
2491
2492 /* Count statements in scalar loop. Using this as scalar cost for a single
2493 iteration for now.
2494
2495 TODO: Add outer loop support.
2496
2497 TODO: Consider assigning different costs to different scalar
2498 statements. */
2499
2500 /* FORNOW. */
2501 if (loop->inner)
2502 innerloop_iters = 50; /* FIXME */
2503
2504 for (i = 0; i < nbbs; i++)
2505 {
2506 gimple_stmt_iterator si;
2507 basic_block bb = bbs[i];
2508
2509 if (bb->loop_father == loop->inner)
2510 factor = innerloop_iters;
2511 else
2512 factor = 1;
2513
2514 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2515 {
2516 gimple stmt = gsi_stmt (si);
2517 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2518 /* Skip stmts that are not vectorized inside the loop. */
2519 if (!STMT_VINFO_RELEVANT_P (stmt_info)
2520 && (!STMT_VINFO_LIVE_P (stmt_info)
2521 || STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def))
2522 continue;
2523 vec_inside_cost += STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) * factor;
2524 /* FIXME: for stmts in the inner-loop in outer-loop vectorization,
2525 some of the "outside" costs are generated inside the outer-loop. */
2526 vec_outside_cost += STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info);
2527 }
2528 }
2529
2530 scalar_single_iter_cost = vect_get_single_scalar_iteraion_cost (loop_vinfo);
2531
2532 /* Add the cost of the peeled instructions executed in the prologue and
2533 epilogue loops.
2534
2535 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
2536 at compile-time - we assume it's vf/2 (the worst would be vf-1).
2537
2538 TODO: Build an expression that represents peel_iters for prologue and
2539 epilogue to be used in a run-time test. */
2540
2541 if (npeel < 0)
2542 {
2543 peel_iters_prologue = vf/2;
2544 if (vect_print_dump_info (REPORT_COST))
2545 fprintf (vect_dump, "cost model: "
2546 "prologue peel iters set to vf/2.");
2547
2548 /* If peeling for alignment is unknown, loop bound of main loop becomes
2549 unknown. */
2550 peel_iters_epilogue = vf/2;
2551 if (vect_print_dump_info (REPORT_COST))
2552 fprintf (vect_dump, "cost model: "
2553 "epilogue peel iters set to vf/2 because "
2554 "peeling for alignment is unknown .");
2555
2556 /* If peeled iterations are unknown, count a taken branch and a not taken
2557 branch per peeled loop. Even if scalar loop iterations are known,
2558 vector iterations are not known since peeled prologue iterations are
2559 not known. Hence guards remain the same. */
2560 peel_guard_costs += 2 * (vect_get_cost (cond_branch_taken)
2561 + vect_get_cost (cond_branch_not_taken));
2562 vec_outside_cost += (peel_iters_prologue * scalar_single_iter_cost)
2563 + (peel_iters_epilogue * scalar_single_iter_cost)
2564 + peel_guard_costs;
2565 }
2566 else
2567 {
2568 peel_iters_prologue = npeel;
2569 vec_outside_cost += vect_get_known_peeling_cost (loop_vinfo,
2570 peel_iters_prologue, &peel_iters_epilogue,
2571 scalar_single_iter_cost);
2572 }
2573
2574 /* FORNOW: The scalar outside cost is incremented in one of the
2575 following ways:
2576
2577 1. The vectorizer checks for alignment and aliasing and generates
2578 a condition that allows dynamic vectorization. A cost model
2579 check is ANDED with the versioning condition. Hence scalar code
2580 path now has the added cost of the versioning check.
2581
2582 if (cost > th && versioning_check)
2583 jmp to vector code
2584
2585 Hence run-time scalar is incremented by not-taken branch cost.
2586
2587 2. The vectorizer then checks if a prologue is required. If the
2588 cost model check was not done before during versioning, it has to
2589 be done before the prologue check.
2590
2591 if (cost <= th)
2592 prologue = scalar_iters
2593 if (prologue == 0)
2594 jmp to vector code
2595 else
2596 execute prologue
2597 if (prologue == num_iters)
2598 go to exit
2599
2600 Hence the run-time scalar cost is incremented by a taken branch,
2601 plus a not-taken branch, plus a taken branch cost.
2602
2603 3. The vectorizer then checks if an epilogue is required. If the
2604 cost model check was not done before during prologue check, it
2605 has to be done with the epilogue check.
2606
2607 if (prologue == 0)
2608 jmp to vector code
2609 else
2610 execute prologue
2611 if (prologue == num_iters)
2612 go to exit
2613 vector code:
2614 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
2615 jmp to epilogue
2616
2617 Hence the run-time scalar cost should be incremented by 2 taken
2618 branches.
2619
2620 TODO: The back end may reorder the BBS's differently and reverse
2621 conditions/branch directions. Change the estimates below to
2622 something more reasonable. */
2623
2624 /* If the number of iterations is known and we do not do versioning, we can
2625 decide whether to vectorize at compile time. Hence the scalar version
2626 does not carry cost model guard costs.
2627 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2628 || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2629 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2630 {
2631 /* Cost model check occurs at versioning. */
2632 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2633 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2634 scalar_outside_cost += vect_get_cost (cond_branch_not_taken);
2635 else
2636 {
2637 /* Cost model check occurs at prologue generation. */
2638 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2639 scalar_outside_cost += 2 * vect_get_cost (cond_branch_taken)
2640 + vect_get_cost (cond_branch_not_taken);
2641 /* Cost model check occurs at epilogue generation. */
2642 else
2643 scalar_outside_cost += 2 * vect_get_cost (cond_branch_taken);
2644 }
2645 }
2646
2647 /* Add SLP costs. */
2648 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2649 FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
2650 {
2651 vec_outside_cost += SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (instance);
2652 vec_inside_cost += SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance);
2653 }
2654
2655 /* Calculate number of iterations required to make the vector version
2656 profitable, relative to the loop bodies only. The following condition
2657 must hold true:
2658 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
2659 where
2660 SIC = scalar iteration cost, VIC = vector iteration cost,
2661 VOC = vector outside cost, VF = vectorization factor,
2662 PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
2663 SOC = scalar outside cost for run time cost model check. */
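/* Worked example with made-up costs (illustrative only): SIC = 4, VIC = 6,
   VF = 4, VOC = 20, SOC = 0, no peeling.  Then
       min_profitable_iters = (20 * 4) / (4 * 4 - 6) = 8,
   and at niters = 8 both sides cost 4 * 8 = 6 * (8/4) + 20 = 32, so the
   rounding check below bumps the result to 9, the first iteration count at
   which the vector loop actually wins.  */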
2664
2665 if ((scalar_single_iter_cost * vf) > vec_inside_cost)
2666 {
2667 if (vec_outside_cost <= 0)
2668 min_profitable_iters = 1;
2669 else
2670 {
2671 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
2672 - vec_inside_cost * peel_iters_prologue
2673 - vec_inside_cost * peel_iters_epilogue)
2674 / ((scalar_single_iter_cost * vf)
2675 - vec_inside_cost);
2676
2677 if ((scalar_single_iter_cost * vf * min_profitable_iters)
2678 <= ((vec_inside_cost * min_profitable_iters)
2679 + ((vec_outside_cost - scalar_outside_cost) * vf)))
2680 min_profitable_iters++;
2681 }
2682 }
2683 /* vector version will never be profitable. */
2684 else
2685 {
2686 if (vect_print_dump_info (REPORT_COST))
2687 fprintf (vect_dump, "cost model: the vector iteration cost = %d "
2688 "divided by the scalar iteration cost = %d "
2689 "is greater or equal to the vectorization factor = %d.",
2690 vec_inside_cost, scalar_single_iter_cost, vf);
2691 return -1;
2692 }
2693
2694 if (vect_print_dump_info (REPORT_COST))
2695 {
2696 fprintf (vect_dump, "Cost model analysis: \n");
2697 fprintf (vect_dump, " Vector inside of loop cost: %d\n",
2698 vec_inside_cost);
2699 fprintf (vect_dump, " Vector outside of loop cost: %d\n",
2700 vec_outside_cost);
2701 fprintf (vect_dump, " Scalar iteration cost: %d\n",
2702 scalar_single_iter_cost);
2703 fprintf (vect_dump, " Scalar outside cost: %d\n", scalar_outside_cost);
2704 fprintf (vect_dump, " prologue iterations: %d\n",
2705 peel_iters_prologue);
2706 fprintf (vect_dump, " epilogue iterations: %d\n",
2707 peel_iters_epilogue);
2708 fprintf (vect_dump, " Calculated minimum iters for profitability: %d\n",
2709 min_profitable_iters);
2710 }
2711
2712 min_profitable_iters =
2713 min_profitable_iters < vf ? vf : min_profitable_iters;
2714
2715 /* Because the condition we create is:
2716 if (niters <= min_profitable_iters)
2717 then skip the vectorized loop. */
2718 min_profitable_iters--;
2719
2720 if (vect_print_dump_info (REPORT_COST))
2721 fprintf (vect_dump, " Profitability threshold = %d\n",
2722 min_profitable_iters);
2723
2724 return min_profitable_iters;
2725 }
2726
2727
2728 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
2729 functions. Design this better to avoid maintenance issues. */
2730
2731 /* Function vect_model_reduction_cost.
2732
2733 Models cost for a reduction operation, including the vector ops
2734 generated within the strip-mine loop, the initial definition before
2735 the loop, and the epilogue code that must be generated. */
2736
2737 static bool
2738 vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
2739 int ncopies)
2740 {
2741 int outer_cost = 0;
2742 enum tree_code code;
2743 optab optab;
2744 tree vectype;
2745 gimple stmt, orig_stmt;
2746 tree reduction_op;
2747 enum machine_mode mode;
2748 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2749 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2750
2751
2752 /* Cost of reduction op inside loop. */
2753 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info)
2754 += ncopies * vect_get_cost (vector_stmt);
2755
2756 stmt = STMT_VINFO_STMT (stmt_info);
2757
2758 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
2759 {
2760 case GIMPLE_SINGLE_RHS:
2761 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
2762 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
2763 break;
2764 case GIMPLE_UNARY_RHS:
2765 reduction_op = gimple_assign_rhs1 (stmt);
2766 break;
2767 case GIMPLE_BINARY_RHS:
2768 reduction_op = gimple_assign_rhs2 (stmt);
2769 break;
2770 case GIMPLE_TERNARY_RHS:
2771 reduction_op = gimple_assign_rhs3 (stmt);
2772 break;
2773 default:
2774 gcc_unreachable ();
2775 }
2776
2777 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
2778 if (!vectype)
2779 {
2780 if (vect_print_dump_info (REPORT_COST))
2781 {
2782 fprintf (vect_dump, "unsupported data-type ");
2783 print_generic_expr (vect_dump, TREE_TYPE (reduction_op), TDF_SLIM);
2784 }
2785 return false;
2786 }
2787
2788 mode = TYPE_MODE (vectype);
2789 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2790
2791 if (!orig_stmt)
2792 orig_stmt = STMT_VINFO_STMT (stmt_info);
2793
2794 code = gimple_assign_rhs_code (orig_stmt);
2795
2796 /* Add in cost for initial definition. */
2797 outer_cost += vect_get_cost (scalar_to_vec);
2798
2799 /* Determine cost of epilogue code.
2800
2801 We have a reduction operator that will reduce the vector in one statement.
2802 Also requires scalar extract. */
2803
2804 if (!nested_in_vect_loop_p (loop, orig_stmt))
2805 {
2806 if (reduc_code != ERROR_MARK)
2807 outer_cost += vect_get_cost (vector_stmt)
2808 + vect_get_cost (vec_to_scalar);
2809 else
2810 {
2811 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
2812 tree bitsize =
2813 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
2814 int element_bitsize = tree_low_cst (bitsize, 1);
2815 int nelements = vec_size_in_bits / element_bitsize;
2816
2817 optab = optab_for_tree_code (code, vectype, optab_default);
2818
2819 /* We have a whole vector shift available. */
2820 if (VECTOR_MODE_P (mode)
2821 && optab_handler (optab, mode) != CODE_FOR_nothing
2822 && optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
2823 /* Final reduction via vector shifts and the reduction operator. Also
2824 requires scalar extract. */
2825 outer_cost += ((exact_log2 (nelements) * 2)
2826 * vect_get_cost (vector_stmt)
2827 + vect_get_cost (vec_to_scalar));
2828 else
2829 /* Use extracts and reduction op for final reduction. For N elements,
2830 we have N extracts and N-1 reduction ops. */
2831 outer_cost += ((nelements + nelements - 1)
2832 * vect_get_cost (vector_stmt));
2833 }
2834 }
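/* Illustrative numbers (not from any target's cost tables): for a V4SI
   reduction with nelements = 4, the whole-vector-shift path above costs
   exact_log2 (4) * 2 = 4 vector stmts plus one vec_to_scalar, while the
   extract-based fallback costs 4 + 3 = 7 vector stmts.  */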
2835
2836 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost;
2837
2838 if (vect_print_dump_info (REPORT_COST))
2839 fprintf (vect_dump, "vect_model_reduction_cost: inside_cost = %d, "
2840 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
2841 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
2842
2843 return true;
2844 }
2845
2846
2847 /* Function vect_model_induction_cost.
2848
2849 Models cost for induction operations. */
2850
2851 static void
2852 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
2853 {
2854 /* loop cost for vec_loop. */
2855 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info)
2856 = ncopies * vect_get_cost (vector_stmt);
2857 /* prologue cost for vec_init and vec_step. */
2858 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info)
2859 = 2 * vect_get_cost (scalar_to_vec);
2860
2861 if (vect_print_dump_info (REPORT_COST))
2862 fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, "
2863 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
2864 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
2865 }
2866
2867
2868 /* Function get_initial_def_for_induction
2869
2870 Input:
2871 STMT - a stmt that performs an induction operation in the loop.
2872 IV_PHI - the initial value of the induction variable
2873
2874 Output:
2875 Return a vector variable, initialized with the first VF values of
2876 the induction variable. E.g., for an iv with IV_PHI='X' and
2877 evolution S, for a vector of 4 units, we want to return:
2878 [X, X + S, X + 2*S, X + 3*S]. */
2879
2880 static tree
2881 get_initial_def_for_induction (gimple iv_phi)
2882 {
2883 stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
2884 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2885 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2886 tree scalar_type;
2887 tree vectype;
2888 int nunits;
2889 edge pe = loop_preheader_edge (loop);
2890 struct loop *iv_loop;
2891 basic_block new_bb;
2892 tree vec, vec_init, vec_step, t;
2893 tree access_fn;
2894 tree new_var;
2895 tree new_name;
2896 gimple init_stmt, induction_phi, new_stmt;
2897 tree induc_def, vec_def, vec_dest;
2898 tree init_expr, step_expr;
2899 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2900 int i;
2901 bool ok;
2902 int ncopies;
2903 tree expr;
2904 stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
2905 bool nested_in_vect_loop = false;
2906 gimple_seq stmts = NULL;
2907 imm_use_iterator imm_iter;
2908 use_operand_p use_p;
2909 gimple exit_phi;
2910 edge latch_e;
2911 tree loop_arg;
2912 gimple_stmt_iterator si;
2913 basic_block bb = gimple_bb (iv_phi);
2914 tree stepvectype;
2915 tree resvectype;
2916
2917 /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop? */
2918 if (nested_in_vect_loop_p (loop, iv_phi))
2919 {
2920 nested_in_vect_loop = true;
2921 iv_loop = loop->inner;
2922 }
2923 else
2924 iv_loop = loop;
2925 gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);
2926
2927 latch_e = loop_latch_edge (iv_loop);
2928 loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);
2929
2930 access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi));
2931 gcc_assert (access_fn);
2932 STRIP_NOPS (access_fn);
2933 ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn,
2934 &init_expr, &step_expr);
2935 gcc_assert (ok);
2936 pe = loop_preheader_edge (iv_loop);
2937
2938 scalar_type = TREE_TYPE (init_expr);
2939 vectype = get_vectype_for_scalar_type (scalar_type);
2940 resvectype = get_vectype_for_scalar_type (TREE_TYPE (PHI_RESULT (iv_phi)));
2941 gcc_assert (vectype);
2942 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2943 ncopies = vf / nunits;
2944
2945 gcc_assert (phi_info);
2946 gcc_assert (ncopies >= 1);
2947
2948 /* Find the first insertion point in the BB. */
2949 si = gsi_after_labels (bb);
2950
2951 /* Create the vector that holds the initial_value of the induction. */
2952 if (nested_in_vect_loop)
2953 {
2954 /* iv_loop is nested in the loop to be vectorized. init_expr had already
2955 been created during vectorization of previous stmts. We obtain it
2956 from the STMT_VINFO_VEC_STMT of the defining stmt. */
2957 tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi,
2958 loop_preheader_edge (iv_loop));
2959 vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL);
2960 }
2961 else
2962 {
2963 /* iv_loop is the loop to be vectorized. Create:
2964 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
2965 new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_");
2966 add_referenced_var (new_var);
2967
2968 new_name = force_gimple_operand (init_expr, &stmts, false, new_var);
2969 if (stmts)
2970 {
2971 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2972 gcc_assert (!new_bb);
2973 }
2974
2975 t = NULL_TREE;
2976 t = tree_cons (NULL_TREE, new_name, t);
2977 for (i = 1; i < nunits; i++)
2978 {
2979 /* Create: new_name_i = new_name + step_expr */
2980 enum tree_code code = POINTER_TYPE_P (scalar_type)
2981 ? POINTER_PLUS_EXPR : PLUS_EXPR;
2982 init_stmt = gimple_build_assign_with_ops (code, new_var,
2983 new_name, step_expr);
2984 new_name = make_ssa_name (new_var, init_stmt);
2985 gimple_assign_set_lhs (init_stmt, new_name);
2986
2987 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
2988 gcc_assert (!new_bb);
2989
2990 if (vect_print_dump_info (REPORT_DETAILS))
2991 {
2992 fprintf (vect_dump, "created new init_stmt: ");
2993 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
2994 }
2995 t = tree_cons (NULL_TREE, new_name, t);
2996 }
2997 /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */
2998 vec = build_constructor_from_list (vectype, nreverse (t));
2999 vec_init = vect_init_vector (iv_phi, vec, vectype, NULL);
3000 }
3001
3002
3003 /* Create the vector that holds the step of the induction. */
3004 if (nested_in_vect_loop)
3005 /* iv_loop is nested in the loop to be vectorized. Generate:
3006 vec_step = [S, S, S, S] */
3007 new_name = step_expr;
3008 else
3009 {
3010 /* iv_loop is the loop to be vectorized. Generate:
3011 vec_step = [VF*S, VF*S, VF*S, VF*S] */
3012 expr = build_int_cst (TREE_TYPE (step_expr), vf);
3013 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
3014 expr, step_expr);
3015 }
3016
3017 t = unshare_expr (new_name);
3018 gcc_assert (CONSTANT_CLASS_P (new_name));
3019 stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
3020 gcc_assert (stepvectype);
3021 vec = build_vector_from_val (stepvectype, t);
3022 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
3023
3024
3025 /* Create the following def-use cycle:
3026 loop prolog:
3027 vec_init = ...
3028 vec_step = ...
3029 loop:
3030 vec_iv = PHI <vec_init, vec_loop>
3031 ...
3032 STMT
3033 ...
3034 vec_loop = vec_iv + vec_step; */
3035
3036 /* Create the induction-phi that defines the induction-operand. */
3037 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
3038 add_referenced_var (vec_dest);
3039 induction_phi = create_phi_node (vec_dest, iv_loop->header);
3040 set_vinfo_for_stmt (induction_phi,
3041 new_stmt_vec_info (induction_phi, loop_vinfo, NULL));
3042 induc_def = PHI_RESULT (induction_phi);
3043
3044 /* Create the iv update inside the loop */
3045 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
3046 induc_def, vec_step);
3047 vec_def = make_ssa_name (vec_dest, new_stmt);
3048 gimple_assign_set_lhs (new_stmt, vec_def);
3049 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3050 set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
3051 NULL));
3052
3053 /* Set the arguments of the phi node: */
3054 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
3055 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
3056 UNKNOWN_LOCATION);
3057
3058
3059 /* In case the vectorization factor (VF) is bigger than the number
3060 of elements that we can fit in a vectype (nunits), we have to generate
3061 more than one vector stmt - i.e., we need to "unroll" the
3062 vector stmt by a factor VF/nunits. For more details see documentation
3063 in vectorizable_operation. */
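/* Illustrative example (not from the original sources): with VF = 8 and a
   4-element vectype (ncopies = 2), the first copy is the vector IV
   [X, X+S, X+2*S, X+3*S] and the second is obtained by adding the step
   vector [4*S, 4*S, 4*S, 4*S], i.e. [X+4*S, X+5*S, X+6*S, X+7*S].  */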
3064
3065 if (ncopies > 1)
3066 {
3067 stmt_vec_info prev_stmt_vinfo;
3068 /* FORNOW. This restriction should be relaxed. */
3069 gcc_assert (!nested_in_vect_loop);
3070
3071 /* Create the vector that holds the step of the induction. */
3072 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
3073 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
3074 expr, step_expr);
3075 t = unshare_expr (new_name);
3076 gcc_assert (CONSTANT_CLASS_P (new_name));
3077 vec = build_vector_from_val (stepvectype, t);
3078 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
3079
3080 vec_def = induc_def;
3081 prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
3082 for (i = 1; i < ncopies; i++)
3083 {
3084 /* vec_i = vec_prev + vec_step */
3085 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
3086 vec_def, vec_step);
3087 vec_def = make_ssa_name (vec_dest, new_stmt);
3088 gimple_assign_set_lhs (new_stmt, vec_def);
3089
3090 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3091 if (!useless_type_conversion_p (resvectype, vectype))
3092 {
3093 new_stmt = gimple_build_assign_with_ops
3094 (VIEW_CONVERT_EXPR,
3095 vect_get_new_vect_var (resvectype, vect_simple_var,
3096 "vec_iv_"),
3097 build1 (VIEW_CONVERT_EXPR, resvectype,
3098 gimple_assign_lhs (new_stmt)), NULL_TREE);
3099 gimple_assign_set_lhs (new_stmt,
3100 make_ssa_name
3101 (gimple_assign_lhs (new_stmt), new_stmt));
3102 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3103 }
3104 set_vinfo_for_stmt (new_stmt,
3105 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
3106 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
3107 prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
3108 }
3109 }
3110
3111 if (nested_in_vect_loop)
3112 {
3113 /* Find the loop-closed exit-phi of the induction, and record
3114 the final vector of induction results: */
3115 exit_phi = NULL;
3116 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
3117 {
3118 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (USE_STMT (use_p))))
3119 {
3120 exit_phi = USE_STMT (use_p);
3121 break;
3122 }
3123 }
3124 if (exit_phi)
3125 {
3126 stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
3127 /* FORNOW. Currently not supporting the case that an inner-loop induction
3128 is not used in the outer-loop (i.e., it is used only outside the outer-loop). */
3129 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
3130 && !STMT_VINFO_LIVE_P (stmt_vinfo));
3131
3132 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
3133 if (vect_print_dump_info (REPORT_DETAILS))
3134 {
3135 fprintf (vect_dump, "vector of inductions after inner-loop:");
3136 print_gimple_stmt (vect_dump, new_stmt, 0, TDF_SLIM);
3137 }
3138 }
3139 }
3140
3141
3142 if (vect_print_dump_info (REPORT_DETAILS))
3143 {
3144 fprintf (vect_dump, "transform induction: created def-use cycle: ");
3145 print_gimple_stmt (vect_dump, induction_phi, 0, TDF_SLIM);
3146 fprintf (vect_dump, "\n");
3147 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (vec_def), 0, TDF_SLIM);
3148 }
3149
3150 STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
3151 if (!useless_type_conversion_p (resvectype, vectype))
3152 {
3153 new_stmt = gimple_build_assign_with_ops
3154 (VIEW_CONVERT_EXPR,
3155 vect_get_new_vect_var (resvectype, vect_simple_var, "vec_iv_"),
3156 build1 (VIEW_CONVERT_EXPR, resvectype, induc_def), NULL_TREE);
3157 induc_def = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt);
3158 gimple_assign_set_lhs (new_stmt, induc_def);
3159 si = gsi_start_bb (bb);
3160 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3161 set_vinfo_for_stmt (new_stmt,
3162 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
3163 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_stmt))
3164 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (induction_phi));
3165 }
3166
3167 return induc_def;
3168 }
3169
3170
3171 /* Function get_initial_def_for_reduction
3172
3173 Input:
3174 STMT - a stmt that performs a reduction operation in the loop.
3175 INIT_VAL - the initial value of the reduction variable
3176
3177 Output:
3178 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
3179 of the reduction (used for adjusting the epilog - see below).
3180 Return a vector variable, initialized according to the operation that STMT
3181 performs. This vector will be used as the initial value of the
3182 vector of partial results.
3183
3184 Option1 (adjust in epilog): Initialize the vector as follows:
3185 add/bit or/xor: [0,0,...,0,0]
3186 mult/bit and: [1,1,...,1,1]
3187 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
3188 and when necessary (e.g. add/mult case) let the caller know
3189 that it needs to adjust the result by init_val.
3190
3191 Option2: Initialize the vector as follows:
3192 add/bit or/xor: [init_val,0,0,...,0]
3193 mult/bit and: [init_val,1,1,...,1]
3194 min/max/cond_expr: [init_val,init_val,...,init_val]
3195 and no adjustments are needed.
3196
3197 For example, for the following code:
3198
3199 s = init_val;
3200 for (i=0;i<n;i++)
3201 s = s + a[i];
3202
3203 STMT is 's = s + a[i]', and the reduction variable is 's'.
3204 For a vector of 4 units, we want to return either [0,0,0,init_val],
3205 or [0,0,0,0] and let the caller know that it needs to adjust
3206 the result at the end by 'init_val'.
3207
3208 FORNOW, we use the 'adjust in epilog' scheme (Option1) when ADJUSTMENT_DEF
3209 is not NULL, because the initialization vector is then simpler (the same
3210 element in all entries); otherwise we use Option2.
3211
3212 A cost model should help decide between these two schemes. */
3213
3214 tree
3215 get_initial_def_for_reduction (gimple stmt, tree init_val,
3216 tree *adjustment_def)
3217 {
3218 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
3219 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
3220 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3221 tree scalar_type = TREE_TYPE (init_val);
3222 tree vectype = get_vectype_for_scalar_type (scalar_type);
3223 int nunits;
3224 enum tree_code code = gimple_assign_rhs_code (stmt);
3225 tree def_for_init;
3226 tree init_def;
3227 tree t = NULL_TREE;
3228 int i;
3229 bool nested_in_vect_loop = false;
3230 tree init_value;
3231 REAL_VALUE_TYPE real_init_val = dconst0;
3232 int int_init_val = 0;
3233 gimple def_stmt = NULL;
3234
3235 gcc_assert (vectype);
3236 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3237
3238 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
3239 || SCALAR_FLOAT_TYPE_P (scalar_type));
3240
3241 if (nested_in_vect_loop_p (loop, stmt))
3242 nested_in_vect_loop = true;
3243 else
3244 gcc_assert (loop == (gimple_bb (stmt))->loop_father);
3245
3246 /* In case of double reduction we only create a vector variable to be put
3247 in the reduction phi node. The actual statement creation is done in
3248 vect_create_epilog_for_reduction. */
3249 if (adjustment_def && nested_in_vect_loop
3250 && TREE_CODE (init_val) == SSA_NAME
3251 && (def_stmt = SSA_NAME_DEF_STMT (init_val))
3252 && gimple_code (def_stmt) == GIMPLE_PHI
3253 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
3254 && vinfo_for_stmt (def_stmt)
3255 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
3256 == vect_double_reduction_def)
3257 {
3258 *adjustment_def = NULL;
3259 return vect_create_destination_var (init_val, vectype);
3260 }
3261
3262 if (TREE_CONSTANT (init_val))
3263 {
3264 if (SCALAR_FLOAT_TYPE_P (scalar_type))
3265 init_value = build_real (scalar_type, TREE_REAL_CST (init_val));
3266 else
3267 init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val));
3268 }
3269 else
3270 init_value = init_val;
3271
3272 switch (code)
3273 {
3274 case WIDEN_SUM_EXPR:
3275 case DOT_PROD_EXPR:
3276 case PLUS_EXPR:
3277 case MINUS_EXPR:
3278 case BIT_IOR_EXPR:
3279 case BIT_XOR_EXPR:
3280 case MULT_EXPR:
3281 case BIT_AND_EXPR:
3282 /* ADJUSTMENT_DEF is NULL when called from
3283 vect_create_epilog_for_reduction to vectorize a double reduction. */
3284 if (adjustment_def)
3285 {
3286 if (nested_in_vect_loop)
3287 *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt,
3288 NULL);
3289 else
3290 *adjustment_def = init_val;
3291 }
3292
3293 if (code == MULT_EXPR)
3294 {
3295 real_init_val = dconst1;
3296 int_init_val = 1;
3297 }
3298
3299 if (code == BIT_AND_EXPR)
3300 int_init_val = -1;
3301
3302 if (SCALAR_FLOAT_TYPE_P (scalar_type))
3303 def_for_init = build_real (scalar_type, real_init_val);
3304 else
3305 def_for_init = build_int_cst (scalar_type, int_init_val);
3306
3307 /* Create '0' or '1' (depending on CODE) for all but the first element. */
3308 for (i = nunits - 2; i >= 0; --i)
3309 t = tree_cons (NULL_TREE, def_for_init, t);
3310
3311 /* Option1: the first element is '0' or '1' as well. */
3312 if (adjustment_def)
3313 {
3314 t = tree_cons (NULL_TREE, def_for_init, t);
3315 init_def = build_vector (vectype, t);
3316 break;
3317 }
3318
3319 /* Option2: the first element is INIT_VAL. */
3320 t = tree_cons (NULL_TREE, init_value, t);
3321 if (TREE_CONSTANT (init_val))
3322 init_def = build_vector (vectype, t);
3323 else
3324 init_def = build_constructor_from_list (vectype, t);
3325
3326 break;
3327
3328 case MIN_EXPR:
3329 case MAX_EXPR:
3330 case COND_EXPR:
3331 if (adjustment_def)
3332 {
3333 *adjustment_def = NULL_TREE;
3334 init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
3335 break;
3336 }
3337
3338 init_def = build_vector_from_val (vectype, init_value);
3339 break;
3340
3341 default:
3342 gcc_unreachable ();
3343 }
3344
3345 return init_def;
3346 }
3347
3348
3349 /* Function vect_create_epilog_for_reduction
3350
3351 Create code at the loop-epilog to finalize the result of a reduction
3352 computation.
3353
3354 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
3355 reduction statements.
3356 STMT is the scalar reduction stmt that is being vectorized.
3357 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
3358 number of elements that we can fit in a vectype (nunits). In this case
3359 we have to generate more than one vector stmt - i.e., we need to "unroll"
3360 the vector stmt by a factor VF/nunits. For more details see documentation
3361 in vectorizable_operation.
3362 REDUC_CODE is the tree-code for the epilog reduction.
3363 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
3364 computation.
3365 REDUC_INDEX is the index of the operand in the right hand side of the
3366 statement that is defined by REDUCTION_PHI.
3367 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
3368 SLP_NODE is an SLP node containing a group of reduction statements. The
3369 first one in this group is STMT.
3370
3371 This function:
3372 1. Creates the reduction def-use cycles: sets the arguments for
3373 REDUCTION_PHIS:
3374 The loop-entry argument is the vectorized initial-value of the reduction.
3375 The loop-latch argument is taken from VECT_DEFS - the vector of partial
3376 sums.
3377 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
3378 by applying the operation specified by REDUC_CODE if available, or by
3379 other means (whole-vector shifts or a scalar loop).
3380 The function also creates a new phi node at the loop exit to preserve
3381 loop-closed form, as illustrated below.
3382
3383 The flow at the entry to this function:
3384
3385 loop:
3386 vec_def = phi <null, null> # REDUCTION_PHI
3387 VECT_DEF = vector_stmt # vectorized form of STMT
3388 s_loop = scalar_stmt # (scalar) STMT
3389 loop_exit:
3390 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3391 use <s_out0>
3392 use <s_out0>
3393
3394 The above is transformed by this function into:
3395
3396 loop:
3397 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3398 VECT_DEF = vector_stmt # vectorized form of STMT
3399 s_loop = scalar_stmt # (scalar) STMT
3400 loop_exit:
3401 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3402 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3403 v_out2 = reduce <v_out1>
3404 s_out3 = extract_field <v_out2, 0>
3405 s_out4 = adjust_result <s_out3>
3406 use <s_out4>
3407 use <s_out4>
3408 */
3409
3410 static void
3411 vect_create_epilog_for_reduction (VEC (tree, heap) *vect_defs, gimple stmt,
3412 int ncopies, enum tree_code reduc_code,
3413 VEC (gimple, heap) *reduction_phis,
3414 int reduc_index, bool double_reduc,
3415 slp_tree slp_node)
3416 {
3417 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3418 stmt_vec_info prev_phi_info;
3419 tree vectype;
3420 enum machine_mode mode;
3421 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3422 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
3423 basic_block exit_bb;
3424 tree scalar_dest;
3425 tree scalar_type;
3426 gimple new_phi = NULL, phi;
3427 gimple_stmt_iterator exit_gsi;
3428 tree vec_dest;
3429 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
3430 gimple epilog_stmt = NULL;
3431 enum tree_code code = gimple_assign_rhs_code (stmt);
3432 gimple exit_phi;
3433 tree bitsize, bitpos;
3434 tree adjustment_def = NULL;
3435 tree vec_initial_def = NULL;
3436 tree reduction_op, expr, def;
3437 tree orig_name, scalar_result;
3438 imm_use_iterator imm_iter, phi_imm_iter;
3439 use_operand_p use_p, phi_use_p;
3440 bool extract_scalar_result = false;
3441 gimple use_stmt, orig_stmt, reduction_phi = NULL;
3442 bool nested_in_vect_loop = false;
3443 VEC (gimple, heap) *new_phis = NULL;
3444 enum vect_def_type dt = vect_unknown_def_type;
3445 int j, i;
3446 VEC (tree, heap) *scalar_results = NULL;
3447 unsigned int group_size = 1, k, ratio;
3448 VEC (tree, heap) *vec_initial_defs = NULL;
3449 VEC (gimple, heap) *phis;
3450 bool slp_reduc = false;
3451 tree new_phi_result;
3452
3453 if (slp_node)
3454 group_size = VEC_length (gimple, SLP_TREE_SCALAR_STMTS (slp_node));
3455
3456 if (nested_in_vect_loop_p (loop, stmt))
3457 {
3458 outer_loop = loop;
3459 loop = loop->inner;
3460 nested_in_vect_loop = true;
3461 gcc_assert (!slp_node);
3462 }
3463
3464 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3465 {
3466 case GIMPLE_SINGLE_RHS:
3467 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
3468 == ternary_op);
3469 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
3470 break;
3471 case GIMPLE_UNARY_RHS:
3472 reduction_op = gimple_assign_rhs1 (stmt);
3473 break;
3474 case GIMPLE_BINARY_RHS:
3475 reduction_op = reduc_index ?
3476 gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
3477 break;
3478 case GIMPLE_TERNARY_RHS:
3479 reduction_op = gimple_op (stmt, reduc_index + 1);
3480 break;
3481 default:
3482 gcc_unreachable ();
3483 }
3484
3485 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
3486 gcc_assert (vectype);
3487 mode = TYPE_MODE (vectype);
3488
3489 /* 1. Create the reduction def-use cycle:
3490 Set the arguments of REDUCTION_PHIS, i.e., transform
3491
3492 loop:
3493 vec_def = phi <null, null> # REDUCTION_PHI
3494 VECT_DEF = vector_stmt # vectorized form of STMT
3495 ...
3496
3497 into:
3498
3499 loop:
3500 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3501 VECT_DEF = vector_stmt # vectorized form of STMT
3502 ...
3503
3504 (in case of SLP, do it for all the phis). */
3505
3506 /* Get the loop-entry arguments. */
3507 if (slp_node)
3508 vect_get_slp_defs (reduction_op, NULL_TREE, slp_node, &vec_initial_defs,
3509 NULL, reduc_index);
3510 else
3511 {
3512 vec_initial_defs = VEC_alloc (tree, heap, 1);
3513 /* For the case of reduction, vect_get_vec_def_for_operand returns
3514 the scalar def before the loop, that defines the initial value
3515 of the reduction variable. */
3516 vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
3517 &adjustment_def);
3518 VEC_quick_push (tree, vec_initial_defs, vec_initial_def);
3519 }
3520
3521 /* Set phi nodes arguments. */
3522 FOR_EACH_VEC_ELT (gimple, reduction_phis, i, phi)
3523 {
3524 tree vec_init_def = VEC_index (tree, vec_initial_defs, i);
3525 tree def = VEC_index (tree, vect_defs, i);
3526 for (j = 0; j < ncopies; j++)
3527 {
3528 /* Set the loop-entry arg of the reduction-phi. */
3529 add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
3530 UNKNOWN_LOCATION);
3531
3532 /* Set the loop-latch arg for the reduction-phi. */
3533 if (j > 0)
3534 def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);
3535
3536 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
3537
3538 if (vect_print_dump_info (REPORT_DETAILS))
3539 {
3540 fprintf (vect_dump, "transform reduction: created def-use"
3541 " cycle: ");
3542 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
3543 fprintf (vect_dump, "\n");
3544 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (def), 0,
3545 TDF_SLIM);
3546 }
3547
3548 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
3549 }
3550 }
3551
3552 VEC_free (tree, heap, vec_initial_defs);
3553
3554 /* 2. Create epilog code.
3555 The reduction epilog code operates across the elements of the vector
3556 of partial results computed by the vectorized loop.
3557 The reduction epilog code consists of:
3558
3559 step 1: compute the scalar result in a vector (v_out2)
3560 step 2: extract the scalar result (s_out3) from the vector (v_out2)
3561 step 3: adjust the scalar result (s_out3) if needed.
3562
3563 Step 1 can be accomplished using one of the following three schemes:
3564 (scheme 1) using reduc_code, if available.
3565 (scheme 2) using whole-vector shifts, if available.
3566 (scheme 3) using a scalar loop. In this case steps 1+2 above are
3567 combined.
3568
3569 The overall epilog code looks like this:
3570
3571 s_out0 = phi <s_loop> # original EXIT_PHI
3572 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3573 v_out2 = reduce <v_out1> # step 1
3574 s_out3 = extract_field <v_out2, 0> # step 2
3575 s_out4 = adjust_result <s_out3> # step 3
3576
3577 (step 3 is optional, and steps 1 and 2 may be combined).
3578 Lastly, the uses of s_out0 are replaced by s_out4. */
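/* A concrete illustration (hypothetical values): for a PLUS reduction whose
   vector of partial sums is {1, 2, 3, 4}, step 1 reduces the vector so that
   one element holds 10, step 2 extracts the scalar 10, and step 3 (if the
   initial value 5 was split out into ADJUSTMENT_DEF) yields the final
   result 15.  */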
3579
3580
3581 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
3582 v_out1 = phi <VECT_DEF>
3583 Store them in NEW_PHIS. */
3584
3585 exit_bb = single_exit (loop)->dest;
3586 prev_phi_info = NULL;
3587 new_phis = VEC_alloc (gimple, heap, VEC_length (tree, vect_defs));
3588 FOR_EACH_VEC_ELT (tree, vect_defs, i, def)
3589 {
3590 for (j = 0; j < ncopies; j++)
3591 {
3592 phi = create_phi_node (SSA_NAME_VAR (def), exit_bb);
3593 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL));
3594 if (j == 0)
3595 VEC_quick_push (gimple, new_phis, phi);
3596 else
3597 {
3598 def = vect_get_vec_def_for_stmt_copy (dt, def);
3599 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
3600 }
3601
3602 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
3603 prev_phi_info = vinfo_for_stmt (phi);
3604 }
3605 }
3606
3607 /* The epilogue is created for the outer-loop, i.e., for the loop being
3608 vectorized. */
3609 if (double_reduc)
3610 {
3611 loop = outer_loop;
3612 exit_bb = single_exit (loop)->dest;
3613 }
3614
3615 exit_gsi = gsi_after_labels (exit_bb);
3616
3617 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
3618 (i.e. when reduc_code is not available) and in the final adjustment
3619 code (if needed). Also get the original scalar reduction variable as
3620 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
3621 represents a reduction pattern), the tree-code and scalar-def are
3622 taken from the original stmt that the pattern-stmt (STMT) replaces.
3623 Otherwise (it is a regular reduction) - the tree-code and scalar-def
3624 are taken from STMT. */
3625
3626 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3627 if (!orig_stmt)
3628 {
3629 /* Regular reduction */
3630 orig_stmt = stmt;
3631 }
3632 else
3633 {
3634 /* Reduction pattern */
3635 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
3636 gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
3637 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
3638 }
3639
3640 code = gimple_assign_rhs_code (orig_stmt);
3641 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
3642 partial results are added and not subtracted. */
3643 if (code == MINUS_EXPR)
3644 code = PLUS_EXPR;
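/* Illustration (hypothetical numbers): for s = 10 - a[0] - a[1] - a[2] - a[3]
   with a = {1, 2, 3, 4} and two vector lanes, the loop leaves the partial
   results {10 - 1 - 3, 0 - 2 - 4} = {6, -6}; adding the lanes in the epilog
   gives 0 == 10 - (1 + 2 + 3 + 4), which is why PLUS rather than MINUS is
   used from here on.  */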
3645
3646 scalar_dest = gimple_assign_lhs (orig_stmt);
3647 scalar_type = TREE_TYPE (scalar_dest);
3648 scalar_results = VEC_alloc (tree, heap, group_size);
3649 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
3650 bitsize = TYPE_SIZE (scalar_type);
3651
3652 /* In case this is a reduction in an inner-loop while vectorizing an outer
3653 loop - we don't need to extract a single scalar result at the end of the
3654 inner-loop (unless it is double reduction, i.e., the use of reduction is
3655 outside the outer-loop). The final vector of partial results will be used
3656 in the vectorized outer-loop, or reduced to a scalar result at the end of
3657 the outer-loop. */
3658 if (nested_in_vect_loop && !double_reduc)
3659 goto vect_finalize_reduction;
3660
3661 /* SLP reduction without reduction chain, e.g.,
3662 # a1 = phi <a2, a0>
3663 # b1 = phi <b2, b0>
3664 a2 = operation (a1)
3665 b2 = operation (b1) */
3666 slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
3667
3668 /* In case of reduction chain, e.g.,
3669 # a1 = phi <a3, a0>
3670 a2 = operation (a1)
3671 a3 = operation (a2),
3672
3673 we may end up with more than one vector result. Here we reduce them to
3674 one vector. */
3675 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
3676 {
3677 tree first_vect = PHI_RESULT (VEC_index (gimple, new_phis, 0));
3678 tree tmp;
3679
3680 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3681 for (k = 1; k < VEC_length (gimple, new_phis); k++)
3682 {
3683 gimple next_phi = VEC_index (gimple, new_phis, k);
3684 tree second_vect = PHI_RESULT (next_phi);
3685 gimple new_vec_stmt;
3686
3687 tmp = build2 (code, vectype, first_vect, second_vect);
3688 new_vec_stmt = gimple_build_assign (vec_dest, tmp);
3689 first_vect = make_ssa_name (vec_dest, new_vec_stmt);
3690 gimple_assign_set_lhs (new_vec_stmt, first_vect);
3691 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
3692 }
3693
3694 new_phi_result = first_vect;
3695 }
3696 else
3697 new_phi_result = PHI_RESULT (VEC_index (gimple, new_phis, 0));
3698
3699 /* 2.3 Create the reduction code, using one of the three schemes described
3700 above. In SLP we simply need to extract all the elements from the
3701 vector (without reducing them), so we use scalar shifts. */
3702 if (reduc_code != ERROR_MARK && !slp_reduc)
3703 {
3704 tree tmp;
3705
3706 /*** Case 1: Create:
3707 v_out2 = reduc_expr <v_out1> */
3708
3709 if (vect_print_dump_info (REPORT_DETAILS))
3710 fprintf (vect_dump, "Reduce using direct vector reduction.");
3711
3712 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3713 tmp = build1 (reduc_code, vectype, new_phi_result);
3714 epilog_stmt = gimple_build_assign (vec_dest, tmp);
3715 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3716 gimple_assign_set_lhs (epilog_stmt, new_temp);
3717 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3718
3719 extract_scalar_result = true;
3720 }
3721 else
3722 {
3723 enum tree_code shift_code = ERROR_MARK;
3724 bool have_whole_vector_shift = true;
3725 int bit_offset;
3726 int element_bitsize = tree_low_cst (bitsize, 1);
3727 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3728 tree vec_temp;
3729
3730 if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
3731 shift_code = VEC_RSHIFT_EXPR;
3732 else
3733 have_whole_vector_shift = false;
3734
3735 /* Regardless of whether we have a whole vector shift, if we're
3736 emulating the operation via tree-vect-generic, we don't want
3737 to use it. Only the first round of the reduction is likely
3738 to still be profitable via emulation. */
3739 /* ??? It might be better to emit a reduction tree code here, so that
3740 tree-vect-generic can expand the first round via bit tricks. */
3741 if (!VECTOR_MODE_P (mode))
3742 have_whole_vector_shift = false;
3743 else
3744 {
3745 optab optab = optab_for_tree_code (code, vectype, optab_default);
3746 if (optab_handler (optab, mode) == CODE_FOR_nothing)
3747 have_whole_vector_shift = false;
3748 }
3749
3750 if (have_whole_vector_shift && !slp_reduc)
3751 {
3752 /*** Case 2: Create:
3753 for (offset = VS/2; offset >= element_size; offset/=2)
3754 {
3755 Create: va' = vec_shift <va, offset>
3756 Create: va = vop <va, va'>
3757 } */
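/* Illustration (hypothetical sizes): with a 128-bit vector holding four
   32-bit partial sums, the loop below runs with bit offsets 64 and 32.
   Each round shifts the vector and combines it with itself, halving the
   number of live partial sums, until a single element holds the complete
   reduction; that element is extracted in step 2.4 below.  */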
3758
3759 if (vect_print_dump_info (REPORT_DETAILS))
3760 fprintf (vect_dump, "Reduce using vector shifts");
3761
3762 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3763 new_temp = new_phi_result;
3764 for (bit_offset = vec_size_in_bits/2;
3765 bit_offset >= element_bitsize;
3766 bit_offset /= 2)
3767 {
3768 tree bitpos = size_int (bit_offset);
3769
3770 epilog_stmt = gimple_build_assign_with_ops (shift_code,
3771 vec_dest, new_temp, bitpos);
3772 new_name = make_ssa_name (vec_dest, epilog_stmt);
3773 gimple_assign_set_lhs (epilog_stmt, new_name);
3774 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3775
3776 epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
3777 new_name, new_temp);
3778 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3779 gimple_assign_set_lhs (epilog_stmt, new_temp);
3780 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3781 }
3782
3783 extract_scalar_result = true;
3784 }
3785 else
3786 {
3787 tree rhs;
3788
3789 /*** Case 3: Create:
3790 s = extract_field <v_out2, 0>
3791 for (offset = element_size;
3792 offset < vector_size;
3793 offset += element_size;)
3794 {
3795 Create: s' = extract_field <v_out2, offset>
3796 Create: s = op <s, s'> // For non SLP cases
3797 } */
3798
3799 if (vect_print_dump_info (REPORT_DETAILS))
3800 fprintf (vect_dump, "Reduce using scalar code. ");
3801
3802 vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3803 FOR_EACH_VEC_ELT (gimple, new_phis, i, new_phi)
3804 {
3805 vec_temp = PHI_RESULT (new_phi);
3806 rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
3807 bitsize_zero_node);
3808 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3809 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3810 gimple_assign_set_lhs (epilog_stmt, new_temp);
3811 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3812
3813 /* In SLP we don't need to apply reduction operation, so we just
3814 collect s' values in SCALAR_RESULTS. */
3815 if (slp_reduc)
3816 VEC_safe_push (tree, heap, scalar_results, new_temp);
3817
3818 for (bit_offset = element_bitsize;
3819 bit_offset < vec_size_in_bits;
3820 bit_offset += element_bitsize)
3821 {
3822 tree bitpos = bitsize_int (bit_offset);
3823 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
3824 bitsize, bitpos);
3825
3826 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3827 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
3828 gimple_assign_set_lhs (epilog_stmt, new_name);
3829 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3830
3831 if (slp_reduc)
3832 {
3833 /* In SLP we don't need to apply reduction operation, so
3834 we just collect s' values in SCALAR_RESULTS. */
3835 new_temp = new_name;
3836 VEC_safe_push (tree, heap, scalar_results, new_name);
3837 }
3838 else
3839 {
3840 epilog_stmt = gimple_build_assign_with_ops (code,
3841 new_scalar_dest, new_name, new_temp);
3842 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3843 gimple_assign_set_lhs (epilog_stmt, new_temp);
3844 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3845 }
3846 }
3847 }
3848
3849 /* The only case where we need to reduce scalar results in SLP is
3850 unrolling. If the size of SCALAR_RESULTS is greater than
3851 GROUP_SIZE, we reduce them by combining elements modulo
3852 GROUP_SIZE. */
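/* For instance (hypothetical sizes): with GROUP_SIZE == 2 and four scalar
   results s0..s3 coming from an unrolled SLP reduction, the loop below
   combines s0 with s2 and s1 with s3, leaving GROUP_SIZE final values.  */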
3853 if (slp_reduc)
3854 {
3855 tree res, first_res, new_res;
3856 gimple new_stmt;
3857
3858 /* Reduce multiple scalar results in case of SLP unrolling. */
3859 for (j = group_size; VEC_iterate (tree, scalar_results, j, res);
3860 j++)
3861 {
3862 first_res = VEC_index (tree, scalar_results, j % group_size);
3863 new_stmt = gimple_build_assign_with_ops (code,
3864 new_scalar_dest, first_res, res);
3865 new_res = make_ssa_name (new_scalar_dest, new_stmt);
3866 gimple_assign_set_lhs (new_stmt, new_res);
3867 gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
3868 VEC_replace (tree, scalar_results, j % group_size, new_res);
3869 }
3870 }
3871 else
3872 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
3873 VEC_safe_push (tree, heap, scalar_results, new_temp);
3874
3875 extract_scalar_result = false;
3876 }
3877 }
3878
3879 /* 2.4 Extract the final scalar result. Create:
3880 s_out3 = extract_field <v_out2, bitpos> */
3881
3882 if (extract_scalar_result)
3883 {
3884 tree rhs;
3885
3886 if (vect_print_dump_info (REPORT_DETAILS))
3887 fprintf (vect_dump, "extract scalar result");
3888
3889 if (BYTES_BIG_ENDIAN)
3890 bitpos = size_binop (MULT_EXPR,
3891 bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
3892 TYPE_SIZE (scalar_type));
3893 else
3894 bitpos = bitsize_zero_node;
3895
3896 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
3897 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3898 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3899 gimple_assign_set_lhs (epilog_stmt, new_temp);
3900 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3901 VEC_safe_push (tree, heap, scalar_results, new_temp);
3902 }
3903
3904 vect_finalize_reduction:
3905
3906 if (double_reduc)
3907 loop = loop->inner;
3908
3909 /* 2.5 Adjust the final result by the initial value of the reduction
3910 variable. (When such adjustment is not needed, then
3911 'adjustment_def' is zero). For example, if code is PLUS we create:
3912 new_temp = loop_exit_def + adjustment_def */
3913
3914 if (adjustment_def)
3915 {
3916 gcc_assert (!slp_reduc);
3917 if (nested_in_vect_loop)
3918 {
3919 new_phi = VEC_index (gimple, new_phis, 0);
3920 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
3921 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
3922 new_dest = vect_create_destination_var (scalar_dest, vectype);
3923 }
3924 else
3925 {
3926 new_temp = VEC_index (tree, scalar_results, 0);
3927 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
3928 expr = build2 (code, scalar_type, new_temp, adjustment_def);
3929 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
3930 }
3931
3932 epilog_stmt = gimple_build_assign (new_dest, expr);
3933 new_temp = make_ssa_name (new_dest, epilog_stmt);
3934 gimple_assign_set_lhs (epilog_stmt, new_temp);
3935 SSA_NAME_DEF_STMT (new_temp) = epilog_stmt;
3936 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3937 if (nested_in_vect_loop)
3938 {
3939 set_vinfo_for_stmt (epilog_stmt,
3940 new_stmt_vec_info (epilog_stmt, loop_vinfo,
3941 NULL));
3942 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
3943 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
3944
3945 if (!double_reduc)
3946 VEC_quick_push (tree, scalar_results, new_temp);
3947 else
3948 VEC_replace (tree, scalar_results, 0, new_temp);
3949 }
3950 else
3951 VEC_replace (tree, scalar_results, 0, new_temp);
3952
3953 VEC_replace (gimple, new_phis, 0, epilog_stmt);
3954 }
3955
3956 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
3957 phis with new adjusted scalar results, i.e., replace use <s_out0>
3958 with use <s_out4>.
3959
3960 Transform:
3961 loop_exit:
3962 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3963 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3964 v_out2 = reduce <v_out1>
3965 s_out3 = extract_field <v_out2, 0>
3966 s_out4 = adjust_result <s_out3>
3967 use <s_out0>
3968 use <s_out0>
3969
3970 into:
3971
3972 loop_exit:
3973 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3974 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3975 v_out2 = reduce <v_out1>
3976 s_out3 = extract_field <v_out2, 0>
3977 s_out4 = adjust_result <s_out3>
3978 use <s_out4>
3979 use <s_out4> */
3980
3981
3982 /* In an SLP reduction chain we reduce the vector results into one vector
3983 if necessary (see above), hence we set GROUP_SIZE to 1 here. SCALAR_DEST
3984 is the LHS of the last stmt in the reduction chain, since we are looking
3985 for the loop exit phi node. */
3986 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
3987 {
3988 scalar_dest = gimple_assign_lhs (VEC_index (gimple,
3989 SLP_TREE_SCALAR_STMTS (slp_node),
3990 group_size - 1));
3991 group_size = 1;
3992 }
3993
3994 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
3995 case GROUP_SIZE is greater than the vectorization factor). Therefore, we
3996 need to match SCALAR_RESULTS with the corresponding statements. The first
3997 (GROUP_SIZE / number of new vector stmts) scalar results correspond to
3998 the first vector stmt, etc.
3999 (RATIO is equal to (GROUP_SIZE / number of new vector stmts)). */
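/* For example (hypothetical counts): with GROUP_SIZE == 4 and two vector
   stmts in NEW_PHIS, RATIO is 2, so scalar results 0-1 are matched with the
   first vector stmt and scalar results 2-3 with the second.  */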
4000 if (group_size > VEC_length (gimple, new_phis))
4001 {
4002 ratio = group_size / VEC_length (gimple, new_phis);
4003 gcc_assert (!(group_size % VEC_length (gimple, new_phis)));
4004 }
4005 else
4006 ratio = 1;
4007
4008 for (k = 0; k < group_size; k++)
4009 {
4010 if (k % ratio == 0)
4011 {
4012 epilog_stmt = VEC_index (gimple, new_phis, k / ratio);
4013 reduction_phi = VEC_index (gimple, reduction_phis, k / ratio);
4014 }
4015
4016 if (slp_reduc)
4017 {
4018 gimple current_stmt = VEC_index (gimple,
4019 SLP_TREE_SCALAR_STMTS (slp_node), k);
4020
4021 orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
4022 /* SLP statements can't participate in patterns. */
4023 gcc_assert (!orig_stmt);
4024 scalar_dest = gimple_assign_lhs (current_stmt);
4025 }
4026
4027 phis = VEC_alloc (gimple, heap, 3);
4028 /* Find the loop-closed-use at the loop exit of the original scalar
4029 result. (The reduction result is expected to have two immediate uses -
4030 one at the latch block, and one at the loop exit). */
4031 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4032 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
4033 VEC_safe_push (gimple, heap, phis, USE_STMT (use_p));
4034
4035 /* We expect to have found an exit_phi because of loop-closed-ssa
4036 form. */
4037 gcc_assert (!VEC_empty (gimple, phis));
4038
4039 FOR_EACH_VEC_ELT (gimple, phis, i, exit_phi)
4040 {
4041 if (outer_loop)
4042 {
4043 stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
4044 gimple vect_phi;
4045
4046 /* FORNOW. Currently not supporting the case that an inner-loop
4047 reduction is not used in the outer-loop (but only outside the
4048 outer-loop), unless it is double reduction. */
4049 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
4050 && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
4051 || double_reduc);
4052
4053 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
4054 if (!double_reduc
4055 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
4056 != vect_double_reduction_def)
4057 continue;
4058
4059 /* Handle double reduction:
4060
4061 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
4062 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
4063 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
4064 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
4065
4066 At that point the regular reduction (stmt2 and stmt3) is
4067 already vectorized, as well as the exit phi node, stmt4.
4068 Here we vectorize the phi node of double reduction, stmt1, and
4069 update all relevant statements. */
4070
4071 /* Go through all the uses of s2 to find double reduction phi
4072 node, i.e., stmt1 above. */
4073 orig_name = PHI_RESULT (exit_phi);
4074 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
4075 {
4076 stmt_vec_info use_stmt_vinfo = vinfo_for_stmt (use_stmt);
4077 stmt_vec_info new_phi_vinfo;
4078 tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
4079 basic_block bb = gimple_bb (use_stmt);
4080 gimple use;
4081
4082 /* Check that USE_STMT is really a double reduction phi
4083 node. */
4084 if (gimple_code (use_stmt) != GIMPLE_PHI
4085 || gimple_phi_num_args (use_stmt) != 2
4086 || !use_stmt_vinfo
4087 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
4088 != vect_double_reduction_def
4089 || bb->loop_father != outer_loop)
4090 continue;
4091
4092 /* Create vector phi node for double reduction:
4093 vs1 = phi <vs0, vs2>
4094 vs1 was created previously in this function by a call to
4095 vect_get_vec_def_for_operand and is stored in
4096 vec_initial_def;
4097 vs2 is defined by EPILOG_STMT, the vectorized EXIT_PHI;
4098 vs0 is created here. */
4099
4100 /* Create vector phi node. */
4101 vect_phi = create_phi_node (vec_initial_def, bb);
4102 new_phi_vinfo = new_stmt_vec_info (vect_phi,
4103 loop_vec_info_for_loop (outer_loop), NULL);
4104 set_vinfo_for_stmt (vect_phi, new_phi_vinfo);
4105
4106 /* Create vs0 - initial def of the double reduction phi. */
4107 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
4108 loop_preheader_edge (outer_loop));
4109 init_def = get_initial_def_for_reduction (stmt,
4110 preheader_arg, NULL);
4111 vect_phi_init = vect_init_vector (use_stmt, init_def,
4112 vectype, NULL);
4113
4114 /* Update phi node arguments with vs0 and vs2. */
4115 add_phi_arg (vect_phi, vect_phi_init,
4116 loop_preheader_edge (outer_loop),
4117 UNKNOWN_LOCATION);
4118 add_phi_arg (vect_phi, PHI_RESULT (epilog_stmt),
4119 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
4120 if (vect_print_dump_info (REPORT_DETAILS))
4121 {
4122 fprintf (vect_dump, "created double reduction phi "
4123 "node: ");
4124 print_gimple_stmt (vect_dump, vect_phi, 0, TDF_SLIM);
4125 }
4126
4127 vect_phi_res = PHI_RESULT (vect_phi);
4128
4129 /* Replace the use, i.e., set the correct vs1 in the regular
4130 reduction phi node. FORNOW, NCOPIES is always 1, so the
4131 loop is redundant. */
4132 use = reduction_phi;
4133 for (j = 0; j < ncopies; j++)
4134 {
4135 edge pr_edge = loop_preheader_edge (loop);
4136 SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
4137 use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
4138 }
4139 }
4140 }
4141 }
4142
4143 VEC_free (gimple, heap, phis);
4144 if (nested_in_vect_loop)
4145 {
4146 if (double_reduc)
4147 loop = outer_loop;
4148 else
4149 continue;
4150 }
4151
4152 phis = VEC_alloc (gimple, heap, 3);
4153 /* Find the loop-closed-use at the loop exit of the original scalar
4154 result. (The reduction result is expected to have two immediate uses,
4155 one at the latch block, and one at the loop exit). For double
4156 reductions we are looking for exit phis of the outer loop. */
4157 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4158 {
4159 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
4160 VEC_safe_push (gimple, heap, phis, USE_STMT (use_p));
4161 else
4162 {
4163 if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
4164 {
4165 tree phi_res = PHI_RESULT (USE_STMT (use_p));
4166
4167 FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
4168 {
4169 if (!flow_bb_inside_loop_p (loop,
4170 gimple_bb (USE_STMT (phi_use_p))))
4171 VEC_safe_push (gimple, heap, phis,
4172 USE_STMT (phi_use_p));
4173 }
4174 }
4175 }
4176 }
4177
4178 FOR_EACH_VEC_ELT (gimple, phis, i, exit_phi)
4179 {
4180 /* Replace the uses: */
4181 orig_name = PHI_RESULT (exit_phi);
4182 scalar_result = VEC_index (tree, scalar_results, k);
4183 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
4184 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
4185 SET_USE (use_p, scalar_result);
4186 }
4187
4188 VEC_free (gimple, heap, phis);
4189 }
4190
4191 VEC_free (tree, heap, scalar_results);
4192 VEC_free (gimple, heap, new_phis);
4193 }
4194
4195
4196 /* Function vectorizable_reduction.
4197
4198 Check if STMT performs a reduction operation that can be vectorized.
4199 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4200 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4201 Return FALSE if not a vectorizable STMT, TRUE otherwise.
4202
4203 This function also handles reduction idioms (patterns) that have been
4204 recognized in advance during vect_pattern_recog. In this case, STMT may be
4205 of this form:
4206 X = pattern_expr (arg0, arg1, ..., X)
4207 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
4208 sequence that had been detected and replaced by the pattern-stmt (STMT).
4209
4210 In some cases of reduction patterns, the type of the reduction variable X is
4211 different than the type of the other arguments of STMT.
4212 In such cases, the vectype that is used when transforming STMT into a vector
4213 stmt is different than the vectype that is used to determine the
4214 vectorization factor, because it consists of a different number of elements
4215 than the actual number of elements that are being operated upon in parallel.
4216
4217 For example, consider an accumulation of shorts into an int accumulator.
4218 On some targets it's possible to vectorize this pattern operating on 8
4219 shorts at a time (hence, the vectype for purposes of determining the
4220 vectorization factor should be V8HI); on the other hand, the vectype that
4221 is used to create the vector form is actually V4SI (the type of the result).
4222
4223 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
4224 indicates what is the actual level of parallelism (V8HI in the example), so
4225 that the right vectorization factor would be derived. This vectype
4226 corresponds to the type of arguments to the reduction stmt, and should *NOT*
4227 be used to create the vectorized stmt. The right vectype for the vectorized
4228 stmt is obtained from the type of the result X:
4229 get_vectype_for_scalar_type (TREE_TYPE (X))
4230
4231 This means that, contrary to "regular" reductions (or "regular" stmts in
4232 general), the following equation:
4233 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
4234 does *NOT* necessarily hold for reduction patterns. */
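/* For illustration only (hypothetical source code, not part of this file):
   the accumulation of shorts into an int discussed above could come from

       short a[N];
       int acc = 0;
       for (i = 0; i < N; i++)
         acc += a[i];

   where the loop body would operate on V8HI data while the reduction result
   and the epilog would operate on V4SI.  */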
4235
4236 bool
4237 vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
4238 gimple *vec_stmt, slp_tree slp_node)
4239 {
4240 tree vec_dest;
4241 tree scalar_dest;
4242 tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
4243 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4244 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4245 tree vectype_in = NULL_TREE;
4246 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4247 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4248 enum tree_code code, orig_code, epilog_reduc_code;
4249 enum machine_mode vec_mode;
4250 int op_type;
4251 optab optab, reduc_optab;
4252 tree new_temp = NULL_TREE;
4253 tree def;
4254 gimple def_stmt;
4255 enum vect_def_type dt;
4256 gimple new_phi = NULL;
4257 tree scalar_type;
4258 bool is_simple_use;
4259 gimple orig_stmt;
4260 stmt_vec_info orig_stmt_info;
4261 tree expr = NULL_TREE;
4262 int i;
4263 int ncopies;
4264 int epilog_copies;
4265 stmt_vec_info prev_stmt_info, prev_phi_info;
4266 bool single_defuse_cycle = false;
4267 tree reduc_def = NULL_TREE;
4268 gimple new_stmt = NULL;
4269 int j;
4270 tree ops[3];
4271 bool nested_cycle = false, found_nested_cycle_def = false;
4272 gimple reduc_def_stmt = NULL;
4273 /* The default is that the reduction variable is the last operand in the statement. */
4274 int reduc_index = 2;
4275 bool double_reduc = false, dummy;
4276 basic_block def_bb;
4277 struct loop * def_stmt_loop, *outer_loop = NULL;
4278 tree def_arg;
4279 gimple def_arg_stmt;
4280 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vect_defs = NULL;
4281 VEC (gimple, heap) *phis = NULL;
4282 int vec_num;
4283 tree def0, def1, tem;
4284
4285 /* In case of reduction chain we switch to the first stmt in the chain, but
4286 we don't update STMT_INFO, since only the last stmt is marked as reduction
4287 and has reduction properties. */
4288 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
4289 stmt = GROUP_FIRST_ELEMENT (stmt_info);
4290
4291 if (nested_in_vect_loop_p (loop, stmt))
4292 {
4293 outer_loop = loop;
4294 loop = loop->inner;
4295 nested_cycle = true;
4296 }
4297
4298 /* 1. Is vectorizable reduction? */
4299 /* Not supportable if the reduction variable is used in the loop, unless
4300 it's a reduction chain. */
4301 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
4302 && !GROUP_FIRST_ELEMENT (stmt_info))
4303 return false;
4304
4305 /* Reductions that are not used even in an enclosing outer-loop
4306 are expected to be "live" (used out of the loop). */
4307 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
4308 && !STMT_VINFO_LIVE_P (stmt_info))
4309 return false;
4310
4311 /* Make sure it was already recognized as a reduction computation. */
4312 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
4313 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
4314 return false;
4315
4316 /* 2. Has this been recognized as a reduction pattern?
4317
4318 Check if STMT represents a pattern that has been recognized
4319 in earlier analysis stages. For stmts that represent a pattern,
4320 the STMT_VINFO_RELATED_STMT field records the last stmt in
4321 the original sequence that constitutes the pattern. */
4322
4323 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
4324 if (orig_stmt)
4325 {
4326 orig_stmt_info = vinfo_for_stmt (orig_stmt);
4327 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt);
4328 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
4329 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
4330 }
4331
4332 /* 3. Check the operands of the operation. The first operands are defined
4333 inside the loop body. The last operand is the reduction variable,
4334 which is defined by the loop-header-phi. */
4335
4336 gcc_assert (is_gimple_assign (stmt));
4337
4338 /* Flatten RHS. */
4339 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
4340 {
4341 case GIMPLE_SINGLE_RHS:
4342 op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
4343 if (op_type == ternary_op)
4344 {
4345 tree rhs = gimple_assign_rhs1 (stmt);
4346 ops[0] = TREE_OPERAND (rhs, 0);
4347 ops[1] = TREE_OPERAND (rhs, 1);
4348 ops[2] = TREE_OPERAND (rhs, 2);
4349 code = TREE_CODE (rhs);
4350 }
4351 else
4352 return false;
4353 break;
4354
4355 case GIMPLE_BINARY_RHS:
4356 code = gimple_assign_rhs_code (stmt);
4357 op_type = TREE_CODE_LENGTH (code);
4358 gcc_assert (op_type == binary_op);
4359 ops[0] = gimple_assign_rhs1 (stmt);
4360 ops[1] = gimple_assign_rhs2 (stmt);
4361 break;
4362
4363 case GIMPLE_TERNARY_RHS:
4364 code = gimple_assign_rhs_code (stmt);
4365 op_type = TREE_CODE_LENGTH (code);
4366 gcc_assert (op_type == ternary_op);
4367 ops[0] = gimple_assign_rhs1 (stmt);
4368 ops[1] = gimple_assign_rhs2 (stmt);
4369 ops[2] = gimple_assign_rhs3 (stmt);
4370 break;
4371
4372 case GIMPLE_UNARY_RHS:
4373 return false;
4374
4375 default:
4376 gcc_unreachable ();
4377 }
4378
4379 scalar_dest = gimple_assign_lhs (stmt);
4380 scalar_type = TREE_TYPE (scalar_dest);
4381 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
4382 && !SCALAR_FLOAT_TYPE_P (scalar_type))
4383 return false;
4384
4385 /* All uses but the last are expected to be defined in the loop.
4386 The last use is the reduction variable. In case of nested cycle this
4387 assumption is not true: we use reduc_index to record the index of the
4388 reduction variable. */
4389 for (i = 0; i < op_type-1; i++)
4390 {
4391 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
4392 if (i == 0 && code == COND_EXPR)
4393 continue;
4394
4395 is_simple_use = vect_is_simple_use_1 (ops[i], loop_vinfo, NULL,
4396 &def_stmt, &def, &dt, &tem);
4397 if (!vectype_in)
4398 vectype_in = tem;
4399 gcc_assert (is_simple_use);
4400
4401 if (dt != vect_internal_def
4402 && dt != vect_external_def
4403 && dt != vect_constant_def
4404 && dt != vect_induction_def
4405 && !(dt == vect_nested_cycle && nested_cycle))
4406 return false;
4407
4408 if (dt == vect_nested_cycle)
4409 {
4410 found_nested_cycle_def = true;
4411 reduc_def_stmt = def_stmt;
4412 reduc_index = i;
4413 }
4414 }
4415
4416 is_simple_use = vect_is_simple_use_1 (ops[i], loop_vinfo, NULL, &def_stmt,
4417 &def, &dt, &tem);
4418 if (!vectype_in)
4419 vectype_in = tem;
4420 gcc_assert (is_simple_use);
4421 gcc_assert (dt == vect_reduction_def
4422 || dt == vect_nested_cycle
4423 || ((dt == vect_internal_def || dt == vect_external_def
4424 || dt == vect_constant_def || dt == vect_induction_def)
4425 && nested_cycle && found_nested_cycle_def));
4426 if (!found_nested_cycle_def)
4427 reduc_def_stmt = def_stmt;
4428
4429 gcc_assert (gimple_code (reduc_def_stmt) == GIMPLE_PHI);
4430 if (orig_stmt)
4431 gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo,
4432 reduc_def_stmt,
4433 !nested_cycle,
4434 &dummy));
4435 else
4436 {
4437 gimple tmp = vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
4438 !nested_cycle, &dummy);
4439 /* We changed STMT to be the first stmt in reduction chain, hence we
4440 check that in this case the first element in the chain is STMT. */
4441 gcc_assert (stmt == tmp
4442 || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt);
4443 }
4444
4445 if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
4446 return false;
4447
4448 if (slp_node || PURE_SLP_STMT (stmt_info))
4449 ncopies = 1;
4450 else
4451 ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4452 / TYPE_VECTOR_SUBPARTS (vectype_in));
4453
4454 gcc_assert (ncopies >= 1);
4455
4456 vec_mode = TYPE_MODE (vectype_in);
4457
4458 if (code == COND_EXPR)
4459 {
4460 if (!vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0))
4461 {
4462 if (vect_print_dump_info (REPORT_DETAILS))
4463 fprintf (vect_dump, "unsupported condition in reduction");
4464
4465 return false;
4466 }
4467 }
4468 else
4469 {
4470 /* 4. Supportable by target? */
4471
4472 /* 4.1. check support for the operation in the loop */
4473 optab = optab_for_tree_code (code, vectype_in, optab_default);
4474 if (!optab)
4475 {
4476 if (vect_print_dump_info (REPORT_DETAILS))
4477 fprintf (vect_dump, "no optab.");
4478
4479 return false;
4480 }
4481
4482 if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
4483 {
4484 if (vect_print_dump_info (REPORT_DETAILS))
4485 fprintf (vect_dump, "op not supported by target.");
4486
4487 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4488 || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4489 < vect_min_worthwhile_factor (code))
4490 return false;
4491
4492 if (vect_print_dump_info (REPORT_DETAILS))
4493 fprintf (vect_dump, "proceeding using word mode.");
4494 }
4495
4496 /* Worthwhile without SIMD support? */
4497 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
4498 && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4499 < vect_min_worthwhile_factor (code))
4500 {
4501 if (vect_print_dump_info (REPORT_DETAILS))
4502 fprintf (vect_dump, "not worthwhile without SIMD support.");
4503
4504 return false;
4505 }
4506 }
4507
4508 /* 4.2. Check support for the epilog operation.
4509
4510 If STMT represents a reduction pattern, then the type of the
4511 reduction variable may be different than the type of the rest
4512 of the arguments. For example, consider the case of accumulation
4513 of shorts into an int accumulator; The original code:
4514 S1: int_a = (int) short_a;
4515 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
4516
4517 was replaced with:
4518 STMT: int_acc = widen_sum <short_a, int_acc>
4519
4520 This means that:
4521 1. The tree-code that is used to create the vector operation in the
4522 epilog code (that reduces the partial results) is not the
4523 tree-code of STMT, but is rather the tree-code of the original
4524 stmt from the pattern that STMT is replacing. I.e, in the example
4525 above we want to use 'widen_sum' in the loop, but 'plus' in the
4526 epilog.
4527 2. The type (mode) we use to check available target support
4528 for the vector operation to be created in the *epilog*, is
4529 determined by the type of the reduction variable (in the example
4530 above we'd check this: optab_handler (plus_optab, vect_int_mode])).
4531 However the type (mode) we use to check available target support
4532 for the vector operation to be created *inside the loop*, is
4533 determined by the type of the other arguments to STMT (in the
4534 example we'd check this: optab_handler (widen_sum_optab,
4535 vect_short_mode)).
4536
4537 This is contrary to "regular" reductions, in which the types of all
4538 the arguments are the same as the type of the reduction variable.
4539 For "regular" reductions we can therefore use the same vector type
4540 (and also the same tree-code) when generating the epilog code and
4541 when generating the code inside the loop. */
4542
4543 if (orig_stmt)
4544 {
4545 /* This is a reduction pattern: get the vectype from the type of the
4546 reduction variable, and get the tree-code from orig_stmt. */
4547 orig_code = gimple_assign_rhs_code (orig_stmt);
4548 gcc_assert (vectype_out);
4549 vec_mode = TYPE_MODE (vectype_out);
4550 }
4551 else
4552 {
4553 /* Regular reduction: the same vectype and tree-code that are used for
4554 the vector code inside the loop can also be used for the epilog code. */
4555 orig_code = code;
4556 }
4557
4558 if (nested_cycle)
4559 {
4560 def_bb = gimple_bb (reduc_def_stmt);
4561 def_stmt_loop = def_bb->loop_father;
4562 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
4563 loop_preheader_edge (def_stmt_loop));
4564 if (TREE_CODE (def_arg) == SSA_NAME
4565 && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
4566 && gimple_code (def_arg_stmt) == GIMPLE_PHI
4567 && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
4568 && vinfo_for_stmt (def_arg_stmt)
4569 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
4570 == vect_double_reduction_def)
4571 double_reduc = true;
4572 }
4573
4574 epilog_reduc_code = ERROR_MARK;
4575 if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
4576 {
4577 reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out,
4578 optab_default);
4579 if (!reduc_optab)
4580 {
4581 if (vect_print_dump_info (REPORT_DETAILS))
4582 fprintf (vect_dump, "no optab for reduction.");
4583
4584 epilog_reduc_code = ERROR_MARK;
4585 }
4586
4587 if (reduc_optab
4588 && optab_handler (reduc_optab, vec_mode) == CODE_FOR_nothing)
4589 {
4590 if (vect_print_dump_info (REPORT_DETAILS))
4591 fprintf (vect_dump, "reduc op not supported by target.");
4592
4593 epilog_reduc_code = ERROR_MARK;
4594 }
4595 }
4596 else
4597 {
4598 if (!nested_cycle || double_reduc)
4599 {
4600 if (vect_print_dump_info (REPORT_DETAILS))
4601 fprintf (vect_dump, "no reduc code for scalar code.");
4602
4603 return false;
4604 }
4605 }
4606
4607 if (double_reduc && ncopies > 1)
4608 {
4609 if (vect_print_dump_info (REPORT_DETAILS))
4610 fprintf (vect_dump, "multiple types in double reduction");
4611
4612 return false;
4613 }
4614
4615 /* In case of widening multiplication by a constant, we update the type
4616 of the constant to be the type of the other operand. We check that the
4617 constant fits the type in the pattern recognition pass. */
4618 if (code == DOT_PROD_EXPR
4619 && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
4620 {
4621 if (TREE_CODE (ops[0]) == INTEGER_CST)
4622 ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
4623 else if (TREE_CODE (ops[1]) == INTEGER_CST)
4624 ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
4625 else
4626 {
4627 if (vect_print_dump_info (REPORT_DETAILS))
4628 fprintf (vect_dump, "invalid types in dot-prod");
4629
4630 return false;
4631 }
4632 }
4633
4634 if (!vec_stmt) /* transformation not required. */
4635 {
4636 if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
4637 return false;
4638 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
4639 return true;
4640 }
4641
4642 /** Transform. **/
4643
4644 if (vect_print_dump_info (REPORT_DETAILS))
4645 fprintf (vect_dump, "transform reduction.");
4646
4647 /* FORNOW: Multiple types are not supported for condition. */
4648 if (code == COND_EXPR)
4649 gcc_assert (ncopies == 1);
4650
4651 /* Create the destination vector */
4652 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
4653
4654 /* In case the vectorization factor (VF) is bigger than the number
4655 of elements that we can fit in a vectype (nunits), we have to generate
4656 more than one vector stmt - i.e., we need to "unroll" the
4657 vector stmt by a factor VF/nunits. For more details see documentation
4658 in vectorizable_operation. */
4659
4660 /* If the reduction is used in an outer loop we need to generate
4661 VF intermediate results, like so (e.g. for ncopies=2):
4662 r0 = phi (init, r0)
4663 r1 = phi (init, r1)
4664 r0 = x0 + r0;
4665 r1 = x1 + r1;
4666 (i.e. we generate VF results in 2 registers).
4667 In this case we have a separate def-use cycle for each copy, and therefore
4668 for each copy we get the vector def for the reduction variable from the
4669 respective phi node created for this copy.
4670
4671 Otherwise (the reduction is unused in the loop nest), we can combine
4672 together intermediate results, like so (e.g. for ncopies=2):
4673 r = phi (init, r)
4674 r = x0 + r;
4675 r = x1 + r;
4676 (i.e. we generate VF/2 results in a single register).
4677 In this case for each copy we get the vector def for the reduction variable
4678 from the vectorized reduction operation generated in the previous iteration.
4679 */
4680
4681 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope)
4682 {
4683 single_defuse_cycle = true;
4684 epilog_copies = 1;
4685 }
4686 else
4687 epilog_copies = ncopies;
4688
4689 prev_stmt_info = NULL;
4690 prev_phi_info = NULL;
4691 if (slp_node)
4692 {
4693 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
4694 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype_out)
4695 == TYPE_VECTOR_SUBPARTS (vectype_in));
4696 }
4697 else
4698 {
4699 vec_num = 1;
4700 vec_oprnds0 = VEC_alloc (tree, heap, 1);
4701 if (op_type == ternary_op)
4702 vec_oprnds1 = VEC_alloc (tree, heap, 1);
4703 }
4704
4705 phis = VEC_alloc (gimple, heap, vec_num);
4706 vect_defs = VEC_alloc (tree, heap, vec_num);
4707 if (!slp_node)
4708 VEC_quick_push (tree, vect_defs, NULL_TREE);
4709
4710 for (j = 0; j < ncopies; j++)
4711 {
4712 if (j == 0 || !single_defuse_cycle)
4713 {
4714 for (i = 0; i < vec_num; i++)
4715 {
4716 /* Create the reduction-phi that defines the reduction
4717 operand. */
4718 new_phi = create_phi_node (vec_dest, loop->header);
4719 set_vinfo_for_stmt (new_phi,
4720 new_stmt_vec_info (new_phi, loop_vinfo,
4721 NULL));
4722 if (j == 0 || slp_node)
4723 VEC_quick_push (gimple, phis, new_phi);
4724 }
4725 }
4726
4727 if (code == COND_EXPR)
4728 {
4729 gcc_assert (!slp_node);
4730 vectorizable_condition (stmt, gsi, vec_stmt,
4731 PHI_RESULT (VEC_index (gimple, phis, 0)),
4732 reduc_index);
4733 /* Multiple types are not supported for condition. */
4734 break;
4735 }
4736
4737 /* Handle uses. */
4738 if (j == 0)
4739 {
4740 tree op0, op1 = NULL_TREE;
4741
4742 op0 = ops[!reduc_index];
4743 if (op_type == ternary_op)
4744 {
4745 if (reduc_index == 0)
4746 op1 = ops[2];
4747 else
4748 op1 = ops[1];
4749 }
4750
4751 if (slp_node)
4752 vect_get_slp_defs (op0, op1, slp_node, &vec_oprnds0, &vec_oprnds1,
4753 -1);
4754 else
4755 {
4756 loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
4757 stmt, NULL);
4758 VEC_quick_push (tree, vec_oprnds0, loop_vec_def0);
4759 if (op_type == ternary_op)
4760 {
4761 loop_vec_def1 = vect_get_vec_def_for_operand (op1, stmt,
4762 NULL);
4763 VEC_quick_push (tree, vec_oprnds1, loop_vec_def1);
4764 }
4765 }
4766 }
4767 else
4768 {
4769 if (!slp_node)
4770 {
4771 enum vect_def_type dt = vect_unknown_def_type; /* Dummy */
4772 loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def0);
4773 VEC_replace (tree, vec_oprnds0, 0, loop_vec_def0);
4774 if (op_type == ternary_op)
4775 {
4776 loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt,
4777 loop_vec_def1);
4778 VEC_replace (tree, vec_oprnds1, 0, loop_vec_def1);
4779 }
4780 }
4781
4782 if (single_defuse_cycle)
4783 reduc_def = gimple_assign_lhs (new_stmt);
4784
4785 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
4786 }
4787
4788 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, def0)
4789 {
4790 if (slp_node)
4791 reduc_def = PHI_RESULT (VEC_index (gimple, phis, i));
4792 else
4793 {
4794 if (!single_defuse_cycle || j == 0)
4795 reduc_def = PHI_RESULT (new_phi);
4796 }
4797
4798 def1 = ((op_type == ternary_op)
4799 ? VEC_index (tree, vec_oprnds1, i) : NULL);
4800 if (op_type == binary_op)
4801 {
4802 if (reduc_index == 0)
4803 expr = build2 (code, vectype_out, reduc_def, def0);
4804 else
4805 expr = build2 (code, vectype_out, def0, reduc_def);
4806 }
4807 else
4808 {
4809 if (reduc_index == 0)
4810 expr = build3 (code, vectype_out, reduc_def, def0, def1);
4811 else
4812 {
4813 if (reduc_index == 1)
4814 expr = build3 (code, vectype_out, def0, reduc_def, def1);
4815 else
4816 expr = build3 (code, vectype_out, def0, def1, reduc_def);
4817 }
4818 }
4819
4820 new_stmt = gimple_build_assign (vec_dest, expr);
4821 new_temp = make_ssa_name (vec_dest, new_stmt);
4822 gimple_assign_set_lhs (new_stmt, new_temp);
4823 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4824
4825 if (slp_node)
4826 {
4827 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
4828 VEC_quick_push (tree, vect_defs, new_temp);
4829 }
4830 else
4831 VEC_replace (tree, vect_defs, 0, new_temp);
4832 }
4833
4834 if (slp_node)
4835 continue;
4836
4837 if (j == 0)
4838 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4839 else
4840 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4841
4842 prev_stmt_info = vinfo_for_stmt (new_stmt);
4843 prev_phi_info = vinfo_for_stmt (new_phi);
4844 }
4845
4846 /* Finalize the reduction-phi (set its arguments) and create the
4847 epilog reduction code. */
4848 if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
4849 {
4850 new_temp = gimple_assign_lhs (*vec_stmt);
4851 VEC_replace (tree, vect_defs, 0, new_temp);
4852 }
4853
4854 vect_create_epilog_for_reduction (vect_defs, stmt, epilog_copies,
4855 epilog_reduc_code, phis, reduc_index,
4856 double_reduc, slp_node);
4857
4858 VEC_free (gimple, heap, phis);
4859 VEC_free (tree, heap, vec_oprnds0);
4860 if (vec_oprnds1)
4861 VEC_free (tree, heap, vec_oprnds1);
4862
4863 return true;
4864 }
4865
4866 /* Function vect_min_worthwhile_factor.
4867
4868 For a loop where we could vectorize the operation indicated by CODE,
4869 return the minimum vectorization factor that makes it worthwhile
4870 to use generic vectors. */
4871 int
4872 vect_min_worthwhile_factor (enum tree_code code)
4873 {
4874 switch (code)
4875 {
4876 case PLUS_EXPR:
4877 case MINUS_EXPR:
4878 case NEGATE_EXPR:
4879 return 4;
4880
4881 case BIT_AND_EXPR:
4882 case BIT_IOR_EXPR:
4883 case BIT_XOR_EXPR:
4884 case BIT_NOT_EXPR:
4885 return 2;
4886
4887 default:
4888 return INT_MAX;
4889 }
4890 }
4891
4892
4893 /* Function vectorizable_induction
4894
4895 Check if PHI performs an induction computation that can be vectorized.
4896 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
4897 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
4898 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
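/* For illustration only (hypothetical source code): a simple induction
   handled here is the PHI node for 'j' in

       for (i = 0, j = 0; i < N; i++, j += 3)
         a[i] = j;

   whose vectorized form is a vector PHI starting at {0, 3, 6, ...} and
   advancing by {3*VF, 3*VF, ...} on each vector iteration.  */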
4899
4900 bool
4901 vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
4902 gimple *vec_stmt)
4903 {
4904 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
4905 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4906 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4907 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4908 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4909 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4910 tree vec_def;
4911
4912 gcc_assert (ncopies >= 1);
4913 /* FORNOW. This restriction should be relaxed. */
4914 if (nested_in_vect_loop_p (loop, phi) && ncopies > 1)
4915 {
4916 if (vect_print_dump_info (REPORT_DETAILS))
4917 fprintf (vect_dump, "multiple types in nested loop.");
4918 return false;
4919 }
4920
4921 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4922 return false;
4923
4924 /* FORNOW: SLP not supported. */
4925 if (STMT_SLP_TYPE (stmt_info))
4926 return false;
4927
4928 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
4929
4930 if (gimple_code (phi) != GIMPLE_PHI)
4931 return false;
4932
4933 if (!vec_stmt) /* transformation not required. */
4934 {
4935 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
4936 if (vect_print_dump_info (REPORT_DETAILS))
4937 fprintf (vect_dump, "=== vectorizable_induction ===");
4938 vect_model_induction_cost (stmt_info, ncopies);
4939 return true;
4940 }
4941
4942 /** Transform. **/
4943
4944 if (vect_print_dump_info (REPORT_DETAILS))
4945 fprintf (vect_dump, "transform induction phi.");
4946
4947 vec_def = get_initial_def_for_induction (phi);
4948 *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
4949 return true;
4950 }
4951
4952 /* Function vectorizable_live_operation.
4953
4954 STMT computes a value that is used outside the loop. Check if
4955 it can be supported. */
4956
4957 bool
4958 vectorizable_live_operation (gimple stmt,
4959 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
4960 gimple *vec_stmt ATTRIBUTE_UNUSED)
4961 {
4962 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4963 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4964 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4965 int i;
4966 int op_type;
4967 tree op;
4968 tree def;
4969 gimple def_stmt;
4970 enum vect_def_type dt;
4971 enum tree_code code;
4972 enum gimple_rhs_class rhs_class;
4973
4974 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
4975
4976 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
4977 return false;
4978
4979 if (!is_gimple_assign (stmt))
4980 return false;
4981
4982 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4983 return false;
4984
4985 /* FORNOW. CHECKME. */
4986 if (nested_in_vect_loop_p (loop, stmt))
4987 return false;
4988
4989 code = gimple_assign_rhs_code (stmt);
4990 op_type = TREE_CODE_LENGTH (code);
4991 rhs_class = get_gimple_rhs_class (code);
4992 gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
4993 gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);
4994
4995 /* FORNOW: support only if all uses are invariant. This means
4996 that the scalar operations can remain in place, unvectorized.
4997 The original last scalar value that they compute will be used. */
4998
4999 for (i = 0; i < op_type; i++)
5000 {
5001 if (rhs_class == GIMPLE_SINGLE_RHS)
5002 op = TREE_OPERAND (gimple_op (stmt, 1), i);
5003 else
5004 op = gimple_op (stmt, i + 1);
5005 if (op
5006 && !vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def, &dt))
5007 {
5008 if (vect_print_dump_info (REPORT_DETAILS))
5009 fprintf (vect_dump, "use not simple.");
5010 return false;
5011 }
5012
5013 if (dt != vect_external_def && dt != vect_constant_def)
5014 return false;
5015 }
5016
5017 /* No transformation is required for the cases we currently support. */
5018 return true;
5019 }
5020
5021 /* Kill any debug uses outside LOOP of SSA names defined in STMT. */
5022
5023 static void
5024 vect_loop_kill_debug_uses (struct loop *loop, gimple stmt)
5025 {
5026 ssa_op_iter op_iter;
5027 imm_use_iterator imm_iter;
5028 def_operand_p def_p;
5029 gimple ustmt;
5030
5031 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
5032 {
5033 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
5034 {
5035 basic_block bb;
5036
5037 if (!is_gimple_debug (ustmt))
5038 continue;
5039
5040 bb = gimple_bb (ustmt);
5041
5042 if (!flow_bb_inside_loop_p (loop, bb))
5043 {
5044 if (gimple_debug_bind_p (ustmt))
5045 {
5046 if (vect_print_dump_info (REPORT_DETAILS))
5047 fprintf (vect_dump, "killing debug use");
5048
5049 gimple_debug_bind_reset_value (ustmt);
5050 update_stmt (ustmt);
5051 }
5052 else
5053 gcc_unreachable ();
5054 }
5055 }
5056 }
5057 }
5058
5059 /* Function vect_transform_loop.
5060
5061 The analysis phase has determined that the loop is vectorizable.
5062 Vectorize the loop - create vectorized stmts to replace the scalar
5063 stmts in the loop, and update the loop exit condition. */
5064
5065 void
5066 vect_transform_loop (loop_vec_info loop_vinfo)
5067 {
5068 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5069 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
5070 int nbbs = loop->num_nodes;
5071 gimple_stmt_iterator si;
5072 int i;
5073 tree ratio = NULL;
5074 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5075 bool strided_store;
5076 bool slp_scheduled = false;
5077 unsigned int nunits;
5078 tree cond_expr = NULL_TREE;
5079 gimple_seq cond_expr_stmt_list = NULL;
5080 bool do_peeling_for_loop_bound;
5081 gimple stmt, pattern_stmt;
5082 bool transform_pattern_stmt = false;
5083
5084 if (vect_print_dump_info (REPORT_DETAILS))
5085 fprintf (vect_dump, "=== vec_transform_loop ===");
5086
5087 /* Peel the loop if there are data refs with unknown alignment.
5088 Only one data ref with unknown alignment is allowed. */
5089
5090 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
5091 vect_do_peeling_for_alignment (loop_vinfo);
5092
5093 do_peeling_for_loop_bound
5094 = (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
5095 || (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
5096 && LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0)
5097 || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo));
5098
5099 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
5100 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
5101 vect_loop_versioning (loop_vinfo,
5102 !do_peeling_for_loop_bound,
5103 &cond_expr, &cond_expr_stmt_list);
5104
5105 /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
5106 compile time constant), or it is a constant that is not divisible by the
5107 vectorization factor, then an epilog loop needs to be created.
5108 We therefore duplicate the loop: the original loop will be vectorized,
5109 and will compute the first (n/VF) iterations. The second copy of the loop
5110 will remain scalar and will compute the remaining (n%VF) iterations.
5111 (VF is the vectorization factor). */
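/* For example (hypothetical numbers): with n == 103 and VF == 4 the
   vectorized loop executes 25 iterations, covering the first 100 scalar
   iterations, and the scalar epilog loop executes the remaining 3.  */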
5112
5113 if (do_peeling_for_loop_bound)
5114 vect_do_peeling_for_loop_bound (loop_vinfo, &ratio,
5115 cond_expr, cond_expr_stmt_list);
5116 else
5117 ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
5118 LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);
5119
5120 /* 1) Make sure the loop header has exactly two entries
5121 2) Make sure we have a preheader basic block. */
5122
5123 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
5124
5125 split_edge (loop_preheader_edge (loop));
5126
5127 /* FORNOW: the vectorizer supports only loops whose body consists
5128 of one basic block (header + empty latch). When the vectorizer
5129 supports more involved loop forms, the order in which the BBs are
5130 traversed needs to be reconsidered. */
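/* Editor's note (hedged summary): "one basic block" means the canonical form
   in which the header block holds all of the loop's real stmts plus the exit
   test, and the latch block is empty and simply branches back to the header.
   Loops whose bodies still contain control flow are expected to have been
   flattened by if-conversion before this point, or they are rejected during
   the analysis phase.  */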
5131
5132 for (i = 0; i < nbbs; i++)
5133 {
5134 basic_block bb = bbs[i];
5135 stmt_vec_info stmt_info;
5136 gimple phi;
5137
5138 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5139 {
5140 phi = gsi_stmt (si);
5141 if (vect_print_dump_info (REPORT_DETAILS))
5142 {
5143 fprintf (vect_dump, "------>vectorizing phi: ");
5144 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
5145 }
5146 stmt_info = vinfo_for_stmt (phi);
5147 if (!stmt_info)
5148 continue;
5149
5150 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
5151 vect_loop_kill_debug_uses (loop, phi);
5152
5153 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5154 && !STMT_VINFO_LIVE_P (stmt_info))
5155 continue;
5156
5157 if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
5158 != (unsigned HOST_WIDE_INT) vectorization_factor)
5159 && vect_print_dump_info (REPORT_DETAILS))
5160 fprintf (vect_dump, "multiple-types.");
5161
5162 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
5163 {
5164 if (vect_print_dump_info (REPORT_DETAILS))
5165 fprintf (vect_dump, "transform phi.");
5166 vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
5167 }
5168 }
5169
5170 pattern_stmt = NULL;
5171 for (si = gsi_start_bb (bb); !gsi_end_p (si) || transform_pattern_stmt;)
5172 {
5173 bool is_store;
5174
5175 if (transform_pattern_stmt)
5176 {
5177 stmt = pattern_stmt;
5178 transform_pattern_stmt = false;
5179 }
5180 else
5181 stmt = gsi_stmt (si);
5182
5183 if (vect_print_dump_info (REPORT_DETAILS))
5184 {
5185 fprintf (vect_dump, "------>vectorizing statement: ");
5186 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
5187 }
5188
5189 stmt_info = vinfo_for_stmt (stmt);
5190
5191 /* vector stmts created in the outer-loop during vectorization of
5192 stmts in an inner-loop may not have a stmt_info, and do not
5193 need to be vectorized. */
5194 if (!stmt_info)
5195 {
5196 gsi_next (&si);
5197 continue;
5198 }
5199
5200 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
5201 vect_loop_kill_debug_uses (loop, stmt);
5202
5203 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5204 && !STMT_VINFO_LIVE_P (stmt_info))
5205 {
5206 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
5207 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
5208 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
5209 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
5210 {
5211 stmt = pattern_stmt;
5212 stmt_info = vinfo_for_stmt (stmt);
5213 }
5214 else
5215 {
5216 gsi_next (&si);
5217 continue;
5218 }
5219 }
5220 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
5221 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
5222 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
5223 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
5224 transform_pattern_stmt = true;
5225
5226 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
5227 nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (
5228 STMT_VINFO_VECTYPE (stmt_info));
5229 if (!STMT_SLP_TYPE (stmt_info)
5230 && nunits != (unsigned int) vectorization_factor
5231 && vect_print_dump_info (REPORT_DETAILS))
5232 /* For SLP, VF is set according to the unrolling factor, and not to
5233 the vector size; hence for SLP this print is not valid. */
5234 fprintf (vect_dump, "multiple-types.");
5235
5236 /* SLP. Schedule all the SLP instances when the first SLP stmt is
5237 reached. */
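/* Editor's sketch (hedged; illustrative only): loop-aware SLP groups
   isomorphic scalar stmts from the same iteration into one vector stmt,
   e.g.

     for (i = 0; i < n; i++)
       {
         a[4*i]   = b[4*i];
         a[4*i+1] = b[4*i+1];
         a[4*i+2] = b[4*i+2];
         a[4*i+3] = b[4*i+3];   // the four copies form one SLP instance
       }

   can become a single vector load and store per iteration, which is why the
   SLP unrolling factor, rather than the vector size, determines VF for such
   stmts.  The example is hypothetical.  */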
5238 if (STMT_SLP_TYPE (stmt_info))
5239 {
5240 if (!slp_scheduled)
5241 {
5242 slp_scheduled = true;
5243
5244 if (vect_print_dump_info (REPORT_DETAILS))
5245 fprintf (vect_dump, "=== scheduling SLP instances ===");
5246
5247 vect_schedule_slp (loop_vinfo, NULL);
5248 }
5249
5250 /* Hybrid SLP stmts must be vectorized in addition to SLP. */
5251 if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
5252 {
5253 if (!transform_pattern_stmt)
5254 gsi_next (&si);
5255 continue;
5256 }
5257 }
5258
5259 /* -------- vectorize statement ------------ */
5260 if (vect_print_dump_info (REPORT_DETAILS))
5261 fprintf (vect_dump, "transform statement.");
5262
5263 strided_store = false;
5264 is_store = vect_transform_stmt (stmt, &si, &strided_store, NULL, NULL);
5265 if (is_store)
5266 {
5267 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
5268 {
5269 /* Interleaving. If IS_STORE is TRUE, the vectorization of the
5270 interleaving chain was completed - free all the stores in
5271 the chain. */
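/* Editor's sketch (hedged; illustrative only): an interleaving chain of
   stores is e.g.

     for (i = 0; i < n; i++)
       {
         a[2*i]   = x[i];      // first element of the group
         a[2*i+1] = y[i];      // chained via GROUP_FIRST_ELEMENT
       }

   The vector code for the whole group is emitted (using permutations) once
   the last member is reached, so the remaining scalar stores can simply be
   removed here.  */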
5272 vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info));
5273 gsi_remove (&si, true);
5274 continue;
5275 }
5276 else
5277 {
5278 /* Free the attached stmt_vec_info and remove the stmt. */
5279 free_stmt_vec_info (stmt);
5280 gsi_remove (&si, true);
5281 continue;
5282 }
5283 }
5284
5285 if (!transform_pattern_stmt)
5286 gsi_next (&si);
5287 } /* stmts in BB */
5288 } /* BBs in loop */
5289
5290 slpeel_make_loop_iterate_ntimes (loop, ratio);
5291
5292 /* The memory tags and pointers in vectorized statements need to
5293 have their SSA forms updated. FIXME, why can't this be delayed
5294 until all the loops have been transformed? */
5295 update_ssa (TODO_update_ssa);
5296
5297 if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
5298 fprintf (vect_dump, "LOOP VECTORIZED.");
5299 if (loop->inner && vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
5300 fprintf (vect_dump, "OUTER LOOP VECTORIZED.");
5301 }
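/* Editor's summary sketch (hedged; none of the names below are real
   functions in this file): after vect_transform_loop and the peeling and
   versioning helpers above, the worst-case layout for one original loop is
   roughly

     if (!runtime_alias_and_alignment_checks_hold)
       scalar_version_of_loop ();            // from loop versioning
     else
       {
         scalar_prologue_for_alignment ();   // from peeling for alignment
         vectorized_main_loop ();            // iterates RATIO times
         scalar_epilog_for_remainder ();     // from peeling for loop bound
       }

   Each of these pieces is created only when the corresponding condition in
   this function holds.  */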