/* Loop Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software
   Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com> and
   Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "params.h"
#include "toplev.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"

/* Loop Vectorization Pass.

   This pass tries to vectorize loops.

   For example, the vectorizer transforms the following simple loop:

        short a[N]; short b[N]; short c[N]; int i;

        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   as if it were manually vectorized by rewriting the source code into:

        typedef int __attribute__((mode(V8HI))) v8hi;
        short a[N]; short b[N]; short c[N]; int i;
        v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
        v8hi va, vb, vc;

        for (i=0; i<N/8; i++){
          vb = pb[i];
          vc = pc[i];
          va = vb + vc;
          pa[i] = va;
        }

   The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   had successfully passed the analysis phase.
   Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMEs), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFs whose base is an array DECL
   (not a pointer), and INDIRECT_REFs through pointers; both array and
   pointer accesses are required to have a simple (consecutive) access
   pattern.

   Analysis phase:
   ===============
   The driver for the analysis phase is vect_analyze_loop().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

   During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.

   Transformation phase:
   =====================
   The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer will be used for the vectorization of
   subsequent stmts which use the def of stmt S.  Stmt S is removed if it
   writes to memory; otherwise, we rely on dead code elimination for
   removing it.

   For example, say stmt S1 was vectorized into stmt VS1:

   VS1: vb = px[i];
   S1:  b = x[i];    STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

   VS1: vb = px[i];
   S1:  b = x[i];    STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   VS2: va = vb;
   S2:  a = b;       STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2

   Operands that are not SSA_NAMEs are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.

   Target modeling:
   =================
   Currently the only target-specific information that is used is the
   size of the vector (in bytes) - "UNITS_PER_SIMD_WORD".  Targets that can
   support different sizes of vectors will, for now, need to specify one
   value for "UNITS_PER_SIMD_WORD".  More flexibility will be added in the
   future.

   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported, the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g., optab_handler (add_optab, V8HImode)->insn_code).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.

   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/

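/* For illustration only, a minimal sketch of the support test described
   above, assuming we are asking about an addition on some vector type
   VECTYPE (this is not the pass's actual code path; the real checks are
   performed in the vectorizable_* analysis routines):

        optab op = optab_for_tree_code (PLUS_EXPR, vectype, optab_default);
        if (!op
            || optab_handler (op, TYPE_MODE (vectype))->insn_code
               == CODE_FOR_nothing)
          ... no target support - the stmt cannot be vectorized ...  */
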
/* Function vect_determine_vectorization_factor

   Determine the vectorization factor (VF).  VF is the number of data
   elements that are operated upon in parallel in a single iteration of the
   vectorized loop.  For example, when vectorizing a loop that operates on
   4-byte elements, on a target with a vector size (VS) of 16 bytes, the VF
   is set to 4, since 4 elements can fit in a single vector register.

   We currently support vectorization of loops in which all types operated
   upon are of the same size.  Therefore this function currently sets VF
   according to the size of the types operated upon, and fails if there are
   multiple sizes in the loop.

   VF is also the factor by which the loop iterations are strip-mined, e.g.:
   original loop:
        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   vectorized loop:
        for (i=0; i<N; i+=VF){
          a[i:VF] = b[i:VF] + c[i:VF];
        }
*/

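/* A small worked example (illustrative only): on a target with 16-byte
   vectors, a loop whose stmts all operate on "short" (2-byte) elements
   gets VF = 16/2 = 8 (the vector type is V8HI).  If the same loop also
   operated on "int" (4-byte) elements, two different vector types (V8HI
   and V4SI) would be involved, and the loop would be rejected - as noted
   above, multiple type sizes per loop are not yet supported.  */
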
static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  tree scalar_type;
  gimple phi;
  tree vectype;
  unsigned int nunits;
  stmt_vec_info stmt_info;
  int i;
  HOST_WIDE_INT dummy;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_determine_vectorization_factor ===");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
              scalar_type = TREE_TYPE (PHI_RESULT (phi));

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "vectype: ");
                  print_generic_expr (vect_dump, vectype, TDF_SLIM);
                }

              nunits = TYPE_VECTOR_SUBPARTS (vectype);
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "nunits = %d", nunits);

              if (!vectorization_factor
                  || (nunits > vectorization_factor))
                vectorization_factor = nunits;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (stmt);

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining statement: ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          /* Skip stmts which do not need to be vectorized.  */
          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "skip.");
              continue;
            }

          if (gimple_get_lhs (stmt) == NULL_TREE)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump, "not vectorized: irregular stmt.");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump, "not vectorized: vector stmt in loop:");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (STMT_VINFO_VECTYPE (stmt_info))
            {
              /* The only case when a vectype had been already set is for
                 stmts that contain a dataref, or for "pattern-stmts" (stmts
                 generated by the vectorizer to represent/replace a certain
                 idiom).  */
              gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
                          || is_pattern_stmt_p (stmt_info));
              vectype = STMT_VINFO_VECTYPE (stmt_info);
            }
          else
            {
              gcc_assert (!STMT_VINFO_DATA_REF (stmt_info)
                          && !is_pattern_stmt_p (stmt_info));

              scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
                                                           &dummy);
              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;
            }

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "vectype: ");
              print_generic_expr (vect_dump, vectype, TDF_SLIM);
            }

          nunits = TYPE_VECTOR_SUBPARTS (vectype);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "nunits = %d", nunits);

          if (!vectorization_factor
              || (nunits > vectorization_factor))
            vectorization_factor = nunits;
        }
    }

  /* TODO: Analyze cost.  Decide if it is worthwhile to vectorize.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vectorization factor = %d", vectorization_factor);
  if (vectorization_factor <= 1)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported data-type");
      return false;
    }
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  return true;
}


/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution with constant step.  */

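/* E.g., for the loop

        for (i = 0; i < N; i++)
          a[i] = ...;

   the access function of 'i' computed by scev is {0, +, 1}_1 (a polynomial
   chrec with initial value 0 and constant step 1 in loop 1), which this
   predicate accepts with *INIT = 0 and *STEP = 1.  A step that is not a
   compile-time constant, or a second-order chrec such as
   {0, +, {1, +, 1}_1}_1, is rejected.  */
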
static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
                             tree * step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);

  /* When there is no evolution in this loop, the evolution function
     is not "simple".  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2,
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;

  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn,
                                                           loop_nb));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "step: ");
      print_generic_expr (vect_dump, step_expr, TDF_SLIM);
      fprintf (vect_dump, ", init: ");
      print_generic_expr (vect_dump, init_expr, TDF_SLIM);
    }

  *init = init_expr;
  *step = step_expr;

  if (TREE_CODE (step_expr) != INTEGER_CST)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "step unknown.");
      return false;
    }

  return true;
}

/* Function vect_analyze_scalar_cycles_1.

   Examine the cross-iteration def-use cycles of scalar variables
   in LOOP.  LOOP_VINFO represents the loop that is now being
   considered for vectorization (can be LOOP, or an outer-loop
   enclosing LOOP).  */

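/* E.g., given the loop-header phis

        loop_header:
          i_1 = PHI <0 (preheader), i_2 (latch)>
          sum_1 = PHI <0.0 (preheader), sum_2 (latch)>
          ...
          i_2 = i_1 + 1;
          sum_2 = sum_1 + a[i_1];

   the first phase below recognizes i_1 as an induction (its access
   function is the simple IV {0, +, 1}); sum_1 has no simple evolution,
   so it is queued on the worklist and classified by the second phase
   as a reduction (via vect_is_simple_reduction).  */
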
static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
  basic_block bb = loop->header;
  tree dummy;
  VEC(gimple,heap) *worklist = VEC_alloc (gimple, heap, 64);
  gimple_stmt_iterator gsi;
  bool double_reduc;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_scalar_cycles ===");

  /* First - identify all inductions.  Reduction detection assumes that all
     the inductions have been identified; therefore, this order must not be
     changed.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree access_fn = NULL;
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      /* Skip virtual phis.  The data dependences that are associated with
         virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
      if (!is_gimple_reg (SSA_NAME_VAR (def)))
        continue;

      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;

      /* Analyze the evolution function.  */
      access_fn = analyze_scalar_evolution (loop, def);
      if (access_fn && vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Access function of PHI: ");
          print_generic_expr (vect_dump, access_fn, TDF_SLIM);
        }

      if (!access_fn
          || !vect_is_simple_iv_evolution (loop->num, access_fn,
                                           &dummy, &dummy))
        {
          VEC_safe_push (gimple, heap, worklist, phi);
          continue;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Detected induction.");
      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
    }


  /* Second - identify all reductions and nested cycles.  */
  while (VEC_length (gimple, worklist) > 0)
    {
      gimple phi = VEC_pop (gimple, worklist);
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
      gimple reduc_stmt;
      bool nested_cycle;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      gcc_assert (is_gimple_reg (SSA_NAME_VAR (def)));
      gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);

      nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
      reduc_stmt = vect_is_simple_reduction (loop_vinfo, phi, !nested_cycle,
                                             &double_reduc);
      if (reduc_stmt)
        {
          if (double_reduc)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "Detected double reduction.");

              STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
              STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                vect_double_reduction_def;
            }
          else
            {
              if (nested_cycle)
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump,
                             "Detected vectorizable nested cycle.");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                    vect_nested_cycle;
                }
              else
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "Detected reduction.");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                    vect_reduction_def;
                }
            }
        }
      else
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Unknown def-use cycle pattern.");
    }

  VEC_free (gimple, heap, worklist);
}


/* Function vect_analyze_scalar_cycles.

   Examine the cross-iteration def-use cycles of scalar variables, by
   analyzing the loop-header PHIs of scalar variables; classify each
   cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also for its
   inner-loop, if it exists.

   Examples of scalar cycles:

   Example1: reduction:

              loop1:
              for (i=0; i<N; i++)
                 sum += a[i];

   Example2: induction:

              loop2:
              for (i=0; i<N; i++)
                 a[i] = i;  */

static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  vect_analyze_scalar_cycles_1 (loop_vinfo, loop);

  /* When vectorizing an outer-loop, the inner-loop is executed
     sequentially.  Reductions in such an inner-loop therefore have
     different properties than the reductions in the nest that gets
     vectorized:
     1. When vectorized, they are executed in the same order as in the
        original scalar loop, so we can't change the order of computation
        when vectorizing them.
     2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
        current checks are too strict.  */

  if (loop->inner)
    vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}

/* Function vect_get_loop_niters.

   Determine the number of iterations the loop executes.
   If an expression that represents the number of iterations
   can be constructed, place it in NUMBER_OF_ITERATIONS.
   Return the loop exit condition.  */

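/* E.g., for a counted loop such as

        for (i = 0; i < n; i++)
          ...

   number_of_exit_cond_executions is expected to yield an expression in
   terms of 'n' (assuming scev can analyze the exit test); that expression
   is recorded in *NUMBER_OF_ITERATIONS, and the returned stmt is the
   GIMPLE_COND that controls the loop exit.  */
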
static gimple
vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
{
  tree niters;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== get_loop_niters ===");

  niters = number_of_exit_cond_executions (loop);

  if (niters != NULL_TREE
      && niters != chrec_dont_know)
    {
      *number_of_iterations = niters;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "==> get_loop_niters:");
          print_generic_expr (vect_dump, *number_of_iterations, TDF_SLIM);
        }
    }

  return get_loop_exit_condition (loop);
}


/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.  */

static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *)data;
  return flow_bb_inside_loop_p (loop, bb);
}


/* Function new_loop_vec_info.

   Create and initialize a new loop_vec_info struct for LOOP, as well as
   stmt_vec_info structs for all the stmts in LOOP.  */

static loop_vec_info
new_loop_vec_info (struct loop *loop)
{
  loop_vec_info res;
  basic_block *bbs;
  gimple_stmt_iterator si;
  unsigned int i, nbbs;

  res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
  LOOP_VINFO_LOOP (res) = loop;

  bbs = get_loop_body (loop);

  /* Create/Update stmt_info for all stmts in the loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      /* BBs in a nested inner-loop will have been already processed (because
         we will have called vect_analyze_loop_form for any nested
         inner-loop).  Therefore, for stmts in an inner-loop we just want to
         update the STMT_VINFO_LOOP_VINFO field of their stmt_info to point
         to the new loop_info of the outer-loop we are currently considering
         for vectorization (instead of the loop_info of the inner-loop).
         For stmts in other BBs we need to create a stmt_info from
         scratch.  */
      if (bb->loop_father != loop)
        {
          /* Inner-loop bb.  */
          gcc_assert (loop->inner && bb->loop_father == loop->inner);
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (phi);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
        }
      else
        {
          /* bb in current nest.  */
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              gimple_set_uid (phi, 0);
              set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL));
            }

          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              gimple_set_uid (stmt, 0);
              set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL));
            }
        }
    }

  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the
     same as reversed postorder traversal, so we are safe.  */

  free (bbs);
  bbs = XCNEWVEC (basic_block, loop->num_nodes);
  nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
                             bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);

  LOOP_VINFO_BBS (res) = bbs;
  LOOP_VINFO_NITERS (res) = NULL;
  LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
  LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
  LOOP_VINFO_VECTORIZABLE_P (res) = 0;
  LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
  LOOP_VINFO_VECT_FACTOR (res) = 0;
  LOOP_VINFO_DATAREFS (res) = VEC_alloc (data_reference_p, heap, 10);
  LOOP_VINFO_DDRS (res) = VEC_alloc (ddr_p, heap, 10 * 10);
  LOOP_VINFO_UNALIGNED_DR (res) = NULL;
  LOOP_VINFO_MAY_MISALIGN_STMTS (res) =
    VEC_alloc (gimple, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
  LOOP_VINFO_MAY_ALIAS_DDRS (res) =
    VEC_alloc (ddr_p, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
  LOOP_VINFO_STRIDED_STORES (res) = VEC_alloc (gimple, heap, 10);
  LOOP_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 10);
  LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;

  return res;
}


/* Function destroy_loop_vec_info.

   Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all
   the stmts in the loop.  */

void
destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
{
  struct loop *loop;
  basic_block *bbs;
  int nbbs;
  gimple_stmt_iterator si;
  int j;
  VEC (slp_instance, heap) *slp_instances;
  slp_instance instance;

  if (!loop_vinfo)
    return;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  bbs = LOOP_VINFO_BBS (loop_vinfo);
  nbbs = loop->num_nodes;

  if (!clean_stmts)
    {
      free (LOOP_VINFO_BBS (loop_vinfo));
      free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
      free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
      VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));

      free (loop_vinfo);
      loop->aux = NULL;
      return;
    }

  for (j = 0; j < nbbs; j++)
    {
      basic_block bb = bbs[j];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        free_stmt_vec_info (gsi_stmt (si));

      for (si = gsi_start_bb (bb); !gsi_end_p (si); )
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (stmt_info)
            {
              /* Check if this is a "pattern stmt" (introduced by the
                 vectorizer during the pattern recognition pass).  */
              bool remove_stmt_p = false;
              gimple orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
              if (orig_stmt)
                {
                  stmt_vec_info orig_stmt_info = vinfo_for_stmt (orig_stmt);
                  if (orig_stmt_info
                      && STMT_VINFO_IN_PATTERN_P (orig_stmt_info))
                    remove_stmt_p = true;
                }

              /* Free stmt_vec_info.  */
              free_stmt_vec_info (stmt);

              /* Remove dead "pattern stmts".  */
              if (remove_stmt_p)
                gsi_remove (&si, true);
            }
          gsi_next (&si);
        }
    }

  free (LOOP_VINFO_BBS (loop_vinfo));
  free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
  free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
  VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  for (j = 0; VEC_iterate (slp_instance, slp_instances, j, instance); j++)
    vect_free_slp_instance (instance);

  VEC_free (slp_instance, heap, LOOP_VINFO_SLP_INSTANCES (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo));

  free (loop_vinfo);
  loop->aux = NULL;
}


/* Function vect_analyze_loop_1.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  This is a subset of the analyses applied in
   vect_analyze_loop, to be applied on an inner-loop nested in the loop
   that is now considered for (outer-loop) vectorization.  */

static loop_vec_info
vect_analyze_loop_1 (struct loop *loop)
{
  loop_vec_info loop_vinfo;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest_1 =====");

  /* Check the CFG characteristics of the loop (nesting, entry/exit,
     etc.).  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad inner-loop form.");
      return NULL;
    }

  return loop_vinfo;
}


/* Function vect_analyze_loop_form.

   Verify that certain CFG restrictions hold, including:
   - the loop has a pre-header
   - the loop has a single entry and exit
   - the loop exit condition is simple enough, and the number of iterations
     can be analyzed (a countable loop).  */

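/* For example, a counted loop such as

        for (i = 0; i < n; i++)
          a[i] = b[i];

   satisfies the last restriction, whereas a loop whose exit test depends
   on a loop-varying load, e.g. a list traversal

        while (p)
          p = p->next;

   is not countable - scev cannot construct an expression for its number
   of iterations.  */
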
loop_vec_info
vect_analyze_loop_form (struct loop *loop)
{
  loop_vec_info loop_vinfo;
  gimple loop_cond;
  tree number_of_iterations = NULL;
  loop_vec_info inner_loop_vinfo = NULL;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_form ===");

  /* Different restrictions apply when we are considering an inner-most
     loop, vs. an outer (nested) loop.
     (FORNOW. May want to relax some of these restrictions in the
     future).  */

  if (!loop->inner)
    {
      /* Inner-most loop.  We currently require that the number of BBs is
         exactly 2 (the header and latch).  Vectorizable inner-most loops
         look like this:

                        (pre-header)
                           |
                          header <--------+
                           | |            |
                           | +--> latch --+
                           |
                        (exit-bb)  */

      if (loop->num_nodes != 2)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: control flow in loop.");
          return NULL;
        }

      if (empty_block_p (loop->header))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: empty loop.");
          return NULL;
        }
    }
  else
    {
      struct loop *innerloop = loop->inner;
      edge backedge, entryedge;

      /* Nested loop.  We currently require that the loop is doubly-nested,
         contains a single inner loop, and the number of BBs is exactly 5.
         Vectorizable outer-loops look like this:

                        (pre-header)
                           |
                          header <---+
                           |         |
                          inner-loop |
                           |         |
                          tail ------+
                           |
                        (exit-bb)

         The inner-loop has the properties expected of inner-most loops
         as described above.  */

      if ((loop->inner)->inner || (loop->inner)->next)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: multiple nested loops.");
          return NULL;
        }

      /* Analyze the inner-loop.  */
      inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
      if (!inner_loop_vinfo)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: Bad inner loop.");
          return NULL;
        }

      if (!expr_invariant_in_loop_p (loop,
                                     LOOP_VINFO_NITERS (inner_loop_vinfo)))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump,
                     "not vectorized: inner-loop count not invariant.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (loop->num_nodes != 5)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: control flow in loop.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
      backedge = EDGE_PRED (innerloop->header, 1);
      entryedge = EDGE_PRED (innerloop->header, 0);
      if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
        {
          backedge = EDGE_PRED (innerloop->header, 0);
          entryedge = EDGE_PRED (innerloop->header, 1);
        }

      if (entryedge->src != loop->header
          || !single_exit (innerloop)
          || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump,
                     "not vectorized: unsupported outerloop form.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Considering outer-loop vectorization.");
    }

  if (!single_exit (loop)
      || EDGE_COUNT (loop->header->preds) != 2)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        {
          if (!single_exit (loop))
            fprintf (vect_dump, "not vectorized: multiple exits.");
          else if (EDGE_COUNT (loop->header->preds) != 2)
            fprintf (vect_dump, "not vectorized: too many incoming edges.");
        }
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* We assume that the loop exit condition is at the end of the loop, i.e.,
     that the loop is represented as a do-while (with a proper if-guard
     before the loop if needed), where the loop header contains all the
     executable statements, and the latch is empty.  */
  if (!empty_block_p (loop->latch)
      || phi_nodes (loop->latch))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: unexpected loop form.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* Make sure there exists a single-predecessor exit bb:  */
  if (!single_pred_p (single_exit (loop)->dest))
    {
      edge e = single_exit (loop);
      if (!(e->flags & EDGE_ABNORMAL))
        {
          split_loop_exit_edge (e);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "split exit edge.");
        }
      else
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: abnormal loop exit edge.");
          if (inner_loop_vinfo)
            destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }
    }

  loop_cond = vect_get_loop_niters (loop, &number_of_iterations);
  if (!loop_cond)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: complicated exit condition.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!number_of_iterations)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump,
                 "not vectorized: number of iterations cannot be computed.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (chrec_contains_undetermined (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "Infinite number of iterations.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!NITERS_KNOWN_P (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Symbolic number of iterations is ");
          print_generic_expr (vect_dump, number_of_iterations, TDF_DETAILS);
        }
    }
  else if (TREE_INT_CST_LOW (number_of_iterations) == 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: number of iterations = 0.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, false);
      return NULL;
    }

  loop_vinfo = new_loop_vec_info (loop);
  LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;

  STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;

  /* CHECKME: May want to keep it around in the future.  */
  if (inner_loop_vinfo)
    destroy_loop_vec_info (inner_loop_vinfo, false);

  gcc_assert (!loop->aux);
  loop->aux = loop_vinfo;
  return loop_vinfo;
}


/* Function vect_analyze_loop_operations.

   Scan the loop stmts and make sure they are all vectorizable.  */

static bool
vect_analyze_loop_operations (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  int i;
  gimple phi;
  stmt_vec_info stmt_info;
  bool need_to_vectorize = false;
  int min_profitable_iters;
  int min_scalar_loop_bound;
  unsigned int th;
  bool only_slp_in_loop = true, ok;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_operations ===");

  gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
  vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          ok = true;

          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          if (! is_loop_header_bb_p (bb))
            {
              /* Inner-loop loop-closed exit phi in outer-loop vectorization
                 (i.e. a phi in the tail of the outer-loop).
                 FORNOW: we currently don't support the case that these phis
                 are not used in the outer-loop (unless it is a double
                 reduction, i.e., this phi is vect_double_reduction_def),
                 because this case requires us to actually do something
                 here.  */
              if ((!STMT_VINFO_RELEVANT_P (stmt_info)
                   || STMT_VINFO_LIVE_P (stmt_info))
                  && STMT_VINFO_DEF_TYPE (stmt_info)
                     != vect_double_reduction_def)
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump,
                             "Unsupported loop-closed phi in outer-loop.");
                  return false;
                }
              continue;
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_LIVE_P (stmt_info))
            {
              /* FORNOW: not yet supported.  */
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                fprintf (vect_dump, "not vectorized: value used after loop.");
              return false;
            }

          if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
              && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
            {
              /* A scalar-dependence cycle that we don't support.  */
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                fprintf (vect_dump,
                         "not vectorized: scalar dependence cycle.");
              return false;
            }

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              need_to_vectorize = true;
              if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
                ok = vectorizable_induction (phi, NULL, NULL);
            }

          if (!ok)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: relevant phi not supported: ");
                  print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
                }
              return false;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          gcc_assert (stmt_info);

          if (!vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
            return false;

          if (STMT_VINFO_RELEVANT_P (stmt_info) && !PURE_SLP_STMT (stmt_info))
            /* STMT needs both SLP and loop-based vectorization.  */
            only_slp_in_loop = false;
        }
    } /* bbs */

  /* All operations in the loop are either irrelevant (deal with loop
     control, or dead), or only used outside the loop and can be moved
     out of the loop (e.g. invariants, inductions).  The loop can be
     optimized away by scalar optimizations.  We're better off not
     touching this loop.  */
  if (!need_to_vectorize)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump,
                 "All the computation can be taken out of the loop.");
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump,
                 "not vectorized: redundant loop. no profit to vectorize.");
      return false;
    }

  /* If all the stmts in the loop can be SLPed, we perform only SLP, and the
     vectorization factor of the loop is the unrolling factor required by
     the SLP instances.  If that unrolling factor is 1, we say that we
     perform pure SLP on the loop - cross-iteration parallelism is not
     exploited.  */
  if (only_slp_in_loop)
    vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
  else
    vectorization_factor = least_common_multiple (vectorization_factor,
                             LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));

  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump,
             "vectorization_factor = %d, niters = " HOST_WIDE_INT_PRINT_DEC,
             vectorization_factor, LOOP_VINFO_INT_NITERS (loop_vinfo));

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: iteration count too small.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: iteration count smaller than "
                 "vectorization factor.");
      return false;
    }

  /* Analyze cost.  Decide if it is worthwhile to vectorize.  */

  /* Once VF is set, SLP costs should be updated since the number of created
     vector stmts depends on VF.  */
  vect_update_slp_costs_according_to_vf (loop_vinfo);

  min_profitable_iters = vect_estimate_min_profitable_iters (loop_vinfo);
  LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters;

  if (min_profitable_iters < 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization not profitable.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: vector version will never be "
                 "profitable.");
      return false;
    }

  min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
                            * vectorization_factor) - 1);

  /* Use the cost model only if it is more conservative than the
     user-specified threshold.  */

  th = (unsigned) min_scalar_loop_bound;
  if (min_profitable_iters
      && (!min_scalar_loop_bound
          || min_profitable_iters > min_scalar_loop_bound))
    th = (unsigned) min_profitable_iters;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization not "
                 "profitable.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: iteration count smaller than "
                 "user specified loop bound parameter or minimum "
                 "profitable iterations (whichever is more conservative).");
      return false;
    }

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0
      || LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "epilog loop required.");
      if (!vect_can_advance_ivs_p (loop_vinfo))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump,
                     "not vectorized: can't create epilog loop 1.");
          return false;
        }
      if (!slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump,
                     "not vectorized: can't create epilog loop 2.");
          return false;
        }
    }

  return true;
}


/* Function vect_analyze_loop.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */

loop_vec_info
vect_analyze_loop (struct loop *loop)
{
  bool ok;
  loop_vec_info loop_vinfo;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest =====");

  if (loop_outer (loop)
      && loop_vec_info_for_loop (loop_outer (loop))
      && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop
                                      (loop_outer (loop))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop already vectorized.");
      return NULL;
    }

  /* Check the CFG characteristics of the loop (nesting, entry/exit,
     etc.).  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad loop form.");
      return NULL;
    }

  /* Find all data references in the loop (which correspond to vdefs/vuses)
     and analyze their evolution in the loop.

     FORNOW: Handle only simple array references whose alignment
     can be forced, and aligned pointer-references.  */

  ok = vect_analyze_data_refs (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data references.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Classify all cross-iteration scalar data-flow cycles.
     Cross-iteration cycles caused by virtual phis are analyzed
     separately.  */

  vect_analyze_scalar_cycles (loop_vinfo);

  vect_pattern_recog (loop_vinfo);

  /* Data-flow analysis to detect stmts that do not need to be
     vectorized.  */

  ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "unexpected pattern.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze the alignment of the data-refs in the loop.
     Fail if a data reference is found that cannot be vectorized.  */

  ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  ok = vect_determine_vectorization_factor (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "can't determine vectorization factor.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze data dependences between the data-refs in the loop.
     FORNOW: fail at the first data dependence that we encounter.  */

  ok = vect_analyze_data_ref_dependences (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data dependence.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze the access patterns of the data-refs in the loop (consecutive,
     complex, etc.).  FORNOW: Only handle consecutive access pattern.  */

  ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data access.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Prune the list of ddrs to be tested at run-time by versioning for
     alias.  It is important to call pruning after
     vect_analyze_data_ref_accesses, since we use grouping information
     gathered by interleaving analysis.  */
  ok = vect_prune_runtime_alias_test_list (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "too long list of versioning for alias "
                 "run-time tests.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Check the SLP opportunities in the loop, analyze and build SLP
     trees.  */
  ok = vect_analyze_slp (loop_vinfo, NULL);
  if (ok)
    {
      /* Decide which possible SLP instances to SLP.  */
      vect_make_slp_decision (loop_vinfo);

      /* Find stmts that need to be both vectorized and SLPed.  */
      vect_detect_hybrid_slp (loop_vinfo);
    }

  /* This pass will decide on using loop versioning and/or loop peeling in
     order to enhance the alignment of data references in the loop.  */

  ok = vect_enhance_data_refs_alignment (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Scan all the operations in the loop and make sure they are
     vectorizable.  */

  ok = vect_analyze_loop_operations (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad operation or unsupported loop bound.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;

  return loop_vinfo;
}


/* Function reduction_code_for_scalar_code

   Input:
   CODE - tree_code of a reduction operation.

   Output:
   REDUC_CODE - the corresponding tree-code to be used to reduce the
      vector of partial results into a single scalar result (which
      will also reside in a vector), or ERROR_MARK if the operation is
      a supported reduction operation but does not have such a tree-code.

   Return FALSE if CODE currently cannot be vectorized as a reduction.  */

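/* E.g., for a sum reduction (sum += a[i]) the scalar CODE is PLUS_EXPR
   and *REDUC_CODE is set to REDUC_PLUS_EXPR, which (when supported by
   the target) reduces a vector of partial sums to a single scalar sum
   in one operation.  A product reduction (MULT_EXPR) is still
   vectorizable, but has no such reduction tree-code, so *REDUC_CODE is
   set to ERROR_MARK and the final value is computed by other means
   (e.g. a sequence of element extractions and scalar operations).  */
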
static bool
reduction_code_for_scalar_code (enum tree_code code,
                                enum tree_code *reduc_code)
{
  switch (code)
    {
    case MAX_EXPR:
      *reduc_code = REDUC_MAX_EXPR;
      return true;

    case MIN_EXPR:
      *reduc_code = REDUC_MIN_EXPR;
      return true;

    case PLUS_EXPR:
      *reduc_code = REDUC_PLUS_EXPR;
      return true;

    case MULT_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case BIT_AND_EXPR:
      *reduc_code = ERROR_MARK;
      return true;

    default:
      return false;
    }
}


/* Error reporting helper for vect_is_simple_reduction below.  GIMPLE
   statement STMT is printed with a message MSG.  */

static void
report_vect_op (gimple stmt, const char *msg)
{
  fprintf (vect_dump, "%s", msg);
  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}


/* Function vect_is_simple_reduction

   (1) Detect a cross-iteration def-use cycle that represents a simple
   reduction computation.  We look for the following pattern:

   loop_header:
     a1 = phi < a0, a2 >
     a3 = ...
     a2 = operation (a3, a1)

   such that:
   1. operation is commutative and associative and it is safe to
      change the order of the computation (if CHECK_REDUCTION is true)
   2. no uses for a2 in the loop (a2 is used outside the loop)
   3. no uses of a1 in the loop besides the reduction operation.

   Condition 1 is tested here.
   Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.

   (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
   nested cycles, if CHECK_REDUCTION is false.

   (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
   reductions:

     a1 = phi < a0, a2 >
     inner loop (def of a3)
     a2 = phi < a3 >  */

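/* For instance, the C loop

        for (i = 0; i < N; i++)
          sum += a[i];

   appears in GIMPLE roughly as

   loop_header:
     sum_1 = PHI <sum_0 (preheader), sum_2 (latch)>
     ...
     t_3 = a[i];
     sum_2 = t_3 + sum_1;

   which matches pattern (1) above with a1 = sum_1, a3 = t_3, a2 = sum_2
   and operation = PLUS_EXPR.  */
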
gimple
vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
                          bool check_reduction, bool *double_reduc)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  edge latch_e = loop_latch_edge (loop);
  tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
  gimple def_stmt, def1 = NULL, def2 = NULL;
  enum tree_code code;
  tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
  tree type;
  int nloop_uses;
  tree name;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  bool phi_def;

  *double_reduc = false;

  /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization;
     otherwise, we assume outer loop vectorization.  */
  gcc_assert ((check_reduction && loop == vect_loop)
              || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));

  name = PHI_RESULT (phi);
  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
          && vinfo_for_stmt (use_stmt)
          && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
        nloop_uses++;
      if (nloop_uses > 1)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "reduction used in loop.");
          return NULL;
        }
    }

  if (TREE_CODE (loop_arg) != SSA_NAME)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: not ssa_name: ");
          print_generic_expr (vect_dump, loop_arg, TDF_SLIM);
        }
      return NULL;
    }

  def_stmt = SSA_NAME_DEF_STMT (loop_arg);
  if (!def_stmt)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "reduction: no def_stmt.");
      return NULL;
    }

  if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
      return NULL;
    }

  if (is_gimple_assign (def_stmt))
    {
      name = gimple_assign_lhs (def_stmt);
      phi_def = false;
    }
  else
    {
      name = PHI_RESULT (def_stmt);
      phi_def = true;
    }

  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
          && vinfo_for_stmt (use_stmt)
          && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
        nloop_uses++;
      if (nloop_uses > 1)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "reduction used in loop.");
          return NULL;
        }
    }

  /* If DEF_STMT is a phi node itself, we expect it to have a single argument
     defined in the inner loop.  */
  if (phi_def)
    {
      op1 = PHI_ARG_DEF (def_stmt, 0);

      if (gimple_phi_num_args (def_stmt) != 1
          || TREE_CODE (op1) != SSA_NAME)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "unsupported phi node definition.");

          return NULL;
        }

      def1 = SSA_NAME_DEF_STMT (op1);
      if (flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
          && loop->inner
          && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
          && is_gimple_assign (def1))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "detected double reduction: ");

          *double_reduc = true;
          return def_stmt;
        }

      return NULL;
    }

  code = gimple_assign_rhs_code (def_stmt);

  if (check_reduction
      && (!commutative_tree_code (code) || !associative_tree_code (code)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt,
                        "reduction: not commutative/associative: ");
      return NULL;
    }

  if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
    {
      if (code != COND_EXPR)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "reduction: not binary operation: ");

          return NULL;
        }

      op3 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
      if (COMPARISON_CLASS_P (op3))
        {
          op4 = TREE_OPERAND (op3, 1);
          op3 = TREE_OPERAND (op3, 0);
        }

      op1 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 1);
      op2 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 2);

      if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "reduction: uses not ssa_names: ");

          return NULL;
        }
    }
  else
    {
      op1 = gimple_assign_rhs1 (def_stmt);
      op2 = gimple_assign_rhs2 (def_stmt);

      if (TREE_CODE (op1) != SSA_NAME || TREE_CODE (op2) != SSA_NAME)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "reduction: uses not ssa_names: ");

          return NULL;
        }
    }

  type = TREE_TYPE (gimple_assign_lhs (def_stmt));
  if ((TREE_CODE (op1) == SSA_NAME
       && !types_compatible_p (type, TREE_TYPE (op1)))
      || (TREE_CODE (op2) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op2)))
      || (op3 && TREE_CODE (op3) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op3)))
      || (op4 && TREE_CODE (op4) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op4))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: multiple types: operation type: ");
          print_generic_expr (vect_dump, type, TDF_SLIM);
          fprintf (vect_dump, ", operands types: ");
          print_generic_expr (vect_dump, TREE_TYPE (op1), TDF_SLIM);
          fprintf (vect_dump, ",");
          print_generic_expr (vect_dump, TREE_TYPE (op2), TDF_SLIM);
          if (op3)
            {
              fprintf (vect_dump, ",");
              print_generic_expr (vect_dump, TREE_TYPE (op3), TDF_SLIM);
            }

          if (op4)
            {
              fprintf (vect_dump, ",");
              print_generic_expr (vect_dump, TREE_TYPE (op4), TDF_SLIM);
            }
        }

      return NULL;
    }
1773
1774 /* Check that it's ok to change the order of the computation.
1775 Generally, when vectorizing a reduction we change the order of the
1776 computation. This may change the behavior of the program in some
1777 cases, so we need to check that this is ok. One exception is when
1778 vectorizing an outer-loop: the inner-loop is executed sequentially,
1779 and therefore vectorizing reductions in the inner-loop during
1780 outer-loop vectorization is safe. */
1781
1782 /* CHECKME: check for !flag_finite_math_only too? */
1783 if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
1784 && check_reduction)
1785 {
1786 /* Changing the order of operations changes the semantics. */
1787 if (vect_print_dump_info (REPORT_DETAILS))
1788 report_vect_op (def_stmt, "reduction: unsafe fp math optimization: ");
1789 return NULL;
1790 }
1791 else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
1792 && check_reduction)
1793 {
1794 /* Changing the order of operations changes the semantics. */
1795 if (vect_print_dump_info (REPORT_DETAILS))
1796 report_vect_op (def_stmt, "reduction: unsafe int math optimization: ");
1797 return NULL;
1798 }
1799 else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction)
1800 {
1801 /* Changing the order of operations changes the semantics. */
1802 if (vect_print_dump_info (REPORT_DETAILS))
1803 report_vect_op (def_stmt,
1804 "reduction: unsafe fixed-point math optimization: ");
1805 return NULL;
1806 }
1807
1808 /* Reduction is safe. We're dealing with one of the following:
1809 1) integer arithmetic and no trapv
1810 2) floating point arithmetic, and special flags permit this optimization
1811 3) nested cycle (i.e., outer loop vectorization). */
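  /* Illustrative note (not part of the checks above): with IEEE floats,
     reassociating a sum changes results in general, e.g.
         (1e20f + -1e20f) + 1.0f == 1.0f
     but
         1e20f + (-1e20f + 1.0f) == 0.0f,
     which is why a float reduction such as "s += a[i]" is only accepted
     here under -fassociative-math.  */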
1812 if (TREE_CODE (op1) == SSA_NAME)
1813 def1 = SSA_NAME_DEF_STMT (op1);
1814
1815 if (TREE_CODE (op2) == SSA_NAME)
1816 def2 = SSA_NAME_DEF_STMT (op2);
1817
1818 if (code != COND_EXPR
1819 && (!def1 || !def2 || gimple_nop_p (def1) || gimple_nop_p (def2)))
1820 {
1821 if (vect_print_dump_info (REPORT_DETAILS))
1822 report_vect_op (def_stmt, "reduction: no defs for operands: ");
1823 return NULL;
1824 }
1825
1826 /* Check that one def is the reduction def, defined by PHI,
1827 and that the other def is either defined in the loop ("vect_internal_def")
1828 or is an induction (defined by a loop-header phi-node). */
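  /* For the canonical reduction "s = s + a[i]", one operand of the add is
     defined by the loop-header PHI of s and the other by the load of a[i]
     inside the loop (a vect_internal_def); the swap below merely
     normalizes the PHI-defined operand into the second position.  */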
1829
1830 if (def2 && def2 == phi
1831 && (code == COND_EXPR
1832 || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
1833 && (is_gimple_assign (def1)
1834 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
1835 == vect_induction_def
1836 || (gimple_code (def1) == GIMPLE_PHI
1837 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
1838 == vect_internal_def
1839 && !is_loop_header_bb_p (gimple_bb (def1)))))))
1840 {
1841 if (vect_print_dump_info (REPORT_DETAILS))
1842 report_vect_op (def_stmt, "detected reduction: ");
1843 return def_stmt;
1844 }
1845 else if (def1 && def1 == phi
1846 && (code == COND_EXPR
1847 || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
1848 && (is_gimple_assign (def2)
1849 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
1850 == vect_induction_def
1851 || (gimple_code (def2) == GIMPLE_PHI
1852 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
1853 == vect_internal_def
1854 && !is_loop_header_bb_p (gimple_bb (def2)))))))
1855 {
1856 if (check_reduction)
1857 {
1858 /* Swap operands (just for simplicity - so that the rest of the code
1859 can assume that the reduction variable is always the last (second)
1860 argument). */
1861 if (vect_print_dump_info (REPORT_DETAILS))
1862 report_vect_op (def_stmt,
1863 "detected reduction: need to swap operands: ");
1864
1865 swap_tree_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
1866 gimple_assign_rhs2_ptr (def_stmt));
1867 }
1868 else
1869 {
1870 if (vect_print_dump_info (REPORT_DETAILS))
1871 report_vect_op (def_stmt, "detected reduction: ");
1872 }
1873
1874 return def_stmt;
1875 }
1876 else
1877 {
1878 if (vect_print_dump_info (REPORT_DETAILS))
1879 report_vect_op (def_stmt, "reduction: unknown pattern: ");
1880
1881 return NULL;
1882 }
1883 }
1884
1885
1886 /* Function vect_estimate_min_profitable_iters
1887
1888 Return the number of iterations required for the vector version of the
1889 loop to be profitable relative to the cost of the scalar version of the
1890 loop.
1891
1892 TODO: Take profile info into account before making vectorization
1893 decisions, if available. */
1894
1895 int
1896 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
1897 {
1898 int i;
1899 int min_profitable_iters;
1900 int peel_iters_prologue;
1901 int peel_iters_epilogue;
1902 int vec_inside_cost = 0;
1903 int vec_outside_cost = 0;
1904 int scalar_single_iter_cost = 0;
1905 int scalar_outside_cost = 0;
1906 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1907 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1908 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1909 int nbbs = loop->num_nodes;
1910 int byte_misalign = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
1911 int peel_guard_costs = 0;
1912 int innerloop_iters = 0, factor;
1913 VEC (slp_instance, heap) *slp_instances;
1914 slp_instance instance;
1915
1916 /* Cost model disabled. */
1917 if (!flag_vect_cost_model)
1918 {
1919 if (vect_print_dump_info (REPORT_COST))
1920 fprintf (vect_dump, "cost model disabled.");
1921 return 0;
1922 }
1923
1924 /* Requires loop versioning tests to handle misalignment. */
1925 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
1926 {
1927 /* FIXME: Make cost depend on complexity of individual check. */
1928 vec_outside_cost +=
1929 VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
1930 if (vect_print_dump_info (REPORT_COST))
1931 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
1932 "versioning to treat misalignment.\n");
1933 }
1934
1935 /* Requires loop versioning with alias checks. */
1936 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
1937 {
1938 /* FIXME: Make cost depend on complexity of individual check. */
1939 vec_outside_cost +=
1940 VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
1941 if (vect_print_dump_info (REPORT_COST))
1942 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
1943 "versioning aliasing.\n");
1944 }
1945
1946 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
1947 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
1948 vec_outside_cost += TARG_COND_TAKEN_BRANCH_COST;
1949
1950 /* Count statements in scalar loop. Using this as scalar cost for a single
1951 iteration for now.
1952
1953 TODO: Add outer loop support.
1954
1955 TODO: Consider assigning different costs to different scalar
1956 statements. */
1957
1958 /* FORNOW. */
1959 if (loop->inner)
1960 innerloop_iters = 50; /* FIXME */
1961
1962 for (i = 0; i < nbbs; i++)
1963 {
1964 gimple_stmt_iterator si;
1965 basic_block bb = bbs[i];
1966
1967 if (bb->loop_father == loop->inner)
1968 factor = innerloop_iters;
1969 else
1970 factor = 1;
1971
1972 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1973 {
1974 gimple stmt = gsi_stmt (si);
1975 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1976 /* Skip stmts that are not vectorized inside the loop. */
1977 if (!STMT_VINFO_RELEVANT_P (stmt_info)
1978 && (!STMT_VINFO_LIVE_P (stmt_info)
1979 || STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def))
1980 continue;
1981 scalar_single_iter_cost += cost_for_stmt (stmt) * factor;
1982 vec_inside_cost += STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) * factor;
1983 /* FIXME: for stmts in the inner-loop in outer-loop vectorization,
1984 some of the "outside" costs are generated inside the outer-loop. */
1985 vec_outside_cost += STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info);
1986 }
1987 }
1988
1989 /* Add additional cost for the peeled instructions in prologue and epilogue
1990 loop.
1991
1992 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
1993 at compile-time - we assume it's vf/2 (the worst would be vf-1).
1994
1995 TODO: Build an expression that represents peel_iters for prologue and
1996 epilogue to be used in a run-time test. */
1997
1998 if (byte_misalign < 0)
1999 {
2000 peel_iters_prologue = vf/2;
2001 if (vect_print_dump_info (REPORT_COST))
2002 fprintf (vect_dump, "cost model: "
2003 "prologue peel iters set to vf/2.");
2004
2005 /* If peeling for alignment is unknown, the loop bound of the main loop
2006 becomes unknown. */
2007 peel_iters_epilogue = vf/2;
2008 if (vect_print_dump_info (REPORT_COST))
2009 fprintf (vect_dump, "cost model: "
2010 "epilogue peel iters set to vf/2 because "
2011 "peeling for alignment is unknown .");
2012
2013 /* If peeled iterations are unknown, count a taken branch and a not taken
2014 branch per peeled loop. Even if scalar loop iterations are known,
2015 vector iterations are not known since peeled prologue iterations are
2016 not known. Hence guards remain the same. */
2017 peel_guard_costs += 2 * (TARG_COND_TAKEN_BRANCH_COST
2018 + TARG_COND_NOT_TAKEN_BRANCH_COST);
2019 }
2020 else
2021 {
2022 if (byte_misalign)
2023 {
2024 struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
2025 int element_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
2026 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)));
2027 int nelements = TYPE_VECTOR_SUBPARTS (vectype);
2028
2029 peel_iters_prologue = nelements - (byte_misalign / element_size);
2030 }
2031 else
2032 peel_iters_prologue = 0;
2033
2034 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
2035 {
2036 peel_iters_epilogue = vf/2;
2037 if (vect_print_dump_info (REPORT_COST))
2038 fprintf (vect_dump, "cost model: "
2039 "epilogue peel iters set to vf/2 because "
2040 "loop iterations are unknown .");
2041
2042 /* If peeled iterations are known but the number of scalar loop
2043 iterations is unknown, count a taken branch per peeled loop. */
2044 peel_guard_costs += 2 * TARG_COND_TAKEN_BRANCH_COST;
2045
2046 }
2047 else
2048 {
2049 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
2050 peel_iters_prologue = niters < peel_iters_prologue ?
2051 niters : peel_iters_prologue;
2052 peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
2053 }
2054 }
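  /* Worked instance (hypothetical numbers): for int elements
     (element_size == 4) in a V4SI vector with byte_misalign == 8, the
     prologue peels 4 - 8/4 == 2 iterations; if niters == 103 is known,
     the epilogue then peels (103 - 2) % 4 == 1 iterations.  */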
2055
2056 vec_outside_cost += (peel_iters_prologue * scalar_single_iter_cost)
2057 + (peel_iters_epilogue * scalar_single_iter_cost)
2058 + peel_guard_costs;
2059
2060 /* FORNOW: The scalar outside cost is incremented in one of the
2061 following ways:
2062
2063 1. The vectorizer checks for alignment and aliasing and generates
2064 a condition that allows dynamic vectorization. A cost model
2065 check is ANDED with the versioning condition. Hence scalar code
2066 path now has the added cost of the versioning check.
2067
2068 if (cost > th & versioning_check)
2069 jmp to vector code
2070
2071 Hence run-time scalar is incremented by not-taken branch cost.
2072
2073 2. The vectorizer then checks if a prologue is required. If the
2074 cost model check was not done before during versioning, it has to
2075 be done before the prologue check.
2076
2077 if (cost <= th)
2078 prologue = scalar_iters
2079 if (prologue == 0)
2080 jmp to vector code
2081 else
2082 execute prologue
2083 if (prologue == num_iters)
2084 go to exit
2085
2086 Hence the run-time scalar cost is incremented by a taken branch,
2087 plus a not-taken branch, plus a taken branch cost.
2088
2089 3. The vectorizer then checks if an epilogue is required. If the
2090 cost model check was not done before during prologue check, it
2091 has to be done with the epilogue check.
2092
2093 if (prologue == 0)
2094 jmp to vector code
2095 else
2096 execute prologue
2097 if (prologue == num_iters)
2098 go to exit
2099 vector code:
2100 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
2101 jmp to epilogue
2102
2103 Hence the run-time scalar cost should be incremented by 2 taken
2104 branches.
2105
2106 TODO: The back end may reorder the BBs differently and reverse
2107 conditions/branch directions. Change the estimates below to
2108 something more reasonable. */
2109
2110 /* If the number of iterations is known and we do not do versioning, we can
2111 decide whether to vectorize at compile time. Hence the scalar version
2112 does not carry cost model guard costs. */
2113 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2114 || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2115 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2116 {
2117 /* Cost model check occurs at versioning. */
2118 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2119 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2120 scalar_outside_cost += TARG_COND_NOT_TAKEN_BRANCH_COST;
2121 else
2122 {
2123 /* Cost model check occurs at prologue generation. */
2124 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2125 scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST
2126 + TARG_COND_NOT_TAKEN_BRANCH_COST;
2127 /* Cost model check occurs at epilogue generation. */
2128 else
2129 scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST;
2130 }
2131 }
2132
2133 /* Add SLP costs. */
2134 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2135 for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++)
2136 {
2137 vec_outside_cost += SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (instance);
2138 vec_inside_cost += SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance);
2139 }
2140
2141 /* Calculate number of iterations required to make the vector version
2142 profitable, relative to the loop bodies only. The following condition
2143 must hold true:
2144 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
2145 where
2146 SIC = scalar iteration cost, VIC = vector iteration cost,
2147 VOC = vector outside cost, VF = vectorization factor,
2148 PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
2149 SOC = scalar outside cost for run time cost model check. */
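  /* Worked instance (illustrative costs only): with SIC == 4, VIC == 6,
     VOC == 10, SOC == 0, VF == 4 and PL_ITERS == EP_ITERS == 2, the
     division below gives ((10 - 0) * 4 - 6*2 - 6*2) / (4*4 - 6) == 1,
     and the correction step bumps this to 2 because 4*4*1 <= 6*1 + 10*4.
     The value is later raised to at least VF and decremented by one to
     form the run-time threshold.  */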
2150
2151 if ((scalar_single_iter_cost * vf) > vec_inside_cost)
2152 {
2153 if (vec_outside_cost <= 0)
2154 min_profitable_iters = 1;
2155 else
2156 {
2157 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
2158 - vec_inside_cost * peel_iters_prologue
2159 - vec_inside_cost * peel_iters_epilogue)
2160 / ((scalar_single_iter_cost * vf)
2161 - vec_inside_cost);
2162
2163 if ((scalar_single_iter_cost * vf * min_profitable_iters)
2164 <= ((vec_inside_cost * min_profitable_iters)
2165 + ((vec_outside_cost - scalar_outside_cost) * vf)))
2166 min_profitable_iters++;
2167 }
2168 }
2169 /* vector version will never be profitable. */
2170 else
2171 {
2172 if (vect_print_dump_info (REPORT_COST))
2173 fprintf (vect_dump, "cost model: vector iteration cost = %d "
2174 "is divisible by scalar iteration cost = %d by a factor "
2175 "greater than or equal to the vectorization factor = %d .",
2176 vec_inside_cost, scalar_single_iter_cost, vf);
2177 return -1;
2178 }
2179
2180 if (vect_print_dump_info (REPORT_COST))
2181 {
2182 fprintf (vect_dump, "Cost model analysis: \n");
2183 fprintf (vect_dump, " Vector inside of loop cost: %d\n",
2184 vec_inside_cost);
2185 fprintf (vect_dump, " Vector outside of loop cost: %d\n",
2186 vec_outside_cost);
2187 fprintf (vect_dump, " Scalar iteration cost: %d\n",
2188 scalar_single_iter_cost);
2189 fprintf (vect_dump, " Scalar outside cost: %d\n", scalar_outside_cost);
2190 fprintf (vect_dump, " prologue iterations: %d\n",
2191 peel_iters_prologue);
2192 fprintf (vect_dump, " epilogue iterations: %d\n",
2193 peel_iters_epilogue);
2194 fprintf (vect_dump, " Calculated minimum iters for profitability: %d\n",
2195 min_profitable_iters);
2196 }
2197
2198 min_profitable_iters =
2199 min_profitable_iters < vf ? vf : min_profitable_iters;
2200
2201 /* Because the condition we create is:
2202 if (niters <= min_profitable_iters)
2203 then skip the vectorized loop. */
2204 min_profitable_iters--;
2205
2206 if (vect_print_dump_info (REPORT_COST))
2207 fprintf (vect_dump, " Profitability threshold = %d\n",
2208 min_profitable_iters);
2209
2210 return min_profitable_iters;
2211 }
2212
2213
2214 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
2215 functions. Design better to avoid maintenance issues. */
2216
2217 /* Function vect_model_reduction_cost.
2218
2219 Models cost for a reduction operation, including the vector ops
2220 generated within the strip-mine loop, the initial definition before
2221 the loop, and the epilogue code that must be generated. */
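/* E.g., with hypothetical target costs of 1 per vector stmt and per
   extract, reducing a V4SI accumulator costs 1 stmt + 1 extract when a
   direct reduc_code is available; 2 * log2 (4) == 4 stmts + 1 extract
   via whole-vector shifts; and 2*4 - 1 == 7 stmts via scalar extracts
   (the three cases costed below).  */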
2222
2223 static bool
2224 vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
2225 int ncopies)
2226 {
2227 int outer_cost = 0;
2228 enum tree_code code;
2229 optab optab;
2230 tree vectype;
2231 gimple stmt, orig_stmt;
2232 tree reduction_op;
2233 enum machine_mode mode;
2234 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2235 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2236
2237
2238 /* Cost of reduction op inside loop. */
2239 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) += ncopies * TARG_VEC_STMT_COST;
2240
2241 stmt = STMT_VINFO_STMT (stmt_info);
2242
2243 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
2244 {
2245 case GIMPLE_SINGLE_RHS:
2246 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
2247 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
2248 break;
2249 case GIMPLE_UNARY_RHS:
2250 reduction_op = gimple_assign_rhs1 (stmt);
2251 break;
2252 case GIMPLE_BINARY_RHS:
2253 reduction_op = gimple_assign_rhs2 (stmt);
2254 break;
2255 default:
2256 gcc_unreachable ();
2257 }
2258
2259 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
2260 if (!vectype)
2261 {
2262 if (vect_print_dump_info (REPORT_COST))
2263 {
2264 fprintf (vect_dump, "unsupported data-type ");
2265 print_generic_expr (vect_dump, TREE_TYPE (reduction_op), TDF_SLIM);
2266 }
2267 return false;
2268 }
2269
2270 mode = TYPE_MODE (vectype);
2271 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2272
2273 if (!orig_stmt)
2274 orig_stmt = STMT_VINFO_STMT (stmt_info);
2275
2276 code = gimple_assign_rhs_code (orig_stmt);
2277
2278 /* Add in cost for initial definition. */
2279 outer_cost += TARG_SCALAR_TO_VEC_COST;
2280
2281 /* Determine cost of epilogue code.
2282
2283 We have a reduction operator that will reduce the vector in one statement.
2284 Also requires scalar extract. */
2285
2286 if (!nested_in_vect_loop_p (loop, orig_stmt))
2287 {
2288 if (reduc_code != ERROR_MARK)
2289 outer_cost += TARG_VEC_STMT_COST + TARG_VEC_TO_SCALAR_COST;
2290 else
2291 {
2292 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
2293 tree bitsize =
2294 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
2295 int element_bitsize = tree_low_cst (bitsize, 1);
2296 int nelements = vec_size_in_bits / element_bitsize;
2297
2298 optab = optab_for_tree_code (code, vectype, optab_default);
2299
2300 /* We have a whole vector shift available. */
2301 if (VECTOR_MODE_P (mode)
2302 && optab_handler (optab, mode)->insn_code != CODE_FOR_nothing
2303 && optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing)
2304 /* Final reduction via vector shifts and the reduction operator. Also
2305 requires scalar extract. */
2306 outer_cost += ((exact_log2 (nelements) * 2) * TARG_VEC_STMT_COST
2307 + TARG_VEC_TO_SCALAR_COST);
2308 else
2309 /* Use extracts and reduction op for final reduction. For N elements,
2310 we have N extracts and N-1 reduction ops. */
2311 outer_cost += ((nelements + nelements - 1) * TARG_VEC_STMT_COST);
2312 }
2313 }
2314
2315 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost;
2316
2317 if (vect_print_dump_info (REPORT_COST))
2318 fprintf (vect_dump, "vect_model_reduction_cost: inside_cost = %d, "
2319 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
2320 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
2321
2322 return true;
2323 }
2324
2325
2326 /* Function vect_model_induction_cost.
2327
2328 Models cost for induction operations. */
2329
2330 static void
2331 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
2332 {
2333 /* loop cost for vec_loop. */
2334 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = ncopies * TARG_VEC_STMT_COST;
2335 /* prologue cost for vec_init and vec_step. */
2336 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = 2 * TARG_SCALAR_TO_VEC_COST;
2337
2338 if (vect_print_dump_info (REPORT_COST))
2339 fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, "
2340 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
2341 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
2342 }
2343
2344
2345 /* Function get_initial_def_for_induction
2346
2347 Input:
2348 STMT - a stmt that performs an induction operation in the loop.
2349 IV_PHI - the initial value of the induction variable
2350
2351 Output:
2352 Return a vector variable, initialized with the first VF values of
2353 the induction variable. E.g., for an iv with IV_PHI='X' and
2354 evolution S, for a vector of 4 units, we want to return:
2355 [X, X + S, X + 2*S, X + 3*S]. */
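/* For instance (hypothetical values), with X == 0, S == 3 and VF == 4 the
   returned vector is [0, 3, 6, 9], and the step vector built below for
   the update is [VF*S, VF*S, VF*S, VF*S] == [12, 12, 12, 12].  */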
2356
2357 static tree
2358 get_initial_def_for_induction (gimple iv_phi)
2359 {
2360 stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
2361 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2362 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2363 tree scalar_type = TREE_TYPE (gimple_phi_result (iv_phi));
2364 tree vectype;
2365 int nunits;
2366 edge pe = loop_preheader_edge (loop);
2367 struct loop *iv_loop;
2368 basic_block new_bb;
2369 tree vec, vec_init, vec_step, t;
2370 tree access_fn;
2371 tree new_var;
2372 tree new_name;
2373 gimple init_stmt, induction_phi, new_stmt;
2374 tree induc_def, vec_def, vec_dest;
2375 tree init_expr, step_expr;
2376 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2377 int i;
2378 bool ok;
2379 int ncopies;
2380 tree expr;
2381 stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
2382 bool nested_in_vect_loop = false;
2383 gimple_seq stmts = NULL;
2384 imm_use_iterator imm_iter;
2385 use_operand_p use_p;
2386 gimple exit_phi;
2387 edge latch_e;
2388 tree loop_arg;
2389 gimple_stmt_iterator si;
2390 basic_block bb = gimple_bb (iv_phi);
2391 tree stepvectype;
2392
2393 vectype = get_vectype_for_scalar_type (scalar_type);
2394 gcc_assert (vectype);
2395 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2396 ncopies = vf / nunits;
2397
2398 gcc_assert (phi_info);
2399 gcc_assert (ncopies >= 1);
2400
2401 /* Find the first insertion point in the BB. */
2402 si = gsi_after_labels (bb);
2403
2404 if (INTEGRAL_TYPE_P (scalar_type))
2405 step_expr = build_int_cst (scalar_type, 0);
2406 else if (POINTER_TYPE_P (scalar_type))
2407 step_expr = build_int_cst (sizetype, 0);
2408 else
2409 step_expr = build_real (scalar_type, dconst0);
2410
2411 /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop? */
2412 if (nested_in_vect_loop_p (loop, iv_phi))
2413 {
2414 nested_in_vect_loop = true;
2415 iv_loop = loop->inner;
2416 }
2417 else
2418 iv_loop = loop;
2419 gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);
2420
2421 latch_e = loop_latch_edge (iv_loop);
2422 loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);
2423
2424 access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi));
2425 gcc_assert (access_fn);
2426 ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn,
2427 &init_expr, &step_expr);
2428 gcc_assert (ok);
2429 pe = loop_preheader_edge (iv_loop);
2430
2431 /* Create the vector that holds the initial_value of the induction. */
2432 if (nested_in_vect_loop)
2433 {
2434 /* iv_loop is nested in the loop to be vectorized. init_expr has already
2435 been created during vectorization of previous stmts; we obtain it from
2436 the STMT_VINFO_VEC_STMT of the defining stmt. */
2437 tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi,
2438 loop_preheader_edge (iv_loop));
2439 vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL);
2440 }
2441 else
2442 {
2443 /* iv_loop is the loop to be vectorized. Create:
2444 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
2445 new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_");
2446 add_referenced_var (new_var);
2447
2448 new_name = force_gimple_operand (init_expr, &stmts, false, new_var);
2449 if (stmts)
2450 {
2451 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2452 gcc_assert (!new_bb);
2453 }
2454
2455 t = NULL_TREE;
2456 t = tree_cons (NULL_TREE, init_expr, t);
2457 for (i = 1; i < nunits; i++)
2458 {
2459 /* Create: new_name_i = new_name + step_expr */
2460 enum tree_code code = POINTER_TYPE_P (scalar_type)
2461 ? POINTER_PLUS_EXPR : PLUS_EXPR;
2462 init_stmt = gimple_build_assign_with_ops (code, new_var,
2463 new_name, step_expr);
2464 new_name = make_ssa_name (new_var, init_stmt);
2465 gimple_assign_set_lhs (init_stmt, new_name);
2466
2467 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
2468 gcc_assert (!new_bb);
2469
2470 if (vect_print_dump_info (REPORT_DETAILS))
2471 {
2472 fprintf (vect_dump, "created new init_stmt: ");
2473 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
2474 }
2475 t = tree_cons (NULL_TREE, new_name, t);
2476 }
2477 /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */
2478 vec = build_constructor_from_list (vectype, nreverse (t));
2479 vec_init = vect_init_vector (iv_phi, vec, vectype, NULL);
2480 }
2481
2482
2483 /* Create the vector that holds the step of the induction. */
2484 if (nested_in_vect_loop)
2485 /* iv_loop is nested in the loop to be vectorized. Generate:
2486 vec_step = [S, S, S, S] */
2487 new_name = step_expr;
2488 else
2489 {
2490 /* iv_loop is the loop to be vectorized. Generate:
2491 vec_step = [VF*S, VF*S, VF*S, VF*S] */
2492 expr = build_int_cst (TREE_TYPE (step_expr), vf);
2493 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
2494 expr, step_expr);
2495 }
2496
2497 t = NULL_TREE;
2498 for (i = 0; i < nunits; i++)
2499 t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
2500 gcc_assert (CONSTANT_CLASS_P (new_name));
2501 stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
2502 gcc_assert (stepvectype);
2503 vec = build_vector (stepvectype, t);
2504 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
2505
2506
2507 /* Create the following def-use cycle:
2508 loop prolog:
2509 vec_init = ...
2510 vec_step = ...
2511 loop:
2512 vec_iv = PHI <vec_init, vec_loop>
2513 ...
2514 STMT
2515 ...
2516 vec_loop = vec_iv + vec_step; */
2517
2518 /* Create the induction-phi that defines the induction-operand. */
2519 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
2520 add_referenced_var (vec_dest);
2521 induction_phi = create_phi_node (vec_dest, iv_loop->header);
2522 set_vinfo_for_stmt (induction_phi,
2523 new_stmt_vec_info (induction_phi, loop_vinfo, NULL));
2524 induc_def = PHI_RESULT (induction_phi);
2525
2526 /* Create the iv update inside the loop */
2527 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
2528 induc_def, vec_step);
2529 vec_def = make_ssa_name (vec_dest, new_stmt);
2530 gimple_assign_set_lhs (new_stmt, vec_def);
2531 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
2532 set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
2533 NULL));
2534
2535 /* Set the arguments of the phi node: */
2536 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
2537 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
2538 UNKNOWN_LOCATION);
2539
2540
2541 /* In case that vectorization factor (VF) is bigger than the number
2542 of elements that we can fit in a vectype (nunits), we have to generate
2543 more than one vector stmt - i.e - we need to "unroll" the
2544 vector stmt by a factor VF/nunits. For more details see documentation
2545 in vectorizable_operation. */
2546
2547 if (ncopies > 1)
2548 {
2549 stmt_vec_info prev_stmt_vinfo;
2550 /* FORNOW. This restriction should be relaxed. */
2551 gcc_assert (!nested_in_vect_loop);
2552
2553 /* Create the vector that holds the step of the induction. */
2554 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
2555 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
2556 expr, step_expr);
2557 t = NULL_TREE;
2558 for (i = 0; i < nunits; i++)
2559 t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
2560 gcc_assert (CONSTANT_CLASS_P (new_name));
2561 vec = build_vector (stepvectype, t);
2562 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
2563
2564 vec_def = induc_def;
2565 prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
2566 for (i = 1; i < ncopies; i++)
2567 {
2568 /* vec_i = vec_prev + vec_step */
2569 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
2570 vec_def, vec_step);
2571 vec_def = make_ssa_name (vec_dest, new_stmt);
2572 gimple_assign_set_lhs (new_stmt, vec_def);
2573
2574 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
2575 set_vinfo_for_stmt (new_stmt,
2576 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
2577 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
2578 prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
2579 }
2580 }
2581
2582 if (nested_in_vect_loop)
2583 {
2584 /* Find the loop-closed exit-phi of the induction, and record
2585 the final vector of induction results: */
2586 exit_phi = NULL;
2587 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
2588 {
2589 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (USE_STMT (use_p))))
2590 {
2591 exit_phi = USE_STMT (use_p);
2592 break;
2593 }
2594 }
2595 if (exit_phi)
2596 {
2597 stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
2598 /* FORNOW. Currently not supporting the case that an inner-loop induction
2599 is not used in the outer-loop (i.e. only outside the outer-loop). */
2600 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
2601 && !STMT_VINFO_LIVE_P (stmt_vinfo));
2602
2603 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
2604 if (vect_print_dump_info (REPORT_DETAILS))
2605 {
2606 fprintf (vect_dump, "vector of inductions after inner-loop:");
2607 print_gimple_stmt (vect_dump, new_stmt, 0, TDF_SLIM);
2608 }
2609 }
2610 }
2611
2612
2613 if (vect_print_dump_info (REPORT_DETAILS))
2614 {
2615 fprintf (vect_dump, "transform induction: created def-use cycle: ");
2616 print_gimple_stmt (vect_dump, induction_phi, 0, TDF_SLIM);
2617 fprintf (vect_dump, "\n");
2618 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (vec_def), 0, TDF_SLIM);
2619 }
2620
2621 STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
2622 return induc_def;
2623 }
2624
2625
2626 /* Function get_initial_def_for_reduction
2627
2628 Input:
2629 STMT - a stmt that performs a reduction operation in the loop.
2630 INIT_VAL - the initial value of the reduction variable
2631
2632 Output:
2633 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
2634 of the reduction (used for adjusting the epilog - see below).
2635 Return a vector variable, initialized according to the operation that STMT
2636 performs. This vector will be used as the initial value of the
2637 vector of partial results.
2638
2639 Option1 (adjust in epilog): Initialize the vector as follows:
2640 add/bit or/xor: [0,0,...,0,0]
2641 mult/bit and: [1,1,...,1,1]
2642 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
2643 and when necessary (e.g. add/mult case) let the caller know
2644 that it needs to adjust the result by init_val.
2645
2646 Option2: Initialize the vector as follows:
2647 add/bit or/xor: [init_val,0,0,...,0]
2648 mult/bit and: [init_val,1,1,...,1]
2649 min/max/cond_expr: [init_val,init_val,...,init_val]
2650 and no adjustments are needed.
2651
2652 For example, for the following code:
2653
2654 s = init_val;
2655 for (i=0;i<n;i++)
2656 s = s + a[i];
2657
2658 STMT is 's = s + a[i]', and the reduction variable is 's'.
2659 For a vector of 4 units, we want to return either [0,0,0,init_val],
2660 or [0,0,0,0] and let the caller know that it needs to adjust
2661 the result at the end by 'init_val'.
2662
2663 FORNOW, we are using the 'adjust in epilog' scheme, because this way the
2664 initialization vector is simpler (same element in all entries), if
2665 ADJUSTMENT_DEF is not NULL, and Option2 otherwise.
2666
2667 A cost model should help decide between these two schemes. */
2668
2669 tree
2670 get_initial_def_for_reduction (gimple stmt, tree init_val,
2671 tree *adjustment_def)
2672 {
2673 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2674 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2675 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2676 tree scalar_type = TREE_TYPE (init_val);
2677 tree vectype = get_vectype_for_scalar_type (scalar_type);
2678 int nunits;
2679 enum tree_code code = gimple_assign_rhs_code (stmt);
2680 tree def_for_init;
2681 tree init_def;
2682 tree t = NULL_TREE;
2683 int i;
2684 bool nested_in_vect_loop = false;
2685 tree init_value;
2686 REAL_VALUE_TYPE real_init_val = dconst0;
2687 int int_init_val = 0;
2688 gimple def_stmt = NULL;
2689
2690 gcc_assert (vectype);
2691 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2692
2693 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
2694 || SCALAR_FLOAT_TYPE_P (scalar_type));
2695
2696 if (nested_in_vect_loop_p (loop, stmt))
2697 nested_in_vect_loop = true;
2698 else
2699 gcc_assert (loop == (gimple_bb (stmt))->loop_father);
2700
2701 /* In case of double reduction we only create a vector variable to be put
2702 in the reduction phi node. The actual statement creation is done in
2703 vect_create_epilog_for_reduction. */
2704 if (adjustment_def && nested_in_vect_loop
2705 && TREE_CODE (init_val) == SSA_NAME
2706 && (def_stmt = SSA_NAME_DEF_STMT (init_val))
2707 && gimple_code (def_stmt) == GIMPLE_PHI
2708 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2709 && vinfo_for_stmt (def_stmt)
2710 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2711 == vect_double_reduction_def)
2712 {
2713 *adjustment_def = NULL;
2714 return vect_create_destination_var (init_val, vectype);
2715 }
2716
2717 if (TREE_CONSTANT (init_val))
2718 {
2719 if (SCALAR_FLOAT_TYPE_P (scalar_type))
2720 init_value = build_real (scalar_type, TREE_REAL_CST (init_val));
2721 else
2722 init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val));
2723 }
2724 else
2725 init_value = init_val;
2726
2727 switch (code)
2728 {
2729 case WIDEN_SUM_EXPR:
2730 case DOT_PROD_EXPR:
2731 case PLUS_EXPR:
2732 case MINUS_EXPR:
2733 case BIT_IOR_EXPR:
2734 case BIT_XOR_EXPR:
2735 case MULT_EXPR:
2736 case BIT_AND_EXPR:
2737 /* ADJUSTMENT_DEF is NULL when called from
2738 vect_create_epilog_for_reduction to vectorize double reduction. */
2739 if (adjustment_def)
2740 {
2741 if (nested_in_vect_loop)
2742 *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt,
2743 NULL);
2744 else
2745 *adjustment_def = init_val;
2746 }
2747
2748 if (code == MULT_EXPR || code == BIT_AND_EXPR)
2749 {
2750 real_init_val = dconst1;
2751 int_init_val = 1;
2752 }
2753
2754 if (SCALAR_FLOAT_TYPE_P (scalar_type))
2755 def_for_init = build_real (scalar_type, real_init_val);
2756 else
2757 def_for_init = build_int_cst (scalar_type, int_init_val);
2758
2759 /* Create a vector of '0' or '1' except the first element. */
2760 for (i = nunits - 2; i >= 0; --i)
2761 t = tree_cons (NULL_TREE, def_for_init, t);
2762
2763 /* Option1: the first element is '0' or '1' as well. */
2764 if (adjustment_def)
2765 {
2766 t = tree_cons (NULL_TREE, def_for_init, t);
2767 init_def = build_vector (vectype, t);
2768 break;
2769 }
2770
2771 /* Option2: the first element is INIT_VAL. */
2772 t = tree_cons (NULL_TREE, init_value, t);
2773 if (TREE_CONSTANT (init_val))
2774 init_def = build_vector (vectype, t);
2775 else
2776 init_def = build_constructor_from_list (vectype, t);
2777
2778 break;
2779
2780 case MIN_EXPR:
2781 case MAX_EXPR:
2782 case COND_EXPR:
2783 if (adjustment_def)
2784 {
2785 *adjustment_def = NULL_TREE;
2786 init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
2787 break;
2788 }
2789
2790 for (i = nunits - 1; i >= 0; --i)
2791 t = tree_cons (NULL_TREE, init_value, t);
2792
2793 if (TREE_CONSTANT (init_val))
2794 init_def = build_vector (vectype, t);
2795 else
2796 init_def = build_constructor_from_list (vectype, t);
2797
2798 break;
2799
2800 default:
2801 gcc_unreachable ();
2802 }
2803
2804 return init_def;
2805 }
2806
2807
2808 /* Function vect_create_epilog_for_reduction
2809
2810 Create code at the loop-epilog to finalize the result of a reduction
2811 computation.
2812
2813 VECT_DEF is a vector of partial results.
2814 REDUC_CODE is the tree-code for the epilog reduction.
2815 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
2816 number of elements that we can fit in a vectype (nunits). In this case
2817 we have to generate more than one vector stmt - i.e - we need to "unroll"
2818 the vector stmt by a factor VF/nunits. For more details see documentation
2819 in vectorizable_operation.
2820 STMT is the scalar reduction stmt that is being vectorized.
2821 REDUCTION_PHI is the phi-node that carries the reduction computation.
2822 REDUC_INDEX is the index of the operand in the right hand side of the
2823 statement that is defined by REDUCTION_PHI.
2824 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
2825
2826 This function:
2827 1. Creates the reduction def-use cycle: sets the arguments for
2828 REDUCTION_PHI:
2829 The loop-entry argument is the vectorized initial-value of the reduction.
2830 The loop-latch argument is VECT_DEF - the vector of partial sums.
2831 2. "Reduces" the vector of partial results VECT_DEF into a single result,
2832 by applying the operation specified by REDUC_CODE if available, or by
2833 other means (whole-vector shifts or a scalar loop).
2834 The function also creates a new phi node at the loop exit to preserve
2835 loop-closed form, as illustrated below.
2836
2837 The flow at the entry to this function:
2838
2839 loop:
2840 vec_def = phi <null, null> # REDUCTION_PHI
2841 VECT_DEF = vector_stmt # vectorized form of STMT
2842 s_loop = scalar_stmt # (scalar) STMT
2843 loop_exit:
2844 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
2845 use <s_out0>
2846 use <s_out0>
2847
2848 The above is transformed by this function into:
2849
2850 loop:
2851 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
2852 VECT_DEF = vector_stmt # vectorized form of STMT
2853 s_loop = scalar_stmt # (scalar) STMT
2854 loop_exit:
2855 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
2856 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
2857 v_out2 = reduce <v_out1>
2858 s_out3 = extract_field <v_out2, 0>
2859 s_out4 = adjust_result <s_out3>
2860 use <s_out4>
2861 use <s_out4>
2862 */
2863
2864 static void
2865 vect_create_epilog_for_reduction (tree vect_def, gimple stmt,
2866 int ncopies,
2867 enum tree_code reduc_code,
2868 gimple reduction_phi,
2869 int reduc_index,
2870 bool double_reduc)
2871 {
2872 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2873 stmt_vec_info prev_phi_info;
2874 tree vectype;
2875 enum machine_mode mode;
2876 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2877 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
2878 basic_block exit_bb;
2879 tree scalar_dest;
2880 tree scalar_type;
2881 gimple new_phi = NULL, phi;
2882 gimple_stmt_iterator exit_gsi;
2883 tree vec_dest;
2884 tree new_temp = NULL_TREE;
2885 tree new_name;
2886 gimple epilog_stmt = NULL;
2887 tree new_scalar_dest, new_dest;
2888 gimple exit_phi;
2889 tree bitsize, bitpos, bytesize;
2890 enum tree_code code = gimple_assign_rhs_code (stmt);
2891 tree adjustment_def;
2892 tree vec_initial_def, def;
2893 tree orig_name;
2894 imm_use_iterator imm_iter;
2895 use_operand_p use_p;
2896 bool extract_scalar_result = false;
2897 tree reduction_op, expr;
2898 gimple orig_stmt;
2899 gimple use_stmt;
2900 bool nested_in_vect_loop = false;
2901 VEC(gimple,heap) *phis = NULL;
2902 enum vect_def_type dt = vect_unknown_def_type;
2903 int j, i;
2904
2905 if (nested_in_vect_loop_p (loop, stmt))
2906 {
2907 outer_loop = loop;
2908 loop = loop->inner;
2909 nested_in_vect_loop = true;
2910 }
2911
2912 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
2913 {
2914 case GIMPLE_SINGLE_RHS:
2915 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
2916 == ternary_op);
2917 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
2918 break;
2919 case GIMPLE_UNARY_RHS:
2920 reduction_op = gimple_assign_rhs1 (stmt);
2921 break;
2922 case GIMPLE_BINARY_RHS:
2923 reduction_op = reduc_index ?
2924 gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
2925 break;
2926 default:
2927 gcc_unreachable ();
2928 }
2929
2930 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
2931 gcc_assert (vectype);
2932 mode = TYPE_MODE (vectype);
2933
2934 /*** 1. Create the reduction def-use cycle ***/
2935
2936 /* For the case of reduction, vect_get_vec_def_for_operand returns
2937 the scalar def before the loop, that defines the initial value
2938 of the reduction variable. */
2939 vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
2940 &adjustment_def);
2941
2942 phi = reduction_phi;
2943 def = vect_def;
2944 for (j = 0; j < ncopies; j++)
2945 {
2946 /* 1.1 set the loop-entry arg of the reduction-phi: */
2947 add_phi_arg (phi, vec_initial_def, loop_preheader_edge (loop),
2948 UNKNOWN_LOCATION);
2949
2950 /* 1.2 set the loop-latch arg for the reduction-phi: */
2951 if (j > 0)
2952 def = vect_get_vec_def_for_stmt_copy (dt, def);
2953 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
2954
2955 if (vect_print_dump_info (REPORT_DETAILS))
2956 {
2957 fprintf (vect_dump, "transform reduction: created def-use cycle: ");
2958 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
2959 fprintf (vect_dump, "\n");
2960 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (def), 0, TDF_SLIM);
2961 }
2962
2963 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
2964 }
2965
2966 /*** 2. Create epilog code
2967 The reduction epilog code operates across the elements of the vector
2968 of partial results computed by the vectorized loop.
2969 The reduction epilog code consists of:
2970 step 1: compute the scalar result in a vector (v_out2)
2971 step 2: extract the scalar result (s_out3) from the vector (v_out2)
2972 step 3: adjust the scalar result (s_out3) if needed.
2973
2974 Step 1 can be accomplished using one of the following three schemes:
2975 (scheme 1) using reduc_code, if available.
2976 (scheme 2) using whole-vector shifts, if available.
2977 (scheme 3) using a scalar loop. In this case steps 1+2 above are
2978 combined.
2979
2980 The overall epilog code looks like this:
2981
2982 s_out0 = phi <s_loop> # original EXIT_PHI
2983 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
2984 v_out2 = reduce <v_out1> # step 1
2985 s_out3 = extract_field <v_out2, 0> # step 2
2986 s_out4 = adjust_result <s_out3> # step 3
2987
2988 (step 3 is optional, and steps 1 and 2 may be combined).
2989 Lastly, the uses of s_out0 are replaced by s_out4.
2990
2991 ***/
2992
2993 /* 2.1 Create new loop-exit-phi to preserve loop-closed form:
2994 v_out1 = phi <v_loop> */
2995
2996 exit_bb = single_exit (loop)->dest;
2997 def = vect_def;
2998 prev_phi_info = NULL;
2999 for (j = 0; j < ncopies; j++)
3000 {
3001 phi = create_phi_node (SSA_NAME_VAR (vect_def), exit_bb);
3002 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL));
3003 if (j == 0)
3004 new_phi = phi;
3005 else
3006 {
3007 def = vect_get_vec_def_for_stmt_copy (dt, def);
3008 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
3009 }
3010 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
3011 prev_phi_info = vinfo_for_stmt (phi);
3012 }
3013
3014 exit_gsi = gsi_after_labels (exit_bb);
3015
3016 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
3017 (i.e. when reduc_code is not available) and in the final adjustment
3018 code (if needed). Also get the original scalar reduction variable as
3019 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
3020 represents a reduction pattern), the tree-code and scalar-def are
3021 taken from the original stmt that the pattern-stmt (STMT) replaces.
3022 Otherwise (it is a regular reduction) - the tree-code and scalar-def
3023 are taken from STMT. */
3024
3025 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3026 if (!orig_stmt)
3027 {
3028 /* Regular reduction */
3029 orig_stmt = stmt;
3030 }
3031 else
3032 {
3033 /* Reduction pattern */
3034 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
3035 gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
3036 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
3037 }
3038
3039 code = gimple_assign_rhs_code (orig_stmt);
3040 scalar_dest = gimple_assign_lhs (orig_stmt);
3041 scalar_type = TREE_TYPE (scalar_dest);
3042 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
3043 bitsize = TYPE_SIZE (scalar_type);
3044 bytesize = TYPE_SIZE_UNIT (scalar_type);
3045
3046 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
3047 partial results are added and not subtracted. */
3048 if (code == MINUS_EXPR)
3049 code = PLUS_EXPR;
3050
3051 /* In case this is a reduction in an inner-loop while vectorizing an outer
3052 loop - we don't need to extract a single scalar result at the end of the
3053 inner-loop (unless it is double reduction, i.e., the use of reduction is
3054 outside the outer-loop). The final vector of partial results will be used
3055 in the vectorized outer-loop, or reduced to a scalar result at the end of
3056 the outer-loop. */
3057 if (nested_in_vect_loop && !double_reduc)
3058 goto vect_finalize_reduction;
3059
3060 /* The epilogue is created for the outer-loop, i.e., for the loop being
3061 vectorized. */
3062 if (double_reduc)
3063 loop = outer_loop;
3064
3065 /* FORNOW */
3066 gcc_assert (ncopies == 1);
3067
3068 /* 2.3 Create the reduction code, using one of the three schemes described
3069 above. */
3070
3071 if (reduc_code != ERROR_MARK)
3072 {
3073 tree tmp;
3074
3075 /*** Case 1: Create:
3076 v_out2 = reduc_expr <v_out1> */
3077
3078 if (vect_print_dump_info (REPORT_DETAILS))
3079 fprintf (vect_dump, "Reduce using direct vector reduction.");
3080
3081 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3082 tmp = build1 (reduc_code, vectype, PHI_RESULT (new_phi));
3083 epilog_stmt = gimple_build_assign (vec_dest, tmp);
3084 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3085 gimple_assign_set_lhs (epilog_stmt, new_temp);
3086 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3087
3088 extract_scalar_result = true;
3089 }
3090 else
3091 {
3092 enum tree_code shift_code = ERROR_MARK;
3093 bool have_whole_vector_shift = true;
3094 int bit_offset;
3095 int element_bitsize = tree_low_cst (bitsize, 1);
3096 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3097 tree vec_temp;
3098
3099 if (optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing)
3100 shift_code = VEC_RSHIFT_EXPR;
3101 else
3102 have_whole_vector_shift = false;
3103
3104 /* Regardless of whether we have a whole vector shift, if we're
3105 emulating the operation via tree-vect-generic, we don't want
3106 to use it. Only the first round of the reduction is likely
3107 to still be profitable via emulation. */
3108 /* ??? It might be better to emit a reduction tree code here, so that
3109 tree-vect-generic can expand the first round via bit tricks. */
3110 if (!VECTOR_MODE_P (mode))
3111 have_whole_vector_shift = false;
3112 else
3113 {
3114 optab optab = optab_for_tree_code (code, vectype, optab_default);
3115 if (optab_handler (optab, mode)->insn_code == CODE_FOR_nothing)
3116 have_whole_vector_shift = false;
3117 }
3118
3119 if (have_whole_vector_shift)
3120 {
3121 /*** Case 2: Create:
3122 for (offset = VS/2; offset >= element_size; offset/=2)
3123 {
3124 Create: va' = vec_shift <va, offset>
3125 Create: va = vop <va, va'>
3126 } */
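	  /* Illustrative trace for a V4SI sum {a0, a1, a2, a3}: shifting by
	     half the vector and adding gives {a0+a2, a1+a3, _, _}; shifting
	     by one element and adding again leaves the full sum in the low
	     element, which is extracted afterwards.  */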
3127
3128 if (vect_print_dump_info (REPORT_DETAILS))
3129 fprintf (vect_dump, "Reduce using vector shifts");
3130
3131 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3132 new_temp = PHI_RESULT (new_phi);
3133
3134 for (bit_offset = vec_size_in_bits/2;
3135 bit_offset >= element_bitsize;
3136 bit_offset /= 2)
3137 {
3138 tree bitpos = size_int (bit_offset);
3139
3140 epilog_stmt = gimple_build_assign_with_ops (shift_code, vec_dest,
3141 new_temp, bitpos);
3142 new_name = make_ssa_name (vec_dest, epilog_stmt);
3143 gimple_assign_set_lhs (epilog_stmt, new_name);
3144 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3145
3146 epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
3147 new_name, new_temp);
3148 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3149 gimple_assign_set_lhs (epilog_stmt, new_temp);
3150 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3151 }
3152
3153 extract_scalar_result = true;
3154 }
3155 else
3156 {
3157 tree rhs;
3158
3159 /*** Case 3: Create:
3160 s = extract_field <v_out2, 0>
3161 for (offset = element_size;
3162 offset < vector_size;
3163 offset += element_size;)
3164 {
3165 Create: s' = extract_field <v_out2, offset>
3166 Create: s = op <s, s'>
3167 } */
3168
3169 if (vect_print_dump_info (REPORT_DETAILS))
3170 fprintf (vect_dump, "Reduce using scalar code. ");
3171
3172 vec_temp = PHI_RESULT (new_phi);
3173 vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3174 rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
3175 bitsize_zero_node);
3176 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3177 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3178 gimple_assign_set_lhs (epilog_stmt, new_temp);
3179 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3180
3181 for (bit_offset = element_bitsize;
3182 bit_offset < vec_size_in_bits;
3183 bit_offset += element_bitsize)
3184 {
3185 tree bitpos = bitsize_int (bit_offset);
3186 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
3187 bitpos);
3188
3189 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3190 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
3191 gimple_assign_set_lhs (epilog_stmt, new_name);
3192 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3193
3194 epilog_stmt = gimple_build_assign_with_ops (code,
3195 new_scalar_dest,
3196 new_name, new_temp);
3197 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3198 gimple_assign_set_lhs (epilog_stmt, new_temp);
3199 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3200 }
3201
3202 extract_scalar_result = false;
3203 }
3204 }
3205
3206 /* 2.4 Extract the final scalar result. Create:
3207 s_out3 = extract_field <v_out2, bitpos> */
3208
3209 if (extract_scalar_result)
3210 {
3211 tree rhs;
3212
3213 gcc_assert (!nested_in_vect_loop || double_reduc);
3214 if (vect_print_dump_info (REPORT_DETAILS))
3215 fprintf (vect_dump, "extract scalar result");
3216
3217 if (BYTES_BIG_ENDIAN)
3218 bitpos = size_binop (MULT_EXPR,
3219 bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
3220 TYPE_SIZE (scalar_type));
3221 else
3222 bitpos = bitsize_zero_node;
3223
3224 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
3225 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3226 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3227 gimple_assign_set_lhs (epilog_stmt, new_temp);
3228 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3229 }
3230
3231 vect_finalize_reduction:
3232
3233 if (double_reduc)
3234 loop = loop->inner;
3235
3236 /* 2.5 Adjust the final result by the initial value of the reduction
3237 variable. (When such adjustment is not needed, then
3238 'adjustment_def' is zero). For example, if code is PLUS we create:
3239 new_temp = loop_exit_def + adjustment_def */
3240
3241 if (adjustment_def)
3242 {
3243 if (nested_in_vect_loop)
3244 {
3245 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
3246 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
3247 new_dest = vect_create_destination_var (scalar_dest, vectype);
3248 }
3249 else
3250 {
3251 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
3252 expr = build2 (code, scalar_type, new_temp, adjustment_def);
3253 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
3254 }
3255
3256 epilog_stmt = gimple_build_assign (new_dest, expr);
3257 new_temp = make_ssa_name (new_dest, epilog_stmt);
3258 gimple_assign_set_lhs (epilog_stmt, new_temp);
3259 SSA_NAME_DEF_STMT (new_temp) = epilog_stmt;
3260 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3261 }
3262
3263
3264 /* 2.6 Handle the loop-exit phi */
3265
3266 /* Replace uses of s_out0 with uses of s_out3:
3267 Find the loop-closed-use at the loop exit of the original scalar result.
3268 (The reduction result is expected to have two immediate uses - one at the
3269 latch block, and one at the loop exit). */
3270 phis = VEC_alloc (gimple, heap, 10);
3271 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
3272 {
3273 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
3274 {
3275 exit_phi = USE_STMT (use_p);
3276 VEC_quick_push (gimple, phis, exit_phi);
3277 }
3278 }
3279
3280 /* We expect to have found an exit_phi because of loop-closed-ssa form. */
3281 gcc_assert (!VEC_empty (gimple, phis));
3282
3283 for (i = 0; VEC_iterate (gimple, phis, i, exit_phi); i++)
3284 {
3285 if (nested_in_vect_loop)
3286 {
3287 stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
3288 gimple vect_phi;
3289
3290 /* FORNOW. Currently not supporting the case that an inner-loop
3291 reduction is not used in the outer-loop (but only outside the
3292 outer-loop), unless it is double reduction. */
3293 gcc_assert ((STMT_VINFO_RELEVANT_P (stmt_vinfo)
3294 && !STMT_VINFO_LIVE_P (stmt_vinfo)) || double_reduc);
3295
3296 epilog_stmt = adjustment_def ? epilog_stmt : new_phi;
3297 STMT_VINFO_VEC_STMT (stmt_vinfo) = epilog_stmt;
3298 set_vinfo_for_stmt (epilog_stmt,
3299 new_stmt_vec_info (epilog_stmt, loop_vinfo,
3300 NULL));
3301 if (adjustment_def)
3302 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
3303 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
3304
3305 if (!double_reduc
3306 || STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_double_reduction_def)
3307 continue;
3308
3309 /* Handle double reduction:
3310
3311 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
3312 stmt2: s3 = phi <s1, s4> - (regular) reduction phi (inner loop)
3313 stmt3: s4 = use (s3) - (regular) reduction stmt (inner loop)
3314 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
3315
3316 At that point the regular reduction (stmt2 and stmt3) is already
3317 vectorized, as well as the exit phi node, stmt4.
3318 Here we vectorize the phi node of double reduction, stmt1, and
3319 update all relevant statements. */
3320
3321 /* Go through all the uses of s2 to find the double reduction phi node,
3322 i.e., stmt1 above. */
3323 orig_name = PHI_RESULT (exit_phi);
3324 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
3325 {
3326 stmt_vec_info use_stmt_vinfo = vinfo_for_stmt (use_stmt);
3327 stmt_vec_info new_phi_vinfo;
3328 tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
3329 basic_block bb = gimple_bb (use_stmt);
3330 gimple use;
3331
3332 /* Check that USE_STMT is really a double reduction phi node. */
3333 if (gimple_code (use_stmt) != GIMPLE_PHI
3334 || gimple_phi_num_args (use_stmt) != 2
3335 || !use_stmt_vinfo
3336 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
3337 != vect_double_reduction_def
3338 || bb->loop_father != outer_loop)
3339 continue;
3340
3341 /* Create vector phi node for double reduction:
3342 vs1 = phi <vs0, vs2>
3343 vs1 was created previously in this function by a call to
3344 vect_get_vec_def_for_operand and is stored in vec_initial_def;
3345 vs2 is defined by EPILOG_STMT, the vectorized EXIT_PHI;
3346 vs0 is created here. */
3347
3348 /* Create vector phi node. */
3349 vect_phi = create_phi_node (vec_initial_def, bb);
3350 new_phi_vinfo = new_stmt_vec_info (vect_phi,
3351 loop_vec_info_for_loop (outer_loop), NULL);
3352 set_vinfo_for_stmt (vect_phi, new_phi_vinfo);
3353
3354 /* Create vs0 - initial def of the double reduction phi. */
3355 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
3356 loop_preheader_edge (outer_loop));
3357 init_def = get_initial_def_for_reduction (stmt, preheader_arg,
3358 NULL);
3359 vect_phi_init = vect_init_vector (use_stmt, init_def, vectype,
3360 NULL);
3361
3362 /* Update phi node arguments with vs0 and vs2. */
3363 add_phi_arg (vect_phi, vect_phi_init,
3364 loop_preheader_edge (outer_loop), UNKNOWN_LOCATION);
3365 add_phi_arg (vect_phi, PHI_RESULT (epilog_stmt),
3366 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
3367 if (vect_print_dump_info (REPORT_DETAILS))
3368 {
3369 fprintf (vect_dump, "created double reduction phi node: ");
3370 print_gimple_stmt (vect_dump, vect_phi, 0, TDF_SLIM);
3371 }
3372
3373 vect_phi_res = PHI_RESULT (vect_phi);
3374
3375 /* Replace the use, i.e., set the correct vs1 in the regular
3376 reduction phi node. FORNOW, NCOPIES is always 1, so the loop
3377 is redundant. */
3378 use = reduction_phi;
3379 for (j = 0; j < ncopies; j++)
3380 {
3381 edge pr_edge = loop_preheader_edge (loop);
3382 SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
3383 use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
3384 }
3385 }
3386 }
3387
3388 /* Replace the uses: */
3389 orig_name = PHI_RESULT (exit_phi);
3390 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
3391 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
3392 SET_USE (use_p, new_temp);
3393 }
3394
3395 VEC_free (gimple, heap, phis);
3396 }
3397
3398
3399 /* Function vectorizable_reduction.
3400
3401 Check if STMT performs a reduction operation that can be vectorized.
3402 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3403 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3404 Return FALSE if not a vectorizable STMT, TRUE otherwise.
3405
3406 This function also handles reduction idioms (patterns) that have been
3407 recognized in advance during vect_pattern_recog. In this case, STMT may be
3408 of this form:
3409 X = pattern_expr (arg0, arg1, ..., X)
3410 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
3411 sequence that had been detected and replaced by the pattern-stmt (STMT).
3412
3413 In some cases of reduction patterns, the type of the reduction variable X is
3414 different than the type of the other arguments of STMT.
3415 In such cases, the vectype that is used when transforming STMT into a vector
3416 stmt is different than the vectype that is used to determine the
3417 vectorization factor, because it consists of a different number of elements
3418 than the actual number of elements that are being operated upon in parallel.
3419
3420 For example, consider an accumulation of shorts into an int accumulator.
3421 On some targets it's possible to vectorize this pattern operating on 8
3422 shorts at a time (hence, the vectype for purposes of determining the
3423 vectorization factor should be V8HI); on the other hand, the vectype that
3424 is used to create the vector form is actually V4SI (the type of the result).
3425
3426 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
3427 indicates the actual level of parallelism (V8HI in the example), so
3428 that the right vectorization factor is derived. This vectype
3429 corresponds to the type of arguments to the reduction stmt, and should *NOT*
3430 be used to create the vectorized stmt. The right vectype for the vectorized
3431 stmt is obtained from the type of the result X:
3432 get_vectype_for_scalar_type (TREE_TYPE (X))
3433
3434 This means that, contrary to "regular" reductions (or "regular" stmts in
3435 general), the following equation:
3436 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
3437 does *NOT* necessarily hold for reduction patterns. */
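
/* For illustration, a hypothetical source loop that leads to such a
   reduction pattern (widening summation of shorts into an int
   accumulator; the names are made up for the example):

     short a[N]; int sum = 0;
     for (i = 0; i < N; i++)
       sum += a[i];

   After pattern recognition the summation is represented as
     sum_1 = widen_sum <a[i], sum_0>
   so the stmt operates on 8 shorts at a time (vectype V8HI for
   determining the vectorization factor) while the vectorized stmt
   itself produces a V4SI result.  */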
3438
3439 bool
3440 vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
3441 gimple *vec_stmt)
3442 {
3443 tree vec_dest;
3444 tree scalar_dest;
3445 tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
3446 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3447 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3448 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3449 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3450 enum tree_code code, orig_code, epilog_reduc_code;
3451 enum machine_mode vec_mode;
3452 int op_type;
3453 optab optab, reduc_optab;
3454 tree new_temp = NULL_TREE;
3455 tree def;
3456 gimple def_stmt;
3457 enum vect_def_type dt;
3458 gimple new_phi = NULL;
3459 tree scalar_type;
3460 bool is_simple_use;
3461 gimple orig_stmt;
3462 stmt_vec_info orig_stmt_info;
3463 tree expr = NULL_TREE;
3464 int i;
3465 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3466 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3467 int epilog_copies;
3468 stmt_vec_info prev_stmt_info, prev_phi_info;
3469 gimple first_phi = NULL;
3470 bool single_defuse_cycle = false;
3471 tree reduc_def = NULL_TREE;
3472 gimple new_stmt = NULL;
3473 int j;
3474 tree ops[3];
3475 bool nested_cycle = false, found_nested_cycle_def = false;
3476 gimple reduc_def_stmt = NULL;
3477 /* The default is that the reduction variable is the last operand in the
3478 statement. */
3478 int reduc_index = 2;
3479 bool double_reduc = false, dummy;
3480 basic_block def_bb;
3481 struct loop * def_stmt_loop, *outer_loop = NULL;
3482 tree def_arg;
3483 gimple def_arg_stmt;
3484
3485 if (nested_in_vect_loop_p (loop, stmt))
3486 {
3487 outer_loop = loop;
3488 loop = loop->inner;
3489 nested_cycle = true;
3490 }
3491
3492 gcc_assert (ncopies >= 1);
3493
3494 /* FORNOW: SLP not supported. */
3495 if (STMT_SLP_TYPE (stmt_info))
3496 return false;
3497
3498 /* 1. Is vectorizable reduction? */
3499 /* Not supportable if the reduction variable is used in the loop. */
3500 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer)
3501 return false;
3502
3503 /* Reductions that are not used even in an enclosing outer-loop
3504 are expected to be "live" (used out of the loop). */
3505 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
3506 && !STMT_VINFO_LIVE_P (stmt_info))
3507 return false;
3508
3509 /* Make sure it was already recognized as a reduction computation. */
3510 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
3511 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
3512 return false;
3513
3514 /* 2. Has this been recognized as a reduction pattern?
3515
3516 Check if STMT represents a pattern that has been recognized
3517 in earlier analysis stages. For stmts that represent a pattern,
3518 the STMT_VINFO_RELATED_STMT field records the last stmt in
3519 the original sequence that constitutes the pattern. */
3520
3521 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3522 if (orig_stmt)
3523 {
3524 orig_stmt_info = vinfo_for_stmt (orig_stmt);
3525 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt);
3526 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
3527 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
3528 }
3529
3530 /* 3. Check the operands of the operation. The first operands are defined
3531 inside the loop body. The last operand is the reduction variable,
3532 which is defined by the loop-header-phi. */
3533
3534 gcc_assert (is_gimple_assign (stmt));
3535
3536 /* Flatten RHS.  */
3537 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3538 {
3539 case GIMPLE_SINGLE_RHS:
3540 op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
3541 if (op_type == ternary_op)
3542 {
3543 tree rhs = gimple_assign_rhs1 (stmt);
3544 ops[0] = TREE_OPERAND (rhs, 0);
3545 ops[1] = TREE_OPERAND (rhs, 1);
3546 ops[2] = TREE_OPERAND (rhs, 2);
3547 code = TREE_CODE (rhs);
3548 }
3549 else
3550 return false;
3551 break;
3552
3553 case GIMPLE_BINARY_RHS:
3554 code = gimple_assign_rhs_code (stmt);
3555 op_type = TREE_CODE_LENGTH (code);
3556 gcc_assert (op_type == binary_op);
3557 ops[0] = gimple_assign_rhs1 (stmt);
3558 ops[1] = gimple_assign_rhs2 (stmt);
3559 break;
3560
3561 case GIMPLE_UNARY_RHS:
3562 return false;
3563
3564 default:
3565 gcc_unreachable ();
3566 }
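
/* For example (an illustrative sketch): a conditional reduction such as

     s_1 = (a_2 < b_3) ? x_4 : s_0

   arrives here as a GIMPLE_SINGLE_RHS whose rhs1 is a COND_EXPR; the
   switch above flattens it into ops[0] = (a_2 < b_3), ops[1] = x_4,
   ops[2] = s_0, with code = COND_EXPR.  */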
3567
3568 scalar_dest = gimple_assign_lhs (stmt);
3569 scalar_type = TREE_TYPE (scalar_dest);
3570 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
3571 && !SCALAR_FLOAT_TYPE_P (scalar_type))
3572 return false;
3573
3574 /* All uses but the last are expected to be defined in the loop.
3575 The last use is the reduction variable. In case of a nested cycle
3576 this assumption does not hold: we use reduc_index to record the
3577 index of the reduction variable. */
3578 for (i = 0; i < op_type-1; i++)
3579 {
3580 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
3581 if (i == 0 && code == COND_EXPR)
3582 continue;
3583
3584 is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, NULL, &def_stmt,
3585 &def, &dt);
3586 gcc_assert (is_simple_use);
3587 if (dt != vect_internal_def
3588 && dt != vect_external_def
3589 && dt != vect_constant_def
3590 && dt != vect_induction_def
3591 && !(dt == vect_nested_cycle && nested_cycle))
3592 return false;
3593
3594 if (dt == vect_nested_cycle)
3595 {
3596 found_nested_cycle_def = true;
3597 reduc_def_stmt = def_stmt;
3598 reduc_index = i;
3599 }
3600 }
3601
3602 is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, NULL, &def_stmt,
3603 &def, &dt);
3604 gcc_assert (is_simple_use);
3605 gcc_assert (dt == vect_reduction_def
3606 || dt == vect_nested_cycle
3607 || ((dt == vect_internal_def || dt == vect_external_def
3608 || dt == vect_constant_def || dt == vect_induction_def)
3609 && nested_cycle && found_nested_cycle_def));
3610 if (!found_nested_cycle_def)
3611 reduc_def_stmt = def_stmt;
3612
3613 gcc_assert (gimple_code (reduc_def_stmt) == GIMPLE_PHI);
3614 if (orig_stmt)
3615 gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo,
3616 reduc_def_stmt,
3617 !nested_cycle,
3618 &dummy));
3619 else
3620 gcc_assert (stmt == vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
3621 !nested_cycle, &dummy));
3622
3623 if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
3624 return false;
3625
3626 vec_mode = TYPE_MODE (vectype);
3627
3628 if (code == COND_EXPR)
3629 {
3630 if (!vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0))
3631 {
3632 if (vect_print_dump_info (REPORT_DETAILS))
3633 fprintf (vect_dump, "unsupported condition in reduction");
3634
3635 return false;
3636 }
3637 }
3638 else
3639 {
3640 /* 4. Supportable by target? */
3641
3642 /* 4.1. Check support for the operation in the loop.  */
3643 optab = optab_for_tree_code (code, vectype, optab_default);
3644 if (!optab)
3645 {
3646 if (vect_print_dump_info (REPORT_DETAILS))
3647 fprintf (vect_dump, "no optab.");
3648
3649 return false;
3650 }
3651
3652 if (optab_handler (optab, vec_mode)->insn_code == CODE_FOR_nothing)
3653 {
3654 if (vect_print_dump_info (REPORT_DETAILS))
3655 fprintf (vect_dump, "op not supported by target.");
3656
3657 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
3658 || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3659 < vect_min_worthwhile_factor (code))
3660 return false;
3661
3662 if (vect_print_dump_info (REPORT_DETAILS))
3663 fprintf (vect_dump, "proceeding using word mode.");
3664 }
3665
3666 /* Worthwhile without SIMD support? */
3667 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
3668 && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3669 < vect_min_worthwhile_factor (code))
3670 {
3671 if (vect_print_dump_info (REPORT_DETAILS))
3672 fprintf (vect_dump, "not worthwhile without SIMD support.");
3673
3674 return false;
3675 }
3676 }
3677
3678 /* 4.2. Check support for the epilog operation.
3679
3680 If STMT represents a reduction pattern, then the type of the
3681 reduction variable may be different than the type of the rest
3682 of the arguments. For example, consider the case of accumulation
3683 of shorts into an int accumulator. The original code:
3684 S1: int_a = (int) short_a;
3685 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
3686
3687 was replaced with:
3688 STMT: int_acc = widen_sum <short_a, int_acc>
3689
3690 This means that:
3691 1. The tree-code that is used to create the vector operation in the
3692 epilog code (that reduces the partial results) is not the
3693 tree-code of STMT, but is rather the tree-code of the original
3694 stmt from the pattern that STMT is replacing. I.e., in the example
3695 above we want to use 'widen_sum' in the loop, but 'plus' in the
3696 epilog.
3697 2. The type (mode) we use to check available target support
3698 for the vector operation to be created in the *epilog*, is
3699 determined by the type of the reduction variable (in the example
3700 above we'd check this: plus_optab[vect_int_mode]).
3701 However the type (mode) we use to check available target support
3702 for the vector operation to be created *inside the loop*, is
3703 determined by the type of the other arguments to STMT (in the
3704 example we'd check this: widen_sum_optab[vect_short_mode]).
3705
3706 This is contrary to "regular" reductions, in which the types of all
3707 the arguments are the same as the type of the reduction variable.
3708 For "regular" reductions we can therefore use the same vector type
3709 (and also the same tree-code) when generating the epilog code and
3710 when generating the code inside the loop. */
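
/* Continuing the widen_sum example, the two checks look roughly like
   this (a sketch with hypothetical vectype names, not the exact code):

     loop body (done in 4.1 above):
       optab_for_tree_code (WIDEN_SUM_EXPR, V8HI_vectype, optab_default)
     epilog (done below):
       optab_for_tree_code (REDUC_PLUS_EXPR, V4SI_vectype, optab_default)

   where REDUC_PLUS_EXPR is obtained from PLUS_EXPR by
   reduction_code_for_scalar_code.  */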
3711
3712 if (orig_stmt)
3713 {
3714 /* This is a reduction pattern: get the vectype from the type of the
3715 reduction variable, and get the tree-code from orig_stmt. */
3716 orig_code = gimple_assign_rhs_code (orig_stmt);
3717 vectype = get_vectype_for_scalar_type (TREE_TYPE (def));
3718 if (!vectype)
3719 {
3720 if (vect_print_dump_info (REPORT_DETAILS))
3721 {
3722 fprintf (vect_dump, "unsupported data-type ");
3723 print_generic_expr (vect_dump, TREE_TYPE (def), TDF_SLIM);
3724 }
3725 return false;
3726 }
3727
3728 vec_mode = TYPE_MODE (vectype);
3729 }
3730 else
3731 {
3732 /* Regular reduction: the same vectype and tree-code as used for
3733 the vector code inside the loop can be used for the epilog code. */
3734 orig_code = code;
3735 }
3736
3737 if (nested_cycle)
3738 {
3739 def_bb = gimple_bb (reduc_def_stmt);
3740 def_stmt_loop = def_bb->loop_father;
3741 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
3742 loop_preheader_edge (def_stmt_loop));
3743 if (TREE_CODE (def_arg) == SSA_NAME
3744 && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
3745 && gimple_code (def_arg_stmt) == GIMPLE_PHI
3746 && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
3747 && vinfo_for_stmt (def_arg_stmt)
3748 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
3749 == vect_double_reduction_def)
3750 double_reduc = true;
3751 }
3752
3753 epilog_reduc_code = ERROR_MARK;
3754 if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
3755 {
3756 reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype,
3757 optab_default);
3758 if (!reduc_optab)
3759 {
3760 if (vect_print_dump_info (REPORT_DETAILS))
3761 fprintf (vect_dump, "no optab for reduction.");
3762
3763 epilog_reduc_code = ERROR_MARK;
3764 }
3765
3766 if (reduc_optab
3767 && optab_handler (reduc_optab, vec_mode)->insn_code
3768 == CODE_FOR_nothing)
3769 {
3770 if (vect_print_dump_info (REPORT_DETAILS))
3771 fprintf (vect_dump, "reduc op not supported by target.");
3772
3773 epilog_reduc_code = ERROR_MARK;
3774 }
3775 }
3776 else
3777 {
3778 if (!nested_cycle || double_reduc)
3779 {
3780 if (vect_print_dump_info (REPORT_DETAILS))
3781 fprintf (vect_dump, "no reduc code for scalar code.");
3782
3783 return false;
3784 }
3785 }
3786
3787 if (double_reduc && ncopies > 1)
3788 {
3789 if (vect_print_dump_info (REPORT_DETAILS))
3790 fprintf (vect_dump, "multiple types in double reduction");
3791
3792 return false;
3793 }
3794
3795 if (!vec_stmt) /* transformation not required. */
3796 {
3797 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
3798 if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
3799 return false;
3800 return true;
3801 }
3802
3803 /** Transform. **/
3804
3805 if (vect_print_dump_info (REPORT_DETAILS))
3806 fprintf (vect_dump, "transform reduction.");
3807
3808 /* FORNOW: Multiple types are not supported for condition. */
3809 if (code == COND_EXPR)
3810 gcc_assert (ncopies == 1);
3811
3812 /* Create the destination vector.  */
3813 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3814
3815 /* In case the vectorization factor (VF) is bigger than the number
3816 of elements that we can fit in a vectype (nunits), we have to generate
3817 more than one vector stmt, i.e., we need to "unroll" the
3818 vector stmt by a factor VF/nunits. For more details see documentation
3819 in vectorizable_operation. */
3820
3821 /* If the reduction is used in an outer loop we need to generate
3822 VF intermediate results, like so (e.g. for ncopies=2):
3823 r0 = phi (init, r0)
3824 r1 = phi (init, r1)
3825 r0 = x0 + r0;
3826 r1 = x1 + r1;
3827 (i.e. we generate VF results in 2 registers).
3828 In this case we have a separate def-use cycle for each copy, and therefore
3829 for each copy we get the vector def for the reduction variable from the
3830 respective phi node created for this copy.
3831
3832 Otherwise (the reduction is unused in the loop nest), we can combine
3833 together intermediate results, like so (e.g. for ncopies=2):
3834 r = phi (init, r)
3835 r = x0 + r;
3836 r = x1 + r;
3837 (i.e. we generate VF/2 results in a single register).
3838 In this case for each copy we get the vector def for the reduction variable
3839 from the vectorized reduction operation generated in the previous iteration.
3840 */
3841
3842 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope)
3843 {
3844 single_defuse_cycle = true;
3845 epilog_copies = 1;
3846 }
3847 else
3848 epilog_copies = ncopies;
3849
3850 prev_stmt_info = NULL;
3851 prev_phi_info = NULL;
3852 for (j = 0; j < ncopies; j++)
3853 {
3854 if (j == 0 || !single_defuse_cycle)
3855 {
3856 /* Create the reduction-phi that defines the reduction-operand. */
3857 new_phi = create_phi_node (vec_dest, loop->header);
3858 set_vinfo_for_stmt (new_phi, new_stmt_vec_info (new_phi, loop_vinfo,
3859 NULL));
3860 /* Get the vector def for the reduction variable from the phi
3861 node. */
3862 reduc_def = PHI_RESULT (new_phi);
3863 }
3864
3865 if (code == COND_EXPR)
3866 {
3867 first_phi = new_phi;
3868 vectorizable_condition (stmt, gsi, vec_stmt, reduc_def, reduc_index);
3869 /* Multiple types are not supported for condition. */
3870 break;
3871 }
3872
3873 /* Handle uses. */
3874 if (j == 0)
3875 {
3876 loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
3877 stmt, NULL);
3878 if (op_type == ternary_op)
3879 {
3880 if (reduc_index == 0)
3881 loop_vec_def1 = vect_get_vec_def_for_operand (ops[2], stmt,
3882 NULL);
3883 else
3884 loop_vec_def1 = vect_get_vec_def_for_operand (ops[1], stmt,
3885 NULL);
3886 }
3887
3888 /* Get the vector def for the reduction variable from the phi
3889 node. */
3890 first_phi = new_phi;
3891 }
3892 else
3893 {
3894 enum vect_def_type dt = vect_unknown_def_type; /* Dummy */
3895 loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def0);
3896 if (op_type == ternary_op)
3897 loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def1);
3898
3899 if (single_defuse_cycle)
3900 reduc_def = gimple_assign_lhs (new_stmt);
3901 else
3902 reduc_def = PHI_RESULT (new_phi);
3903
3904 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
3905 }
3906
3907 /* Arguments are ready. Create the new vector stmt. */
3908 if (op_type == binary_op)
3909 {
3910 if (reduc_index == 0)
3911 expr = build2 (code, vectype, reduc_def, loop_vec_def0);
3912 else
3913 expr = build2 (code, vectype, loop_vec_def0, reduc_def);
3914 }
3915 else
3916 {
3917 if (reduc_index == 0)
3918 expr = build3 (code, vectype, reduc_def, loop_vec_def0,
3919 loop_vec_def1);
3920 else
3921 {
3922 if (reduc_index == 1)
3923 expr = build3 (code, vectype, loop_vec_def0, reduc_def,
3924 loop_vec_def1);
3925 else
3926 expr = build3 (code, vectype, loop_vec_def0, loop_vec_def1,
3927 reduc_def);
3928 }
3929 }
3930
3931 new_stmt = gimple_build_assign (vec_dest, expr);
3932 new_temp = make_ssa_name (vec_dest, new_stmt);
3933 gimple_assign_set_lhs (new_stmt, new_temp);
3934 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3935
3936 if (j == 0)
3937 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3938 else
3939 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3940
3941 prev_stmt_info = vinfo_for_stmt (new_stmt);
3942 prev_phi_info = vinfo_for_stmt (new_phi);
3943 }
3944
3945 /* Finalize the reduction-phi (set its arguments) and create the
3946 epilog reduction code. */
3947 if (!single_defuse_cycle || code == COND_EXPR)
3948 new_temp = gimple_assign_lhs (*vec_stmt);
3949
3950 vect_create_epilog_for_reduction (new_temp, stmt, epilog_copies,
3951 epilog_reduc_code, first_phi, reduc_index,
3952 double_reduc);
3953 return true;
3954 }
3955
3956 /* Function vect_min_worthwhile_factor.
3957
3958 For a loop where we could vectorize the operation indicated by CODE,
3959 return the minimum vectorization factor that makes it worthwhile
3960 to use generic vectors. */
3961 int
3962 vect_min_worthwhile_factor (enum tree_code code)
3963 {
3964 switch (code)
3965 {
3966 case PLUS_EXPR:
3967 case MINUS_EXPR:
3968 case NEGATE_EXPR:
3969 return 4;
3970
3971 case BIT_AND_EXPR:
3972 case BIT_IOR_EXPR:
3973 case BIT_XOR_EXPR:
3974 case BIT_NOT_EXPR:
3975 return 2;
3976
3977 default:
3978 return INT_MAX;
3979 }
3980 }
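
/* Usage sketch (hypothetical numbers): for a PLUS_EXPR whose vector
   mode is not supported by the target, vectorization proceeds using
   word-mode generic vectors only when the vectorization factor is at
   least vect_min_worthwhile_factor (PLUS_EXPR) == 4; e.g. VF = 8
   qualifies, VF = 2 does not.  */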
3981
3982
3983 /* Function vectorizable_induction
3984
3985 Check if PHI performs an induction computation that can be vectorized.
3986 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
3987 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
3988 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
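
/* For example (a hypothetical loop with VF = 4 and step 2):

     j = j0;
     for (i = 0; i < N; i++)
       { a[i] = j; j += 2; }

   is roughly vectorized by creating the initial vector
   {j0, j0+2, j0+4, j0+6} and adding the step vector {8, 8, 8, 8}
   (i.e. VF * step) in every iteration; see
   get_initial_def_for_induction for the details.  */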
3989
3990 bool
3991 vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
3992 gimple *vec_stmt)
3993 {
3994 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
3995 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3996 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3997 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3998 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3999 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4000 tree vec_def;
4001
4002 gcc_assert (ncopies >= 1);
4003 /* FORNOW. This restriction should be relaxed. */
4004 if (nested_in_vect_loop_p (loop, phi) && ncopies > 1)
4005 {
4006 if (vect_print_dump_info (REPORT_DETAILS))
4007 fprintf (vect_dump, "multiple types in nested loop.");
4008 return false;
4009 }
4010
4011 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4012 return false;
4013
4014 /* FORNOW: SLP not supported. */
4015 if (STMT_SLP_TYPE (stmt_info))
4016 return false;
4017
4018 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
4019
4020 if (gimple_code (phi) != GIMPLE_PHI)
4021 return false;
4022
4023 if (!vec_stmt) /* transformation not required. */
4024 {
4025 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
4026 if (vect_print_dump_info (REPORT_DETAILS))
4027 fprintf (vect_dump, "=== vectorizable_induction ===");
4028 vect_model_induction_cost (stmt_info, ncopies);
4029 return true;
4030 }
4031
4032 /** Transform. **/
4033
4034 if (vect_print_dump_info (REPORT_DETAILS))
4035 fprintf (vect_dump, "transform induction phi.");
4036
4037 vec_def = get_initial_def_for_induction (phi);
4038 *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
4039 return true;
4040 }
4041
4042 /* Function vectorizable_live_operation.
4043
4044 STMT computes a value that is used outside the loop. Check if
4045 it can be supported. */
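
/* For example (hypothetical), in

     for (i = 0; i < N; i++)
       { x = c + d; a[i] = x; }
     ... = x;

   x is "live": its last value is used after the loop. Because both
   operands of the stmt computing x are loop-invariant, the scalar stmt
   can simply remain in place, which is the only case currently
   supported (see below).  */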
4046
4047 bool
4048 vectorizable_live_operation (gimple stmt,
4049 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
4050 gimple *vec_stmt ATTRIBUTE_UNUSED)
4051 {
4052 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4053 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4054 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4055 int i;
4056 int op_type;
4057 tree op;
4058 tree def;
4059 gimple def_stmt;
4060 enum vect_def_type dt;
4061 enum tree_code code;
4062 enum gimple_rhs_class rhs_class;
4063
4064 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
4065
4066 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
4067 return false;
4068
4069 if (!is_gimple_assign (stmt))
4070 return false;
4071
4072 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4073 return false;
4074
4075 /* FORNOW. CHECKME. */
4076 if (nested_in_vect_loop_p (loop, stmt))
4077 return false;
4078
4079 code = gimple_assign_rhs_code (stmt);
4080 op_type = TREE_CODE_LENGTH (code);
4081 rhs_class = get_gimple_rhs_class (code);
4082 gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
4083 gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);
4084
4085 /* FORNOW: support only if all uses are invariant. This means
4086 that the scalar operations can remain in place, unvectorized.
4087 The original last scalar value that they compute will be used. */
4088
4089 for (i = 0; i < op_type; i++)
4090 {
4091 if (rhs_class == GIMPLE_SINGLE_RHS)
4092 op = TREE_OPERAND (gimple_op (stmt, 1), i);
4093 else
4094 op = gimple_op (stmt, i + 1);
4095 if (op
4096 && !vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def, &dt))
4097 {
4098 if (vect_print_dump_info (REPORT_DETAILS))
4099 fprintf (vect_dump, "use not simple.");
4100 return false;
4101 }
4102
4103 if (dt != vect_external_def && dt != vect_constant_def)
4104 return false;
4105 }
4106
4107 /* No transformation is required for the cases we currently support. */
4108 return true;
4109 }
4110
4111 /* Function vect_transform_loop.
4112
4113 The analysis phase has determined that the loop is vectorizable.
4114 Vectorize the loop - create vectorized stmts to replace the scalar
4115 stmts in the loop, and update the loop exit condition. */
4116
4117 void
4118 vect_transform_loop (loop_vec_info loop_vinfo)
4119 {
4120 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4121 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
4122 int nbbs = loop->num_nodes;
4123 gimple_stmt_iterator si;
4124 int i;
4125 tree ratio = NULL;
4126 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4127 bool strided_store;
4128 bool slp_scheduled = false;
4129 unsigned int nunits;
4130 tree cond_expr = NULL_TREE;
4131 gimple_seq cond_expr_stmt_list = NULL;
4132 bool do_peeling_for_loop_bound;
4133
4134 if (vect_print_dump_info (REPORT_DETAILS))
4135 fprintf (vect_dump, "=== vec_transform_loop ===");
4136
4137 /* Peel the loop if there are data refs with unknown alignment;
4138 peeling can force the alignment of only one such data ref. */
4139
4140 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
4141 vect_do_peeling_for_alignment (loop_vinfo);
4142
4143 do_peeling_for_loop_bound
4144 = (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
4145 || (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
4146 && LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0));
4147
4148 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
4149 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
4150 vect_loop_versioning (loop_vinfo,
4151 !do_peeling_for_loop_bound,
4152 &cond_expr, &cond_expr_stmt_list);
4153
4154 /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
4155 compile-time constant), or it is a constant that is not a multiple of
4156 the vectorization factor, then an epilog loop needs to be created.
4157 We therefore duplicate the loop: the original loop will be vectorized,
4158 and will compute the first (n/VF) iterations. The second copy of the loop
4159 will remain scalar and will compute the remaining (n%VF) iterations.
4160 (VF is the vectorization factor). */
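
/* A worked example with hypothetical numbers: for n = 103 and VF = 4,
   the vectorized loop executes ratio = 103/4 = 25 iterations, covering
   the first 100 scalar iterations, and the scalar epilog loop executes
   the remaining 103%4 = 3 iterations.  */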
4161
4162 if (do_peeling_for_loop_bound)
4163 vect_do_peeling_for_loop_bound (loop_vinfo, &ratio,
4164 cond_expr, cond_expr_stmt_list);
4165 else
4166 ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
4167 LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);
4168
4169 /* 1) Make sure the loop header has exactly two entries
4170 2) Make sure we have a preheader basic block. */
4171
4172 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
4173
4174 split_edge (loop_preheader_edge (loop));
4175
4176 /* FORNOW: the vectorizer supports only loops whose body consists
4177 of one basic block (header + empty latch). When the vectorizer
4178 supports more involved loop forms, the order in which the BBs are
4179 traversed will need to be reconsidered. */
4180
4181 for (i = 0; i < nbbs; i++)
4182 {
4183 basic_block bb = bbs[i];
4184 stmt_vec_info stmt_info;
4185 gimple phi;
4186
4187 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
4188 {
4189 phi = gsi_stmt (si);
4190 if (vect_print_dump_info (REPORT_DETAILS))
4191 {
4192 fprintf (vect_dump, "------>vectorizing phi: ");
4193 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
4194 }
4195 stmt_info = vinfo_for_stmt (phi);
4196 if (!stmt_info)
4197 continue;
4198
4199 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4200 && !STMT_VINFO_LIVE_P (stmt_info))
4201 continue;
4202
4203 if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
4204 != (unsigned HOST_WIDE_INT) vectorization_factor)
4205 && vect_print_dump_info (REPORT_DETAILS))
4206 fprintf (vect_dump, "multiple-types.");
4207
4208 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
4209 {
4210 if (vect_print_dump_info (REPORT_DETAILS))
4211 fprintf (vect_dump, "transform phi.");
4212 vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
4213 }
4214 }
4215
4216 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
4217 {
4218 gimple stmt = gsi_stmt (si);
4219 bool is_store;
4220
4221 if (vect_print_dump_info (REPORT_DETAILS))
4222 {
4223 fprintf (vect_dump, "------>vectorizing statement: ");
4224 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4225 }
4226
4227 stmt_info = vinfo_for_stmt (stmt);
4228
4229 /* vector stmts created in the outer-loop during vectorization of
4230 stmts in an inner-loop may not have a stmt_info, and do not
4231 need to be vectorized. */
4232 if (!stmt_info)
4233 {
4234 gsi_next (&si);
4235 continue;
4236 }
4237
4238 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4239 && !STMT_VINFO_LIVE_P (stmt_info))
4240 {
4241 gsi_next (&si);
4242 continue;
4243 }
4244
4245 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4246 nunits =
4247 (unsigned int) TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
4248 if (!STMT_SLP_TYPE (stmt_info)
4249 && nunits != (unsigned int) vectorization_factor
4250 && vect_print_dump_info (REPORT_DETAILS))
4251 /* For SLP, VF is set according to the unrolling factor, and not
4252 to the vector size, hence this print is not valid for SLP. */
4253 fprintf (vect_dump, "multiple-types.");
4254
4255 /* SLP. Schedule all the SLP instances when the first SLP stmt is
4256 reached. */
4257 if (STMT_SLP_TYPE (stmt_info))
4258 {
4259 if (!slp_scheduled)
4260 {
4261 slp_scheduled = true;
4262
4263 if (vect_print_dump_info (REPORT_DETAILS))
4264 fprintf (vect_dump, "=== scheduling SLP instances ===");
4265
4266 vect_schedule_slp (loop_vinfo, NULL);
4267 }
4268
4269 /* Hybrid SLP stmts must be vectorized in addition to SLP. */
4270 if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
4271 {
4272 gsi_next (&si);
4273 continue;
4274 }
4275 }
4276
4277 /* -------- vectorize statement ------------ */
4278 if (vect_print_dump_info (REPORT_DETAILS))
4279 fprintf (vect_dump, "transform statement.");
4280
4281 strided_store = false;
4282 is_store = vect_transform_stmt (stmt, &si, &strided_store, NULL, NULL);
4283 if (is_store)
4284 {
4285 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
4286 {
4287 /* Interleaving. If IS_STORE is TRUE, the vectorization of the
4288 interleaving chain has been completed; free all the stores in
4289 the chain. */
4290 vect_remove_stores (DR_GROUP_FIRST_DR (stmt_info));
4291 gsi_remove (&si, true);
4292 continue;
4293 }
4294 else
4295 {
4296 /* Free the attached stmt_vec_info and remove the stmt. */
4297 free_stmt_vec_info (stmt);
4298 gsi_remove (&si, true);
4299 continue;
4300 }
4301 }
4302 gsi_next (&si);
4303 } /* stmts in BB */
4304 } /* BBs in loop */
4305
4306 slpeel_make_loop_iterate_ntimes (loop, ratio);
4307
4308 /* The memory tags and pointers in vectorized statements need to
4309 have their SSA forms updated. FIXME, why can't this be delayed
4310 until all the loops have been transformed? */
4311 update_ssa (TODO_update_ssa);
4312
4313 if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
4314 fprintf (vect_dump, "LOOP VECTORIZED.");
4315 if (loop->inner && vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
4316 fprintf (vect_dump, "OUTER LOOP VECTORIZED.");
4317 }