gcc/tree-vect-loop.c
1 /* Loop Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
3 Free Software Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com> and
5 Ira Rosen <irar@il.ibm.com>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "basic-block.h"
30 #include "tree-pretty-print.h"
31 #include "gimple-pretty-print.h"
32 #include "tree-flow.h"
33 #include "tree-dump.h"
34 #include "cfgloop.h"
35 #include "expr.h"
36 #include "recog.h"
37 #include "optabs.h"
38 #include "params.h"
39 #include "diagnostic-core.h"
40 #include "tree-chrec.h"
41 #include "tree-scalar-evolution.h"
42 #include "tree-vectorizer.h"
43 #include "target.h"
44
45 /* Loop Vectorization Pass.
46
47 This pass tries to vectorize loops.
48
49 For example, the vectorizer transforms the following simple loop:
50
51 short a[N]; short b[N]; short c[N]; int i;
52
53 for (i=0; i<N; i++){
54 a[i] = b[i] + c[i];
55 }
56
57    as if it had been manually vectorized by rewriting the source code into:
58
59 typedef int __attribute__((mode(V8HI))) v8hi;
60 short a[N]; short b[N]; short c[N]; int i;
61 v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
62 v8hi va, vb, vc;
63
64 for (i=0; i<N/8; i++){
65 vb = pb[i];
66 vc = pc[i];
67 va = vb + vc;
68 pa[i] = va;
69 }
70
71 The main entry to this pass is vectorize_loops(), in which
72 the vectorizer applies a set of analyses on a given set of loops,
73 followed by the actual vectorization transformation for the loops that
74 had successfully passed the analysis phase.
75 Throughout this pass we make a distinction between two types of
76 data: scalars (which are represented by SSA_NAMES), and memory references
77 ("data-refs"). These two types of data require different handling both
78 during analysis and transformation. The types of data-refs that the
79    vectorizer currently supports are ARRAY_REFS whose base is an array DECL
80 (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
81 accesses are required to have a simple (consecutive) access pattern.
82
83 Analysis phase:
84 ===============
85 The driver for the analysis phase is vect_analyze_loop().
86 It applies a set of analyses, some of which rely on the scalar evolution
87 analyzer (scev) developed by Sebastian Pop.
88
89 During the analysis phase the vectorizer records some information
90 per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
91 loop, as well as general information about the loop as a whole, which is
92 recorded in a "loop_vec_info" struct attached to each loop.
93
94 Transformation phase:
95 =====================
96 The loop transformation phase scans all the stmts in the loop, and
97 creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
98 the loop that needs to be vectorized. It inserts the vector code sequence
99 just before the scalar stmt S, and records a pointer to the vector code
100 in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
101 attached to S). This pointer will be used for the vectorization of following
102 stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
103 otherwise, we rely on dead code elimination for removing it.
104
105 For example, say stmt S1 was vectorized into stmt VS1:
106
107 VS1: vb = px[i];
108 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
109 S2: a = b;
110
111 To vectorize stmt S2, the vectorizer first finds the stmt that defines
112 the operand 'b' (S1), and gets the relevant vector def 'vb' from the
113 vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
114 resulting sequence would be:
115
116 VS1: vb = px[i];
117 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
118 VS2: va = vb;
119 S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
120
121    Operands that are not SSA_NAMEs are data-refs that appear in
122 load/store operations (like 'x[i]' in S1), and are handled differently.
123
124 Target modeling:
125 =================
126 Currently the only target specific information that is used is the
127 size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
128    Targets that can support different sizes of vectors will, for now, need
129 to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More
130 flexibility will be added in the future.
131
132    Since we only vectorize operations whose vector form can be
133 expressed using existing tree codes, to verify that an operation is
134 supported, the vectorizer checks the relevant optab at the relevant
135 machine_mode (e.g, optab_handler (add_optab, V8HImode)). If
136 the value found is CODE_FOR_nothing, then there's no target support, and
137 we can't vectorize the stmt.
138
139 For additional information on this project see:
140 http://gcc.gnu.org/projects/tree-ssa/vectorization.html
141 */
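
/* Editorial sketch (not part of the original sources): the "Target
   modeling" paragraph above describes how operation support is queried.
   Using the add_optab / V8HImode example from that paragraph, the check
   boils down to:

     if (optab_handler (add_optab, V8HImode) == CODE_FOR_nothing)
       return false;   -- no target instruction, the stmt is not vectorizable

   In the vectorizer itself the optab and machine_mode are derived from the
   statement's tree code and its vectype rather than hard-coded as here.  */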
142
143 /* Function vect_determine_vectorization_factor
144
145 Determine the vectorization factor (VF). VF is the number of data elements
146 that are operated upon in parallel in a single iteration of the vectorized
147    loop. For example, when vectorizing a loop that operates on 4-byte elements,
148    on a target with a vector size (VS) of 16 bytes, the VF is set to 4, since 4
149 elements can fit in a single vector register.
150
151 We currently support vectorization of loops in which all types operated upon
152 are of the same size. Therefore this function currently sets VF according to
153 the size of the types operated upon, and fails if there are multiple sizes
154 in the loop.
155
156 VF is also the factor by which the loop iterations are strip-mined, e.g.:
157 original loop:
158 for (i=0; i<N; i++){
159 a[i] = b[i] + c[i];
160 }
161
162 vectorized loop:
163 for (i=0; i<N; i+=VF){
164 a[i:VF] = b[i:VF] + c[i:VF];
165 }
166 */
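
/* Editorial worked example: on a target with 16-byte vectors, 4-byte
   elements give a vectype with TYPE_VECTOR_SUBPARTS == 4, so VF = 4 and
   the loop is strip-mined by 4 as shown above; 2-byte elements would give
   8 subparts and VF = 8.  */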
167
168 static bool
169 vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
170 {
171 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
172 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
173 int nbbs = loop->num_nodes;
174 gimple_stmt_iterator si;
175 unsigned int vectorization_factor = 0;
176 tree scalar_type;
177 gimple phi;
178 tree vectype;
179 unsigned int nunits;
180 stmt_vec_info stmt_info;
181 int i;
182 HOST_WIDE_INT dummy;
183 gimple stmt, pattern_stmt = NULL;
184 gimple_seq pattern_def_seq = NULL;
185 gimple_stmt_iterator pattern_def_si = gsi_none ();
186 bool analyze_pattern_stmt = false;
187
188 if (vect_print_dump_info (REPORT_DETAILS))
189 fprintf (vect_dump, "=== vect_determine_vectorization_factor ===");
190
191 for (i = 0; i < nbbs; i++)
192 {
193 basic_block bb = bbs[i];
194
195 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
196 {
197 phi = gsi_stmt (si);
198 stmt_info = vinfo_for_stmt (phi);
199 if (vect_print_dump_info (REPORT_DETAILS))
200 {
201 fprintf (vect_dump, "==> examining phi: ");
202 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
203 }
204
205 gcc_assert (stmt_info);
206
207 if (STMT_VINFO_RELEVANT_P (stmt_info))
208 {
209 gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
210 scalar_type = TREE_TYPE (PHI_RESULT (phi));
211
212 if (vect_print_dump_info (REPORT_DETAILS))
213 {
214 fprintf (vect_dump, "get vectype for scalar type: ");
215 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
216 }
217
218 vectype = get_vectype_for_scalar_type (scalar_type);
219 if (!vectype)
220 {
221 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
222 {
223 fprintf (vect_dump,
224 "not vectorized: unsupported data-type ");
225 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
226 }
227 return false;
228 }
229 STMT_VINFO_VECTYPE (stmt_info) = vectype;
230
231 if (vect_print_dump_info (REPORT_DETAILS))
232 {
233 fprintf (vect_dump, "vectype: ");
234 print_generic_expr (vect_dump, vectype, TDF_SLIM);
235 }
236
237 nunits = TYPE_VECTOR_SUBPARTS (vectype);
238 if (vect_print_dump_info (REPORT_DETAILS))
239 fprintf (vect_dump, "nunits = %d", nunits);
240
241 if (!vectorization_factor
242 || (nunits > vectorization_factor))
243 vectorization_factor = nunits;
244 }
245 }
246
247 for (si = gsi_start_bb (bb); !gsi_end_p (si) || analyze_pattern_stmt;)
248 {
249 tree vf_vectype;
250
251 if (analyze_pattern_stmt)
252 stmt = pattern_stmt;
253 else
254 stmt = gsi_stmt (si);
255
256 stmt_info = vinfo_for_stmt (stmt);
257
258 if (vect_print_dump_info (REPORT_DETAILS))
259 {
260 fprintf (vect_dump, "==> examining statement: ");
261 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
262 }
263
264 gcc_assert (stmt_info);
265
266 /* Skip stmts which do not need to be vectorized. */
267 if (!STMT_VINFO_RELEVANT_P (stmt_info)
268 && !STMT_VINFO_LIVE_P (stmt_info))
269 {
270 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
271 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
272 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
273 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
274 {
275 stmt = pattern_stmt;
276 stmt_info = vinfo_for_stmt (pattern_stmt);
277 if (vect_print_dump_info (REPORT_DETAILS))
278 {
279 fprintf (vect_dump, "==> examining pattern statement: ");
280 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
281 }
282 }
283 else
284 {
285 if (vect_print_dump_info (REPORT_DETAILS))
286 fprintf (vect_dump, "skip.");
287 gsi_next (&si);
288 continue;
289 }
290 }
291 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
292 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
293 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
294 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
295 analyze_pattern_stmt = true;
296
297 /* If a pattern statement has def stmts, analyze them too. */
298 if (is_pattern_stmt_p (stmt_info))
299 {
300 if (pattern_def_seq == NULL)
301 {
302 pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
303 pattern_def_si = gsi_start (pattern_def_seq);
304 }
305 else if (!gsi_end_p (pattern_def_si))
306 gsi_next (&pattern_def_si);
307 if (pattern_def_seq != NULL)
308 {
309 gimple pattern_def_stmt = NULL;
310 stmt_vec_info pattern_def_stmt_info = NULL;
311
312 while (!gsi_end_p (pattern_def_si))
313 {
314 pattern_def_stmt = gsi_stmt (pattern_def_si);
315 pattern_def_stmt_info
316 = vinfo_for_stmt (pattern_def_stmt);
317 if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
318 || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
319 break;
320 gsi_next (&pattern_def_si);
321 }
322
323 if (!gsi_end_p (pattern_def_si))
324 {
325 if (vect_print_dump_info (REPORT_DETAILS))
326 {
327 fprintf (vect_dump,
328 "==> examining pattern def stmt: ");
329 print_gimple_stmt (vect_dump, pattern_def_stmt, 0,
330 TDF_SLIM);
331 }
332
333 stmt = pattern_def_stmt;
334 stmt_info = pattern_def_stmt_info;
335 }
336 else
337 {
338 pattern_def_si = gsi_none ();
339 analyze_pattern_stmt = false;
340 }
341 }
342 else
343 analyze_pattern_stmt = false;
344 }
345
346 if (gimple_get_lhs (stmt) == NULL_TREE)
347 {
348 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
349 {
350 fprintf (vect_dump, "not vectorized: irregular stmt.");
351 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
352 }
353 return false;
354 }
355
356 if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
357 {
358 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
359 {
360 fprintf (vect_dump, "not vectorized: vector stmt in loop:");
361 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
362 }
363 return false;
364 }
365
366 if (STMT_VINFO_VECTYPE (stmt_info))
367 {
368 /* The only case when a vectype had been already set is for stmts
369 that contain a dataref, or for "pattern-stmts" (stmts
370 generated by the vectorizer to represent/replace a certain
371 idiom). */
372 gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
373 || is_pattern_stmt_p (stmt_info)
374 || !gsi_end_p (pattern_def_si));
375 vectype = STMT_VINFO_VECTYPE (stmt_info);
376 }
377 else
378 {
379 gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
380 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
381 if (vect_print_dump_info (REPORT_DETAILS))
382 {
383 fprintf (vect_dump, "get vectype for scalar type: ");
384 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
385 }
386 vectype = get_vectype_for_scalar_type (scalar_type);
387 if (!vectype)
388 {
389 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
390 {
391 fprintf (vect_dump,
392 "not vectorized: unsupported data-type ");
393 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
394 }
395 return false;
396 }
397
398 STMT_VINFO_VECTYPE (stmt_info) = vectype;
399 }
400
401 /* The vectorization factor is according to the smallest
402 scalar type (or the largest vector size, but we only
403 support one vector size per loop). */
404 scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
405 &dummy);
406 if (vect_print_dump_info (REPORT_DETAILS))
407 {
408 fprintf (vect_dump, "get vectype for scalar type: ");
409 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
410 }
411 vf_vectype = get_vectype_for_scalar_type (scalar_type);
412 if (!vf_vectype)
413 {
414 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
415 {
416 fprintf (vect_dump,
417 "not vectorized: unsupported data-type ");
418 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
419 }
420 return false;
421 }
422
423 if ((GET_MODE_SIZE (TYPE_MODE (vectype))
424 != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
425 {
426 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
427 {
428 fprintf (vect_dump,
429 "not vectorized: different sized vector "
430 "types in statement, ");
431 print_generic_expr (vect_dump, vectype, TDF_SLIM);
432 fprintf (vect_dump, " and ");
433 print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
434 }
435 return false;
436 }
437
438 if (vect_print_dump_info (REPORT_DETAILS))
439 {
440 fprintf (vect_dump, "vectype: ");
441 print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
442 }
443
444 nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
445 if (vect_print_dump_info (REPORT_DETAILS))
446 fprintf (vect_dump, "nunits = %d", nunits);
447
448 if (!vectorization_factor
449 || (nunits > vectorization_factor))
450 vectorization_factor = nunits;
451
452 if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
453 {
454 pattern_def_seq = NULL;
455 gsi_next (&si);
456 }
457 }
458 }
459
460 /* TODO: Analyze cost. Decide if worth while to vectorize. */
461 if (vect_print_dump_info (REPORT_DETAILS))
462 fprintf (vect_dump, "vectorization factor = %d", vectorization_factor);
463 if (vectorization_factor <= 1)
464 {
465 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
466 fprintf (vect_dump, "not vectorized: unsupported data-type");
467 return false;
468 }
469 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
470
471 return true;
472 }
473
474
475 /* Function vect_is_simple_iv_evolution.
476
477    FORNOW: A simple evolution of an induction variable in the loop is
478 considered a polynomial evolution with constant step. */
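
/* Editorial example: for a loop such as

     for (i = 0; i < n; i++)
       a[i] = ...;

   the scalar evolution of 'i' is the chrec {0, +, 1}_1 (initial value 0,
   constant step 1 in loop number 1), which this predicate accepts with
   *INIT = 0 and *STEP = 1.  An evolution whose step is itself a chrec
   (a polynomial of degree >= 2) or whose step is not an INTEGER_CST is
   rejected.  */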
479
480 static bool
481 vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
482 tree * step)
483 {
484 tree init_expr;
485 tree step_expr;
486 tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
487
488 /* When there is no evolution in this loop, the evolution function
489 is not "simple". */
490 if (evolution_part == NULL_TREE)
491 return false;
492
493 /* When the evolution is a polynomial of degree >= 2
494 the evolution function is not "simple". */
495 if (tree_is_chrec (evolution_part))
496 return false;
497
498 step_expr = evolution_part;
499 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
500
501 if (vect_print_dump_info (REPORT_DETAILS))
502 {
503 fprintf (vect_dump, "step: ");
504 print_generic_expr (vect_dump, step_expr, TDF_SLIM);
505 fprintf (vect_dump, ", init: ");
506 print_generic_expr (vect_dump, init_expr, TDF_SLIM);
507 }
508
509 *init = init_expr;
510 *step = step_expr;
511
512 if (TREE_CODE (step_expr) != INTEGER_CST)
513 {
514 if (vect_print_dump_info (REPORT_DETAILS))
515 fprintf (vect_dump, "step unknown.");
516 return false;
517 }
518
519 return true;
520 }
521
522 /* Function vect_analyze_scalar_cycles_1.
523
524 Examine the cross iteration def-use cycles of scalar variables
525 in LOOP. LOOP_VINFO represents the loop that is now being
526 considered for vectorization (can be LOOP, or an outer-loop
527 enclosing LOOP). */
528
529 static void
530 vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
531 {
532 basic_block bb = loop->header;
533 tree dumy;
534 VEC(gimple,heap) *worklist = VEC_alloc (gimple, heap, 64);
535 gimple_stmt_iterator gsi;
536 bool double_reduc;
537
538 if (vect_print_dump_info (REPORT_DETAILS))
539 fprintf (vect_dump, "=== vect_analyze_scalar_cycles ===");
540
541 /* First - identify all inductions. Reduction detection assumes that all the
542 inductions have been identified, therefore, this order must not be
543 changed. */
544 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
545 {
546 gimple phi = gsi_stmt (gsi);
547 tree access_fn = NULL;
548 tree def = PHI_RESULT (phi);
549 stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
550
551 if (vect_print_dump_info (REPORT_DETAILS))
552 {
553 fprintf (vect_dump, "Analyze phi: ");
554 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
555 }
556
557          /* Skip virtual phis. The data dependences that are associated with
558 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
559 if (!is_gimple_reg (SSA_NAME_VAR (def)))
560 continue;
561
562 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;
563
564 /* Analyze the evolution function. */
565 access_fn = analyze_scalar_evolution (loop, def);
566 if (access_fn)
567 {
568 STRIP_NOPS (access_fn);
569 if (vect_print_dump_info (REPORT_DETAILS))
570 {
571 fprintf (vect_dump, "Access function of PHI: ");
572 print_generic_expr (vect_dump, access_fn, TDF_SLIM);
573 }
574 STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
575 = evolution_part_in_loop_num (access_fn, loop->num);
576 }
577
578 if (!access_fn
579 || !vect_is_simple_iv_evolution (loop->num, access_fn, &dumy, &dumy))
580 {
581 VEC_safe_push (gimple, heap, worklist, phi);
582 continue;
583 }
584
585 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
586
587 if (vect_print_dump_info (REPORT_DETAILS))
588 fprintf (vect_dump, "Detected induction.");
589 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
590 }
591
592
593 /* Second - identify all reductions and nested cycles. */
594 while (VEC_length (gimple, worklist) > 0)
595 {
596 gimple phi = VEC_pop (gimple, worklist);
597 tree def = PHI_RESULT (phi);
598 stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
599 gimple reduc_stmt;
600 bool nested_cycle;
601
602 if (vect_print_dump_info (REPORT_DETAILS))
603 {
604 fprintf (vect_dump, "Analyze phi: ");
605 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
606 }
607
608 gcc_assert (is_gimple_reg (SSA_NAME_VAR (def)));
609 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
610
611 nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
612 reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, !nested_cycle,
613 &double_reduc);
614 if (reduc_stmt)
615 {
616 if (double_reduc)
617 {
618 if (vect_print_dump_info (REPORT_DETAILS))
619 fprintf (vect_dump, "Detected double reduction.");
620
621 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
622 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
623 vect_double_reduction_def;
624 }
625 else
626 {
627 if (nested_cycle)
628 {
629 if (vect_print_dump_info (REPORT_DETAILS))
630 fprintf (vect_dump, "Detected vectorizable nested cycle.");
631
632 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
633 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
634 vect_nested_cycle;
635 }
636 else
637 {
638 if (vect_print_dump_info (REPORT_DETAILS))
639 fprintf (vect_dump, "Detected reduction.");
640
641 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
642 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
643 vect_reduction_def;
644 /* Store the reduction cycles for possible vectorization in
645 loop-aware SLP. */
646 VEC_safe_push (gimple, heap,
647 LOOP_VINFO_REDUCTIONS (loop_vinfo),
648 reduc_stmt);
649 }
650 }
651 }
652 else
653 if (vect_print_dump_info (REPORT_DETAILS))
654 fprintf (vect_dump, "Unknown def-use cycle pattern.");
655 }
656
657 VEC_free (gimple, heap, worklist);
658 }
659
660
661 /* Function vect_analyze_scalar_cycles.
662
663 Examine the cross iteration def-use cycles of scalar variables, by
664 analyzing the loop-header PHIs of scalar variables. Classify each
665 cycle as one of the following: invariant, induction, reduction, unknown.
666    We do that for the loop represented by LOOP_VINFO, and also for its
667    inner-loop, if it exists.
668 Examples for scalar cycles:
669
670 Example1: reduction:
671
672 loop1:
673 for (i=0; i<N; i++)
674 sum += a[i];
675
676 Example2: induction:
677
678 loop2:
679 for (i=0; i<N; i++)
680 a[i] = i; */
681
682 static void
683 vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
684 {
685 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
686
687 vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
688
689 /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
690 Reductions in such inner-loop therefore have different properties than
691 the reductions in the nest that gets vectorized:
692 1. When vectorized, they are executed in the same order as in the original
693 scalar loop, so we can't change the order of computation when
694 vectorizing them.
695 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
696 current checks are too strict. */
697
698 if (loop->inner)
699 vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
700 }
701
702 /* Function vect_get_loop_niters.
703
704 Determine how many iterations the loop is executed.
705 If an expression that represents the number of iterations
706 can be constructed, place it in NUMBER_OF_ITERATIONS.
707 Return the loop exit condition. */
708
709 static gimple
710 vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
711 {
712 tree niters;
713
714 if (vect_print_dump_info (REPORT_DETAILS))
715 fprintf (vect_dump, "=== get_loop_niters ===");
716
717 niters = number_of_exit_cond_executions (loop);
718
719 if (niters != NULL_TREE
720 && niters != chrec_dont_know)
721 {
722 *number_of_iterations = niters;
723
724 if (vect_print_dump_info (REPORT_DETAILS))
725 {
726 fprintf (vect_dump, "==> get_loop_niters:" );
727 print_generic_expr (vect_dump, *number_of_iterations, TDF_SLIM);
728 }
729 }
730
731 return get_loop_exit_condition (loop);
732 }
733
734
735 /* Function bb_in_loop_p
736
737 Used as predicate for dfs order traversal of the loop bbs. */
738
739 static bool
740 bb_in_loop_p (const_basic_block bb, const void *data)
741 {
742 const struct loop *const loop = (const struct loop *)data;
743 if (flow_bb_inside_loop_p (loop, bb))
744 return true;
745 return false;
746 }
747
748
749 /* Function new_loop_vec_info.
750
751 Create and initialize a new loop_vec_info struct for LOOP, as well as
752 stmt_vec_info structs for all the stmts in LOOP. */
753
754 static loop_vec_info
755 new_loop_vec_info (struct loop *loop)
756 {
757 loop_vec_info res;
758 basic_block *bbs;
759 gimple_stmt_iterator si;
760 unsigned int i, nbbs;
761
762 res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
763 LOOP_VINFO_LOOP (res) = loop;
764
765 bbs = get_loop_body (loop);
766
767 /* Create/Update stmt_info for all stmts in the loop. */
768 for (i = 0; i < loop->num_nodes; i++)
769 {
770 basic_block bb = bbs[i];
771
772 /* BBs in a nested inner-loop will have been already processed (because
773 we will have called vect_analyze_loop_form for any nested inner-loop).
774 Therefore, for stmts in an inner-loop we just want to update the
775 STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
776 loop_info of the outer-loop we are currently considering to vectorize
777 (instead of the loop_info of the inner-loop).
778 For stmts in other BBs we need to create a stmt_info from scratch. */
779 if (bb->loop_father != loop)
780 {
781 /* Inner-loop bb. */
782 gcc_assert (loop->inner && bb->loop_father == loop->inner);
783 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
784 {
785 gimple phi = gsi_stmt (si);
786 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
787 loop_vec_info inner_loop_vinfo =
788 STMT_VINFO_LOOP_VINFO (stmt_info);
789 gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
790 STMT_VINFO_LOOP_VINFO (stmt_info) = res;
791 }
792 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
793 {
794 gimple stmt = gsi_stmt (si);
795 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
796 loop_vec_info inner_loop_vinfo =
797 STMT_VINFO_LOOP_VINFO (stmt_info);
798 gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
799 STMT_VINFO_LOOP_VINFO (stmt_info) = res;
800 }
801 }
802 else
803 {
804 /* bb in current nest. */
805 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
806 {
807 gimple phi = gsi_stmt (si);
808 gimple_set_uid (phi, 0);
809 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL));
810 }
811
812 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
813 {
814 gimple stmt = gsi_stmt (si);
815 gimple_set_uid (stmt, 0);
816 set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL));
817 }
818 }
819 }
820
821 /* CHECKME: We want to visit all BBs before their successors (except for
822 latch blocks, for which this assertion wouldn't hold). In the simple
823      case of the loop forms we allow, a dfs order of the BBs would be the same
824 as reversed postorder traversal, so we are safe. */
825
826 free (bbs);
827 bbs = XCNEWVEC (basic_block, loop->num_nodes);
828 nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
829 bbs, loop->num_nodes, loop);
830 gcc_assert (nbbs == loop->num_nodes);
831
832 LOOP_VINFO_BBS (res) = bbs;
833 LOOP_VINFO_NITERS (res) = NULL;
834 LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
835 LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
836 LOOP_VINFO_VECTORIZABLE_P (res) = 0;
837 LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
838 LOOP_VINFO_VECT_FACTOR (res) = 0;
839 LOOP_VINFO_LOOP_NEST (res) = VEC_alloc (loop_p, heap, 3);
840 LOOP_VINFO_DATAREFS (res) = VEC_alloc (data_reference_p, heap, 10);
841 LOOP_VINFO_DDRS (res) = VEC_alloc (ddr_p, heap, 10 * 10);
842 LOOP_VINFO_UNALIGNED_DR (res) = NULL;
843 LOOP_VINFO_MAY_MISALIGN_STMTS (res) =
844 VEC_alloc (gimple, heap,
845 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
846 LOOP_VINFO_MAY_ALIAS_DDRS (res) =
847 VEC_alloc (ddr_p, heap,
848 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
849 LOOP_VINFO_GROUPED_STORES (res) = VEC_alloc (gimple, heap, 10);
850 LOOP_VINFO_REDUCTIONS (res) = VEC_alloc (gimple, heap, 10);
851 LOOP_VINFO_REDUCTION_CHAINS (res) = VEC_alloc (gimple, heap, 10);
852 LOOP_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 10);
853 LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;
854 LOOP_VINFO_PEELING_HTAB (res) = NULL;
855 LOOP_VINFO_TARGET_COST_DATA (res) = init_cost (loop);
856 LOOP_VINFO_PEELING_FOR_GAPS (res) = false;
857
858 return res;
859 }
860
861
862 /* Function destroy_loop_vec_info.
863
864 Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the
865 stmts in the loop. */
866
867 void
868 destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
869 {
870 struct loop *loop;
871 basic_block *bbs;
872 int nbbs;
873 gimple_stmt_iterator si;
874 int j;
875 VEC (slp_instance, heap) *slp_instances;
876 slp_instance instance;
877
878 if (!loop_vinfo)
879 return;
880
881 loop = LOOP_VINFO_LOOP (loop_vinfo);
882
883 bbs = LOOP_VINFO_BBS (loop_vinfo);
884 nbbs = loop->num_nodes;
885
886 if (!clean_stmts)
887 {
888 free (LOOP_VINFO_BBS (loop_vinfo));
889 free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
890 free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
891 VEC_free (loop_p, heap, LOOP_VINFO_LOOP_NEST (loop_vinfo));
892 VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
893 VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
894
895 free (loop_vinfo);
896 loop->aux = NULL;
897 return;
898 }
899
900 for (j = 0; j < nbbs; j++)
901 {
902 basic_block bb = bbs[j];
903 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
904 free_stmt_vec_info (gsi_stmt (si));
905
906 for (si = gsi_start_bb (bb); !gsi_end_p (si); )
907 {
908 gimple stmt = gsi_stmt (si);
909 /* Free stmt_vec_info. */
910 free_stmt_vec_info (stmt);
911 gsi_next (&si);
912 }
913 }
914
915 free (LOOP_VINFO_BBS (loop_vinfo));
916 free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
917 free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
918 VEC_free (loop_p, heap, LOOP_VINFO_LOOP_NEST (loop_vinfo));
919 VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
920 VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
921 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
922 FOR_EACH_VEC_ELT (slp_instance, slp_instances, j, instance)
923 vect_free_slp_instance (instance);
924
925 VEC_free (slp_instance, heap, LOOP_VINFO_SLP_INSTANCES (loop_vinfo));
926 VEC_free (gimple, heap, LOOP_VINFO_GROUPED_STORES (loop_vinfo));
927 VEC_free (gimple, heap, LOOP_VINFO_REDUCTIONS (loop_vinfo));
928 VEC_free (gimple, heap, LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo));
929
930 if (LOOP_VINFO_PEELING_HTAB (loop_vinfo))
931 htab_delete (LOOP_VINFO_PEELING_HTAB (loop_vinfo));
932
933 destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
934
935 free (loop_vinfo);
936 loop->aux = NULL;
937 }
938
939
940 /* Function vect_analyze_loop_1.
941
942 Apply a set of analyses on LOOP, and create a loop_vec_info struct
943 for it. The different analyses will record information in the
944 loop_vec_info struct. This is a subset of the analyses applied in
945 vect_analyze_loop, to be applied on an inner-loop nested in the loop
946 that is now considered for (outer-loop) vectorization. */
947
948 static loop_vec_info
949 vect_analyze_loop_1 (struct loop *loop)
950 {
951 loop_vec_info loop_vinfo;
952
953 if (vect_print_dump_info (REPORT_DETAILS))
954 fprintf (vect_dump, "===== analyze_loop_nest_1 =====");
955
956   /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.). */
957
958 loop_vinfo = vect_analyze_loop_form (loop);
959 if (!loop_vinfo)
960 {
961 if (vect_print_dump_info (REPORT_DETAILS))
962 fprintf (vect_dump, "bad inner-loop form.");
963 return NULL;
964 }
965
966 return loop_vinfo;
967 }
968
969
970 /* Function vect_analyze_loop_form.
971
972 Verify that certain CFG restrictions hold, including:
973 - the loop has a pre-header
974 - the loop has a single entry and exit
975 - the loop exit condition is simple enough, and the number of iterations
976 can be analyzed (a countable loop). */
977
978 loop_vec_info
979 vect_analyze_loop_form (struct loop *loop)
980 {
981 loop_vec_info loop_vinfo;
982 gimple loop_cond;
983 tree number_of_iterations = NULL;
984 loop_vec_info inner_loop_vinfo = NULL;
985
986 if (vect_print_dump_info (REPORT_DETAILS))
987 fprintf (vect_dump, "=== vect_analyze_loop_form ===");
988
989 /* Different restrictions apply when we are considering an inner-most loop,
990 vs. an outer (nested) loop.
991 (FORNOW. May want to relax some of these restrictions in the future). */
992
993 if (!loop->inner)
994 {
995 /* Inner-most loop. We currently require that the number of BBs is
996 exactly 2 (the header and latch). Vectorizable inner-most loops
997 look like this:
998
999 (pre-header)
1000 |
1001 header <--------+
1002 | | |
1003 | +--> latch --+
1004 |
1005 (exit-bb) */
1006
1007 if (loop->num_nodes != 2)
1008 {
1009 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1010 fprintf (vect_dump, "not vectorized: control flow in loop.");
1011 return NULL;
1012 }
1013
1014 if (empty_block_p (loop->header))
1015 {
1016 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1017 fprintf (vect_dump, "not vectorized: empty loop.");
1018 return NULL;
1019 }
1020 }
1021 else
1022 {
1023 struct loop *innerloop = loop->inner;
1024 edge entryedge;
1025
1026 /* Nested loop. We currently require that the loop is doubly-nested,
1027 contains a single inner loop, and the number of BBs is exactly 5.
1028 Vectorizable outer-loops look like this:
1029
1030 (pre-header)
1031 |
1032 header <---+
1033 | |
1034 inner-loop |
1035 | |
1036 tail ------+
1037 |
1038 (exit-bb)
1039
1040 The inner-loop has the properties expected of inner-most loops
1041 as described above. */
1042
1043 if ((loop->inner)->inner || (loop->inner)->next)
1044 {
1045 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1046 fprintf (vect_dump, "not vectorized: multiple nested loops.");
1047 return NULL;
1048 }
1049
1050 /* Analyze the inner-loop. */
1051 inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
1052 if (!inner_loop_vinfo)
1053 {
1054 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1055 fprintf (vect_dump, "not vectorized: Bad inner loop.");
1056 return NULL;
1057 }
1058
1059 if (!expr_invariant_in_loop_p (loop,
1060 LOOP_VINFO_NITERS (inner_loop_vinfo)))
1061 {
1062 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1063 fprintf (vect_dump,
1064 "not vectorized: inner-loop count not invariant.");
1065 destroy_loop_vec_info (inner_loop_vinfo, true);
1066 return NULL;
1067 }
1068
1069 if (loop->num_nodes != 5)
1070 {
1071 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1072 fprintf (vect_dump, "not vectorized: control flow in loop.");
1073 destroy_loop_vec_info (inner_loop_vinfo, true);
1074 return NULL;
1075 }
1076
1077 gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
1078 entryedge = EDGE_PRED (innerloop->header, 0);
1079 if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
1080 entryedge = EDGE_PRED (innerloop->header, 1);
1081
1082 if (entryedge->src != loop->header
1083 || !single_exit (innerloop)
1084 || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
1085 {
1086 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1087 fprintf (vect_dump, "not vectorized: unsupported outerloop form.");
1088 destroy_loop_vec_info (inner_loop_vinfo, true);
1089 return NULL;
1090 }
1091
1092 if (vect_print_dump_info (REPORT_DETAILS))
1093 fprintf (vect_dump, "Considering outer-loop vectorization.");
1094 }
1095
1096 if (!single_exit (loop)
1097 || EDGE_COUNT (loop->header->preds) != 2)
1098 {
1099 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1100 {
1101 if (!single_exit (loop))
1102 fprintf (vect_dump, "not vectorized: multiple exits.");
1103 else if (EDGE_COUNT (loop->header->preds) != 2)
1104 fprintf (vect_dump, "not vectorized: too many incoming edges.");
1105 }
1106 if (inner_loop_vinfo)
1107 destroy_loop_vec_info (inner_loop_vinfo, true);
1108 return NULL;
1109 }
1110
1111   /* We assume that the loop exit condition is at the end of the loop, i.e.,
1112 that the loop is represented as a do-while (with a proper if-guard
1113 before the loop if needed), where the loop header contains all the
1114 executable statements, and the latch is empty. */
1115 if (!empty_block_p (loop->latch)
1116 || !gimple_seq_empty_p (phi_nodes (loop->latch)))
1117 {
1118 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1119 fprintf (vect_dump, "not vectorized: unexpected loop form.");
1120 if (inner_loop_vinfo)
1121 destroy_loop_vec_info (inner_loop_vinfo, true);
1122 return NULL;
1123 }
1124
1125 /* Make sure there exists a single-predecessor exit bb: */
1126 if (!single_pred_p (single_exit (loop)->dest))
1127 {
1128 edge e = single_exit (loop);
1129 if (!(e->flags & EDGE_ABNORMAL))
1130 {
1131 split_loop_exit_edge (e);
1132 if (vect_print_dump_info (REPORT_DETAILS))
1133 fprintf (vect_dump, "split exit edge.");
1134 }
1135 else
1136 {
1137 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1138 fprintf (vect_dump, "not vectorized: abnormal loop exit edge.");
1139 if (inner_loop_vinfo)
1140 destroy_loop_vec_info (inner_loop_vinfo, true);
1141 return NULL;
1142 }
1143 }
1144
1145 loop_cond = vect_get_loop_niters (loop, &number_of_iterations);
1146 if (!loop_cond)
1147 {
1148 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1149 fprintf (vect_dump, "not vectorized: complicated exit condition.");
1150 if (inner_loop_vinfo)
1151 destroy_loop_vec_info (inner_loop_vinfo, true);
1152 return NULL;
1153 }
1154
1155 if (!number_of_iterations)
1156 {
1157 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1158 fprintf (vect_dump,
1159 "not vectorized: number of iterations cannot be computed.");
1160 if (inner_loop_vinfo)
1161 destroy_loop_vec_info (inner_loop_vinfo, true);
1162 return NULL;
1163 }
1164
1165 if (chrec_contains_undetermined (number_of_iterations))
1166 {
1167 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1168 fprintf (vect_dump, "Infinite number of iterations.");
1169 if (inner_loop_vinfo)
1170 destroy_loop_vec_info (inner_loop_vinfo, true);
1171 return NULL;
1172 }
1173
1174 if (!NITERS_KNOWN_P (number_of_iterations))
1175 {
1176 if (vect_print_dump_info (REPORT_DETAILS))
1177 {
1178 fprintf (vect_dump, "Symbolic number of iterations is ");
1179 print_generic_expr (vect_dump, number_of_iterations, TDF_DETAILS);
1180 }
1181 }
1182 else if (TREE_INT_CST_LOW (number_of_iterations) == 0)
1183 {
1184 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1185 fprintf (vect_dump, "not vectorized: number of iterations = 0.");
1186 if (inner_loop_vinfo)
1187 destroy_loop_vec_info (inner_loop_vinfo, false);
1188 return NULL;
1189 }
1190
1191 loop_vinfo = new_loop_vec_info (loop);
1192 LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
1193 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
1194
1195 STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;
1196
1197   /* CHECKME: May want to keep it around in the future. */
1198 if (inner_loop_vinfo)
1199 destroy_loop_vec_info (inner_loop_vinfo, false);
1200
1201 gcc_assert (!loop->aux);
1202 loop->aux = loop_vinfo;
1203 return loop_vinfo;
1204 }
1205
1206
1207 /* Function vect_analyze_loop_operations.
1208
1209 Scan the loop stmts and make sure they are all vectorizable. */
1210
1211 static bool
1212 vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
1213 {
1214 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1215 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1216 int nbbs = loop->num_nodes;
1217 gimple_stmt_iterator si;
1218 unsigned int vectorization_factor = 0;
1219 int i;
1220 gimple phi;
1221 stmt_vec_info stmt_info;
1222 bool need_to_vectorize = false;
1223 int min_profitable_iters;
1224 int min_scalar_loop_bound;
1225 unsigned int th;
1226 bool only_slp_in_loop = true, ok;
1227 HOST_WIDE_INT max_niter;
1228
1229 if (vect_print_dump_info (REPORT_DETAILS))
1230 fprintf (vect_dump, "=== vect_analyze_loop_operations ===");
1231
1232 gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
1233 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1234 if (slp)
1235 {
1236 /* If all the stmts in the loop can be SLPed, we perform only SLP, and
1237 vectorization factor of the loop is the unrolling factor required by
1238          the SLP instances. If that unrolling factor is 1, we say that we
1239          perform pure SLP on the loop - cross-iteration parallelism is not
1240 exploited. */
1241 for (i = 0; i < nbbs; i++)
1242 {
1243 basic_block bb = bbs[i];
1244 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1245 {
1246 gimple stmt = gsi_stmt (si);
1247 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1248 gcc_assert (stmt_info);
1249 if ((STMT_VINFO_RELEVANT_P (stmt_info)
1250 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1251 && !PURE_SLP_STMT (stmt_info))
1252 /* STMT needs both SLP and loop-based vectorization. */
1253 only_slp_in_loop = false;
1254 }
1255 }
1256
1257 if (only_slp_in_loop)
1258 vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
1259 else
1260 vectorization_factor = least_common_multiple (vectorization_factor,
1261 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
1262
1263 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
1264 if (vect_print_dump_info (REPORT_DETAILS))
1265 fprintf (vect_dump, "Updating vectorization factor to %d ",
1266 vectorization_factor);
1267 }
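
  /* Editorial worked example (numbers hypothetical): if the VF computed
     from the statement types is 4 and the SLP instances require an
     unrolling factor of 2, a pure-SLP loop gets VF = 2, while a hybrid
     loop (some stmts also need loop-based vectorization) gets
     VF = least_common_multiple (4, 2) = 4.  */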
1268
1269 for (i = 0; i < nbbs; i++)
1270 {
1271 basic_block bb = bbs[i];
1272
1273 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
1274 {
1275 phi = gsi_stmt (si);
1276 ok = true;
1277
1278 stmt_info = vinfo_for_stmt (phi);
1279 if (vect_print_dump_info (REPORT_DETAILS))
1280 {
1281 fprintf (vect_dump, "examining phi: ");
1282 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
1283 }
1284
1285 /* Inner-loop loop-closed exit phi in outer-loop vectorization
1286 (i.e., a phi in the tail of the outer-loop). */
1287 if (! is_loop_header_bb_p (bb))
1288 {
1289 /* FORNOW: we currently don't support the case that these phis
1290 are not used in the outerloop (unless it is double reduction,
1291              i.e., this phi is vect_reduction_def), because this case
1292              requires us to actually do something here. */
1293 if ((!STMT_VINFO_RELEVANT_P (stmt_info)
1294 || STMT_VINFO_LIVE_P (stmt_info))
1295 && STMT_VINFO_DEF_TYPE (stmt_info)
1296 != vect_double_reduction_def)
1297 {
1298 if (vect_print_dump_info (REPORT_DETAILS))
1299 fprintf (vect_dump,
1300 "Unsupported loop-closed phi in outer-loop.");
1301 return false;
1302 }
1303
1304 /* If PHI is used in the outer loop, we check that its operand
1305 is defined in the inner loop. */
1306 if (STMT_VINFO_RELEVANT_P (stmt_info))
1307 {
1308 tree phi_op;
1309 gimple op_def_stmt;
1310
1311 if (gimple_phi_num_args (phi) != 1)
1312 return false;
1313
1314 phi_op = PHI_ARG_DEF (phi, 0);
1315 if (TREE_CODE (phi_op) != SSA_NAME)
1316 return false;
1317
1318 op_def_stmt = SSA_NAME_DEF_STMT (phi_op);
1319 if (!op_def_stmt
1320 || !flow_bb_inside_loop_p (loop, gimple_bb (op_def_stmt))
1321 || !vinfo_for_stmt (op_def_stmt))
1322 return false;
1323
1324 if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
1325 != vect_used_in_outer
1326 && STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
1327 != vect_used_in_outer_by_reduction)
1328 return false;
1329 }
1330
1331 continue;
1332 }
1333
1334 gcc_assert (stmt_info);
1335
1336 if (STMT_VINFO_LIVE_P (stmt_info))
1337 {
1338 /* FORNOW: not yet supported. */
1339 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1340 fprintf (vect_dump, "not vectorized: value used after loop.");
1341 return false;
1342 }
1343
1344 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
1345 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
1346 {
1347 /* A scalar-dependence cycle that we don't support. */
1348 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1349 fprintf (vect_dump, "not vectorized: scalar dependence cycle.");
1350 return false;
1351 }
1352
1353 if (STMT_VINFO_RELEVANT_P (stmt_info))
1354 {
1355 need_to_vectorize = true;
1356 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
1357 ok = vectorizable_induction (phi, NULL, NULL);
1358 }
1359
1360 if (!ok)
1361 {
1362 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1363 {
1364 fprintf (vect_dump,
1365 "not vectorized: relevant phi not supported: ");
1366 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
1367 }
1368 return false;
1369 }
1370 }
1371
1372 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1373 {
1374 gimple stmt = gsi_stmt (si);
1375 if (!vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
1376 return false;
1377 }
1378 } /* bbs */
1379
1380 /* All operations in the loop are either irrelevant (deal with loop
1381 control, or dead), or only used outside the loop and can be moved
1382 out of the loop (e.g. invariants, inductions). The loop can be
1383 optimized away by scalar optimizations. We're better off not
1384 touching this loop. */
1385 if (!need_to_vectorize)
1386 {
1387 if (vect_print_dump_info (REPORT_DETAILS))
1388 fprintf (vect_dump,
1389 "All the computation can be taken out of the loop.");
1390 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1391 fprintf (vect_dump,
1392 "not vectorized: redundant loop. no profit to vectorize.");
1393 return false;
1394 }
1395
1396 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1397 && vect_print_dump_info (REPORT_DETAILS))
1398 fprintf (vect_dump,
1399 "vectorization_factor = %d, niters = " HOST_WIDE_INT_PRINT_DEC,
1400 vectorization_factor, LOOP_VINFO_INT_NITERS (loop_vinfo));
1401
1402 if ((LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1403 && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
1404 || ((max_niter = max_stmt_executions_int (loop)) != -1
1405 && (unsigned HOST_WIDE_INT) max_niter < vectorization_factor))
1406 {
1407 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1408 fprintf (vect_dump, "not vectorized: iteration count too small.");
1409 if (vect_print_dump_info (REPORT_DETAILS))
1410 fprintf (vect_dump,"not vectorized: iteration count smaller than "
1411 "vectorization factor.");
1412 return false;
1413 }
1414
1415 /* Analyze cost. Decide if worth while to vectorize. */
1416
1417 /* Once VF is set, SLP costs should be updated since the number of created
1418 vector stmts depends on VF. */
1419 vect_update_slp_costs_according_to_vf (loop_vinfo);
1420
1421 min_profitable_iters = vect_estimate_min_profitable_iters (loop_vinfo);
1422 LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters;
1423
1424 if (min_profitable_iters < 0)
1425 {
1426 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1427 fprintf (vect_dump, "not vectorized: vectorization not profitable.");
1428 if (vect_print_dump_info (REPORT_DETAILS))
1429 fprintf (vect_dump, "not vectorized: vector version will never be "
1430 "profitable.");
1431 return false;
1432 }
1433
1434 min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
1435 * vectorization_factor) - 1);
1436
1437   /* Use the cost model only if it is more conservative than the user-specified
1438 threshold. */
1439
1440 th = (unsigned) min_scalar_loop_bound;
1441 if (min_profitable_iters
1442 && (!min_scalar_loop_bound
1443 || min_profitable_iters > min_scalar_loop_bound))
1444 th = (unsigned) min_profitable_iters;
1445
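
  /* Editorial worked example (values hypothetical): with
     --param min-vect-loop-bound=2 and VF = 4, MIN_SCALAR_LOOP_BOUND is
     2 * 4 - 1 = 7.  If the cost model computed MIN_PROFITABLE_ITERS = 10,
     TH becomes 10, and a loop whose known iteration count is at most 10
     is rejected just below as not profitable.  */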
1446 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1447 && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
1448 {
1449 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1450 fprintf (vect_dump, "not vectorized: vectorization not "
1451 "profitable.");
1452 if (vect_print_dump_info (REPORT_DETAILS))
1453 fprintf (vect_dump, "not vectorized: iteration count smaller than "
1454 "user specified loop bound parameter or minimum "
1455 "profitable iterations (whichever is more conservative).");
1456 return false;
1457 }
1458
1459 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1460 || LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0
1461 || LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
1462 {
1463 if (vect_print_dump_info (REPORT_DETAILS))
1464 fprintf (vect_dump, "epilog loop required.");
1465 if (!vect_can_advance_ivs_p (loop_vinfo))
1466 {
1467 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1468 fprintf (vect_dump,
1469 "not vectorized: can't create epilog loop 1.");
1470 return false;
1471 }
1472 if (!slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
1473 {
1474 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1475 fprintf (vect_dump,
1476 "not vectorized: can't create epilog loop 2.");
1477 return false;
1478 }
1479 }
1480
1481 return true;
1482 }
1483
1484
1485 /* Function vect_analyze_loop_2.
1486
1487 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1488 for it. The different analyses will record information in the
1489 loop_vec_info struct. */
1490 static bool
1491 vect_analyze_loop_2 (loop_vec_info loop_vinfo)
1492 {
1493 bool ok, slp = false;
1494 int max_vf = MAX_VECTORIZATION_FACTOR;
1495 int min_vf = 2;
1496
1497 /* Find all data references in the loop (which correspond to vdefs/vuses)
1498 and analyze their evolution in the loop. Also adjust the minimal
1499 vectorization factor according to the loads and stores.
1500
1501      FORNOW: Handle only simple array references whose
1502      alignment can be forced, and aligned pointer-references. */
1503
1504 ok = vect_analyze_data_refs (loop_vinfo, NULL, &min_vf);
1505 if (!ok)
1506 {
1507 if (vect_print_dump_info (REPORT_DETAILS))
1508 fprintf (vect_dump, "bad data references.");
1509 return false;
1510 }
1511
1512 /* Classify all cross-iteration scalar data-flow cycles.
1513 Cross-iteration cycles caused by virtual phis are analyzed separately. */
1514
1515 vect_analyze_scalar_cycles (loop_vinfo);
1516
1517 vect_pattern_recog (loop_vinfo, NULL);
1518
1519 /* Data-flow analysis to detect stmts that do not need to be vectorized. */
1520
1521 ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
1522 if (!ok)
1523 {
1524 if (vect_print_dump_info (REPORT_DETAILS))
1525 fprintf (vect_dump, "unexpected pattern.");
1526 return false;
1527 }
1528
1529 /* Analyze data dependences between the data-refs in the loop
1530 and adjust the maximum vectorization factor according to
1531 the dependences.
1532 FORNOW: fail at the first data dependence that we encounter. */
1533
1534 ok = vect_analyze_data_ref_dependences (loop_vinfo, NULL, &max_vf);
1535 if (!ok
1536 || max_vf < min_vf)
1537 {
1538 if (vect_print_dump_info (REPORT_DETAILS))
1539 fprintf (vect_dump, "bad data dependence.");
1540 return false;
1541 }
1542
1543 ok = vect_determine_vectorization_factor (loop_vinfo);
1544 if (!ok)
1545 {
1546 if (vect_print_dump_info (REPORT_DETAILS))
1547 fprintf (vect_dump, "can't determine vectorization factor.");
1548 return false;
1549 }
1550 if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
1551 {
1552 if (vect_print_dump_info (REPORT_DETAILS))
1553 fprintf (vect_dump, "bad data dependence.");
1554 return false;
1555 }
1556
1557 /* Analyze the alignment of the data-refs in the loop.
1558 Fail if a data reference is found that cannot be vectorized. */
1559
1560 ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL);
1561 if (!ok)
1562 {
1563 if (vect_print_dump_info (REPORT_DETAILS))
1564 fprintf (vect_dump, "bad data alignment.");
1565 return false;
1566 }
1567
1568 /* Analyze the access patterns of the data-refs in the loop (consecutive,
1569 complex, etc.). FORNOW: Only handle consecutive access pattern. */
1570
1571 ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
1572 if (!ok)
1573 {
1574 if (vect_print_dump_info (REPORT_DETAILS))
1575 fprintf (vect_dump, "bad data access.");
1576 return false;
1577 }
1578
1579 /* Prune the list of ddrs to be tested at run-time by versioning for alias.
1580 It is important to call pruning after vect_analyze_data_ref_accesses,
1581 since we use grouping information gathered by interleaving analysis. */
1582 ok = vect_prune_runtime_alias_test_list (loop_vinfo);
1583 if (!ok)
1584 {
1585 if (vect_print_dump_info (REPORT_DETAILS))
1586 fprintf (vect_dump, "too long list of versioning for alias "
1587 "run-time tests.");
1588 return false;
1589 }
1590
1591 /* This pass will decide on using loop versioning and/or loop peeling in
1592 order to enhance the alignment of data references in the loop. */
1593
1594 ok = vect_enhance_data_refs_alignment (loop_vinfo);
1595 if (!ok)
1596 {
1597 if (vect_print_dump_info (REPORT_DETAILS))
1598 fprintf (vect_dump, "bad data alignment.");
1599 return false;
1600 }
1601
1602 /* Check the SLP opportunities in the loop, analyze and build SLP trees. */
1603 ok = vect_analyze_slp (loop_vinfo, NULL);
1604 if (ok)
1605 {
1606 /* Decide which possible SLP instances to SLP. */
1607 slp = vect_make_slp_decision (loop_vinfo);
1608
1609 /* Find stmts that need to be both vectorized and SLPed. */
1610 vect_detect_hybrid_slp (loop_vinfo);
1611 }
1612 else
1613 return false;
1614
1615 /* Scan all the operations in the loop and make sure they are
1616 vectorizable. */
1617
1618 ok = vect_analyze_loop_operations (loop_vinfo, slp);
1619 if (!ok)
1620 {
1621 if (vect_print_dump_info (REPORT_DETAILS))
1622 fprintf (vect_dump, "bad operation or unsupported loop bound.");
1623 return false;
1624 }
1625
1626 return true;
1627 }
1628
1629 /* Function vect_analyze_loop.
1630
1631 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1632 for it. The different analyses will record information in the
1633 loop_vec_info struct. */
1634 loop_vec_info
1635 vect_analyze_loop (struct loop *loop)
1636 {
1637 loop_vec_info loop_vinfo;
1638 unsigned int vector_sizes;
1639
1640 /* Autodetect first vector size we try. */
1641 current_vector_size = 0;
1642 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
1643
1644 if (vect_print_dump_info (REPORT_DETAILS))
1645 fprintf (vect_dump, "===== analyze_loop_nest =====");
1646
1647 if (loop_outer (loop)
1648 && loop_vec_info_for_loop (loop_outer (loop))
1649 && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
1650 {
1651 if (vect_print_dump_info (REPORT_DETAILS))
1652 fprintf (vect_dump, "outer-loop already vectorized.");
1653 return NULL;
1654 }
1655
1656 while (1)
1657 {
1658 /* Check the CFG characteristics of the loop (nesting, entry/exit). */
1659 loop_vinfo = vect_analyze_loop_form (loop);
1660 if (!loop_vinfo)
1661 {
1662 if (vect_print_dump_info (REPORT_DETAILS))
1663 fprintf (vect_dump, "bad loop form.");
1664 return NULL;
1665 }
1666
1667 if (vect_analyze_loop_2 (loop_vinfo))
1668 {
1669 LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
1670
1671 return loop_vinfo;
1672 }
1673
1674 destroy_loop_vec_info (loop_vinfo, true);
1675
1676 vector_sizes &= ~current_vector_size;
1677 if (vector_sizes == 0
1678 || current_vector_size == 0)
1679 return NULL;
1680
1681 /* Try the next biggest vector size. */
1682 current_vector_size = 1 << floor_log2 (vector_sizes);
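
      /* Editorial worked example (sizes hypothetical): if the target's
         autovectorize_vector_sizes hook returned 32 | 16 and the first,
         autodetected attempt used 32-byte vectors, the mask update above
         drops 32 and floor_log2 selects 16, so the analysis is re-run
         with current_vector_size == 16.  */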
1683 if (vect_print_dump_info (REPORT_DETAILS))
1684 fprintf (vect_dump, "***** Re-trying analysis with "
1685 "vector size %d\n", current_vector_size);
1686 }
1687 }
1688
1689
1690 /* Function reduction_code_for_scalar_code
1691
1692 Input:
1693 CODE - tree_code of a reduction operations.
1694
1695 Output:
1696 REDUC_CODE - the corresponding tree-code to be used to reduce the
1697 vector of partial results into a single scalar result (which
1698 will also reside in a vector) or ERROR_MARK if the operation is
1699    a supported reduction operation, but does not have such a tree-code.
1700
1701 Return FALSE if CODE currently cannot be vectorized as reduction. */
1702
1703 static bool
1704 reduction_code_for_scalar_code (enum tree_code code,
1705 enum tree_code *reduc_code)
1706 {
1707 switch (code)
1708 {
1709 case MAX_EXPR:
1710 *reduc_code = REDUC_MAX_EXPR;
1711 return true;
1712
1713 case MIN_EXPR:
1714 *reduc_code = REDUC_MIN_EXPR;
1715 return true;
1716
1717 case PLUS_EXPR:
1718 *reduc_code = REDUC_PLUS_EXPR;
1719 return true;
1720
1721 case MULT_EXPR:
1722 case MINUS_EXPR:
1723 case BIT_IOR_EXPR:
1724 case BIT_XOR_EXPR:
1725 case BIT_AND_EXPR:
1726 *reduc_code = ERROR_MARK;
1727 return true;
1728
1729 default:
1730 return false;
1731 }
1732 }
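
/* Editorial example: a scalar accumulation "sum += a[i]" uses PLUS_EXPR,
   so the loop body is vectorized into element-wise vector additions that
   build a vector of partial sums, and REDUC_PLUS_EXPR is emitted after
   the loop to collapse the partial sums into the final scalar.  For the
   codes mapped to ERROR_MARK above, the reduction is still vectorizable,
   but the epilogue must combine the partial results by other means
   (e.g. by extracting and combining the vector elements).  */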
1733
1734
1735 /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
1736 STMT is printed with a message MSG. */
1737
1738 static void
1739 report_vect_op (gimple stmt, const char *msg)
1740 {
1741 fprintf (vect_dump, "%s", msg);
1742 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
1743 }
1744
1745
1746 /* Detect SLP reduction of the form:
1747
1748 #a1 = phi <a5, a0>
1749 a2 = operation (a1)
1750 a3 = operation (a2)
1751 a4 = operation (a3)
1752 a5 = operation (a4)
1753
1754 #a = phi <a5>
1755
1756 PHI is the reduction phi node (#a1 = phi <a5, a0> above)
1757 FIRST_STMT is the first reduction stmt in the chain
1758 (a2 = operation (a1)).
1759
1760 Return TRUE if a reduction chain was detected. */
1761
1762 static bool
1763 vect_is_slp_reduction (loop_vec_info loop_info, gimple phi, gimple first_stmt)
1764 {
1765 struct loop *loop = (gimple_bb (phi))->loop_father;
1766 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
1767 enum tree_code code;
1768 gimple current_stmt = NULL, loop_use_stmt = NULL, first, next_stmt;
1769 stmt_vec_info use_stmt_info, current_stmt_info;
1770 tree lhs;
1771 imm_use_iterator imm_iter;
1772 use_operand_p use_p;
1773 int nloop_uses, size = 0, n_out_of_loop_uses;
1774 bool found = false;
1775
1776 if (loop != vect_loop)
1777 return false;
1778
1779 lhs = PHI_RESULT (phi);
1780 code = gimple_assign_rhs_code (first_stmt);
1781 while (1)
1782 {
1783 nloop_uses = 0;
1784 n_out_of_loop_uses = 0;
1785 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
1786 {
1787 gimple use_stmt = USE_STMT (use_p);
1788 if (is_gimple_debug (use_stmt))
1789 continue;
1790
1791 use_stmt = USE_STMT (use_p);
1792
1793 /* Check if we got back to the reduction phi. */
1794 if (use_stmt == phi)
1795 {
1796 loop_use_stmt = use_stmt;
1797 found = true;
1798 break;
1799 }
1800
1801 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
1802 {
1803 if (vinfo_for_stmt (use_stmt)
1804 && !STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
1805 {
1806 loop_use_stmt = use_stmt;
1807 nloop_uses++;
1808 }
1809 }
1810 else
1811 n_out_of_loop_uses++;
1812
1813 /* There can be either a single use in the loop or two uses in
1814 phi nodes. */
1815 if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
1816 return false;
1817 }
1818
1819 if (found)
1820 break;
1821
1822 /* We reached a statement with no loop uses. */
1823 if (nloop_uses == 0)
1824 return false;
1825
1826 /* This is a loop exit phi, and we haven't reached the reduction phi. */
1827 if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
1828 return false;
1829
1830 if (!is_gimple_assign (loop_use_stmt)
1831 || code != gimple_assign_rhs_code (loop_use_stmt)
1832 || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
1833 return false;
1834
1835 /* Insert USE_STMT into reduction chain. */
1836 use_stmt_info = vinfo_for_stmt (loop_use_stmt);
1837 if (current_stmt)
1838 {
1839 current_stmt_info = vinfo_for_stmt (current_stmt);
1840 GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
1841 GROUP_FIRST_ELEMENT (use_stmt_info)
1842 = GROUP_FIRST_ELEMENT (current_stmt_info);
1843 }
1844 else
1845 GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;
1846
1847 lhs = gimple_assign_lhs (loop_use_stmt);
1848 current_stmt = loop_use_stmt;
1849 size++;
1850 }
1851
1852 if (!found || loop_use_stmt != phi || size < 2)
1853 return false;
1854
1855 /* Swap the operands, if needed, to make the reduction operand be the second
1856 operand. */
1857 lhs = PHI_RESULT (phi);
1858 next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
1859 while (next_stmt)
1860 {
1861 if (gimple_assign_rhs2 (next_stmt) == lhs)
1862 {
1863 tree op = gimple_assign_rhs1 (next_stmt);
1864 gimple def_stmt = NULL;
1865
1866 if (TREE_CODE (op) == SSA_NAME)
1867 def_stmt = SSA_NAME_DEF_STMT (op);
1868
1869 /* Check that the other def is either defined in the loop
1870 ("vect_internal_def"), or it's an induction (defined by a
1871 loop-header phi-node). */
1872 if (def_stmt
1873 && gimple_bb (def_stmt)
1874 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
1875 && (is_gimple_assign (def_stmt)
1876 || is_gimple_call (def_stmt)
1877 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
1878 == vect_induction_def
1879 || (gimple_code (def_stmt) == GIMPLE_PHI
1880 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
1881 == vect_internal_def
1882 && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
1883 {
1884 lhs = gimple_assign_lhs (next_stmt);
1885 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
1886 continue;
1887 }
1888
1889 return false;
1890 }
1891 else
1892 {
1893 tree op = gimple_assign_rhs2 (next_stmt);
1894 gimple def_stmt = NULL;
1895
1896 if (TREE_CODE (op) == SSA_NAME)
1897 def_stmt = SSA_NAME_DEF_STMT (op);
1898
1899 /* Check that the other def is either defined in the loop
1900 ("vect_internal_def"), or it's an induction (defined by a
1901 loop-header phi-node). */
1902 if (def_stmt
1903 && gimple_bb (def_stmt)
1904 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
1905 && (is_gimple_assign (def_stmt)
1906 || is_gimple_call (def_stmt)
1907 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
1908 == vect_induction_def
1909 || (gimple_code (def_stmt) == GIMPLE_PHI
1910 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
1911 == vect_internal_def
1912 && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
1913 {
1914 if (vect_print_dump_info (REPORT_DETAILS))
1915 {
1916 fprintf (vect_dump, "swapping oprnds: ");
1917 print_gimple_stmt (vect_dump, next_stmt, 0, TDF_SLIM);
1918 }
1919
1920 swap_tree_operands (next_stmt,
1921 gimple_assign_rhs1_ptr (next_stmt),
1922 gimple_assign_rhs2_ptr (next_stmt));
1923 update_stmt (next_stmt);
1924 }
1925 else
1926 return false;
1927 }
1928
1929 lhs = gimple_assign_lhs (next_stmt);
1930 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
1931 }
1932
1933 /* Save the chain for further analysis in SLP detection. */
1934 first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
1935 VEC_safe_push (gimple, heap, LOOP_VINFO_REDUCTION_CHAINS (loop_info), first);
1936 GROUP_SIZE (vinfo_for_stmt (first)) = size;
1937
1938 return true;
1939 }
1940
1941
1942 /* Function vect_is_simple_reduction_1
1943
1944 (1) Detect a cross-iteration def-use cycle that represents a simple
1945 reduction computation. We look for the following pattern:
1946
1947 loop_header:
1948 a1 = phi < a0, a2 >
1949 a3 = ...
1950 a2 = operation (a3, a1)
1951
1952 such that:
1953 1. operation is commutative and associative and it is safe to
1954 change the order of the computation (if CHECK_REDUCTION is true)
1955 2. no uses for a2 in the loop (a2 is used out of the loop)
1956 3. no uses of a1 in the loop besides the reduction operation
1957 4. no uses of a1 outside the loop.
1958
1959 Conditions 1,4 are tested here.
1960 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
1961
1962 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
1963 nested cycles, if CHECK_REDUCTION is false.
1964
1965 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
1966 reductions:
1967
1968 a1 = phi < a0, a2 >
1969 inner loop (def of a3)
1970 a2 = phi < a3 >
1971
1972 If MODIFY is true, it also tries to rework the code in-place to enable
1973 detection of more reduction patterns. For the time being we rewrite
1974 "res -= RHS" into "res += -RHS" when it seems worthwhile.
1975 */
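/* A source-level illustration (loop and names assumed, not taken from a
   testcase):

     s = init;
     for (i = 0; i < n; i++)
       s = s + a[i];

   maps to the pattern above with a1 = the loop-header phi of 's',
   a3 = the load of a[i], and a2 = a3 + a1; 's' is only read after the
   loop, so the reduction conditions hold.  */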
1976
1977 static gimple
1978 vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
1979 bool check_reduction, bool *double_reduc,
1980 bool modify)
1981 {
1982 struct loop *loop = (gimple_bb (phi))->loop_father;
1983 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
1984 edge latch_e = loop_latch_edge (loop);
1985 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
1986 gimple def_stmt, def1 = NULL, def2 = NULL;
1987 enum tree_code orig_code, code;
1988 tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
1989 tree type;
1990 int nloop_uses;
1991 tree name;
1992 imm_use_iterator imm_iter;
1993 use_operand_p use_p;
1994 bool phi_def;
1995
1996 *double_reduc = false;
1997
1998 /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization;
1999 otherwise, we assume outer-loop vectorization. */
2000 gcc_assert ((check_reduction && loop == vect_loop)
2001 || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));
2002
2003 name = PHI_RESULT (phi);
2004 nloop_uses = 0;
2005 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2006 {
2007 gimple use_stmt = USE_STMT (use_p);
2008 if (is_gimple_debug (use_stmt))
2009 continue;
2010
2011 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2012 {
2013 if (vect_print_dump_info (REPORT_DETAILS))
2014 fprintf (vect_dump, "intermediate value used outside loop.");
2015
2016 return NULL;
2017 }
2018
2019 if (vinfo_for_stmt (use_stmt)
2020 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
2021 nloop_uses++;
2022 if (nloop_uses > 1)
2023 {
2024 if (vect_print_dump_info (REPORT_DETAILS))
2025 fprintf (vect_dump, "reduction used in loop.");
2026 return NULL;
2027 }
2028 }
2029
2030 if (TREE_CODE (loop_arg) != SSA_NAME)
2031 {
2032 if (vect_print_dump_info (REPORT_DETAILS))
2033 {
2034 fprintf (vect_dump, "reduction: not ssa_name: ");
2035 print_generic_expr (vect_dump, loop_arg, TDF_SLIM);
2036 }
2037 return NULL;
2038 }
2039
2040 def_stmt = SSA_NAME_DEF_STMT (loop_arg);
2041 if (!def_stmt)
2042 {
2043 if (vect_print_dump_info (REPORT_DETAILS))
2044 fprintf (vect_dump, "reduction: no def_stmt.");
2045 return NULL;
2046 }
2047
2048 if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
2049 {
2050 if (vect_print_dump_info (REPORT_DETAILS))
2051 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
2052 return NULL;
2053 }
2054
2055 if (is_gimple_assign (def_stmt))
2056 {
2057 name = gimple_assign_lhs (def_stmt);
2058 phi_def = false;
2059 }
2060 else
2061 {
2062 name = PHI_RESULT (def_stmt);
2063 phi_def = true;
2064 }
2065
2066 nloop_uses = 0;
2067 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2068 {
2069 gimple use_stmt = USE_STMT (use_p);
2070 if (is_gimple_debug (use_stmt))
2071 continue;
2072 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
2073 && vinfo_for_stmt (use_stmt)
2074 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
2075 nloop_uses++;
2076 if (nloop_uses > 1)
2077 {
2078 if (vect_print_dump_info (REPORT_DETAILS))
2079 fprintf (vect_dump, "reduction used in loop.");
2080 return NULL;
2081 }
2082 }
2083
2084 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
2085 defined in the inner loop. */
2086 if (phi_def)
2087 {
2088 op1 = PHI_ARG_DEF (def_stmt, 0);
2089
2090 if (gimple_phi_num_args (def_stmt) != 1
2091 || TREE_CODE (op1) != SSA_NAME)
2092 {
2093 if (vect_print_dump_info (REPORT_DETAILS))
2094 fprintf (vect_dump, "unsupported phi node definition.");
2095
2096 return NULL;
2097 }
2098
2099 def1 = SSA_NAME_DEF_STMT (op1);
2100 if (flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2101 && loop->inner
2102 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
2103 && is_gimple_assign (def1))
2104 {
2105 if (vect_print_dump_info (REPORT_DETAILS))
2106 report_vect_op (def_stmt, "detected double reduction: ");
2107
2108 *double_reduc = true;
2109 return def_stmt;
2110 }
2111
2112 return NULL;
2113 }
2114
2115 code = orig_code = gimple_assign_rhs_code (def_stmt);
2116
2117 /* We can handle "res -= x[i]", which is non-associative, by
2118 simply rewriting this into "res += -x[i]". Avoid changing the
2119 gimple instruction for the first simple tests and only do this
2120 if we're allowed to change code at all. */
2121 if (code == MINUS_EXPR
2122 && modify
2123 && (op1 = gimple_assign_rhs1 (def_stmt))
2124 && TREE_CODE (op1) == SSA_NAME
2125 && SSA_NAME_DEF_STMT (op1) == phi)
2126 code = PLUS_EXPR;
2127
2128 if (check_reduction
2129 && (!commutative_tree_code (code) || !associative_tree_code (code)))
2130 {
2131 if (vect_print_dump_info (REPORT_DETAILS))
2132 report_vect_op (def_stmt, "reduction: not commutative/associative: ");
2133 return NULL;
2134 }
2135
2136 if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
2137 {
2138 if (code != COND_EXPR)
2139 {
2140 if (vect_print_dump_info (REPORT_DETAILS))
2141 report_vect_op (def_stmt, "reduction: not binary operation: ");
2142
2143 return NULL;
2144 }
2145
2146 op3 = gimple_assign_rhs1 (def_stmt);
2147 if (COMPARISON_CLASS_P (op3))
2148 {
2149 op4 = TREE_OPERAND (op3, 1);
2150 op3 = TREE_OPERAND (op3, 0);
2151 }
2152
2153 op1 = gimple_assign_rhs2 (def_stmt);
2154 op2 = gimple_assign_rhs3 (def_stmt);
2155
2156 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
2157 {
2158 if (vect_print_dump_info (REPORT_DETAILS))
2159 report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
2160
2161 return NULL;
2162 }
2163 }
2164 else
2165 {
2166 op1 = gimple_assign_rhs1 (def_stmt);
2167 op2 = gimple_assign_rhs2 (def_stmt);
2168
2169 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
2170 {
2171 if (vect_print_dump_info (REPORT_DETAILS))
2172 report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
2173
2174 return NULL;
2175 }
2176 }
2177
2178 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
2179 if ((TREE_CODE (op1) == SSA_NAME
2180 && !types_compatible_p (type,TREE_TYPE (op1)))
2181 || (TREE_CODE (op2) == SSA_NAME
2182 && !types_compatible_p (type, TREE_TYPE (op2)))
2183 || (op3 && TREE_CODE (op3) == SSA_NAME
2184 && !types_compatible_p (type, TREE_TYPE (op3)))
2185 || (op4 && TREE_CODE (op4) == SSA_NAME
2186 && !types_compatible_p (type, TREE_TYPE (op4))))
2187 {
2188 if (vect_print_dump_info (REPORT_DETAILS))
2189 {
2190 fprintf (vect_dump, "reduction: multiple types: operation type: ");
2191 print_generic_expr (vect_dump, type, TDF_SLIM);
2192 fprintf (vect_dump, ", operands types: ");
2193 print_generic_expr (vect_dump, TREE_TYPE (op1), TDF_SLIM);
2194 fprintf (vect_dump, ",");
2195 print_generic_expr (vect_dump, TREE_TYPE (op2), TDF_SLIM);
2196 if (op3)
2197 {
2198 fprintf (vect_dump, ",");
2199 print_generic_expr (vect_dump, TREE_TYPE (op3), TDF_SLIM);
2200 }
2201
2202 if (op4)
2203 {
2204 fprintf (vect_dump, ",");
2205 print_generic_expr (vect_dump, TREE_TYPE (op4), TDF_SLIM);
2206 }
2207 }
2208
2209 return NULL;
2210 }
2211
2212 /* Check that it's ok to change the order of the computation.
2213 Generally, when vectorizing a reduction we change the order of the
2214 computation. This may change the behavior of the program in some
2215 cases, so we need to check that this is ok. One exception is when
2216 vectorizing an outer-loop: the inner-loop is executed sequentially,
2217 and therefore vectorizing reductions in the inner-loop during
2218 outer-loop vectorization is safe. */
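/* For illustration (float values assumed): in single precision,
   (1e8f + -1e8f) + 1.0f evaluates to 1.0f while 1e8f + (-1e8f + 1.0f)
   rounds to 0.0f, so reassociating a float reduction is only allowed
   with -fassociative-math.  */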
2219
2220 /* CHECKME: check for !flag_finite_math_only too? */
2221 if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
2222 && check_reduction)
2223 {
2224 /* Changing the order of operations changes the semantics. */
2225 if (vect_print_dump_info (REPORT_DETAILS))
2226 report_vect_op (def_stmt, "reduction: unsafe fp math optimization: ");
2227 return NULL;
2228 }
2229 else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
2230 && check_reduction)
2231 {
2232 /* Changing the order of operations changes the semantics. */
2233 if (vect_print_dump_info (REPORT_DETAILS))
2234 report_vect_op (def_stmt, "reduction: unsafe int math optimization: ");
2235 return NULL;
2236 }
2237 else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction)
2238 {
2239 /* Changing the order of operations changes the semantics. */
2240 if (vect_print_dump_info (REPORT_DETAILS))
2241 report_vect_op (def_stmt,
2242 "reduction: unsafe fixed-point math optimization: ");
2243 return NULL;
2244 }
2245
2246 /* If we detected "res -= x[i]" earlier, rewrite it into
2247 "res += -x[i]" now. If this turns out to be useless reassoc
2248 will clean it up again. */
2249 if (orig_code == MINUS_EXPR)
2250 {
2251 tree rhs = gimple_assign_rhs2 (def_stmt);
2252 tree negrhs = make_ssa_name (SSA_NAME_VAR (rhs), NULL);
2253 gimple negate_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, negrhs,
2254 rhs, NULL);
2255 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
2256 set_vinfo_for_stmt (negate_stmt, new_stmt_vec_info (negate_stmt,
2257 loop_info, NULL));
2258 gsi_insert_before (&gsi, negate_stmt, GSI_NEW_STMT);
2259 gimple_assign_set_rhs2 (def_stmt, negrhs);
2260 gimple_assign_set_rhs_code (def_stmt, PLUS_EXPR);
2261 update_stmt (def_stmt);
2262 }
2263
2264 /* Reduction is safe. We're dealing with one of the following:
2265 1) integer arithmetic and no trapv
2266 2) floating point arithmetic, and special flags permit this optimization
2267 3) nested cycle (i.e., outer loop vectorization). */
2268 if (TREE_CODE (op1) == SSA_NAME)
2269 def1 = SSA_NAME_DEF_STMT (op1);
2270
2271 if (TREE_CODE (op2) == SSA_NAME)
2272 def2 = SSA_NAME_DEF_STMT (op2);
2273
2274 if (code != COND_EXPR
2275 && ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2))))
2276 {
2277 if (vect_print_dump_info (REPORT_DETAILS))
2278 report_vect_op (def_stmt, "reduction: no defs for operands: ");
2279 return NULL;
2280 }
2281
2282 /* Check that one def is the reduction def, defined by PHI,
2283 the other def is either defined in the loop ("vect_internal_def"),
2284 or it's an induction (defined by a loop-header phi-node). */
2285
2286 if (def2 && def2 == phi
2287 && (code == COND_EXPR
2288 || !def1 || gimple_nop_p (def1)
2289 || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
2290 && (is_gimple_assign (def1)
2291 || is_gimple_call (def1)
2292 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
2293 == vect_induction_def
2294 || (gimple_code (def1) == GIMPLE_PHI
2295 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
2296 == vect_internal_def
2297 && !is_loop_header_bb_p (gimple_bb (def1)))))))
2298 {
2299 if (vect_print_dump_info (REPORT_DETAILS))
2300 report_vect_op (def_stmt, "detected reduction: ");
2301 return def_stmt;
2302 }
2303
2304 if (def1 && def1 == phi
2305 && (code == COND_EXPR
2306 || !def2 || gimple_nop_p (def2)
2307 || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
2308 && (is_gimple_assign (def2)
2309 || is_gimple_call (def2)
2310 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
2311 == vect_induction_def
2312 || (gimple_code (def2) == GIMPLE_PHI
2313 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
2314 == vect_internal_def
2315 && !is_loop_header_bb_p (gimple_bb (def2)))))))
2316 {
2317 if (check_reduction)
2318 {
2319 /* Swap operands (just for simplicity - so that the rest of the code
2320 can assume that the reduction variable is always the last (second)
2321 argument). */
2322 if (vect_print_dump_info (REPORT_DETAILS))
2323 report_vect_op (def_stmt,
2324 "detected reduction: need to swap operands: ");
2325
2326 swap_tree_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
2327 gimple_assign_rhs2_ptr (def_stmt));
2328 }
2329 else
2330 {
2331 if (vect_print_dump_info (REPORT_DETAILS))
2332 report_vect_op (def_stmt, "detected reduction: ");
2333 }
2334
2335 return def_stmt;
2336 }
2337
2338 /* Try to find SLP reduction chain. */
2339 if (check_reduction && vect_is_slp_reduction (loop_info, phi, def_stmt))
2340 {
2341 if (vect_print_dump_info (REPORT_DETAILS))
2342 report_vect_op (def_stmt, "reduction: detected reduction chain: ");
2343
2344 return def_stmt;
2345 }
2346
2347 if (vect_print_dump_info (REPORT_DETAILS))
2348 report_vect_op (def_stmt, "reduction: unknown pattern: ");
2349
2350 return NULL;
2351 }
2352
2353 /* Wrapper around vect_is_simple_reduction_1, that won't modify code
2354 in-place. Arguments as there. */
2355
2356 static gimple
2357 vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
2358 bool check_reduction, bool *double_reduc)
2359 {
2360 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
2361 double_reduc, false);
2362 }
2363
2364 /* Wrapper around vect_is_simple_reduction_1, which will modify code
2365 in-place if it enables detection of more reductions. Arguments
2366 as there. */
2367
2368 gimple
2369 vect_force_simple_reduction (loop_vec_info loop_info, gimple phi,
2370 bool check_reduction, bool *double_reduc)
2371 {
2372 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
2373 double_reduc, true);
2374 }
2375
2376 /* Calculate the cost of one scalar iteration of the loop. */
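/* For illustration (costs are target-dependent; numbers assumed): a body
   with one load, one add and one store contributes
   vect_get_stmt_cost (scalar_load) + vect_get_stmt_cost (scalar_stmt)
   + vect_get_stmt_cost (scalar_store), typically 1 + 1 + 1 = 3 with the
   default costs, multiplied by 50 for stmts inside an inner loop.  */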
2377 int
2378 vect_get_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
2379 {
2380 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2381 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
2382 int nbbs = loop->num_nodes, factor, scalar_single_iter_cost = 0;
2383 int innerloop_iters, i, stmt_cost;
2384
2385 /* Count statements in scalar loop. Using this as scalar cost for a single
2386 iteration for now.
2387
2388 TODO: Add outer loop support.
2389
2390 TODO: Consider assigning different costs to different scalar
2391 statements. */
2392
2393 /* FORNOW. */
2394 innerloop_iters = 1;
2395 if (loop->inner)
2396 innerloop_iters = 50; /* FIXME */
2397
2398 for (i = 0; i < nbbs; i++)
2399 {
2400 gimple_stmt_iterator si;
2401 basic_block bb = bbs[i];
2402
2403 if (bb->loop_father == loop->inner)
2404 factor = innerloop_iters;
2405 else
2406 factor = 1;
2407
2408 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2409 {
2410 gimple stmt = gsi_stmt (si);
2411 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2412
2413 if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
2414 continue;
2415
2416 /* Skip stmts that are not vectorized inside the loop. */
2417 if (stmt_info
2418 && !STMT_VINFO_RELEVANT_P (stmt_info)
2419 && (!STMT_VINFO_LIVE_P (stmt_info)
2420 || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
2421 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
2422 continue;
2423
2424 if (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt)))
2425 {
2426 if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
2427 stmt_cost = vect_get_stmt_cost (scalar_load);
2428 else
2429 stmt_cost = vect_get_stmt_cost (scalar_store);
2430 }
2431 else
2432 stmt_cost = vect_get_stmt_cost (scalar_stmt);
2433
2434 scalar_single_iter_cost += stmt_cost * factor;
2435 }
2436 }
2437 return scalar_single_iter_cost;
2438 }
2439
2440 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
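/* For illustration (all numbers assumed): with VF = 4, a known iteration
   count of 103, 3 prologue iterations and a scalar iteration cost of 2,
   the epilogue gets (103 - 3) % 4 = 0 iterations (or VF = 4 if peeling
   for gaps is required), so the returned cost is 3*2 + 0*2 + 0 = 6; no
   guard branches are counted since the iteration count is known.  */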
2441 int
2442 vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
2443 int *peel_iters_epilogue,
2444 int scalar_single_iter_cost)
2445 {
2446 int peel_guard_costs = 0;
2447 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2448
2449 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
2450 {
2451 *peel_iters_epilogue = vf/2;
2452 if (vect_print_dump_info (REPORT_COST))
2453 fprintf (vect_dump, "cost model: "
2454 "epilogue peel iters set to vf/2 because "
2455 "loop iterations are unknown .");
2456
2457 /* If the peeled iterations are known but the number of scalar loop
2458 iterations is unknown, count a taken branch per peeled loop. */
2459 peel_guard_costs = 2 * vect_get_stmt_cost (cond_branch_taken);
2460 }
2461 else
2462 {
2463 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
2464 peel_iters_prologue = niters < peel_iters_prologue ?
2465 niters : peel_iters_prologue;
2466 *peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
2467 /* If we need to peel for gaps, but no epilogue peeling would otherwise
2468 be required, we have to peel VF iterations. */
2469 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
2470 *peel_iters_epilogue = vf;
2471 }
2472
2473 return (peel_iters_prologue * scalar_single_iter_cost)
2474 + (*peel_iters_epilogue * scalar_single_iter_cost)
2475 + peel_guard_costs;
2476 }
2477
2478 /* Function vect_estimate_min_profitable_iters
2479
2480 Return the number of iterations required for the vector version of the
2481 loop to be profitable relative to the cost of the scalar version of the
2482 loop.
2483
2484 TODO: Take profile info into account before making vectorization
2485 decisions, if available. */
2486
2487 int
2488 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
2489 {
2490 int i;
2491 int min_profitable_iters;
2492 int peel_iters_prologue;
2493 int peel_iters_epilogue;
2494 int vec_inside_cost = 0;
2495 int vec_outside_cost = 0;
2496 int scalar_single_iter_cost = 0;
2497 int scalar_outside_cost = 0;
2498 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2499 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2500 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
2501 int nbbs = loop->num_nodes;
2502 int npeel = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
2503 int peel_guard_costs = 0;
2504 VEC (slp_instance, heap) *slp_instances;
2505 slp_instance instance;
2506
2507 /* Cost model disabled. */
2508 if (!flag_vect_cost_model)
2509 {
2510 if (vect_print_dump_info (REPORT_COST))
2511 fprintf (vect_dump, "cost model disabled.");
2512 return 0;
2513 }
2514
2515 /* Requires loop versioning tests to handle misalignment. */
2516 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
2517 {
2518 /* FIXME: Make cost depend on complexity of individual check. */
2519 vec_outside_cost +=
2520 VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
2521 if (vect_print_dump_info (REPORT_COST))
2522 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
2523 "versioning to treat misalignment.\n");
2524 }
2525
2526 /* Requires loop versioning with alias checks. */
2527 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2528 {
2529 /* FIXME: Make cost depend on complexity of individual check. */
2530 vec_outside_cost +=
2531 VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
2532 if (vect_print_dump_info (REPORT_COST))
2533 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
2534 "versioning aliasing.\n");
2535 }
2536
2537 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2538 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2539 vec_outside_cost += vect_get_stmt_cost (cond_branch_taken);
2540
2541 /* Accumulate the outside-of-loop costs that were recorded per stmt
2542 during the analysis phase.  Stmts that are not vectorized inside the
2543 loop are skipped; for pattern stmts, the costs of the stmts in their
2544 pattern definition sequence are included as well.
2545
2546 TODO: Add outer loop support.
2547 TODO: Consider assigning different costs to different statements. */
2548
2549 for (i = 0; i < nbbs; i++)
2550 {
2551 gimple_stmt_iterator si;
2552 basic_block bb = bbs[i];
2553
2554 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2555 {
2556 gimple stmt = gsi_stmt (si);
2557 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2558
2559 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2560 {
2561 stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2562 stmt_info = vinfo_for_stmt (stmt);
2563 }
2564
2565 /* Skip stmts that are not vectorized inside the loop. */
2566 if (!STMT_VINFO_RELEVANT_P (stmt_info)
2567 && (!STMT_VINFO_LIVE_P (stmt_info)
2568 || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info))))
2569 continue;
2570
2571 /* FIXME: for stmts in the inner-loop in outer-loop vectorization,
2572 some of the "outside" costs are generated inside the outer-loop. */
2573 vec_outside_cost += STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info);
2574 if (is_pattern_stmt_p (stmt_info)
2575 && STMT_VINFO_PATTERN_DEF_SEQ (stmt_info))
2576 {
2577 gimple_stmt_iterator gsi;
2578
2579 for (gsi = gsi_start (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info));
2580 !gsi_end_p (gsi); gsi_next (&gsi))
2581 {
2582 gimple pattern_def_stmt = gsi_stmt (gsi);
2583 stmt_vec_info pattern_def_stmt_info
2584 = vinfo_for_stmt (pattern_def_stmt);
2585 if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
2586 || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
2587 vec_outside_cost
2588 += STMT_VINFO_OUTSIDE_OF_LOOP_COST
2589 (pattern_def_stmt_info);
2590 }
2591 }
2592 }
2593 }
2594
2595 scalar_single_iter_cost = vect_get_single_scalar_iteration_cost (loop_vinfo);
2596
2597 /* Add additional cost for the peeled instructions in prologue and epilogue
2598 loop.
2599
2600 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
2601 at compile-time - we assume it's vf/2 (the worst would be vf-1).
2602
2603 TODO: Build an expression that represents peel_iters for prologue and
2604 epilogue to be used in a run-time test. */
2605
2606 if (npeel < 0)
2607 {
2608 peel_iters_prologue = vf/2;
2609 if (vect_print_dump_info (REPORT_COST))
2610 fprintf (vect_dump, "cost model: "
2611 "prologue peel iters set to vf/2.");
2612
2613 /* If peeling for alignment is unknown, loop bound of main loop becomes
2614 unknown. */
2615 peel_iters_epilogue = vf/2;
2616 if (vect_print_dump_info (REPORT_COST))
2617 fprintf (vect_dump, "cost model: "
2618 "epilogue peel iters set to vf/2 because "
2619 "peeling for alignment is unknown .");
2620
2621 /* If peeled iterations are unknown, count a taken branch and a not taken
2622 branch per peeled loop. Even if scalar loop iterations are known,
2623 vector iterations are not known since peeled prologue iterations are
2624 not known. Hence guards remain the same. */
2625 peel_guard_costs += 2 * (vect_get_stmt_cost (cond_branch_taken)
2626 + vect_get_stmt_cost (cond_branch_not_taken));
2627 vec_outside_cost += (peel_iters_prologue * scalar_single_iter_cost)
2628 + (peel_iters_epilogue * scalar_single_iter_cost)
2629 + peel_guard_costs;
2630 }
2631 else
2632 {
2633 peel_iters_prologue = npeel;
2634 vec_outside_cost += vect_get_known_peeling_cost (loop_vinfo,
2635 peel_iters_prologue, &peel_iters_epilogue,
2636 scalar_single_iter_cost);
2637 }
2638
2639 /* FORNOW: The scalar outside cost is incremented in one of the
2640 following ways:
2641
2642 1. The vectorizer checks for alignment and aliasing and generates
2643 a condition that allows dynamic vectorization. A cost model
2644 check is ANDED with the versioning condition. Hence scalar code
2645 path now has the added cost of the versioning check.
2646
2647 if (cost > th & versioning_check)
2648 jmp to vector code
2649
2650 Hence run-time scalar is incremented by not-taken branch cost.
2651
2652 2. The vectorizer then checks if a prologue is required. If the
2653 cost model check was not done before during versioning, it has to
2654 be done before the prologue check.
2655
2656 if (cost <= th)
2657 prologue = scalar_iters
2658 if (prologue == 0)
2659 jmp to vector code
2660 else
2661 execute prologue
2662 if (prologue == num_iters)
2663 go to exit
2664
2665 Hence the run-time scalar cost is incremented by a taken branch,
2666 plus a not-taken branch, plus a taken branch cost.
2667
2668 3. The vectorizer then checks if an epilogue is required. If the
2669 cost model check was not done before during prologue check, it
2670 has to be done with the epilogue check.
2671
2672 if (prologue == 0)
2673 jmp to vector code
2674 else
2675 execute prologue
2676 if (prologue == num_iters)
2677 go to exit
2678 vector code:
2679 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
2680 jmp to epilogue
2681
2682 Hence the run-time scalar cost should be incremented by 2 taken
2683 branches.
2684
2685 TODO: The back end may reorder the BBs differently and reverse
2686 conditions/branch directions. Change the estimates below to
2687 something more reasonable. */
2688
2689 /* If the number of iterations is known and we do not do versioning, we can
2690 decide whether to vectorize at compile time. Hence the scalar version
2691 does not carry cost model guard costs. */
2692 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2693 || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2694 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2695 {
2696 /* Cost model check occurs at versioning. */
2697 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2698 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2699 scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
2700 else
2701 {
2702 /* Cost model check occurs at prologue generation. */
2703 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2704 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
2705 + vect_get_stmt_cost (cond_branch_not_taken);
2706 /* Cost model check occurs at epilogue generation. */
2707 else
2708 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
2709 }
2710 }
2711
2712 /* Add SLP costs. */
2713 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2714 FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
2715 vec_outside_cost += SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (instance);
2716
2717 /* Complete the target-specific cost calculation for the inside-of-loop
2718 costs. */
2719 vec_inside_cost = finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
2720
2721 /* Calculate number of iterations required to make the vector version
2722 profitable, relative to the loop bodies only. The following condition
2723 must hold true:
2724 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
2725 where
2726 SIC = scalar iteration cost, VIC = vector iteration cost,
2727 VOC = vector outside cost, VF = vectorization factor,
2728 PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
2729 SOC = scalar outside cost for run time cost model check. */
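/* For illustration only (all costs assumed): with SIC = 4, VIC = 6,
   VF = 4, VOC = 20, SOC = 0 and no peeling, the division below gives
   (20 * 4) / (16 - 6) = 8 iterations, the equality check bumps it to 9,
   and after the adjustments further down the reported profitability
   threshold is 8.  */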
2730
2731 if ((scalar_single_iter_cost * vf) > vec_inside_cost)
2732 {
2733 if (vec_outside_cost <= 0)
2734 min_profitable_iters = 1;
2735 else
2736 {
2737 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
2738 - vec_inside_cost * peel_iters_prologue
2739 - vec_inside_cost * peel_iters_epilogue)
2740 / ((scalar_single_iter_cost * vf)
2741 - vec_inside_cost);
2742
2743 if ((scalar_single_iter_cost * vf * min_profitable_iters)
2744 <= ((vec_inside_cost * min_profitable_iters)
2745 + ((vec_outside_cost - scalar_outside_cost) * vf)))
2746 min_profitable_iters++;
2747 }
2748 }
2749 /* vector version will never be profitable. */
2750 else
2751 {
2752 if (vect_print_dump_info (REPORT_COST))
2753 fprintf (vect_dump, "cost model: the vector iteration cost = %d "
2754 "divided by the scalar iteration cost = %d "
2755 "is greater or equal to the vectorization factor = %d.",
2756 vec_inside_cost, scalar_single_iter_cost, vf);
2757 return -1;
2758 }
2759
2760 if (vect_print_dump_info (REPORT_COST))
2761 {
2762 fprintf (vect_dump, "Cost model analysis: \n");
2763 fprintf (vect_dump, " Vector inside of loop cost: %d\n",
2764 vec_inside_cost);
2765 fprintf (vect_dump, " Vector outside of loop cost: %d\n",
2766 vec_outside_cost);
2767 fprintf (vect_dump, " Scalar iteration cost: %d\n",
2768 scalar_single_iter_cost);
2769 fprintf (vect_dump, " Scalar outside cost: %d\n", scalar_outside_cost);
2770 fprintf (vect_dump, " prologue iterations: %d\n",
2771 peel_iters_prologue);
2772 fprintf (vect_dump, " epilogue iterations: %d\n",
2773 peel_iters_epilogue);
2774 fprintf (vect_dump, " Calculated minimum iters for profitability: %d\n",
2775 min_profitable_iters);
2776 }
2777
2778 min_profitable_iters =
2779 min_profitable_iters < vf ? vf : min_profitable_iters;
2780
2781 /* Because the condition we create is:
2782 if (niters <= min_profitable_iters)
2783 then skip the vectorized loop. */
2784 min_profitable_iters--;
2785
2786 if (vect_print_dump_info (REPORT_COST))
2787 fprintf (vect_dump, " Profitability threshold = %d\n",
2788 min_profitable_iters);
2789
2790 return min_profitable_iters;
2791 }
2792
2793
2794 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
2795 functions. Design better to avoid maintenance issues. */
2796
2797 /* Function vect_model_reduction_cost.
2798
2799 Models cost for a reduction operation, including the vector ops
2800 generated within the strip-mine loop, the initial definition before
2801 the loop, and the epilogue code that must be generated. */
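/* For illustration (example values assumed; costs are target-dependent):
   for a four-element vector sum whose target lacks a REDUC_PLUS_EXPR
   pattern but has whole-vector shifts, the epilogue below is costed as
   log2(4) * 2 = 4 vector stmts plus one vec_to_scalar extract; with a
   reduction pattern available it is one vector stmt plus one extract.  */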
2802
2803 static bool
2804 vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
2805 int ncopies)
2806 {
2807 int outer_cost = 0;
2808 enum tree_code code;
2809 optab optab;
2810 tree vectype;
2811 gimple stmt, orig_stmt;
2812 tree reduction_op;
2813 enum machine_mode mode;
2814 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2815 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2816
2817 /* Cost of reduction op inside loop. */
2818 unsigned inside_cost
2819 = add_stmt_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo),
2820 ncopies, vector_stmt, stmt_info, 0);
2821
2822 stmt = STMT_VINFO_STMT (stmt_info);
2823
2824 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
2825 {
2826 case GIMPLE_SINGLE_RHS:
2827 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
2828 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
2829 break;
2830 case GIMPLE_UNARY_RHS:
2831 reduction_op = gimple_assign_rhs1 (stmt);
2832 break;
2833 case GIMPLE_BINARY_RHS:
2834 reduction_op = gimple_assign_rhs2 (stmt);
2835 break;
2836 case GIMPLE_TERNARY_RHS:
2837 reduction_op = gimple_assign_rhs3 (stmt);
2838 break;
2839 default:
2840 gcc_unreachable ();
2841 }
2842
2843 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
2844 if (!vectype)
2845 {
2846 if (vect_print_dump_info (REPORT_COST))
2847 {
2848 fprintf (vect_dump, "unsupported data-type ");
2849 print_generic_expr (vect_dump, TREE_TYPE (reduction_op), TDF_SLIM);
2850 }
2851 return false;
2852 }
2853
2854 mode = TYPE_MODE (vectype);
2855 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2856
2857 if (!orig_stmt)
2858 orig_stmt = STMT_VINFO_STMT (stmt_info);
2859
2860 code = gimple_assign_rhs_code (orig_stmt);
2861
2862 /* Add in cost for initial definition. */
2863 outer_cost += vect_get_stmt_cost (scalar_to_vec);
2864
2865 /* Determine cost of epilogue code.
2866
2867 We have a reduction operator that will reduce the vector in one statement.
2868 Also requires scalar extract. */
2869
2870 if (!nested_in_vect_loop_p (loop, orig_stmt))
2871 {
2872 if (reduc_code != ERROR_MARK)
2873 outer_cost += vect_get_stmt_cost (vector_stmt)
2874 + vect_get_stmt_cost (vec_to_scalar);
2875 else
2876 {
2877 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
2878 tree bitsize =
2879 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
2880 int element_bitsize = tree_low_cst (bitsize, 1);
2881 int nelements = vec_size_in_bits / element_bitsize;
2882
2883 optab = optab_for_tree_code (code, vectype, optab_default);
2884
2885 /* We have a whole vector shift available. */
2886 if (VECTOR_MODE_P (mode)
2887 && optab_handler (optab, mode) != CODE_FOR_nothing
2888 && optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
2889 /* Final reduction via vector shifts and the reduction operator. Also
2890 requires scalar extract. */
2891 outer_cost += ((exact_log2 (nelements) * 2)
2892 * vect_get_stmt_cost (vector_stmt)
2893 + vect_get_stmt_cost (vec_to_scalar));
2894 else
2895 /* Use extracts and reduction op for final reduction. For N elements,
2896 we have N extracts and N-1 reduction ops. */
2897 outer_cost += ((nelements + nelements - 1)
2898 * vect_get_stmt_cost (vector_stmt));
2899 }
2900 }
2901
2902 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost;
2903
2904 if (vect_print_dump_info (REPORT_COST))
2905 fprintf (vect_dump, "vect_model_reduction_cost: inside_cost = %d, "
2906 "outside_cost = %d .", inside_cost,
2907 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
2908
2909 return true;
2910 }
2911
2912
2913 /* Function vect_model_induction_cost.
2914
2915 Models cost for induction operations. */
2916
2917 static void
2918 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
2919 {
2920 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2921
2922 /* loop cost for vec_loop. */
2923 unsigned inside_cost
2924 = add_stmt_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), ncopies,
2925 vector_stmt, stmt_info, 0);
2926
2927 /* prologue cost for vec_init and vec_step. */
2928 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info)
2929 = 2 * vect_get_stmt_cost (scalar_to_vec);
2930
2931 if (vect_print_dump_info (REPORT_COST))
2932 fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, "
2933 "outside_cost = %d .", inside_cost,
2934 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
2935 }
2936
2937
2938 /* Function get_initial_def_for_induction
2939
2940 Input:
2941 STMT - a stmt that performs an induction operation in the loop.
2942 IV_PHI - the initial value of the induction variable
2943
2944 Output:
2945 Return a vector variable, initialized with the first VF values of
2946 the induction variable. E.g., for an iv with IV_PHI='X' and
2947 evolution S, for a vector of 4 units, we want to return:
2948 [X, X + S, X + 2*S, X + 3*S]. */
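/* For illustration (values assumed): an iv with X = 0, S = 1 and VF = 4
   yields vec_init = [0, 1, 2, 3]; inside the loop the update then adds
   vec_step = [VF*S, ...] = [4, 4, 4, 4] on every iteration.  */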
2949
2950 static tree
2951 get_initial_def_for_induction (gimple iv_phi)
2952 {
2953 stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
2954 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2955 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2956 tree scalar_type;
2957 tree vectype;
2958 int nunits;
2959 edge pe = loop_preheader_edge (loop);
2960 struct loop *iv_loop;
2961 basic_block new_bb;
2962 tree vec, vec_init, vec_step, t;
2963 tree access_fn;
2964 tree new_var;
2965 tree new_name;
2966 gimple init_stmt, induction_phi, new_stmt;
2967 tree induc_def, vec_def, vec_dest;
2968 tree init_expr, step_expr;
2969 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2970 int i;
2971 bool ok;
2972 int ncopies;
2973 tree expr;
2974 stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
2975 bool nested_in_vect_loop = false;
2976 gimple_seq stmts = NULL;
2977 imm_use_iterator imm_iter;
2978 use_operand_p use_p;
2979 gimple exit_phi;
2980 edge latch_e;
2981 tree loop_arg;
2982 gimple_stmt_iterator si;
2983 basic_block bb = gimple_bb (iv_phi);
2984 tree stepvectype;
2985 tree resvectype;
2986
2987 /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop? */
2988 if (nested_in_vect_loop_p (loop, iv_phi))
2989 {
2990 nested_in_vect_loop = true;
2991 iv_loop = loop->inner;
2992 }
2993 else
2994 iv_loop = loop;
2995 gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);
2996
2997 latch_e = loop_latch_edge (iv_loop);
2998 loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);
2999
3000 access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi));
3001 gcc_assert (access_fn);
3002 STRIP_NOPS (access_fn);
3003 ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn,
3004 &init_expr, &step_expr);
3005 gcc_assert (ok);
3006 pe = loop_preheader_edge (iv_loop);
3007
3008 scalar_type = TREE_TYPE (init_expr);
3009 vectype = get_vectype_for_scalar_type (scalar_type);
3010 resvectype = get_vectype_for_scalar_type (TREE_TYPE (PHI_RESULT (iv_phi)));
3011 gcc_assert (vectype);
3012 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3013 ncopies = vf / nunits;
3014
3015 gcc_assert (phi_info);
3016 gcc_assert (ncopies >= 1);
3017
3018 /* Find the first insertion point in the BB. */
3019 si = gsi_after_labels (bb);
3020
3021 /* Create the vector that holds the initial_value of the induction. */
3022 if (nested_in_vect_loop)
3023 {
3024 /* iv_loop is nested in the loop to be vectorized. init_expr had already
3025 been created during vectorization of previous stmts. We obtain it
3026 from the STMT_VINFO_VEC_STMT of the defining stmt. */
3027 tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi,
3028 loop_preheader_edge (iv_loop));
3029 vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL);
3030 }
3031 else
3032 {
3033 VEC(constructor_elt,gc) *v;
3034
3035 /* iv_loop is the loop to be vectorized. Create:
3036 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
3037 new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_");
3038 add_referenced_var (new_var);
3039
3040 new_name = force_gimple_operand (init_expr, &stmts, false, new_var);
3041 if (stmts)
3042 {
3043 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3044 gcc_assert (!new_bb);
3045 }
3046
3047 v = VEC_alloc (constructor_elt, gc, nunits);
3048 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
3049 for (i = 1; i < nunits; i++)
3050 {
3051 /* Create: new_name_i = new_name + step_expr */
3052 enum tree_code code = POINTER_TYPE_P (scalar_type)
3053 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3054 init_stmt = gimple_build_assign_with_ops (code, new_var,
3055 new_name, step_expr);
3056 new_name = make_ssa_name (new_var, init_stmt);
3057 gimple_assign_set_lhs (init_stmt, new_name);
3058
3059 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
3060 gcc_assert (!new_bb);
3061
3062 if (vect_print_dump_info (REPORT_DETAILS))
3063 {
3064 fprintf (vect_dump, "created new init_stmt: ");
3065 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
3066 }
3067 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
3068 }
3069 /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */
3070 vec = build_constructor (vectype, v);
3071 vec_init = vect_init_vector (iv_phi, vec, vectype, NULL);
3072 }
3073
3074
3075 /* Create the vector that holds the step of the induction. */
3076 if (nested_in_vect_loop)
3077 /* iv_loop is nested in the loop to be vectorized. Generate:
3078 vec_step = [S, S, S, S] */
3079 new_name = step_expr;
3080 else
3081 {
3082 /* iv_loop is the loop to be vectorized. Generate:
3083 vec_step = [VF*S, VF*S, VF*S, VF*S] */
3084 expr = build_int_cst (TREE_TYPE (step_expr), vf);
3085 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
3086 expr, step_expr);
3087 }
3088
3089 t = unshare_expr (new_name);
3090 gcc_assert (CONSTANT_CLASS_P (new_name));
3091 stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
3092 gcc_assert (stepvectype);
3093 vec = build_vector_from_val (stepvectype, t);
3094 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
3095
3096
3097 /* Create the following def-use cycle:
3098 loop prolog:
3099 vec_init = ...
3100 vec_step = ...
3101 loop:
3102 vec_iv = PHI <vec_init, vec_loop>
3103 ...
3104 STMT
3105 ...
3106 vec_loop = vec_iv + vec_step; */
3107
3108 /* Create the induction-phi that defines the induction-operand. */
3109 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
3110 add_referenced_var (vec_dest);
3111 induction_phi = create_phi_node (vec_dest, iv_loop->header);
3112 set_vinfo_for_stmt (induction_phi,
3113 new_stmt_vec_info (induction_phi, loop_vinfo, NULL));
3114 induc_def = PHI_RESULT (induction_phi);
3115
3116 /* Create the iv update inside the loop */
3117 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
3118 induc_def, vec_step);
3119 vec_def = make_ssa_name (vec_dest, new_stmt);
3120 gimple_assign_set_lhs (new_stmt, vec_def);
3121 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3122 set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
3123 NULL));
3124
3125 /* Set the arguments of the phi node: */
3126 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION, NULL);
3127 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
3128 UNKNOWN_LOCATION, NULL);
3129
3130
3131 /* In case the vectorization factor (VF) is bigger than the number
3132 of elements that we can fit in a vectype (nunits), we have to generate
3133 more than one vector stmt - i.e., we need to "unroll" the
3134 vector stmt by a factor of VF/nunits. For more details see the
3135 documentation in vectorizable_operation. */
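/* For illustration (values assumed): with VF = 8 and a 4-element vectype,
   ncopies = 2; the first copy is [X, X+S, X+2*S, X+3*S] and each further
   copy is obtained by adding [nunits*S, ...] = [4*S, 4*S, 4*S, 4*S].  */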
3136
3137 if (ncopies > 1)
3138 {
3139 stmt_vec_info prev_stmt_vinfo;
3140 /* FORNOW. This restriction should be relaxed. */
3141 gcc_assert (!nested_in_vect_loop);
3142
3143 /* Create the vector that holds the step of the induction. */
3144 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
3145 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
3146 expr, step_expr);
3147 t = unshare_expr (new_name);
3148 gcc_assert (CONSTANT_CLASS_P (new_name));
3149 vec = build_vector_from_val (stepvectype, t);
3150 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
3151
3152 vec_def = induc_def;
3153 prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
3154 for (i = 1; i < ncopies; i++)
3155 {
3156 /* vec_i = vec_prev + vec_step */
3157 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
3158 vec_def, vec_step);
3159 vec_def = make_ssa_name (vec_dest, new_stmt);
3160 gimple_assign_set_lhs (new_stmt, vec_def);
3161
3162 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3163 if (!useless_type_conversion_p (resvectype, vectype))
3164 {
3165 new_stmt = gimple_build_assign_with_ops
3166 (VIEW_CONVERT_EXPR,
3167 vect_get_new_vect_var (resvectype, vect_simple_var,
3168 "vec_iv_"),
3169 build1 (VIEW_CONVERT_EXPR, resvectype,
3170 gimple_assign_lhs (new_stmt)), NULL_TREE);
3171 gimple_assign_set_lhs (new_stmt,
3172 make_ssa_name
3173 (gimple_assign_lhs (new_stmt), new_stmt));
3174 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3175 }
3176 set_vinfo_for_stmt (new_stmt,
3177 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
3178 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
3179 prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
3180 }
3181 }
3182
3183 if (nested_in_vect_loop)
3184 {
3185 /* Find the loop-closed exit-phi of the induction, and record
3186 the final vector of induction results: */
3187 exit_phi = NULL;
3188 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
3189 {
3190 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (USE_STMT (use_p))))
3191 {
3192 exit_phi = USE_STMT (use_p);
3193 break;
3194 }
3195 }
3196 if (exit_phi)
3197 {
3198 stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
3199 /* FORNOW. Currently not supporting the case that an inner-loop induction
3200 is not used in the outer-loop (i.e. only outside the outer-loop). */
3201 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
3202 && !STMT_VINFO_LIVE_P (stmt_vinfo));
3203
3204 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
3205 if (vect_print_dump_info (REPORT_DETAILS))
3206 {
3207 fprintf (vect_dump, "vector of inductions after inner-loop:");
3208 print_gimple_stmt (vect_dump, new_stmt, 0, TDF_SLIM);
3209 }
3210 }
3211 }
3212
3213
3214 if (vect_print_dump_info (REPORT_DETAILS))
3215 {
3216 fprintf (vect_dump, "transform induction: created def-use cycle: ");
3217 print_gimple_stmt (vect_dump, induction_phi, 0, TDF_SLIM);
3218 fprintf (vect_dump, "\n");
3219 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (vec_def), 0, TDF_SLIM);
3220 }
3221
3222 STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
3223 if (!useless_type_conversion_p (resvectype, vectype))
3224 {
3225 new_stmt = gimple_build_assign_with_ops
3226 (VIEW_CONVERT_EXPR,
3227 vect_get_new_vect_var (resvectype, vect_simple_var, "vec_iv_"),
3228 build1 (VIEW_CONVERT_EXPR, resvectype, induc_def), NULL_TREE);
3229 induc_def = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt);
3230 gimple_assign_set_lhs (new_stmt, induc_def);
3231 si = gsi_start_bb (bb);
3232 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3233 set_vinfo_for_stmt (new_stmt,
3234 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
3235 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_stmt))
3236 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (induction_phi));
3237 }
3238
3239 return induc_def;
3240 }
3241
3242
3243 /* Function get_initial_def_for_reduction
3244
3245 Input:
3246 STMT - a stmt that performs a reduction operation in the loop.
3247 INIT_VAL - the initial value of the reduction variable
3248
3249 Output:
3250 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
3251 of the reduction (used for adjusting the epilog - see below).
3252 Return a vector variable, initialized according to the operation that STMT
3253 performs. This vector will be used as the initial value of the
3254 vector of partial results.
3255
3256 Option1 (adjust in epilog): Initialize the vector as follows:
3257 add/bit or/xor: [0,0,...,0,0]
3258 mult/bit and: [1,1,...,1,1]
3259 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
3260 and when necessary (e.g. add/mult case) let the caller know
3261 that it needs to adjust the result by init_val.
3262
3263 Option2: Initialize the vector as follows:
3264 add/bit or/xor: [init_val,0,0,...,0]
3265 mult/bit and: [init_val,1,1,...,1]
3266 min/max/cond_expr: [init_val,init_val,...,init_val]
3267 and no adjustments are needed.
3268
3269 For example, for the following code:
3270
3271 s = init_val;
3272 for (i=0;i<n;i++)
3273 s = s + a[i];
3274
3275 STMT is 's = s + a[i]', and the reduction variable is 's'.
3276 For a vector of 4 units, we want to return either [0,0,0,init_val],
3277 or [0,0,0,0] and let the caller know that it needs to adjust
3278 the result at the end by 'init_val'.
3279
3280 FORNOW, we are using the 'adjust in epilog' scheme, because this way the
3281 initialization vector is simpler (same element in all entries), if
3282 ADJUSTMENT_DEF is not NULL, and Option2 otherwise.
3283
3284 A cost model should help decide between these two schemes. */
3285
3286 tree
3287 get_initial_def_for_reduction (gimple stmt, tree init_val,
3288 tree *adjustment_def)
3289 {
3290 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
3291 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
3292 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3293 tree scalar_type = TREE_TYPE (init_val);
3294 tree vectype = get_vectype_for_scalar_type (scalar_type);
3295 int nunits;
3296 enum tree_code code = gimple_assign_rhs_code (stmt);
3297 tree def_for_init;
3298 tree init_def;
3299 tree *elts;
3300 int i;
3301 bool nested_in_vect_loop = false;
3302 tree init_value;
3303 REAL_VALUE_TYPE real_init_val = dconst0;
3304 int int_init_val = 0;
3305 gimple def_stmt = NULL;
3306
3307 gcc_assert (vectype);
3308 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3309
3310 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
3311 || SCALAR_FLOAT_TYPE_P (scalar_type));
3312
3313 if (nested_in_vect_loop_p (loop, stmt))
3314 nested_in_vect_loop = true;
3315 else
3316 gcc_assert (loop == (gimple_bb (stmt))->loop_father);
3317
3318 /* In case of double reduction we only create a vector variable to be put
3319 in the reduction phi node. The actual statement creation is done in
3320 vect_create_epilog_for_reduction. */
3321 if (adjustment_def && nested_in_vect_loop
3322 && TREE_CODE (init_val) == SSA_NAME
3323 && (def_stmt = SSA_NAME_DEF_STMT (init_val))
3324 && gimple_code (def_stmt) == GIMPLE_PHI
3325 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
3326 && vinfo_for_stmt (def_stmt)
3327 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
3328 == vect_double_reduction_def)
3329 {
3330 *adjustment_def = NULL;
3331 return vect_create_destination_var (init_val, vectype);
3332 }
3333
3334 if (TREE_CONSTANT (init_val))
3335 {
3336 if (SCALAR_FLOAT_TYPE_P (scalar_type))
3337 init_value = build_real (scalar_type, TREE_REAL_CST (init_val));
3338 else
3339 init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val));
3340 }
3341 else
3342 init_value = init_val;
3343
3344 switch (code)
3345 {
3346 case WIDEN_SUM_EXPR:
3347 case DOT_PROD_EXPR:
3348 case PLUS_EXPR:
3349 case MINUS_EXPR:
3350 case BIT_IOR_EXPR:
3351 case BIT_XOR_EXPR:
3352 case MULT_EXPR:
3353 case BIT_AND_EXPR:
3354 /* ADJUSTMENT_DEF is NULL when called from
3355 vect_create_epilog_for_reduction to vectorize double reduction. */
3356 if (adjustment_def)
3357 {
3358 if (nested_in_vect_loop)
3359 *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt,
3360 NULL);
3361 else
3362 *adjustment_def = init_val;
3363 }
3364
3365 if (code == MULT_EXPR)
3366 {
3367 real_init_val = dconst1;
3368 int_init_val = 1;
3369 }
3370
3371 if (code == BIT_AND_EXPR)
3372 int_init_val = -1;
3373
3374 if (SCALAR_FLOAT_TYPE_P (scalar_type))
3375 def_for_init = build_real (scalar_type, real_init_val);
3376 else
3377 def_for_init = build_int_cst (scalar_type, int_init_val);
3378
3379 /* Create a vector of '0' or '1' except the first element. */
3380 elts = XALLOCAVEC (tree, nunits);
3381 for (i = nunits - 2; i >= 0; --i)
3382 elts[i + 1] = def_for_init;
3383
3384 /* Option1: the first element is '0' or '1' as well. */
3385 if (adjustment_def)
3386 {
3387 elts[0] = def_for_init;
3388 init_def = build_vector (vectype, elts);
3389 break;
3390 }
3391
3392 /* Option2: the first element is INIT_VAL. */
3393 elts[0] = init_val;
3394 if (TREE_CONSTANT (init_val))
3395 init_def = build_vector (vectype, elts);
3396 else
3397 {
3398 VEC(constructor_elt,gc) *v;
3399 v = VEC_alloc (constructor_elt, gc, nunits);
3400 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init_val);
3401 for (i = 1; i < nunits; ++i)
3402 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[i]);
3403 init_def = build_constructor (vectype, v);
3404 }
3405
3406 break;
3407
3408 case MIN_EXPR:
3409 case MAX_EXPR:
3410 case COND_EXPR:
3411 if (adjustment_def)
3412 {
3413 *adjustment_def = NULL_TREE;
3414 init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
3415 break;
3416 }
3417
3418 init_def = build_vector_from_val (vectype, init_value);
3419 break;
3420
3421 default:
3422 gcc_unreachable ();
3423 }
3424
3425 return init_def;
3426 }
3427
3428
3429 /* Function vect_create_epilog_for_reduction
3430
3431 Create code at the loop-epilog to finalize the result of a reduction
3432 computation.
3433
3434 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
3435 reduction statements.
3436 STMT is the scalar reduction stmt that is being vectorized.
3437 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
3438 number of elements that we can fit in a vectype (nunits). In this case
3439 we have to generate more than one vector stmt - i.e., we need to "unroll"
3440 the vector stmt by a factor of VF/nunits. For more details see documentation
3441 in vectorizable_operation.
3442 REDUC_CODE is the tree-code for the epilog reduction.
3443 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
3444 computation.
3445 REDUC_INDEX is the index of the operand in the right hand side of the
3446 statement that is defined by REDUCTION_PHI.
3447 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
3448 SLP_NODE is an SLP node containing a group of reduction statements. The
3449 first one in this group is STMT.
3450
3451 This function:
3452 1. Creates the reduction def-use cycles: sets the arguments for
3453 REDUCTION_PHIS:
3454 The loop-entry argument is the vectorized initial-value of the reduction.
3455 The loop-latch argument is taken from VECT_DEFS - the vector of partial
3456 sums.
3457 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
3458 by applying the operation specified by REDUC_CODE if available, or by
3459 other means (whole-vector shifts or a scalar loop).
3460 The function also creates a new phi node at the loop exit to preserve
3461 loop-closed form, as illustrated below.
3462
3463 The flow at the entry to this function:
3464
3465 loop:
3466 vec_def = phi <null, null> # REDUCTION_PHI
3467 VECT_DEF = vector_stmt # vectorized form of STMT
3468 s_loop = scalar_stmt # (scalar) STMT
3469 loop_exit:
3470 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3471 use <s_out0>
3472 use <s_out0>
3473
3474 The above is transformed by this function into:
3475
3476 loop:
3477 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3478 VECT_DEF = vector_stmt # vectorized form of STMT
3479 s_loop = scalar_stmt # (scalar) STMT
3480 loop_exit:
3481 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3482 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3483 v_out2 = reduce <v_out1>
3484 s_out3 = extract_field <v_out2, 0>
3485 s_out4 = adjust_result <s_out3>
3486 use <s_out4>
3487 use <s_out4>
3488 */
3489
3490 static void
3491 vect_create_epilog_for_reduction (VEC (tree, heap) *vect_defs, gimple stmt,
3492 int ncopies, enum tree_code reduc_code,
3493 VEC (gimple, heap) *reduction_phis,
3494 int reduc_index, bool double_reduc,
3495 slp_tree slp_node)
3496 {
3497 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3498 stmt_vec_info prev_phi_info;
3499 tree vectype;
3500 enum machine_mode mode;
3501 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3502 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
3503 basic_block exit_bb;
3504 tree scalar_dest;
3505 tree scalar_type;
3506 gimple new_phi = NULL, phi;
3507 gimple_stmt_iterator exit_gsi;
3508 tree vec_dest;
3509 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
3510 gimple epilog_stmt = NULL;
3511 enum tree_code code = gimple_assign_rhs_code (stmt);
3512 gimple exit_phi;
3513 tree bitsize, bitpos;
3514 tree adjustment_def = NULL;
3515 tree vec_initial_def = NULL;
3516 tree reduction_op, expr, def;
3517 tree orig_name, scalar_result;
3518 imm_use_iterator imm_iter, phi_imm_iter;
3519 use_operand_p use_p, phi_use_p;
3520 bool extract_scalar_result = false;
3521 gimple use_stmt, orig_stmt, reduction_phi = NULL;
3522 bool nested_in_vect_loop = false;
3523 VEC (gimple, heap) *new_phis = NULL;
3524 VEC (gimple, heap) *inner_phis = NULL;
3525 enum vect_def_type dt = vect_unknown_def_type;
3526 int j, i;
3527 VEC (tree, heap) *scalar_results = NULL;
3528 unsigned int group_size = 1, k, ratio;
3529 VEC (tree, heap) *vec_initial_defs = NULL;
3530 VEC (gimple, heap) *phis;
3531 bool slp_reduc = false;
3532 tree new_phi_result;
3533 gimple inner_phi = NULL;
3534
3535 if (slp_node)
3536 group_size = VEC_length (gimple, SLP_TREE_SCALAR_STMTS (slp_node));
3537
3538 if (nested_in_vect_loop_p (loop, stmt))
3539 {
3540 outer_loop = loop;
3541 loop = loop->inner;
3542 nested_in_vect_loop = true;
3543 gcc_assert (!slp_node);
3544 }
3545
3546 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3547 {
3548 case GIMPLE_SINGLE_RHS:
3549 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
3550 == ternary_op);
3551 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
3552 break;
3553 case GIMPLE_UNARY_RHS:
3554 reduction_op = gimple_assign_rhs1 (stmt);
3555 break;
3556 case GIMPLE_BINARY_RHS:
3557 reduction_op = reduc_index ?
3558 gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
3559 break;
3560 case GIMPLE_TERNARY_RHS:
3561 reduction_op = gimple_op (stmt, reduc_index + 1);
3562 break;
3563 default:
3564 gcc_unreachable ();
3565 }
3566
3567 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
3568 gcc_assert (vectype);
3569 mode = TYPE_MODE (vectype);
3570
3571 /* 1. Create the reduction def-use cycle:
3572 Set the arguments of REDUCTION_PHIS, i.e., transform
3573
3574 loop:
3575 vec_def = phi <null, null> # REDUCTION_PHI
3576 VECT_DEF = vector_stmt # vectorized form of STMT
3577 ...
3578
3579 into:
3580
3581 loop:
3582 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3583 VECT_DEF = vector_stmt # vectorized form of STMT
3584 ...
3585
3586 (in case of SLP, do it for all the phis). */
3587
3588 /* Get the loop-entry arguments. */
3589 if (slp_node)
3590 vect_get_vec_defs (reduction_op, NULL_TREE, stmt, &vec_initial_defs,
3591 NULL, slp_node, reduc_index);
3592 else
3593 {
3594 vec_initial_defs = VEC_alloc (tree, heap, 1);
3595 /* For the case of reduction, vect_get_vec_def_for_operand returns
3596 the scalar def before the loop, that defines the initial value
3597 of the reduction variable. */
3598 vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
3599 &adjustment_def);
3600 VEC_quick_push (tree, vec_initial_defs, vec_initial_def);
3601 }
3602
3603 /* Set phi nodes arguments. */
3604 FOR_EACH_VEC_ELT (gimple, reduction_phis, i, phi)
3605 {
3606 tree vec_init_def = VEC_index (tree, vec_initial_defs, i);
3607 tree def = VEC_index (tree, vect_defs, i);
3608 for (j = 0; j < ncopies; j++)
3609 {
3610 /* Set the loop-entry arg of the reduction-phi. */
3611 add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
3612 UNKNOWN_LOCATION, NULL);
3613
3614 /* Set the loop-latch arg for the reduction-phi. */
3615 if (j > 0)
3616 def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);
3617
3618 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION, NULL);
3619
3620 if (vect_print_dump_info (REPORT_DETAILS))
3621 {
3622 fprintf (vect_dump, "transform reduction: created def-use"
3623 " cycle: ");
3624 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
3625 fprintf (vect_dump, "\n");
3626 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (def), 0,
3627 TDF_SLIM);
3628 }
3629
3630 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
3631 }
3632 }
3633
3634 VEC_free (tree, heap, vec_initial_defs);
3635
3636 /* 2. Create epilog code.
3637 The reduction epilog code operates across the elements of the vector
3638 of partial results computed by the vectorized loop.
3639 The reduction epilog code consists of:
3640
3641 step 1: compute the scalar result in a vector (v_out2)
3642 step 2: extract the scalar result (s_out3) from the vector (v_out2)
3643 step 3: adjust the scalar result (s_out3) if needed.
3644
3645      Step 1 can be accomplished using one of the following three schemes:
3646 (scheme 1) using reduc_code, if available.
3647 (scheme 2) using whole-vector shifts, if available.
3648 (scheme 3) using a scalar loop. In this case steps 1+2 above are
3649 combined.
3650
3651 The overall epilog code looks like this:
3652
3653 s_out0 = phi <s_loop> # original EXIT_PHI
3654 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3655 v_out2 = reduce <v_out1> # step 1
3656 s_out3 = extract_field <v_out2, 0> # step 2
3657 s_out4 = adjust_result <s_out3> # step 3
3658
3659 (step 3 is optional, and steps 1 and 2 may be combined).
3660 Lastly, the uses of s_out0 are replaced by s_out4. */
3661
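   /* As an illustration (not tied to any particular target): for a PLUS
      reduction of ints with nunits == 4 and initial value s0, the loop
      computes a vector of partial sums starting from {0,0,0,0}; the epilog
      computes s_out3 = v_out1[0] + v_out1[1] + v_out1[2] + v_out1[3]
      (steps 1 and 2) and s_out4 = s_out3 + s0 (step 3).  */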
3662
3663 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
3664 v_out1 = phi <VECT_DEF>
3665 Store them in NEW_PHIS. */
3666
3667 exit_bb = single_exit (loop)->dest;
3668 prev_phi_info = NULL;
3669 new_phis = VEC_alloc (gimple, heap, VEC_length (tree, vect_defs));
3670 FOR_EACH_VEC_ELT (tree, vect_defs, i, def)
3671 {
3672 for (j = 0; j < ncopies; j++)
3673 {
3674 phi = create_phi_node (SSA_NAME_VAR (def), exit_bb);
3675 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL));
3676 if (j == 0)
3677 VEC_quick_push (gimple, new_phis, phi);
3678 else
3679 {
3680 def = vect_get_vec_def_for_stmt_copy (dt, def);
3681 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
3682 }
3683
3684 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
3685 prev_phi_info = vinfo_for_stmt (phi);
3686 }
3687 }
3688
3689 /* The epilogue is created for the outer-loop, i.e., for the loop being
3690 vectorized. Create exit phis for the outer loop. */
3691 if (double_reduc)
3692 {
3693 loop = outer_loop;
3694 exit_bb = single_exit (loop)->dest;
3695 inner_phis = VEC_alloc (gimple, heap, VEC_length (tree, vect_defs));
3696 FOR_EACH_VEC_ELT (gimple, new_phis, i, phi)
3697 {
3698 gimple outer_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (phi)),
3699 exit_bb);
3700 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
3701 PHI_RESULT (phi));
3702 set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
3703 loop_vinfo, NULL));
3704 VEC_quick_push (gimple, inner_phis, phi);
3705 VEC_replace (gimple, new_phis, i, outer_phi);
3706 prev_phi_info = vinfo_for_stmt (outer_phi);
3707 while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)))
3708 {
3709 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
3710 outer_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (phi)),
3711 exit_bb);
3712 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
3713 PHI_RESULT (phi));
3714 set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
3715 loop_vinfo, NULL));
3716 STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi;
3717 prev_phi_info = vinfo_for_stmt (outer_phi);
3718 }
3719 }
3720 }
3721
3722 exit_gsi = gsi_after_labels (exit_bb);
3723
3724 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
3725 (i.e. when reduc_code is not available) and in the final adjustment
3726 code (if needed). Also get the original scalar reduction variable as
3727 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
3728 represents a reduction pattern), the tree-code and scalar-def are
3729 taken from the original stmt that the pattern-stmt (STMT) replaces.
3730 Otherwise (it is a regular reduction) - the tree-code and scalar-def
3731 are taken from STMT. */
3732
3733 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3734 if (!orig_stmt)
3735 {
3736 /* Regular reduction */
3737 orig_stmt = stmt;
3738 }
3739 else
3740 {
3741 /* Reduction pattern */
3742 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
3743 gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
3744 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
3745 }
3746
3747 code = gimple_assign_rhs_code (orig_stmt);
3748 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
3749 partial results are added and not subtracted. */
3750 if (code == MINUS_EXPR)
3751 code = PLUS_EXPR;
3752
3753 scalar_dest = gimple_assign_lhs (orig_stmt);
3754 scalar_type = TREE_TYPE (scalar_dest);
3755 scalar_results = VEC_alloc (tree, heap, group_size);
3756 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
3757 bitsize = TYPE_SIZE (scalar_type);
3758
3759 /* In case this is a reduction in an inner-loop while vectorizing an outer
3760 loop - we don't need to extract a single scalar result at the end of the
3761 inner-loop (unless it is double reduction, i.e., the use of reduction is
3762 outside the outer-loop). The final vector of partial results will be used
3763 in the vectorized outer-loop, or reduced to a scalar result at the end of
3764 the outer-loop. */
3765 if (nested_in_vect_loop && !double_reduc)
3766 goto vect_finalize_reduction;
3767
3768 /* SLP reduction without reduction chain, e.g.,
3769 # a1 = phi <a2, a0>
3770 # b1 = phi <b2, b0>
3771 a2 = operation (a1)
3772 b2 = operation (b1) */
3773 slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
3774
3775 /* In case of reduction chain, e.g.,
3776 # a1 = phi <a3, a0>
3777 a2 = operation (a1)
3778 a3 = operation (a2),
3779
3780 we may end up with more than one vector result. Here we reduce them to
3781 one vector. */
3782 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
3783 {
3784 tree first_vect = PHI_RESULT (VEC_index (gimple, new_phis, 0));
3785 tree tmp;
3786 gimple new_vec_stmt = NULL;
3787
3788 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3789 for (k = 1; k < VEC_length (gimple, new_phis); k++)
3790 {
3791 gimple next_phi = VEC_index (gimple, new_phis, k);
3792 tree second_vect = PHI_RESULT (next_phi);
3793
3794 tmp = build2 (code, vectype, first_vect, second_vect);
3795 new_vec_stmt = gimple_build_assign (vec_dest, tmp);
3796 first_vect = make_ssa_name (vec_dest, new_vec_stmt);
3797 gimple_assign_set_lhs (new_vec_stmt, first_vect);
3798 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
3799 }
3800
3801 new_phi_result = first_vect;
3802 if (new_vec_stmt)
3803 {
3804 VEC_truncate (gimple, new_phis, 0);
3805 VEC_safe_push (gimple, heap, new_phis, new_vec_stmt);
3806 }
3807 }
3808 else
3809 new_phi_result = PHI_RESULT (VEC_index (gimple, new_phis, 0));
3810
3811 /* 2.3 Create the reduction code, using one of the three schemes described
3812 above. In SLP we simply need to extract all the elements from the
3813 vector (without reducing them), so we use scalar shifts. */
3814 if (reduc_code != ERROR_MARK && !slp_reduc)
3815 {
3816 tree tmp;
3817
3818 /*** Case 1: Create:
3819 v_out2 = reduc_expr <v_out1> */
3820
3821 if (vect_print_dump_info (REPORT_DETAILS))
3822 fprintf (vect_dump, "Reduce using direct vector reduction.");
3823
3824 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3825 tmp = build1 (reduc_code, vectype, new_phi_result);
3826 epilog_stmt = gimple_build_assign (vec_dest, tmp);
3827 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3828 gimple_assign_set_lhs (epilog_stmt, new_temp);
3829 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3830
3831 extract_scalar_result = true;
3832 }
3833 else
3834 {
3835 enum tree_code shift_code = ERROR_MARK;
3836 bool have_whole_vector_shift = true;
3837 int bit_offset;
3838 int element_bitsize = tree_low_cst (bitsize, 1);
3839 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3840 tree vec_temp;
3841
3842 if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
3843 shift_code = VEC_RSHIFT_EXPR;
3844 else
3845 have_whole_vector_shift = false;
3846
3847 /* Regardless of whether we have a whole vector shift, if we're
3848 emulating the operation via tree-vect-generic, we don't want
3849 to use it. Only the first round of the reduction is likely
3850 to still be profitable via emulation. */
3851 /* ??? It might be better to emit a reduction tree code here, so that
3852 tree-vect-generic can expand the first round via bit tricks. */
3853 if (!VECTOR_MODE_P (mode))
3854 have_whole_vector_shift = false;
3855 else
3856 {
3857 optab optab = optab_for_tree_code (code, vectype, optab_default);
3858 if (optab_handler (optab, mode) == CODE_FOR_nothing)
3859 have_whole_vector_shift = false;
3860 }
3861
3862 if (have_whole_vector_shift && !slp_reduc)
3863 {
3864 /*** Case 2: Create:
3865 for (offset = VS/2; offset >= element_size; offset/=2)
3866 {
3867 Create: va' = vec_shift <va, offset>
3868 Create: va = vop <va, va'>
3869 } */
3870
3871 if (vect_print_dump_info (REPORT_DETAILS))
3872 fprintf (vect_dump, "Reduce using vector shifts");
3873
3874 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3875 new_temp = new_phi_result;
3876 for (bit_offset = vec_size_in_bits/2;
3877 bit_offset >= element_bitsize;
3878 bit_offset /= 2)
3879 {
3880 tree bitpos = size_int (bit_offset);
3881
3882 epilog_stmt = gimple_build_assign_with_ops (shift_code,
3883 vec_dest, new_temp, bitpos);
3884 new_name = make_ssa_name (vec_dest, epilog_stmt);
3885 gimple_assign_set_lhs (epilog_stmt, new_name);
3886 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3887
3888 epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
3889 new_name, new_temp);
3890 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3891 gimple_assign_set_lhs (epilog_stmt, new_temp);
3892 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3893 }
3894
3895 extract_scalar_result = true;
3896 }
3897 else
3898 {
3899 tree rhs;
3900
3901 /*** Case 3: Create:
3902 s = extract_field <v_out2, 0>
3903 for (offset = element_size;
3904 offset < vector_size;
3905 offset += element_size;)
3906 {
3907 Create: s' = extract_field <v_out2, offset>
3908 Create: s = op <s, s'> // For non SLP cases
3909 } */
3910
3911 if (vect_print_dump_info (REPORT_DETAILS))
3912 fprintf (vect_dump, "Reduce using scalar code. ");
3913
3914 vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3915 FOR_EACH_VEC_ELT (gimple, new_phis, i, new_phi)
3916 {
3917 if (gimple_code (new_phi) == GIMPLE_PHI)
3918 vec_temp = PHI_RESULT (new_phi);
3919 else
3920 vec_temp = gimple_assign_lhs (new_phi);
3921 rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
3922 bitsize_zero_node);
3923 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3924 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3925 gimple_assign_set_lhs (epilog_stmt, new_temp);
3926 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3927
3928               /* In SLP we don't need to apply the reduction operation, so
3929                  we just collect the s' values in SCALAR_RESULTS.  */
3930 if (slp_reduc)
3931 VEC_safe_push (tree, heap, scalar_results, new_temp);
3932
3933 for (bit_offset = element_bitsize;
3934 bit_offset < vec_size_in_bits;
3935 bit_offset += element_bitsize)
3936 {
3937 tree bitpos = bitsize_int (bit_offset);
3938 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
3939 bitsize, bitpos);
3940
3941 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3942 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
3943 gimple_assign_set_lhs (epilog_stmt, new_name);
3944 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3945
3946 if (slp_reduc)
3947 {
3948                       /* In SLP we don't need to apply the reduction
3949                          operation, so we just collect the s' values in
                               SCALAR_RESULTS.  */
3950 new_temp = new_name;
3951 VEC_safe_push (tree, heap, scalar_results, new_name);
3952 }
3953 else
3954 {
3955 epilog_stmt = gimple_build_assign_with_ops (code,
3956 new_scalar_dest, new_name, new_temp);
3957 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3958 gimple_assign_set_lhs (epilog_stmt, new_temp);
3959 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3960 }
3961 }
3962 }
3963
3964           /* The only case where we need to reduce scalar results in SLP is
3965              unrolling.  If the size of SCALAR_RESULTS is greater than
3966              GROUP_SIZE, we reduce them by combining elements modulo
3967              GROUP_SIZE.  */
3968 if (slp_reduc)
3969 {
3970 tree res, first_res, new_res;
3971 gimple new_stmt;
3972
3973 /* Reduce multiple scalar results in case of SLP unrolling. */
3974 for (j = group_size; VEC_iterate (tree, scalar_results, j, res);
3975 j++)
3976 {
3977 first_res = VEC_index (tree, scalar_results, j % group_size);
3978 new_stmt = gimple_build_assign_with_ops (code,
3979 new_scalar_dest, first_res, res);
3980 new_res = make_ssa_name (new_scalar_dest, new_stmt);
3981 gimple_assign_set_lhs (new_stmt, new_res);
3982 gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
3983 VEC_replace (tree, scalar_results, j % group_size, new_res);
3984 }
3985 }
3986 else
3987 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
3988 VEC_safe_push (tree, heap, scalar_results, new_temp);
3989
3990 extract_scalar_result = false;
3991 }
3992 }
3993
3994 /* 2.4 Extract the final scalar result. Create:
3995 s_out3 = extract_field <v_out2, bitpos> */
3996
3997 if (extract_scalar_result)
3998 {
3999 tree rhs;
4000
4001 if (vect_print_dump_info (REPORT_DETAILS))
4002 fprintf (vect_dump, "extract scalar result");
4003
4004 if (BYTES_BIG_ENDIAN)
4005 bitpos = size_binop (MULT_EXPR,
4006 bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
4007 TYPE_SIZE (scalar_type));
4008 else
4009 bitpos = bitsize_zero_node;
4010
4011 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
4012 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
4013 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
4014 gimple_assign_set_lhs (epilog_stmt, new_temp);
4015 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4016 VEC_safe_push (tree, heap, scalar_results, new_temp);
4017 }
4018
4019 vect_finalize_reduction:
4020
4021 if (double_reduc)
4022 loop = loop->inner;
4023
4024 /* 2.5 Adjust the final result by the initial value of the reduction
4025 variable. (When such adjustment is not needed, then
4026 'adjustment_def' is zero). For example, if code is PLUS we create:
4027 new_temp = loop_exit_def + adjustment_def */
4028
4029 if (adjustment_def)
4030 {
4031 gcc_assert (!slp_reduc);
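       /* In the nested case the adjustment is applied to the vector of
          partial results; otherwise it is applied to the already-extracted
          scalar result.  */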
4032 if (nested_in_vect_loop)
4033 {
4034 new_phi = VEC_index (gimple, new_phis, 0);
4035 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
4036 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
4037 new_dest = vect_create_destination_var (scalar_dest, vectype);
4038 }
4039 else
4040 {
4041 new_temp = VEC_index (tree, scalar_results, 0);
4042 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
4043 expr = build2 (code, scalar_type, new_temp, adjustment_def);
4044 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
4045 }
4046
4047 epilog_stmt = gimple_build_assign (new_dest, expr);
4048 new_temp = make_ssa_name (new_dest, epilog_stmt);
4049 gimple_assign_set_lhs (epilog_stmt, new_temp);
4050 SSA_NAME_DEF_STMT (new_temp) = epilog_stmt;
4051 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4052 if (nested_in_vect_loop)
4053 {
4054 set_vinfo_for_stmt (epilog_stmt,
4055 new_stmt_vec_info (epilog_stmt, loop_vinfo,
4056 NULL));
4057 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
4058 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
4059
4060 if (!double_reduc)
4061 VEC_quick_push (tree, scalar_results, new_temp);
4062 else
4063 VEC_replace (tree, scalar_results, 0, new_temp);
4064 }
4065 else
4066 VEC_replace (tree, scalar_results, 0, new_temp);
4067
4068 VEC_replace (gimple, new_phis, 0, epilog_stmt);
4069 }
4070
4071 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
4072 phis with new adjusted scalar results, i.e., replace use <s_out0>
4073 with use <s_out4>.
4074
4075 Transform:
4076 loop_exit:
4077 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4078 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4079 v_out2 = reduce <v_out1>
4080 s_out3 = extract_field <v_out2, 0>
4081 s_out4 = adjust_result <s_out3>
4082 use <s_out0>
4083 use <s_out0>
4084
4085 into:
4086
4087 loop_exit:
4088 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4089 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4090 v_out2 = reduce <v_out1>
4091 s_out3 = extract_field <v_out2, 0>
4092 s_out4 = adjust_result <s_out3>
4093 use <s_out4>
4094 use <s_out4> */
4095
4096
4097   /* In an SLP reduction chain we reduce the vector results into one vector
4098      if necessary, hence we set GROUP_SIZE to 1 here.  SCALAR_DEST is the LHS
4099      of the last stmt in the reduction chain, since we are looking for the
4100      loop exit phi node.  */
4101 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
4102 {
4103 scalar_dest = gimple_assign_lhs (VEC_index (gimple,
4104 SLP_TREE_SCALAR_STMTS (slp_node),
4105 group_size - 1));
4106 group_size = 1;
4107 }
4108
4109   /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
4110      case GROUP_SIZE is greater than the vectorization factor).  Therefore, we
4111      need to match SCALAR_RESULTS with the corresponding statements.  The first
4112      (GROUP_SIZE / number of new vector stmts) scalar results correspond to
4113      the first vector stmt, etc.
4114      (RATIO is equal to (GROUP_SIZE / number of new vector stmts)).  */
4115 if (group_size > VEC_length (gimple, new_phis))
4116 {
4117 ratio = group_size / VEC_length (gimple, new_phis);
4118 gcc_assert (!(group_size % VEC_length (gimple, new_phis)));
4119 }
4120 else
4121 ratio = 1;
4122
4123 for (k = 0; k < group_size; k++)
4124 {
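       /* Every RATIO scalar results move on to the next vector stmt and its
          corresponding reduction phi.  */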
4125 if (k % ratio == 0)
4126 {
4127 epilog_stmt = VEC_index (gimple, new_phis, k / ratio);
4128 reduction_phi = VEC_index (gimple, reduction_phis, k / ratio);
4129 if (double_reduc)
4130 inner_phi = VEC_index (gimple, inner_phis, k / ratio);
4131 }
4132
4133 if (slp_reduc)
4134 {
4135 gimple current_stmt = VEC_index (gimple,
4136 SLP_TREE_SCALAR_STMTS (slp_node), k);
4137
4138 orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
4139 /* SLP statements can't participate in patterns. */
4140 gcc_assert (!orig_stmt);
4141 scalar_dest = gimple_assign_lhs (current_stmt);
4142 }
4143
4144 phis = VEC_alloc (gimple, heap, 3);
4145 /* Find the loop-closed-use at the loop exit of the original scalar
4146 result. (The reduction result is expected to have two immediate uses -
4147 one at the latch block, and one at the loop exit). */
4148 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4149 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
4150 VEC_safe_push (gimple, heap, phis, USE_STMT (use_p));
4151
4152 /* We expect to have found an exit_phi because of loop-closed-ssa
4153 form. */
4154 gcc_assert (!VEC_empty (gimple, phis));
4155
4156 FOR_EACH_VEC_ELT (gimple, phis, i, exit_phi)
4157 {
4158 if (outer_loop)
4159 {
4160 stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
4161 gimple vect_phi;
4162
4163 /* FORNOW. Currently not supporting the case that an inner-loop
4164 reduction is not used in the outer-loop (but only outside the
4165 outer-loop), unless it is double reduction. */
4166 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
4167 && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
4168 || double_reduc);
4169
4170 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
4171 if (!double_reduc
4172 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
4173 != vect_double_reduction_def)
4174 continue;
4175
4176 /* Handle double reduction:
4177
4178 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
4179 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
4180 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
4181 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
4182
4183 At that point the regular reduction (stmt2 and stmt3) is
4184 already vectorized, as well as the exit phi node, stmt4.
4185 Here we vectorize the phi node of double reduction, stmt1, and
4186 update all relevant statements. */
4187
4188 /* Go through all the uses of s2 to find double reduction phi
4189 node, i.e., stmt1 above. */
4190 orig_name = PHI_RESULT (exit_phi);
4191 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
4192 {
4193 stmt_vec_info use_stmt_vinfo;
4194 stmt_vec_info new_phi_vinfo;
4195 tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
4196 basic_block bb = gimple_bb (use_stmt);
4197 gimple use;
4198
4199                   /* Check that USE_STMT is really a double reduction phi
4200                      node.  */
4201 if (gimple_code (use_stmt) != GIMPLE_PHI
4202 || gimple_phi_num_args (use_stmt) != 2
4203 || bb->loop_father != outer_loop)
4204 continue;
4205 use_stmt_vinfo = vinfo_for_stmt (use_stmt);
4206 if (!use_stmt_vinfo
4207 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
4208 != vect_double_reduction_def)
4209 continue;
4210
4211 /* Create vector phi node for double reduction:
4212 vs1 = phi <vs0, vs2>
4213 vs1 was created previously in this function by a call to
4214 vect_get_vec_def_for_operand and is stored in
4215 vec_initial_def;
4216 vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
4217 vs0 is created here. */
4218
4219 /* Create vector phi node. */
4220 vect_phi = create_phi_node (vec_initial_def, bb);
4221 new_phi_vinfo = new_stmt_vec_info (vect_phi,
4222 loop_vec_info_for_loop (outer_loop), NULL);
4223 set_vinfo_for_stmt (vect_phi, new_phi_vinfo);
4224
4225 /* Create vs0 - initial def of the double reduction phi. */
4226 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
4227 loop_preheader_edge (outer_loop));
4228 init_def = get_initial_def_for_reduction (stmt,
4229 preheader_arg, NULL);
4230 vect_phi_init = vect_init_vector (use_stmt, init_def,
4231 vectype, NULL);
4232
4233 /* Update phi node arguments with vs0 and vs2. */
4234 add_phi_arg (vect_phi, vect_phi_init,
4235 loop_preheader_edge (outer_loop),
4236 UNKNOWN_LOCATION, NULL);
4237 add_phi_arg (vect_phi, PHI_RESULT (inner_phi),
4238 loop_latch_edge (outer_loop), UNKNOWN_LOCATION,
4239 NULL);
4240 if (vect_print_dump_info (REPORT_DETAILS))
4241 {
4242 fprintf (vect_dump, "created double reduction phi "
4243 "node: ");
4244 print_gimple_stmt (vect_dump, vect_phi, 0, TDF_SLIM);
4245 }
4246
4247 vect_phi_res = PHI_RESULT (vect_phi);
4248
4249 /* Replace the use, i.e., set the correct vs1 in the regular
4250 reduction phi node. FORNOW, NCOPIES is always 1, so the
4251 loop is redundant. */
4252 use = reduction_phi;
4253 for (j = 0; j < ncopies; j++)
4254 {
4255 edge pr_edge = loop_preheader_edge (loop);
4256 SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
4257 use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
4258 }
4259 }
4260 }
4261 }
4262
4263 VEC_free (gimple, heap, phis);
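       /* For a double reduction the exit phis to fix up are those of the
          outer loop; for any other nested reduction there is nothing left to
          do here - the vector of partial results is consumed by the
          vectorized outer loop.  */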
4264 if (nested_in_vect_loop)
4265 {
4266 if (double_reduc)
4267 loop = outer_loop;
4268 else
4269 continue;
4270 }
4271
4272 phis = VEC_alloc (gimple, heap, 3);
4273 /* Find the loop-closed-use at the loop exit of the original scalar
4274 result. (The reduction result is expected to have two immediate uses,
4275 one at the latch block, and one at the loop exit). For double
4276 reductions we are looking for exit phis of the outer loop. */
4277 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4278 {
4279 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
4280 VEC_safe_push (gimple, heap, phis, USE_STMT (use_p));
4281 else
4282 {
4283 if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
4284 {
4285 tree phi_res = PHI_RESULT (USE_STMT (use_p));
4286
4287 FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
4288 {
4289 if (!flow_bb_inside_loop_p (loop,
4290 gimple_bb (USE_STMT (phi_use_p))))
4291 VEC_safe_push (gimple, heap, phis,
4292 USE_STMT (phi_use_p));
4293 }
4294 }
4295 }
4296 }
4297
4298 FOR_EACH_VEC_ELT (gimple, phis, i, exit_phi)
4299 {
4300 /* Replace the uses: */
4301 orig_name = PHI_RESULT (exit_phi);
4302 scalar_result = VEC_index (tree, scalar_results, k);
4303 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
4304 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
4305 SET_USE (use_p, scalar_result);
4306 }
4307
4308 VEC_free (gimple, heap, phis);
4309 }
4310
4311 VEC_free (tree, heap, scalar_results);
4312 VEC_free (gimple, heap, new_phis);
4313 }
4314
4315
4316 /* Function vectorizable_reduction.
4317
4318 Check if STMT performs a reduction operation that can be vectorized.
4319 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4320 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4321 Return FALSE if not a vectorizable STMT, TRUE otherwise.
4322
4323 This function also handles reduction idioms (patterns) that have been
4324 recognized in advance during vect_pattern_recog. In this case, STMT may be
4325 of this form:
4326 X = pattern_expr (arg0, arg1, ..., X)
4327    and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
4328 sequence that had been detected and replaced by the pattern-stmt (STMT).
4329
4330 In some cases of reduction patterns, the type of the reduction variable X is
4331 different than the type of the other arguments of STMT.
4332 In such cases, the vectype that is used when transforming STMT into a vector
4333 stmt is different than the vectype that is used to determine the
4334 vectorization factor, because it consists of a different number of elements
4335 than the actual number of elements that are being operated upon in parallel.
4336
4337 For example, consider an accumulation of shorts into an int accumulator.
4338 On some targets it's possible to vectorize this pattern operating on 8
4339 shorts at a time (hence, the vectype for purposes of determining the
4340 vectorization factor should be V8HI); on the other hand, the vectype that
4341 is used to create the vector form is actually V4SI (the type of the result).
4342
4343 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
4344 indicates what is the actual level of parallelism (V8HI in the example), so
4345 that the right vectorization factor would be derived. This vectype
4346 corresponds to the type of arguments to the reduction stmt, and should *NOT*
4347 be used to create the vectorized stmt. The right vectype for the vectorized
4348 stmt is obtained from the type of the result X:
4349 get_vectype_for_scalar_type (TREE_TYPE (X))
4350
4351 This means that, contrary to "regular" reductions (or "regular" stmts in
4352 general), the following equation:
4353 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
4354 does *NOT* necessarily hold for reduction patterns. */
4355
4356 bool
4357 vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
4358 gimple *vec_stmt, slp_tree slp_node)
4359 {
4360 tree vec_dest;
4361 tree scalar_dest;
4362 tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
4363 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4364 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4365 tree vectype_in = NULL_TREE;
4366 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4367 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4368 enum tree_code code, orig_code, epilog_reduc_code;
4369 enum machine_mode vec_mode;
4370 int op_type;
4371 optab optab, reduc_optab;
4372 tree new_temp = NULL_TREE;
4373 tree def;
4374 gimple def_stmt;
4375 enum vect_def_type dt;
4376 gimple new_phi = NULL;
4377 tree scalar_type;
4378 bool is_simple_use;
4379 gimple orig_stmt;
4380 stmt_vec_info orig_stmt_info;
4381 tree expr = NULL_TREE;
4382 int i;
4383 int ncopies;
4384 int epilog_copies;
4385 stmt_vec_info prev_stmt_info, prev_phi_info;
4386 bool single_defuse_cycle = false;
4387 tree reduc_def = NULL_TREE;
4388 gimple new_stmt = NULL;
4389 int j;
4390 tree ops[3];
4391 bool nested_cycle = false, found_nested_cycle_def = false;
4392 gimple reduc_def_stmt = NULL;
4393 /* The default is that the reduction variable is the last in statement. */
4394 int reduc_index = 2;
4395 bool double_reduc = false, dummy;
4396 basic_block def_bb;
4397 struct loop * def_stmt_loop, *outer_loop = NULL;
4398 tree def_arg;
4399 gimple def_arg_stmt;
4400 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vect_defs = NULL;
4401 VEC (gimple, heap) *phis = NULL;
4402 int vec_num;
4403 tree def0, def1, tem, op0, op1 = NULL_TREE;
4404
4405 /* In case of reduction chain we switch to the first stmt in the chain, but
4406 we don't update STMT_INFO, since only the last stmt is marked as reduction
4407 and has reduction properties. */
4408 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
4409 stmt = GROUP_FIRST_ELEMENT (stmt_info);
4410
4411 if (nested_in_vect_loop_p (loop, stmt))
4412 {
4413 outer_loop = loop;
4414 loop = loop->inner;
4415 nested_cycle = true;
4416 }
4417
4418 /* 1. Is vectorizable reduction? */
4419 /* Not supportable if the reduction variable is used in the loop, unless
4420 it's a reduction chain. */
4421 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
4422 && !GROUP_FIRST_ELEMENT (stmt_info))
4423 return false;
4424
4425   /* Reductions that are not used even in an enclosing outer-loop
4426      are expected to be "live" (used out of the loop).  */
4427 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
4428 && !STMT_VINFO_LIVE_P (stmt_info))
4429 return false;
4430
4431 /* Make sure it was already recognized as a reduction computation. */
4432 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
4433 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
4434 return false;
4435
4436 /* 2. Has this been recognized as a reduction pattern?
4437
4438 Check if STMT represents a pattern that has been recognized
4439 in earlier analysis stages. For stmts that represent a pattern,
4440 the STMT_VINFO_RELATED_STMT field records the last stmt in
4441 the original sequence that constitutes the pattern. */
4442
4443 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
4444 if (orig_stmt)
4445 {
4446 orig_stmt_info = vinfo_for_stmt (orig_stmt);
4447 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt);
4448 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
4449 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
4450 }
4451
4452 /* 3. Check the operands of the operation. The first operands are defined
4453 inside the loop body. The last operand is the reduction variable,
4454 which is defined by the loop-header-phi. */
4455
4456 gcc_assert (is_gimple_assign (stmt));
4457
4458 /* Flatten RHS. */
4459 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
4460 {
4461 case GIMPLE_SINGLE_RHS:
4462 op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
4463 if (op_type == ternary_op)
4464 {
4465 tree rhs = gimple_assign_rhs1 (stmt);
4466 ops[0] = TREE_OPERAND (rhs, 0);
4467 ops[1] = TREE_OPERAND (rhs, 1);
4468 ops[2] = TREE_OPERAND (rhs, 2);
4469 code = TREE_CODE (rhs);
4470 }
4471 else
4472 return false;
4473 break;
4474
4475 case GIMPLE_BINARY_RHS:
4476 code = gimple_assign_rhs_code (stmt);
4477 op_type = TREE_CODE_LENGTH (code);
4478 gcc_assert (op_type == binary_op);
4479 ops[0] = gimple_assign_rhs1 (stmt);
4480 ops[1] = gimple_assign_rhs2 (stmt);
4481 break;
4482
4483 case GIMPLE_TERNARY_RHS:
4484 code = gimple_assign_rhs_code (stmt);
4485 op_type = TREE_CODE_LENGTH (code);
4486 gcc_assert (op_type == ternary_op);
4487 ops[0] = gimple_assign_rhs1 (stmt);
4488 ops[1] = gimple_assign_rhs2 (stmt);
4489 ops[2] = gimple_assign_rhs3 (stmt);
4490 break;
4491
4492 case GIMPLE_UNARY_RHS:
4493 return false;
4494
4495 default:
4496 gcc_unreachable ();
4497 }
4498
4499 if (code == COND_EXPR && slp_node)
4500 return false;
4501
4502 scalar_dest = gimple_assign_lhs (stmt);
4503 scalar_type = TREE_TYPE (scalar_dest);
4504 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
4505 && !SCALAR_FLOAT_TYPE_P (scalar_type))
4506 return false;
4507
4508 /* Do not try to vectorize bit-precision reductions. */
4509 if ((TYPE_PRECISION (scalar_type)
4510 != GET_MODE_PRECISION (TYPE_MODE (scalar_type))))
4511 return false;
4512
4513 /* All uses but the last are expected to be defined in the loop.
4514 The last use is the reduction variable. In case of nested cycle this
4515 assumption is not true: we use reduc_index to record the index of the
4516 reduction variable. */
4517 for (i = 0; i < op_type-1; i++)
4518 {
4519 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
4520 if (i == 0 && code == COND_EXPR)
4521 continue;
4522
4523 is_simple_use = vect_is_simple_use_1 (ops[i], stmt, loop_vinfo, NULL,
4524 &def_stmt, &def, &dt, &tem);
4525 if (!vectype_in)
4526 vectype_in = tem;
4527 gcc_assert (is_simple_use);
4528
4529 if (dt != vect_internal_def
4530 && dt != vect_external_def
4531 && dt != vect_constant_def
4532 && dt != vect_induction_def
4533 && !(dt == vect_nested_cycle && nested_cycle))
4534 return false;
4535
4536 if (dt == vect_nested_cycle)
4537 {
4538 found_nested_cycle_def = true;
4539 reduc_def_stmt = def_stmt;
4540 reduc_index = i;
4541 }
4542 }
4543
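   /* Check the definition of the last operand; for a regular reduction it is
      expected to be defined by the reduction phi.  */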
4544 is_simple_use = vect_is_simple_use_1 (ops[i], stmt, loop_vinfo, NULL,
4545 &def_stmt, &def, &dt, &tem);
4546 if (!vectype_in)
4547 vectype_in = tem;
4548 gcc_assert (is_simple_use);
4549 gcc_assert (dt == vect_reduction_def
4550 || dt == vect_nested_cycle
4551 || ((dt == vect_internal_def || dt == vect_external_def
4552 || dt == vect_constant_def || dt == vect_induction_def)
4553 && nested_cycle && found_nested_cycle_def));
4554 if (!found_nested_cycle_def)
4555 reduc_def_stmt = def_stmt;
4556
4557 gcc_assert (gimple_code (reduc_def_stmt) == GIMPLE_PHI);
4558 if (orig_stmt)
4559 gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo,
4560 reduc_def_stmt,
4561 !nested_cycle,
4562 &dummy));
4563 else
4564 {
4565 gimple tmp = vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
4566 !nested_cycle, &dummy);
4567 /* We changed STMT to be the first stmt in reduction chain, hence we
4568 check that in this case the first element in the chain is STMT. */
4569 gcc_assert (stmt == tmp
4570 || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt);
4571 }
4572
4573 if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
4574 return false;
4575
4576 if (slp_node || PURE_SLP_STMT (stmt_info))
4577 ncopies = 1;
4578 else
4579 ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4580 / TYPE_VECTOR_SUBPARTS (vectype_in));
4581
4582 gcc_assert (ncopies >= 1);
4583
4584 vec_mode = TYPE_MODE (vectype_in);
4585
4586 if (code == COND_EXPR)
4587 {
4588 if (!vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0, NULL))
4589 {
4590 if (vect_print_dump_info (REPORT_DETAILS))
4591 fprintf (vect_dump, "unsupported condition in reduction");
4592
4593 return false;
4594 }
4595 }
4596 else
4597 {
4598 /* 4. Supportable by target? */
4599
4600       /* 4.1. Check support for the operation in the loop.  */
4601 optab = optab_for_tree_code (code, vectype_in, optab_default);
4602 if (!optab)
4603 {
4604 if (vect_print_dump_info (REPORT_DETAILS))
4605 fprintf (vect_dump, "no optab.");
4606
4607 return false;
4608 }
4609
4610 if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
4611 {
4612 if (vect_print_dump_info (REPORT_DETAILS))
4613 fprintf (vect_dump, "op not supported by target.");
4614
4615 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4616 || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4617 < vect_min_worthwhile_factor (code))
4618 return false;
4619
4620 if (vect_print_dump_info (REPORT_DETAILS))
4621 fprintf (vect_dump, "proceeding using word mode.");
4622 }
4623
4624 /* Worthwhile without SIMD support? */
4625 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
4626 && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4627 < vect_min_worthwhile_factor (code))
4628 {
4629 if (vect_print_dump_info (REPORT_DETAILS))
4630 fprintf (vect_dump, "not worthwhile without SIMD support.");
4631
4632 return false;
4633 }
4634 }
4635
4636 /* 4.2. Check support for the epilog operation.
4637
4638 If STMT represents a reduction pattern, then the type of the
4639 reduction variable may be different than the type of the rest
4640 of the arguments. For example, consider the case of accumulation
4641 of shorts into an int accumulator; The original code:
4642 S1: int_a = (int) short_a;
4643 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
4644
4645 was replaced with:
4646 STMT: int_acc = widen_sum <short_a, int_acc>
4647
4648 This means that:
4649 1. The tree-code that is used to create the vector operation in the
4650 epilog code (that reduces the partial results) is not the
4651 tree-code of STMT, but is rather the tree-code of the original
4652 stmt from the pattern that STMT is replacing. I.e, in the example
4653 above we want to use 'widen_sum' in the loop, but 'plus' in the
4654 epilog.
4655 2. The type (mode) we use to check available target support
4656 for the vector operation to be created in the *epilog*, is
4657 determined by the type of the reduction variable (in the example
4658 above we'd check this: optab_handler (plus_optab, vect_int_mode])).
4659 However the type (mode) we use to check available target support
4660 for the vector operation to be created *inside the loop*, is
4661 determined by the type of the other arguments to STMT (in the
4662 example we'd check this: optab_handler (widen_sum_optab,
4663 vect_short_mode)).
4664
4665 This is contrary to "regular" reductions, in which the types of all
4666 the arguments are the same as the type of the reduction variable.
4667 For "regular" reductions we can therefore use the same vector type
4668 (and also the same tree-code) when generating the epilog code and
4669 when generating the code inside the loop. */
4670
4671 if (orig_stmt)
4672 {
4673 /* This is a reduction pattern: get the vectype from the type of the
4674 reduction variable, and get the tree-code from orig_stmt. */
4675 orig_code = gimple_assign_rhs_code (orig_stmt);
4676 gcc_assert (vectype_out);
4677 vec_mode = TYPE_MODE (vectype_out);
4678 }
4679 else
4680 {
4681       /* Regular reduction: the same vectype and tree-code that are used for
4682          the vector code inside the loop can also be used for the epilog code.  */
4683 orig_code = code;
4684 }
4685
4686 if (nested_cycle)
4687 {
4688 def_bb = gimple_bb (reduc_def_stmt);
4689 def_stmt_loop = def_bb->loop_father;
4690 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
4691 loop_preheader_edge (def_stmt_loop));
4692 if (TREE_CODE (def_arg) == SSA_NAME
4693 && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
4694 && gimple_code (def_arg_stmt) == GIMPLE_PHI
4695 && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
4696 && vinfo_for_stmt (def_arg_stmt)
4697 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
4698 == vect_double_reduction_def)
4699 double_reduc = true;
4700 }
4701
4702 epilog_reduc_code = ERROR_MARK;
4703 if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
4704 {
4705 reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out,
4706 optab_default);
4707 if (!reduc_optab)
4708 {
4709 if (vect_print_dump_info (REPORT_DETAILS))
4710 fprintf (vect_dump, "no optab for reduction.");
4711
4712 epilog_reduc_code = ERROR_MARK;
4713 }
4714
4715 if (reduc_optab
4716 && optab_handler (reduc_optab, vec_mode) == CODE_FOR_nothing)
4717 {
4718 if (vect_print_dump_info (REPORT_DETAILS))
4719 fprintf (vect_dump, "reduc op not supported by target.");
4720
4721 epilog_reduc_code = ERROR_MARK;
4722 }
4723 }
4724 else
4725 {
4726 if (!nested_cycle || double_reduc)
4727 {
4728 if (vect_print_dump_info (REPORT_DETAILS))
4729 fprintf (vect_dump, "no reduc code for scalar code.");
4730
4731 return false;
4732 }
4733 }
4734
4735 if (double_reduc && ncopies > 1)
4736 {
4737 if (vect_print_dump_info (REPORT_DETAILS))
4738 fprintf (vect_dump, "multiple types in double reduction");
4739
4740 return false;
4741 }
4742
4743   /* In case of widening multiplication by a constant, we update the type
4744 of the constant to be the type of the other operand. We check that the
4745 constant fits the type in the pattern recognition pass. */
4746 if (code == DOT_PROD_EXPR
4747 && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
4748 {
4749 if (TREE_CODE (ops[0]) == INTEGER_CST)
4750 ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
4751 else if (TREE_CODE (ops[1]) == INTEGER_CST)
4752 ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
4753 else
4754 {
4755 if (vect_print_dump_info (REPORT_DETAILS))
4756 fprintf (vect_dump, "invalid types in dot-prod");
4757
4758 return false;
4759 }
4760 }
4761
4762 if (!vec_stmt) /* transformation not required. */
4763 {
4764 if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
4765 return false;
4766 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
4767 return true;
4768 }
4769
4770 /** Transform. **/
4771
4772 if (vect_print_dump_info (REPORT_DETAILS))
4773 fprintf (vect_dump, "transform reduction.");
4774
4775 /* FORNOW: Multiple types are not supported for condition. */
4776 if (code == COND_EXPR)
4777 gcc_assert (ncopies == 1);
4778
4779 /* Create the destination vector */
4780 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
4781
4782 /* In case the vectorization factor (VF) is bigger than the number
4783 of elements that we can fit in a vectype (nunits), we have to generate
4784 more than one vector stmt - i.e - we need to "unroll" the
4785 vector stmt by a factor VF/nunits. For more details see documentation
4786 in vectorizable_operation. */
4787
4788 /* If the reduction is used in an outer loop we need to generate
4789 VF intermediate results, like so (e.g. for ncopies=2):
4790 r0 = phi (init, r0)
4791 r1 = phi (init, r1)
4792 r0 = x0 + r0;
4793 r1 = x1 + r1;
4794 (i.e. we generate VF results in 2 registers).
4795 In this case we have a separate def-use cycle for each copy, and therefore
4796 for each copy we get the vector def for the reduction variable from the
4797 respective phi node created for this copy.
4798
4799 Otherwise (the reduction is unused in the loop nest), we can combine
4800 together intermediate results, like so (e.g. for ncopies=2):
4801 r = phi (init, r)
4802 r = x0 + r;
4803 r = x1 + r;
4804 (i.e. we generate VF/2 results in a single register).
4805 In this case for each copy we get the vector def for the reduction variable
4806 from the vectorized reduction operation generated in the previous iteration.
4807 */
4808
4809 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope)
4810 {
4811 single_defuse_cycle = true;
4812 epilog_copies = 1;
4813 }
4814 else
4815 epilog_copies = ncopies;
4816
4817 prev_stmt_info = NULL;
4818 prev_phi_info = NULL;
4819 if (slp_node)
4820 {
4821 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
4822 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype_out)
4823 == TYPE_VECTOR_SUBPARTS (vectype_in));
4824 }
4825 else
4826 {
4827 vec_num = 1;
4828 vec_oprnds0 = VEC_alloc (tree, heap, 1);
4829 if (op_type == ternary_op)
4830 vec_oprnds1 = VEC_alloc (tree, heap, 1);
4831 }
4832
4833 phis = VEC_alloc (gimple, heap, vec_num);
4834 vect_defs = VEC_alloc (tree, heap, vec_num);
4835 if (!slp_node)
4836 VEC_quick_push (tree, vect_defs, NULL_TREE);
4837
4838 for (j = 0; j < ncopies; j++)
4839 {
4840 if (j == 0 || !single_defuse_cycle)
4841 {
4842 for (i = 0; i < vec_num; i++)
4843 {
4844 /* Create the reduction-phi that defines the reduction
4845 operand. */
4846 new_phi = create_phi_node (vec_dest, loop->header);
4847 set_vinfo_for_stmt (new_phi,
4848 new_stmt_vec_info (new_phi, loop_vinfo,
4849 NULL));
4850 if (j == 0 || slp_node)
4851 VEC_quick_push (gimple, phis, new_phi);
4852 }
4853 }
4854
4855 if (code == COND_EXPR)
4856 {
4857 gcc_assert (!slp_node);
4858 vectorizable_condition (stmt, gsi, vec_stmt,
4859 PHI_RESULT (VEC_index (gimple, phis, 0)),
4860 reduc_index, NULL);
4861 /* Multiple types are not supported for condition. */
4862 break;
4863 }
4864
4865 /* Handle uses. */
4866 if (j == 0)
4867 {
4868 op0 = ops[!reduc_index];
4869 if (op_type == ternary_op)
4870 {
4871 if (reduc_index == 0)
4872 op1 = ops[2];
4873 else
4874 op1 = ops[1];
4875 }
4876
4877 if (slp_node)
4878 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4879 slp_node, -1);
4880 else
4881 {
4882 loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
4883 stmt, NULL);
4884 VEC_quick_push (tree, vec_oprnds0, loop_vec_def0);
4885 if (op_type == ternary_op)
4886 {
4887 loop_vec_def1 = vect_get_vec_def_for_operand (op1, stmt,
4888 NULL);
4889 VEC_quick_push (tree, vec_oprnds1, loop_vec_def1);
4890 }
4891 }
4892 }
4893 else
4894 {
4895 if (!slp_node)
4896 {
4897 enum vect_def_type dt;
4898 gimple dummy_stmt;
4899 tree dummy;
4900
4901 vect_is_simple_use (ops[!reduc_index], stmt, loop_vinfo, NULL,
4902 &dummy_stmt, &dummy, &dt);
4903 loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt,
4904 loop_vec_def0);
4905 VEC_replace (tree, vec_oprnds0, 0, loop_vec_def0);
4906 if (op_type == ternary_op)
4907 {
4908 vect_is_simple_use (op1, stmt, loop_vinfo, NULL, &dummy_stmt,
4909 &dummy, &dt);
4910 loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt,
4911 loop_vec_def1);
4912 VEC_replace (tree, vec_oprnds1, 0, loop_vec_def1);
4913 }
4914 }
4915
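           /* In a single def-use cycle the vector def of the reduction
              operand is taken from the vector stmt created in the previous
              iteration.  */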
4916 if (single_defuse_cycle)
4917 reduc_def = gimple_assign_lhs (new_stmt);
4918
4919 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
4920 }
4921
4922 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, def0)
4923 {
4924 if (slp_node)
4925 reduc_def = PHI_RESULT (VEC_index (gimple, phis, i));
4926 else
4927 {
4928 if (!single_defuse_cycle || j == 0)
4929 reduc_def = PHI_RESULT (new_phi);
4930 }
4931
4932 def1 = ((op_type == ternary_op)
4933 ? VEC_index (tree, vec_oprnds1, i) : NULL);
4934 if (op_type == binary_op)
4935 {
4936 if (reduc_index == 0)
4937 expr = build2 (code, vectype_out, reduc_def, def0);
4938 else
4939 expr = build2 (code, vectype_out, def0, reduc_def);
4940 }
4941 else
4942 {
4943 if (reduc_index == 0)
4944 expr = build3 (code, vectype_out, reduc_def, def0, def1);
4945 else
4946 {
4947 if (reduc_index == 1)
4948 expr = build3 (code, vectype_out, def0, reduc_def, def1);
4949 else
4950 expr = build3 (code, vectype_out, def0, def1, reduc_def);
4951 }
4952 }
4953
4954 new_stmt = gimple_build_assign (vec_dest, expr);
4955 new_temp = make_ssa_name (vec_dest, new_stmt);
4956 gimple_assign_set_lhs (new_stmt, new_temp);
4957 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4958
4959 if (slp_node)
4960 {
4961 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
4962 VEC_quick_push (tree, vect_defs, new_temp);
4963 }
4964 else
4965 VEC_replace (tree, vect_defs, 0, new_temp);
4966 }
4967
4968 if (slp_node)
4969 continue;
4970
4971 if (j == 0)
4972 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4973 else
4974 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4975
4976 prev_stmt_info = vinfo_for_stmt (new_stmt);
4977 prev_phi_info = vinfo_for_stmt (new_phi);
4978 }
4979
4980 /* Finalize the reduction-phi (set its arguments) and create the
4981 epilog reduction code. */
4982 if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
4983 {
4984 new_temp = gimple_assign_lhs (*vec_stmt);
4985 VEC_replace (tree, vect_defs, 0, new_temp);
4986 }
4987
4988 vect_create_epilog_for_reduction (vect_defs, stmt, epilog_copies,
4989 epilog_reduc_code, phis, reduc_index,
4990 double_reduc, slp_node);
4991
4992 VEC_free (gimple, heap, phis);
4993 VEC_free (tree, heap, vec_oprnds0);
4994 if (vec_oprnds1)
4995 VEC_free (tree, heap, vec_oprnds1);
4996
4997 return true;
4998 }
4999
5000 /* Function vect_min_worthwhile_factor.
5001
5002 For a loop where we could vectorize the operation indicated by CODE,
5003 return the minimum vectorization factor that makes it worthwhile
5004 to use generic vectors. */
5005 int
5006 vect_min_worthwhile_factor (enum tree_code code)
5007 {
5008 switch (code)
5009 {
5010 case PLUS_EXPR:
5011 case MINUS_EXPR:
5012 case NEGATE_EXPR:
5013 return 4;
5014
5015 case BIT_AND_EXPR:
5016 case BIT_IOR_EXPR:
5017 case BIT_XOR_EXPR:
5018 case BIT_NOT_EXPR:
5019 return 2;
5020
5021 default:
5022 return INT_MAX;
5023 }
5024 }
5025
5026
5027 /* Function vectorizable_induction
5028
5029 Check if PHI performs an induction computation that can be vectorized.
5030 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
5031 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
5032 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5033
5034 bool
5035 vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
5036 gimple *vec_stmt)
5037 {
5038 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
5039 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5040 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5041 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5042 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5043 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5044 tree vec_def;
5045
5046 gcc_assert (ncopies >= 1);
5047 /* FORNOW. These restrictions should be relaxed. */
5048 if (nested_in_vect_loop_p (loop, phi))
5049 {
5050 imm_use_iterator imm_iter;
5051 use_operand_p use_p;
5052 gimple exit_phi;
5053 edge latch_e;
5054 tree loop_arg;
5055
5056 if (ncopies > 1)
5057 {
5058 if (vect_print_dump_info (REPORT_DETAILS))
5059 fprintf (vect_dump, "multiple types in nested loop.");
5060 return false;
5061 }
5062
5063 exit_phi = NULL;
5064 latch_e = loop_latch_edge (loop->inner);
5065 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
5066 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
5067 {
5068 if (!flow_bb_inside_loop_p (loop->inner,
5069 gimple_bb (USE_STMT (use_p))))
5070 {
5071 exit_phi = USE_STMT (use_p);
5072 break;
5073 }
5074 }
5075 if (exit_phi)
5076 {
5077 stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
5078 if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
5079 && !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
5080 {
5081 if (vect_print_dump_info (REPORT_DETAILS))
5082 fprintf (vect_dump, "inner-loop induction only used outside "
5083 "of the outer vectorized loop.");
5084 return false;
5085 }
5086 }
5087 }
5088
5089 if (!STMT_VINFO_RELEVANT_P (stmt_info))
5090 return false;
5091
5092 /* FORNOW: SLP not supported. */
5093 if (STMT_SLP_TYPE (stmt_info))
5094 return false;
5095
5096 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
5097
5098 if (gimple_code (phi) != GIMPLE_PHI)
5099 return false;
5100
5101 if (!vec_stmt) /* transformation not required. */
5102 {
5103 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
5104 if (vect_print_dump_info (REPORT_DETAILS))
5105 fprintf (vect_dump, "=== vectorizable_induction ===");
5106 vect_model_induction_cost (stmt_info, ncopies);
5107 return true;
5108 }
5109
5110 /** Transform. **/
5111
5112 if (vect_print_dump_info (REPORT_DETAILS))
5113 fprintf (vect_dump, "transform induction phi.");
5114
5115 vec_def = get_initial_def_for_induction (phi);
5116 *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
5117 return true;
5118 }
5119
5120 /* Function vectorizable_live_operation.
5121
5122 STMT computes a value that is used outside the loop. Check if
5123 it can be supported. */
5124
5125 bool
5126 vectorizable_live_operation (gimple stmt,
5127 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
5128 gimple *vec_stmt ATTRIBUTE_UNUSED)
5129 {
5130 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5131 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5132 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5133 int i;
5134 int op_type;
5135 tree op;
5136 tree def;
5137 gimple def_stmt;
5138 enum vect_def_type dt;
5139 enum tree_code code;
5140 enum gimple_rhs_class rhs_class;
5141
5142 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
5143
5144 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
5145 return false;
5146
5147 if (!is_gimple_assign (stmt))
5148 return false;
5149
5150 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5151 return false;
5152
5153 /* FORNOW. CHECKME. */
5154 if (nested_in_vect_loop_p (loop, stmt))
5155 return false;
5156
5157 code = gimple_assign_rhs_code (stmt);
5158 op_type = TREE_CODE_LENGTH (code);
5159 rhs_class = get_gimple_rhs_class (code);
5160 gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
5161 gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);
5162
5163 /* FORNOW: support only if all uses are invariant. This means
5164 that the scalar operations can remain in place, unvectorized.
5165 The original last scalar value that they compute will be used. */
5166
5167 for (i = 0; i < op_type; i++)
5168 {
5169 if (rhs_class == GIMPLE_SINGLE_RHS)
5170 op = TREE_OPERAND (gimple_op (stmt, 1), i);
5171 else
5172 op = gimple_op (stmt, i + 1);
5173 if (op
5174 && !vect_is_simple_use (op, stmt, loop_vinfo, NULL, &def_stmt, &def,
5175 &dt))
5176 {
5177 if (vect_print_dump_info (REPORT_DETAILS))
5178 fprintf (vect_dump, "use not simple.");
5179 return false;
5180 }
5181
5182 if (dt != vect_external_def && dt != vect_constant_def)
5183 return false;
5184 }
5185
5186 /* No transformation is required for the cases we currently support. */
5187 return true;
5188 }
5189
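/* For illustration: a case accepted above is

       loop:
         t_3 = a_1 + b_2;       where a_1 and b_2 are loop-invariant
         ...
       after the loop:
         ... = t_3;

   Because every operand of the live statement is invariant, the scalar
   statement can stay in place unvectorized and the last scalar value it
   computes is used after the loop, so no vector statement has to be
   generated (the names are made up for the example).  */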
5190 /* Kill any debug uses outside LOOP of SSA names defined in STMT. */
5191
5192 static void
5193 vect_loop_kill_debug_uses (struct loop *loop, gimple stmt)
5194 {
5195 ssa_op_iter op_iter;
5196 imm_use_iterator imm_iter;
5197 def_operand_p def_p;
5198 gimple ustmt;
5199
5200 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
5201 {
5202 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
5203 {
5204 basic_block bb;
5205
5206 if (!is_gimple_debug (ustmt))
5207 continue;
5208
5209 bb = gimple_bb (ustmt);
5210
5211 if (!flow_bb_inside_loop_p (loop, bb))
5212 {
5213 if (gimple_debug_bind_p (ustmt))
5214 {
5215 if (vect_print_dump_info (REPORT_DETAILS))
5216 fprintf (vect_dump, "killing debug use");
5217
5218 gimple_debug_bind_reset_value (ustmt);
5219 update_stmt (ustmt);
5220 }
5221 else
5222 gcc_unreachable ();
5223 }
5224 }
5225 }
5226 }
5227
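/* For illustration: if the scalar loop defined x_5 and a debug statement
   after the loop still referred to it, e.g.

       # DEBUG x => x_5

   then, once the loop is vectorized, x_5 may never be computed, so the
   bind is reset above instead of being left pointing at a value that no
   longer exists (x_5 is a made-up name).  */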
5228 /* Function vect_transform_loop.
5229
5230 The analysis phase has determined that the loop is vectorizable.
5231 Vectorize the loop - create vectorized stmts to replace the scalar
5232 stmts in the loop, and update the loop exit condition. */
5233
5234 void
5235 vect_transform_loop (loop_vec_info loop_vinfo)
5236 {
5237 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5238 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
5239 int nbbs = loop->num_nodes;
5240 gimple_stmt_iterator si;
5241 int i;
5242 tree ratio = NULL;
5243 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5244 bool grouped_store;
5245 bool slp_scheduled = false;
5246 unsigned int nunits;
5247 gimple stmt, pattern_stmt;
5248 gimple_seq pattern_def_seq = NULL;
5249 gimple_stmt_iterator pattern_def_si = gsi_none ();
5250 bool transform_pattern_stmt = false;
5251 bool check_profitability = false;
5252 int th;
5253
5254 if (vect_print_dump_info (REPORT_DETAILS))
5255 fprintf (vect_dump, "=== vect_transform_loop ===");
5256
5257 /* Use the more conservative vectorization threshold. If the number
5258 of iterations is constant, assume the cost check has been performed
5259 by our caller. If the threshold makes all loops profitable that
5260 run at least the vectorization factor number of times, checking
5261 is pointless, too. */
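/* Worked example with hypothetical numbers: with --param
   min-vect-loop-bound=2 and VF = 4, th starts as 2*4 - 1 = 7 and is then
   raised to the cost model's minimum profitable iteration count if that
   is larger; since 7 >= VF - 1, a runtime profitability check is
   requested below only when the iteration count is not known at compile
   time.  */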
5262 th = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
5263 * LOOP_VINFO_VECT_FACTOR (loop_vinfo)) - 1);
5264 th = MAX (th, LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo));
5265 if (th >= LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1
5266 && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
5267 {
5268 if (vect_print_dump_info (REPORT_COST))
5269 fprintf (vect_dump,
5270 "Profitability threshold is %d loop iterations.", th);
5271 check_profitability = true;
5272 }
5273
5274 /* Peel the loop if there are data refs with unknown alignment.
5275 Only one data ref with unknown alignment is allowed. */
5276
5277 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
5278 {
5279 vect_do_peeling_for_alignment (loop_vinfo, th, check_profitability);
5280 check_profitability = false;
5281 }
5282
5283 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
5284 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
5285 {
5286 vect_loop_versioning (loop_vinfo, th, check_profitability);
5287 check_profitability = false;
5288 }
5289
5290 /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
5291 compile-time constant), or it is a constant that is not a multiple of the
5292 vectorization factor, then an epilog loop needs to be created.
5293 We therefore duplicate the loop: the original loop will be vectorized,
5294 and will compute the first (n/VF) iterations. The second copy of the loop
5295 will remain scalar and will compute the remaining (n%VF) iterations.
5296 (VF is the vectorization factor). */
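/* For instance, with n = 103 and VF = 8 the vectorized loop executes
   103/8 = 12 iterations covering 96 scalar iterations, and the scalar
   epilog loop executes the remaining 103%8 = 7 iterations.  */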
5297
5298 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
5299 || (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
5300 && LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0)
5301 || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
5302 vect_do_peeling_for_loop_bound (loop_vinfo, &ratio,
5303 th, check_profitability);
5304 else
5305 ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
5306 LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);
5307
5308 /* 1) Make sure the loop header has exactly two entries
5309 2) Make sure we have a preheader basic block. */
5310
5311 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
5312
5313 split_edge (loop_preheader_edge (loop));
5314
5315 /* FORNOW: the vectorizer supports only loops whose body consists
5316 of one basic block (header + empty latch). When the vectorizer
5317 supports more involved loop forms, the order in which the BBs are
5318 traversed needs to be reconsidered. */
5319
5320 for (i = 0; i < nbbs; i++)
5321 {
5322 basic_block bb = bbs[i];
5323 stmt_vec_info stmt_info;
5324 gimple phi;
5325
5326 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5327 {
5328 phi = gsi_stmt (si);
5329 if (vect_print_dump_info (REPORT_DETAILS))
5330 {
5331 fprintf (vect_dump, "------>vectorizing phi: ");
5332 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
5333 }
5334 stmt_info = vinfo_for_stmt (phi);
5335 if (!stmt_info)
5336 continue;
5337
5338 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
5339 vect_loop_kill_debug_uses (loop, phi);
5340
5341 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5342 && !STMT_VINFO_LIVE_P (stmt_info))
5343 continue;
5344
5345 if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
5346 != (unsigned HOST_WIDE_INT) vectorization_factor)
5347 && vect_print_dump_info (REPORT_DETAILS))
5348 fprintf (vect_dump, "multiple-types.");
5349
5350 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
5351 {
5352 if (vect_print_dump_info (REPORT_DETAILS))
5353 fprintf (vect_dump, "transform phi.");
5354 vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
5355 }
5356 }
5357
5358 pattern_stmt = NULL;
5359 for (si = gsi_start_bb (bb); !gsi_end_p (si) || transform_pattern_stmt;)
5360 {
5361 bool is_store;
5362
5363 if (transform_pattern_stmt)
5364 stmt = pattern_stmt;
5365 else
5366 stmt = gsi_stmt (si);
5367
5368 if (vect_print_dump_info (REPORT_DETAILS))
5369 {
5370 fprintf (vect_dump, "------>vectorizing statement: ");
5371 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
5372 }
5373
5374 stmt_info = vinfo_for_stmt (stmt);
5375
5376 /* vector stmts created in the outer-loop during vectorization of
5377 stmts in an inner-loop may not have a stmt_info, and do not
5378 need to be vectorized. */
5379 if (!stmt_info)
5380 {
5381 gsi_next (&si);
5382 continue;
5383 }
5384
5385 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
5386 vect_loop_kill_debug_uses (loop, stmt);
5387
5388 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5389 && !STMT_VINFO_LIVE_P (stmt_info))
5390 {
5391 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
5392 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
5393 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
5394 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
5395 {
5396 stmt = pattern_stmt;
5397 stmt_info = vinfo_for_stmt (stmt);
5398 }
5399 else
5400 {
5401 gsi_next (&si);
5402 continue;
5403 }
5404 }
5405 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
5406 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
5407 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
5408 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
5409 transform_pattern_stmt = true;
5410
5411 /* If pattern statement has def stmts, vectorize them too. */
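/* For example, a recognized pattern (such as a widening multiplication)
   may carry auxiliary statements in STMT_VINFO_PATTERN_DEF_SEQ, e.g. a
   conversion feeding the main pattern statement; those are walked and
   vectorized below before the pattern statement itself.  */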
5412 if (is_pattern_stmt_p (stmt_info))
5413 {
5414 if (pattern_def_seq == NULL)
5415 {
5416 pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
5417 pattern_def_si = gsi_start (pattern_def_seq);
5418 }
5419 else if (!gsi_end_p (pattern_def_si))
5420 gsi_next (&pattern_def_si);
5421 if (pattern_def_seq != NULL)
5422 {
5423 gimple pattern_def_stmt = NULL;
5424 stmt_vec_info pattern_def_stmt_info = NULL;
5425
5426 while (!gsi_end_p (pattern_def_si))
5427 {
5428 pattern_def_stmt = gsi_stmt (pattern_def_si);
5429 pattern_def_stmt_info
5430 = vinfo_for_stmt (pattern_def_stmt);
5431 if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
5432 || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
5433 break;
5434 gsi_next (&pattern_def_si);
5435 }
5436
5437 if (!gsi_end_p (pattern_def_si))
5438 {
5439 if (vect_print_dump_info (REPORT_DETAILS))
5440 {
5441 fprintf (vect_dump, "==> vectorizing pattern def"
5442 " stmt: ");
5443 print_gimple_stmt (vect_dump, pattern_def_stmt, 0,
5444 TDF_SLIM);
5445 }
5446
5447 stmt = pattern_def_stmt;
5448 stmt_info = pattern_def_stmt_info;
5449 }
5450 else
5451 {
5452 pattern_def_si = gsi_none ();
5453 transform_pattern_stmt = false;
5454 }
5455 }
5456 else
5457 transform_pattern_stmt = false;
5458 }
5459
5460 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
5461 nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (
5462 STMT_VINFO_VECTYPE (stmt_info));
5463 if (!STMT_SLP_TYPE (stmt_info)
5464 && nunits != (unsigned int) vectorization_factor
5465 && vect_print_dump_info (REPORT_DETAILS))
5466 /* For SLP, VF is set according to the unrolling factor, and not to
5467 the vector size, hence for SLP this print is not valid. */
5468 fprintf (vect_dump, "multiple-types.");
5469
5470 /* SLP. Schedule all the SLP instances when the first SLP stmt is
5471 reached. */
5472 if (STMT_SLP_TYPE (stmt_info))
5473 {
5474 if (!slp_scheduled)
5475 {
5476 slp_scheduled = true;
5477
5478 if (vect_print_dump_info (REPORT_DETAILS))
5479 fprintf (vect_dump, "=== scheduling SLP instances ===");
5480
5481 vect_schedule_slp (loop_vinfo, NULL);
5482 }
5483
5484 /* Hybrid SLP stmts must be vectorized in addition to SLP. */
5485 if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
5486 {
5487 if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
5488 {
5489 pattern_def_seq = NULL;
5490 gsi_next (&si);
5491 }
5492 continue;
5493 }
5494 }
5495
5496 /* -------- vectorize statement ------------ */
5497 if (vect_print_dump_info (REPORT_DETAILS))
5498 fprintf (vect_dump, "transform statement.");
5499
5500 grouped_store = false;
5501 is_store = vect_transform_stmt (stmt, &si, &grouped_store, NULL, NULL);
5502 if (is_store)
5503 {
5504 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
5505 {
5506 /* Interleaving. If IS_STORE is TRUE, the vectorization of the
5507 interleaving chain was completed - free all the stores in
5508 the chain. */
5509 gsi_next (&si);
5510 vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info));
5511 continue;
5512 }
5513 else
5514 {
5515 /* Free the attached stmt_vec_info and remove the stmt. */
5516 gimple store = gsi_stmt (si);
5517 free_stmt_vec_info (store);
5518 unlink_stmt_vdef (store);
5519 gsi_remove (&si, true);
5520 release_defs (store);
5521 continue;
5522 }
5523 }
5524
5525 if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
5526 {
5527 pattern_def_seq = NULL;
5528 gsi_next (&si);
5529 }
5530 } /* stmts in BB */
5531 } /* BBs in loop */
5532
5533 slpeel_make_loop_iterate_ntimes (loop, ratio);
5534
5535 /* The memory tags and pointers in vectorized statements need to
5536 have their SSA forms updated. FIXME, why can't this be delayed
5537 until all the loops have been transformed? */
5538 update_ssa (TODO_update_ssa);
5539
5540 if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
5541 fprintf (vect_dump, "LOOP VECTORIZED.");
5542 if (loop->inner && vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
5543 fprintf (vect_dump, "OUTER LOOP VECTORIZED.");
5544 }