gcc/tree-vect-loop.c
1 /* Loop Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
3 Free Software Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com> and
5 Ira Rosen <irar@il.ibm.com>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "basic-block.h"
30 #include "gimple-pretty-print.h"
31 #include "tree-flow.h"
32 #include "tree-pass.h"
33 #include "cfgloop.h"
34 #include "expr.h"
35 #include "recog.h"
36 #include "optabs.h"
37 #include "params.h"
38 #include "diagnostic-core.h"
39 #include "tree-chrec.h"
40 #include "tree-scalar-evolution.h"
41 #include "tree-vectorizer.h"
42 #include "target.h"
43
44 /* Loop Vectorization Pass.
45
46 This pass tries to vectorize loops.
47
48 For example, the vectorizer transforms the following simple loop:
49
50 short a[N]; short b[N]; short c[N]; int i;
51
52 for (i=0; i<N; i++){
53 a[i] = b[i] + c[i];
54 }
55
56 as if it were manually vectorized by rewriting the source code into:
57
58 typedef int __attribute__((mode(V8HI))) v8hi;
59 short a[N]; short b[N]; short c[N]; int i;
60 v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
61 v8hi va, vb, vc;
62
63 for (i=0; i<N/8; i++){
64 vb = pb[i];
65 vc = pc[i];
66 va = vb + vc;
67 pa[i] = va;
68 }
69
70 The main entry to this pass is vectorize_loops(), in which
71 the vectorizer applies a set of analyses on a given set of loops,
72 followed by the actual vectorization transformation for the loops that
73 had successfully passed the analysis phase.
74 Throughout this pass we make a distinction between two types of
75 data: scalars (which are represented by SSA_NAMES), and memory references
76 ("data-refs"). These two types of data require different handling both
77 during analysis and transformation. The types of data-refs that the
78 vectorizer currently supports are ARRAY_REFS whose base is an array DECL
79 (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
80 accesses are required to have a simple (consecutive) access pattern.
81
82 Analysis phase:
83 ===============
84 The driver for the analysis phase is vect_analyze_loop().
85 It applies a set of analyses, some of which rely on the scalar evolution
86 analyzer (scev) developed by Sebastian Pop.
87
88 During the analysis phase the vectorizer records some information
89 per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
90 loop, as well as general information about the loop as a whole, which is
91 recorded in a "loop_vec_info" struct attached to each loop.
92
93 Transformation phase:
94 =====================
95 The loop transformation phase scans all the stmts in the loop, and
96 creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
97 the loop that needs to be vectorized. It inserts the vector code sequence
98 just before the scalar stmt S, and records a pointer to the vector code
99 in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
100 attached to S). This pointer will be used for the vectorization of following
101 stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
102 otherwise, we rely on dead code elimination for removing it.
103
104 For example, say stmt S1 was vectorized into stmt VS1:
105
106 VS1: vb = px[i];
107 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
108 S2: a = b;
109
110 To vectorize stmt S2, the vectorizer first finds the stmt that defines
111 the operand 'b' (S1), and gets the relevant vector def 'vb' from the
112 vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
113 resulting sequence would be:
114
115 VS1: vb = px[i];
116 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
117 VS2: va = vb;
118 S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
119
120 Operands that are not SSA_NAMEs are data-refs that appear in
121 load/store operations (like 'x[i]' in S1), and are handled differently.
122
123 Target modeling:
124 =================
125 Currently the only target specific information that is used is the
126 size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
127 Targets that can support different sizes of vectors will, for now, need
128 to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More
129 flexibility will be added in the future.
130
131 Since we only vectorize operations whose vector form can be
132 expressed using existing tree codes, to verify that an operation is
133 supported, the vectorizer checks the relevant optab at the relevant
134 machine_mode (e.g., optab_handler (add_optab, V8HImode)). If
135 the value found is CODE_FOR_nothing, then there's no target support, and
136 we can't vectorize the stmt.
137
138 For additional information on this project see:
139 http://gcc.gnu.org/projects/tree-ssa/vectorization.html
140 */
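/* Illustration of the target check described above (a sketch only, assuming
   a hypothetical target that implements addition on V8HImode vectors):

     if (optab_handler (add_optab, V8HImode) == CODE_FOR_nothing)
       ;  /* No instruction for the vector add - the stmt is not vectorized.  */
     else
       ;  /* The vector add can be emitted, e.g. for the "va = vb + vc" above.  */
 */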
141
142 /* Function vect_determine_vectorization_factor
143
144 Determine the vectorization factor (VF). VF is the number of data elements
145 that are operated upon in parallel in a single iteration of the vectorized
146 loop. For example, when vectorizing a loop that operates on 4-byte elements,
147 on a target with a vector size (VS) of 16 bytes, the VF is set to 4, since 4
148 elements can fit in a single vector register.
149
150 We currently support vectorization of loops in which all types operated upon
151 are of the same size. Therefore this function currently sets VF according to
152 the size of the types operated upon, and fails if there are multiple sizes
153 in the loop.
154
155 VF is also the factor by which the loop iterations are strip-mined, e.g.:
156 original loop:
157 for (i=0; i<N; i++){
158 a[i] = b[i] + c[i];
159 }
160
161 vectorized loop:
162 for (i=0; i<N; i+=VF){
163 a[i:VF] = b[i:VF] + c[i:VF];
164 }
165 */
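/* Worked example (illustrative numbers, assuming a 16-byte vector target):
   for a loop that operates only on 'short' (2-byte) elements,
   get_vectype_for_scalar_type returns a vector type with
   TYPE_VECTOR_SUBPARTS == 8, so VF is set to 8 and the loop above is
   strip-mined by 8:
     for (i=0; i<N; i+=8){
       a[i:8] = b[i:8] + c[i:8];
     }  */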
166
167 static bool
168 vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
169 {
170 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
171 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
172 int nbbs = loop->num_nodes;
173 gimple_stmt_iterator si;
174 unsigned int vectorization_factor = 0;
175 tree scalar_type;
176 gimple phi;
177 tree vectype;
178 unsigned int nunits;
179 stmt_vec_info stmt_info;
180 int i;
181 HOST_WIDE_INT dummy;
182 gimple stmt, pattern_stmt = NULL;
183 gimple_seq pattern_def_seq = NULL;
184 gimple_stmt_iterator pattern_def_si = gsi_none ();
185 bool analyze_pattern_stmt = false;
186
187 if (vect_print_dump_info (REPORT_DETAILS))
188 fprintf (vect_dump, "=== vect_determine_vectorization_factor ===");
189
190 for (i = 0; i < nbbs; i++)
191 {
192 basic_block bb = bbs[i];
193
194 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
195 {
196 phi = gsi_stmt (si);
197 stmt_info = vinfo_for_stmt (phi);
198 if (vect_print_dump_info (REPORT_DETAILS))
199 {
200 fprintf (vect_dump, "==> examining phi: ");
201 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
202 }
203
204 gcc_assert (stmt_info);
205
206 if (STMT_VINFO_RELEVANT_P (stmt_info))
207 {
208 gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
209 scalar_type = TREE_TYPE (PHI_RESULT (phi));
210
211 if (vect_print_dump_info (REPORT_DETAILS))
212 {
213 fprintf (vect_dump, "get vectype for scalar type: ");
214 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
215 }
216
217 vectype = get_vectype_for_scalar_type (scalar_type);
218 if (!vectype)
219 {
220 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
221 {
222 fprintf (vect_dump,
223 "not vectorized: unsupported data-type ");
224 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
225 }
226 return false;
227 }
228 STMT_VINFO_VECTYPE (stmt_info) = vectype;
229
230 if (vect_print_dump_info (REPORT_DETAILS))
231 {
232 fprintf (vect_dump, "vectype: ");
233 print_generic_expr (vect_dump, vectype, TDF_SLIM);
234 }
235
236 nunits = TYPE_VECTOR_SUBPARTS (vectype);
237 if (vect_print_dump_info (REPORT_DETAILS))
238 fprintf (vect_dump, "nunits = %d", nunits);
239
240 if (!vectorization_factor
241 || (nunits > vectorization_factor))
242 vectorization_factor = nunits;
243 }
244 }
245
246 for (si = gsi_start_bb (bb); !gsi_end_p (si) || analyze_pattern_stmt;)
247 {
248 tree vf_vectype;
249
250 if (analyze_pattern_stmt)
251 stmt = pattern_stmt;
252 else
253 stmt = gsi_stmt (si);
254
255 stmt_info = vinfo_for_stmt (stmt);
256
257 if (vect_print_dump_info (REPORT_DETAILS))
258 {
259 fprintf (vect_dump, "==> examining statement: ");
260 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
261 }
262
263 gcc_assert (stmt_info);
264
265 /* Skip stmts which do not need to be vectorized. */
266 if (!STMT_VINFO_RELEVANT_P (stmt_info)
267 && !STMT_VINFO_LIVE_P (stmt_info))
268 {
269 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
270 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
271 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
272 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
273 {
274 stmt = pattern_stmt;
275 stmt_info = vinfo_for_stmt (pattern_stmt);
276 if (vect_print_dump_info (REPORT_DETAILS))
277 {
278 fprintf (vect_dump, "==> examining pattern statement: ");
279 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
280 }
281 }
282 else
283 {
284 if (vect_print_dump_info (REPORT_DETAILS))
285 fprintf (vect_dump, "skip.");
286 gsi_next (&si);
287 continue;
288 }
289 }
290 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
291 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
292 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
293 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
294 analyze_pattern_stmt = true;
295
296 /* If a pattern statement has def stmts, analyze them too. */
297 if (is_pattern_stmt_p (stmt_info))
298 {
299 if (pattern_def_seq == NULL)
300 {
301 pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
302 pattern_def_si = gsi_start (pattern_def_seq);
303 }
304 else if (!gsi_end_p (pattern_def_si))
305 gsi_next (&pattern_def_si);
306 if (pattern_def_seq != NULL)
307 {
308 gimple pattern_def_stmt = NULL;
309 stmt_vec_info pattern_def_stmt_info = NULL;
310
311 while (!gsi_end_p (pattern_def_si))
312 {
313 pattern_def_stmt = gsi_stmt (pattern_def_si);
314 pattern_def_stmt_info
315 = vinfo_for_stmt (pattern_def_stmt);
316 if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
317 || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
318 break;
319 gsi_next (&pattern_def_si);
320 }
321
322 if (!gsi_end_p (pattern_def_si))
323 {
324 if (vect_print_dump_info (REPORT_DETAILS))
325 {
326 fprintf (vect_dump,
327 "==> examining pattern def stmt: ");
328 print_gimple_stmt (vect_dump, pattern_def_stmt, 0,
329 TDF_SLIM);
330 }
331
332 stmt = pattern_def_stmt;
333 stmt_info = pattern_def_stmt_info;
334 }
335 else
336 {
337 pattern_def_si = gsi_none ();
338 analyze_pattern_stmt = false;
339 }
340 }
341 else
342 analyze_pattern_stmt = false;
343 }
344
345 if (gimple_get_lhs (stmt) == NULL_TREE)
346 {
347 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
348 {
349 fprintf (vect_dump, "not vectorized: irregular stmt.");
350 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
351 }
352 return false;
353 }
354
355 if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
356 {
357 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
358 {
359 fprintf (vect_dump, "not vectorized: vector stmt in loop:");
360 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
361 }
362 return false;
363 }
364
365 if (STMT_VINFO_VECTYPE (stmt_info))
366 {
367 /* The only case when a vectype had been already set is for stmts
368 that contain a dataref, or for "pattern-stmts" (stmts
369 generated by the vectorizer to represent/replace a certain
370 idiom). */
371 gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
372 || is_pattern_stmt_p (stmt_info)
373 || !gsi_end_p (pattern_def_si));
374 vectype = STMT_VINFO_VECTYPE (stmt_info);
375 }
376 else
377 {
378 gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
379 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
380 if (vect_print_dump_info (REPORT_DETAILS))
381 {
382 fprintf (vect_dump, "get vectype for scalar type: ");
383 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
384 }
385 vectype = get_vectype_for_scalar_type (scalar_type);
386 if (!vectype)
387 {
388 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
389 {
390 fprintf (vect_dump,
391 "not vectorized: unsupported data-type ");
392 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
393 }
394 return false;
395 }
396
397 STMT_VINFO_VECTYPE (stmt_info) = vectype;
398 }
399
400 /* The vectorization factor is determined by the smallest
401 scalar type (or the largest vector size, but we only
402 support one vector size per loop). */
403 scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
404 &dummy);
405 if (vect_print_dump_info (REPORT_DETAILS))
406 {
407 fprintf (vect_dump, "get vectype for scalar type: ");
408 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
409 }
410 vf_vectype = get_vectype_for_scalar_type (scalar_type);
411 if (!vf_vectype)
412 {
413 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
414 {
415 fprintf (vect_dump,
416 "not vectorized: unsupported data-type ");
417 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
418 }
419 return false;
420 }
421
422 if ((GET_MODE_SIZE (TYPE_MODE (vectype))
423 != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
424 {
425 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
426 {
427 fprintf (vect_dump,
428 "not vectorized: different sized vector "
429 "types in statement, ");
430 print_generic_expr (vect_dump, vectype, TDF_SLIM);
431 fprintf (vect_dump, " and ");
432 print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
433 }
434 return false;
435 }
436
437 if (vect_print_dump_info (REPORT_DETAILS))
438 {
439 fprintf (vect_dump, "vectype: ");
440 print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
441 }
442
443 nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
444 if (vect_print_dump_info (REPORT_DETAILS))
445 fprintf (vect_dump, "nunits = %d", nunits);
446
447 if (!vectorization_factor
448 || (nunits > vectorization_factor))
449 vectorization_factor = nunits;
450
451 if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
452 {
453 pattern_def_seq = NULL;
454 gsi_next (&si);
455 }
456 }
457 }
458
459 /* TODO: Analyze cost. Decide if worth while to vectorize. */
460 if (vect_print_dump_info (REPORT_DETAILS))
461 fprintf (vect_dump, "vectorization factor = %d", vectorization_factor);
462 if (vectorization_factor <= 1)
463 {
464 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
465 fprintf (vect_dump, "not vectorized: unsupported data-type");
466 return false;
467 }
468 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
469
470 return true;
471 }
472
473
474 /* Function vect_is_simple_iv_evolution.
475
476 FORNOW: A simple evolution of an induction variable in the loop is
477 considered a polynomial evolution with constant step. */
478
479 static bool
480 vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
481 tree * step)
482 {
483 tree init_expr;
484 tree step_expr;
485 tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
486
487 /* When there is no evolution in this loop, the evolution function
488 is not "simple". */
489 if (evolution_part == NULL_TREE)
490 return false;
491
492 /* When the evolution is a polynomial of degree >= 2
493 the evolution function is not "simple". */
494 if (tree_is_chrec (evolution_part))
495 return false;
496
497 step_expr = evolution_part;
498 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
499
500 if (vect_print_dump_info (REPORT_DETAILS))
501 {
502 fprintf (vect_dump, "step: ");
503 print_generic_expr (vect_dump, step_expr, TDF_SLIM);
504 fprintf (vect_dump, ", init: ");
505 print_generic_expr (vect_dump, init_expr, TDF_SLIM);
506 }
507
508 *init = init_expr;
509 *step = step_expr;
510
511 if (TREE_CODE (step_expr) != INTEGER_CST)
512 {
513 if (vect_print_dump_info (REPORT_DETAILS))
514 fprintf (vect_dump, "step unknown.");
515 return false;
516 }
517
518 return true;
519 }
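/* Illustration (a sketch): for the counter of "for (i = 0; i < N; i++)",
   scev describes the evolution of 'i' as the chrec {0, +, 1}_loop, so the
   function above sets *INIT to 0 and *STEP to 1 and succeeds (the step is
   an INTEGER_CST).  An access function whose step is itself a chrec, such
   as {0, +, {1, +, 1}}_loop, is rejected as not "simple".  */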
520
521 /* Function vect_analyze_scalar_cycles_1.
522
523 Examine the cross iteration def-use cycles of scalar variables
524 in LOOP. LOOP_VINFO represents the loop that is now being
525 considered for vectorization (can be LOOP, or an outer-loop
526 enclosing LOOP). */
527
528 static void
529 vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
530 {
531 basic_block bb = loop->header;
532 tree dumy;
533 VEC(gimple,heap) *worklist = VEC_alloc (gimple, heap, 64);
534 gimple_stmt_iterator gsi;
535 bool double_reduc;
536
537 if (vect_print_dump_info (REPORT_DETAILS))
538 fprintf (vect_dump, "=== vect_analyze_scalar_cycles ===");
539
540 /* First - identify all inductions. Reduction detection assumes that all the
541 inductions have been identified, therefore, this order must not be
542 changed. */
543 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
544 {
545 gimple phi = gsi_stmt (gsi);
546 tree access_fn = NULL;
547 tree def = PHI_RESULT (phi);
548 stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
549
550 if (vect_print_dump_info (REPORT_DETAILS))
551 {
552 fprintf (vect_dump, "Analyze phi: ");
553 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
554 }
555
556 /* Skip virtual phi's. The data dependences that are associated with
557 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
558 if (!is_gimple_reg (SSA_NAME_VAR (def)))
559 continue;
560
561 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;
562
563 /* Analyze the evolution function. */
564 access_fn = analyze_scalar_evolution (loop, def);
565 if (access_fn)
566 {
567 STRIP_NOPS (access_fn);
568 if (vect_print_dump_info (REPORT_DETAILS))
569 {
570 fprintf (vect_dump, "Access function of PHI: ");
571 print_generic_expr (vect_dump, access_fn, TDF_SLIM);
572 }
573 STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
574 = evolution_part_in_loop_num (access_fn, loop->num);
575 }
576
577 if (!access_fn
578 || !vect_is_simple_iv_evolution (loop->num, access_fn, &dumy, &dumy))
579 {
580 VEC_safe_push (gimple, heap, worklist, phi);
581 continue;
582 }
583
584 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
585
586 if (vect_print_dump_info (REPORT_DETAILS))
587 fprintf (vect_dump, "Detected induction.");
588 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
589 }
590
591
592 /* Second - identify all reductions and nested cycles. */
593 while (VEC_length (gimple, worklist) > 0)
594 {
595 gimple phi = VEC_pop (gimple, worklist);
596 tree def = PHI_RESULT (phi);
597 stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
598 gimple reduc_stmt;
599 bool nested_cycle;
600
601 if (vect_print_dump_info (REPORT_DETAILS))
602 {
603 fprintf (vect_dump, "Analyze phi: ");
604 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
605 }
606
607 gcc_assert (is_gimple_reg (SSA_NAME_VAR (def)));
608 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
609
610 nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
611 reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, !nested_cycle,
612 &double_reduc);
613 if (reduc_stmt)
614 {
615 if (double_reduc)
616 {
617 if (vect_print_dump_info (REPORT_DETAILS))
618 fprintf (vect_dump, "Detected double reduction.");
619
620 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
621 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
622 vect_double_reduction_def;
623 }
624 else
625 {
626 if (nested_cycle)
627 {
628 if (vect_print_dump_info (REPORT_DETAILS))
629 fprintf (vect_dump, "Detected vectorizable nested cycle.");
630
631 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
632 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
633 vect_nested_cycle;
634 }
635 else
636 {
637 if (vect_print_dump_info (REPORT_DETAILS))
638 fprintf (vect_dump, "Detected reduction.");
639
640 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
641 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
642 vect_reduction_def;
643 /* Store the reduction cycles for possible vectorization in
644 loop-aware SLP. */
645 VEC_safe_push (gimple, heap,
646 LOOP_VINFO_REDUCTIONS (loop_vinfo),
647 reduc_stmt);
648 }
649 }
650 }
651 else
652 if (vect_print_dump_info (REPORT_DETAILS))
653 fprintf (vect_dump, "Unknown def-use cycle pattern.");
654 }
655
656 VEC_free (gimple, heap, worklist);
657 }
658
659
660 /* Function vect_analyze_scalar_cycles.
661
662 Examine the cross iteration def-use cycles of scalar variables, by
663 analyzing the loop-header PHIs of scalar variables. Classify each
664 cycle as one of the following: invariant, induction, reduction, unknown.
665 We do that for the loop represented by LOOP_VINFO, and also for its
666 inner-loop, if it exists.
667 Examples for scalar cycles:
668
669 Example1: reduction:
670
671 loop1:
672 for (i=0; i<N; i++)
673 sum += a[i];
674
675 Example2: induction:
676
677 loop2:
678 for (i=0; i<N; i++)
679 a[i] = i; */
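/* A further example (illustrative only): when an outer loop is the one
   being vectorized, an accumulation that crosses both loops of a nest, e.g.

     loop3:
     for (j=0; j<M; j++)
       for (i=0; i<N; i++)
         sum += a[j][i];

   gives rise to the double-reduction / nested-cycle classifications handled
   by vect_analyze_scalar_cycles_1 above.  */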
680
681 static void
682 vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
683 {
684 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
685
686 vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
687
688 /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
689 Reductions in such an inner-loop therefore have different properties than
690 the reductions in the nest that gets vectorized:
691 1. When vectorized, they are executed in the same order as in the original
692 scalar loop, so we can't change the order of computation when
693 vectorizing them.
694 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
695 current checks are too strict. */
696
697 if (loop->inner)
698 vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
699 }
700
701 /* Function vect_get_loop_niters.
702
703 Determine how many times the loop is executed.
704 If an expression that represents the number of iterations
705 can be constructed, place it in NUMBER_OF_ITERATIONS.
706 Return the loop exit condition. */
707
708 static gimple
709 vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
710 {
711 tree niters;
712
713 if (vect_print_dump_info (REPORT_DETAILS))
714 fprintf (vect_dump, "=== get_loop_niters ===");
715
716 niters = number_of_exit_cond_executions (loop);
717
718 if (niters != NULL_TREE
719 && niters != chrec_dont_know)
720 {
721 *number_of_iterations = niters;
722
723 if (vect_print_dump_info (REPORT_DETAILS))
724 {
725 fprintf (vect_dump, "==> get_loop_niters:" );
726 print_generic_expr (vect_dump, *number_of_iterations, TDF_SLIM);
727 }
728 }
729
730 return get_loop_exit_condition (loop);
731 }
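/* Roughly, for a simple counted loop such as "for (i = 0; i < n; i++)" that
   executes n times, number_of_exit_cond_executions is expected to yield an
   expression equivalent to 'n', which is what gets stored in
   *NUMBER_OF_ITERATIONS above (a sketch; the exact expression depends on
   how scev represents the exit test).  */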
732
733
734 /* Function bb_in_loop_p
735
736 Used as predicate for dfs order traversal of the loop bbs. */
737
738 static bool
739 bb_in_loop_p (const_basic_block bb, const void *data)
740 {
741 const struct loop *const loop = (const struct loop *)data;
742 if (flow_bb_inside_loop_p (loop, bb))
743 return true;
744 return false;
745 }
746
747
748 /* Function new_loop_vec_info.
749
750 Create and initialize a new loop_vec_info struct for LOOP, as well as
751 stmt_vec_info structs for all the stmts in LOOP. */
752
753 static loop_vec_info
754 new_loop_vec_info (struct loop *loop)
755 {
756 loop_vec_info res;
757 basic_block *bbs;
758 gimple_stmt_iterator si;
759 unsigned int i, nbbs;
760
761 res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
762 LOOP_VINFO_LOOP (res) = loop;
763
764 bbs = get_loop_body (loop);
765
766 /* Create/Update stmt_info for all stmts in the loop. */
767 for (i = 0; i < loop->num_nodes; i++)
768 {
769 basic_block bb = bbs[i];
770
771 /* BBs in a nested inner-loop will have been already processed (because
772 we will have called vect_analyze_loop_form for any nested inner-loop).
773 Therefore, for stmts in an inner-loop we just want to update the
774 STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
775 loop_info of the outer-loop we are currently considering to vectorize
776 (instead of the loop_info of the inner-loop).
777 For stmts in other BBs we need to create a stmt_info from scratch. */
778 if (bb->loop_father != loop)
779 {
780 /* Inner-loop bb. */
781 gcc_assert (loop->inner && bb->loop_father == loop->inner);
782 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
783 {
784 gimple phi = gsi_stmt (si);
785 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
786 loop_vec_info inner_loop_vinfo =
787 STMT_VINFO_LOOP_VINFO (stmt_info);
788 gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
789 STMT_VINFO_LOOP_VINFO (stmt_info) = res;
790 }
791 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
792 {
793 gimple stmt = gsi_stmt (si);
794 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
795 loop_vec_info inner_loop_vinfo =
796 STMT_VINFO_LOOP_VINFO (stmt_info);
797 gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
798 STMT_VINFO_LOOP_VINFO (stmt_info) = res;
799 }
800 }
801 else
802 {
803 /* bb in current nest. */
804 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
805 {
806 gimple phi = gsi_stmt (si);
807 gimple_set_uid (phi, 0);
808 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL));
809 }
810
811 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
812 {
813 gimple stmt = gsi_stmt (si);
814 gimple_set_uid (stmt, 0);
815 set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL));
816 }
817 }
818 }
819
820 /* CHECKME: We want to visit all BBs before their successors (except for
821 latch blocks, for which this assertion wouldn't hold). In the simple
822 case of the loop forms we allow, a dfs order of the BBs would be the same
823 as reversed postorder traversal, so we are safe. */
824
825 free (bbs);
826 bbs = XCNEWVEC (basic_block, loop->num_nodes);
827 nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
828 bbs, loop->num_nodes, loop);
829 gcc_assert (nbbs == loop->num_nodes);
830
831 LOOP_VINFO_BBS (res) = bbs;
832 LOOP_VINFO_NITERS (res) = NULL;
833 LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
834 LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
835 LOOP_VINFO_VECTORIZABLE_P (res) = 0;
836 LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
837 LOOP_VINFO_VECT_FACTOR (res) = 0;
838 LOOP_VINFO_LOOP_NEST (res) = VEC_alloc (loop_p, heap, 3);
839 LOOP_VINFO_DATAREFS (res) = VEC_alloc (data_reference_p, heap, 10);
840 LOOP_VINFO_DDRS (res) = VEC_alloc (ddr_p, heap, 10 * 10);
841 LOOP_VINFO_UNALIGNED_DR (res) = NULL;
842 LOOP_VINFO_MAY_MISALIGN_STMTS (res) =
843 VEC_alloc (gimple, heap,
844 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
845 LOOP_VINFO_MAY_ALIAS_DDRS (res) =
846 VEC_alloc (ddr_p, heap,
847 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
848 LOOP_VINFO_GROUPED_STORES (res) = VEC_alloc (gimple, heap, 10);
849 LOOP_VINFO_REDUCTIONS (res) = VEC_alloc (gimple, heap, 10);
850 LOOP_VINFO_REDUCTION_CHAINS (res) = VEC_alloc (gimple, heap, 10);
851 LOOP_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 10);
852 LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;
853 LOOP_VINFO_PEELING_HTAB (res) = NULL;
854 LOOP_VINFO_TARGET_COST_DATA (res) = init_cost (loop);
855 LOOP_VINFO_PEELING_FOR_GAPS (res) = false;
856
857 return res;
858 }
859
860
861 /* Function destroy_loop_vec_info.
862
863 Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the
864 stmts in the loop. */
865
866 void
867 destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
868 {
869 struct loop *loop;
870 basic_block *bbs;
871 int nbbs;
872 gimple_stmt_iterator si;
873 int j;
874 VEC (slp_instance, heap) *slp_instances;
875 slp_instance instance;
876
877 if (!loop_vinfo)
878 return;
879
880 loop = LOOP_VINFO_LOOP (loop_vinfo);
881
882 bbs = LOOP_VINFO_BBS (loop_vinfo);
883 nbbs = loop->num_nodes;
884
885 if (!clean_stmts)
886 {
887 free (LOOP_VINFO_BBS (loop_vinfo));
888 free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
889 free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
890 VEC_free (loop_p, heap, LOOP_VINFO_LOOP_NEST (loop_vinfo));
891 VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
892 VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
893
894 free (loop_vinfo);
895 loop->aux = NULL;
896 return;
897 }
898
899 for (j = 0; j < nbbs; j++)
900 {
901 basic_block bb = bbs[j];
902 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
903 free_stmt_vec_info (gsi_stmt (si));
904
905 for (si = gsi_start_bb (bb); !gsi_end_p (si); )
906 {
907 gimple stmt = gsi_stmt (si);
908 /* Free stmt_vec_info. */
909 free_stmt_vec_info (stmt);
910 gsi_next (&si);
911 }
912 }
913
914 free (LOOP_VINFO_BBS (loop_vinfo));
915 free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
916 free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
917 VEC_free (loop_p, heap, LOOP_VINFO_LOOP_NEST (loop_vinfo));
918 VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
919 VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
920 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
921 FOR_EACH_VEC_ELT (slp_instance, slp_instances, j, instance)
922 vect_free_slp_instance (instance);
923
924 VEC_free (slp_instance, heap, LOOP_VINFO_SLP_INSTANCES (loop_vinfo));
925 VEC_free (gimple, heap, LOOP_VINFO_GROUPED_STORES (loop_vinfo));
926 VEC_free (gimple, heap, LOOP_VINFO_REDUCTIONS (loop_vinfo));
927 VEC_free (gimple, heap, LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo));
928
929 if (LOOP_VINFO_PEELING_HTAB (loop_vinfo))
930 htab_delete (LOOP_VINFO_PEELING_HTAB (loop_vinfo));
931
932 destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
933
934 free (loop_vinfo);
935 loop->aux = NULL;
936 }
937
938
939 /* Function vect_analyze_loop_1.
940
941 Apply a set of analyses on LOOP, and create a loop_vec_info struct
942 for it. The different analyses will record information in the
943 loop_vec_info struct. This is a subset of the analyses applied in
944 vect_analyze_loop, to be applied on an inner-loop nested in the loop
945 that is now considered for (outer-loop) vectorization. */
946
947 static loop_vec_info
948 vect_analyze_loop_1 (struct loop *loop)
949 {
950 loop_vec_info loop_vinfo;
951
952 if (vect_print_dump_info (REPORT_DETAILS))
953 fprintf (vect_dump, "===== analyze_loop_nest_1 =====");
954
955 /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.). */
956
957 loop_vinfo = vect_analyze_loop_form (loop);
958 if (!loop_vinfo)
959 {
960 if (vect_print_dump_info (REPORT_DETAILS))
961 fprintf (vect_dump, "bad inner-loop form.");
962 return NULL;
963 }
964
965 return loop_vinfo;
966 }
967
968
969 /* Function vect_analyze_loop_form.
970
971 Verify that certain CFG restrictions hold, including:
972 - the loop has a pre-header
973 - the loop has a single entry and exit
974 - the loop exit condition is simple enough, and the number of iterations
975 can be analyzed (a countable loop). */
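/* For instance (illustrative only): a simple counted loop like
     for (i = 0; i < n; i++)
       a[i] = b[i];
   normally satisfies these restrictions once the loop optimizers have put
   it into do-while form, whereas a loop with an early exit such as
   "if (c[i]) break;" has extra control flow / multiple exits and is
   rejected by the checks below.  */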
976
977 loop_vec_info
978 vect_analyze_loop_form (struct loop *loop)
979 {
980 loop_vec_info loop_vinfo;
981 gimple loop_cond;
982 tree number_of_iterations = NULL;
983 loop_vec_info inner_loop_vinfo = NULL;
984
985 if (vect_print_dump_info (REPORT_DETAILS))
986 fprintf (vect_dump, "=== vect_analyze_loop_form ===");
987
988 /* Different restrictions apply when we are considering an inner-most loop,
989 vs. an outer (nested) loop.
990 (FORNOW. May want to relax some of these restrictions in the future). */
991
992 if (!loop->inner)
993 {
994 /* Inner-most loop. We currently require that the number of BBs is
995 exactly 2 (the header and latch). Vectorizable inner-most loops
996 look like this:
997
998 (pre-header)
999 |
1000 header <--------+
1001 | | |
1002 | +--> latch --+
1003 |
1004 (exit-bb) */
1005
1006 if (loop->num_nodes != 2)
1007 {
1008 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1009 fprintf (vect_dump, "not vectorized: control flow in loop.");
1010 return NULL;
1011 }
1012
1013 if (empty_block_p (loop->header))
1014 {
1015 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1016 fprintf (vect_dump, "not vectorized: empty loop.");
1017 return NULL;
1018 }
1019 }
1020 else
1021 {
1022 struct loop *innerloop = loop->inner;
1023 edge entryedge;
1024
1025 /* Nested loop. We currently require that the loop is doubly-nested,
1026 contains a single inner loop, and the number of BBs is exactly 5.
1027 Vectorizable outer-loops look like this:
1028
1029 (pre-header)
1030 |
1031 header <---+
1032 | |
1033 inner-loop |
1034 | |
1035 tail ------+
1036 |
1037 (exit-bb)
1038
1039 The inner-loop has the properties expected of inner-most loops
1040 as described above. */
1041
1042 if ((loop->inner)->inner || (loop->inner)->next)
1043 {
1044 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1045 fprintf (vect_dump, "not vectorized: multiple nested loops.");
1046 return NULL;
1047 }
1048
1049 /* Analyze the inner-loop. */
1050 inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
1051 if (!inner_loop_vinfo)
1052 {
1053 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1054 fprintf (vect_dump, "not vectorized: Bad inner loop.");
1055 return NULL;
1056 }
1057
1058 if (!expr_invariant_in_loop_p (loop,
1059 LOOP_VINFO_NITERS (inner_loop_vinfo)))
1060 {
1061 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1062 fprintf (vect_dump,
1063 "not vectorized: inner-loop count not invariant.");
1064 destroy_loop_vec_info (inner_loop_vinfo, true);
1065 return NULL;
1066 }
1067
1068 if (loop->num_nodes != 5)
1069 {
1070 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1071 fprintf (vect_dump, "not vectorized: control flow in loop.");
1072 destroy_loop_vec_info (inner_loop_vinfo, true);
1073 return NULL;
1074 }
1075
1076 gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
1077 entryedge = EDGE_PRED (innerloop->header, 0);
1078 if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
1079 entryedge = EDGE_PRED (innerloop->header, 1);
1080
1081 if (entryedge->src != loop->header
1082 || !single_exit (innerloop)
1083 || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
1084 {
1085 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1086 fprintf (vect_dump, "not vectorized: unsupported outerloop form.");
1087 destroy_loop_vec_info (inner_loop_vinfo, true);
1088 return NULL;
1089 }
1090
1091 if (vect_print_dump_info (REPORT_DETAILS))
1092 fprintf (vect_dump, "Considering outer-loop vectorization.");
1093 }
1094
1095 if (!single_exit (loop)
1096 || EDGE_COUNT (loop->header->preds) != 2)
1097 {
1098 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1099 {
1100 if (!single_exit (loop))
1101 fprintf (vect_dump, "not vectorized: multiple exits.");
1102 else if (EDGE_COUNT (loop->header->preds) != 2)
1103 fprintf (vect_dump, "not vectorized: too many incoming edges.");
1104 }
1105 if (inner_loop_vinfo)
1106 destroy_loop_vec_info (inner_loop_vinfo, true);
1107 return NULL;
1108 }
1109
1110 /* We assume that the loop exit condition is at the end of the loop, i.e.,
1111 that the loop is represented as a do-while (with a proper if-guard
1112 before the loop if needed), where the loop header contains all the
1113 executable statements, and the latch is empty. */
1114 if (!empty_block_p (loop->latch)
1115 || !gimple_seq_empty_p (phi_nodes (loop->latch)))
1116 {
1117 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1118 fprintf (vect_dump, "not vectorized: unexpected loop form.");
1119 if (inner_loop_vinfo)
1120 destroy_loop_vec_info (inner_loop_vinfo, true);
1121 return NULL;
1122 }
1123
1124 /* Make sure there exists a single-predecessor exit bb: */
1125 if (!single_pred_p (single_exit (loop)->dest))
1126 {
1127 edge e = single_exit (loop);
1128 if (!(e->flags & EDGE_ABNORMAL))
1129 {
1130 split_loop_exit_edge (e);
1131 if (vect_print_dump_info (REPORT_DETAILS))
1132 fprintf (vect_dump, "split exit edge.");
1133 }
1134 else
1135 {
1136 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1137 fprintf (vect_dump, "not vectorized: abnormal loop exit edge.");
1138 if (inner_loop_vinfo)
1139 destroy_loop_vec_info (inner_loop_vinfo, true);
1140 return NULL;
1141 }
1142 }
1143
1144 loop_cond = vect_get_loop_niters (loop, &number_of_iterations);
1145 if (!loop_cond)
1146 {
1147 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1148 fprintf (vect_dump, "not vectorized: complicated exit condition.");
1149 if (inner_loop_vinfo)
1150 destroy_loop_vec_info (inner_loop_vinfo, true);
1151 return NULL;
1152 }
1153
1154 if (!number_of_iterations)
1155 {
1156 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1157 fprintf (vect_dump,
1158 "not vectorized: number of iterations cannot be computed.");
1159 if (inner_loop_vinfo)
1160 destroy_loop_vec_info (inner_loop_vinfo, true);
1161 return NULL;
1162 }
1163
1164 if (chrec_contains_undetermined (number_of_iterations))
1165 {
1166 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1167 fprintf (vect_dump, "Infinite number of iterations.");
1168 if (inner_loop_vinfo)
1169 destroy_loop_vec_info (inner_loop_vinfo, true);
1170 return NULL;
1171 }
1172
1173 if (!NITERS_KNOWN_P (number_of_iterations))
1174 {
1175 if (vect_print_dump_info (REPORT_DETAILS))
1176 {
1177 fprintf (vect_dump, "Symbolic number of iterations is ");
1178 print_generic_expr (vect_dump, number_of_iterations, TDF_DETAILS);
1179 }
1180 }
1181 else if (TREE_INT_CST_LOW (number_of_iterations) == 0)
1182 {
1183 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1184 fprintf (vect_dump, "not vectorized: number of iterations = 0.");
1185 if (inner_loop_vinfo)
1186 destroy_loop_vec_info (inner_loop_vinfo, false);
1187 return NULL;
1188 }
1189
1190 loop_vinfo = new_loop_vec_info (loop);
1191 LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
1192 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
1193
1194 STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;
1195
1196 /* CHECKME: May want to keep it around in the future. */
1197 if (inner_loop_vinfo)
1198 destroy_loop_vec_info (inner_loop_vinfo, false);
1199
1200 gcc_assert (!loop->aux);
1201 loop->aux = loop_vinfo;
1202 return loop_vinfo;
1203 }
1204
1205
1206 /* Function vect_analyze_loop_operations.
1207
1208 Scan the loop stmts and make sure they are all vectorizable. */
1209
1210 static bool
1211 vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
1212 {
1213 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1214 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1215 int nbbs = loop->num_nodes;
1216 gimple_stmt_iterator si;
1217 unsigned int vectorization_factor = 0;
1218 int i;
1219 gimple phi;
1220 stmt_vec_info stmt_info;
1221 bool need_to_vectorize = false;
1222 int min_profitable_iters;
1223 int min_scalar_loop_bound;
1224 unsigned int th;
1225 bool only_slp_in_loop = true, ok;
1226 HOST_WIDE_INT max_niter;
1227
1228 if (vect_print_dump_info (REPORT_DETAILS))
1229 fprintf (vect_dump, "=== vect_analyze_loop_operations ===");
1230
1231 gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
1232 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1233 if (slp)
1234 {
1235 /* If all the stmts in the loop can be SLPed, we perform only SLP, and
1236 the vectorization factor of the loop is the unrolling factor required by
1237 the SLP instances. If that unrolling factor is 1, we say that we
1238 perform pure SLP on the loop; cross-iteration parallelism is not
1239 exploited. */
1240 for (i = 0; i < nbbs; i++)
1241 {
1242 basic_block bb = bbs[i];
1243 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1244 {
1245 gimple stmt = gsi_stmt (si);
1246 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1247 gcc_assert (stmt_info);
1248 if ((STMT_VINFO_RELEVANT_P (stmt_info)
1249 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1250 && !PURE_SLP_STMT (stmt_info))
1251 /* STMT needs both SLP and loop-based vectorization. */
1252 only_slp_in_loop = false;
1253 }
1254 }
1255
1256 if (only_slp_in_loop)
1257 vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
1258 else
1259 vectorization_factor = least_common_multiple (vectorization_factor,
1260 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
1261
1262 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
1263 if (vect_print_dump_info (REPORT_DETAILS))
1264 fprintf (vect_dump, "Updating vectorization factor to %d ",
1265 vectorization_factor);
1266 }
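/* Worked example (illustrative numbers): if the loop VF computed earlier was
   4 and the SLP instances require an unrolling factor of 2, a pure-SLP loop
   ends up with VF = 2, while a hybrid loop (some stmts also need loop-based
   vectorization) ends up with VF = least_common_multiple (4, 2) = 4.  */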
1267
1268 for (i = 0; i < nbbs; i++)
1269 {
1270 basic_block bb = bbs[i];
1271
1272 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
1273 {
1274 phi = gsi_stmt (si);
1275 ok = true;
1276
1277 stmt_info = vinfo_for_stmt (phi);
1278 if (vect_print_dump_info (REPORT_DETAILS))
1279 {
1280 fprintf (vect_dump, "examining phi: ");
1281 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
1282 }
1283
1284 /* Inner-loop loop-closed exit phi in outer-loop vectorization
1285 (i.e., a phi in the tail of the outer-loop). */
1286 if (! is_loop_header_bb_p (bb))
1287 {
1288 /* FORNOW: we currently don't support the case that these phis
1289 are not used in the outer loop (unless it is a double reduction,
1290 i.e., this phi is vect_reduction_def), because this case
1291 requires us to actually do something here. */
1292 if ((!STMT_VINFO_RELEVANT_P (stmt_info)
1293 || STMT_VINFO_LIVE_P (stmt_info))
1294 && STMT_VINFO_DEF_TYPE (stmt_info)
1295 != vect_double_reduction_def)
1296 {
1297 if (vect_print_dump_info (REPORT_DETAILS))
1298 fprintf (vect_dump,
1299 "Unsupported loop-closed phi in outer-loop.");
1300 return false;
1301 }
1302
1303 /* If PHI is used in the outer loop, we check that its operand
1304 is defined in the inner loop. */
1305 if (STMT_VINFO_RELEVANT_P (stmt_info))
1306 {
1307 tree phi_op;
1308 gimple op_def_stmt;
1309
1310 if (gimple_phi_num_args (phi) != 1)
1311 return false;
1312
1313 phi_op = PHI_ARG_DEF (phi, 0);
1314 if (TREE_CODE (phi_op) != SSA_NAME)
1315 return false;
1316
1317 op_def_stmt = SSA_NAME_DEF_STMT (phi_op);
1318 if (!op_def_stmt
1319 || !flow_bb_inside_loop_p (loop, gimple_bb (op_def_stmt))
1320 || !vinfo_for_stmt (op_def_stmt))
1321 return false;
1322
1323 if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
1324 != vect_used_in_outer
1325 && STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
1326 != vect_used_in_outer_by_reduction)
1327 return false;
1328 }
1329
1330 continue;
1331 }
1332
1333 gcc_assert (stmt_info);
1334
1335 if (STMT_VINFO_LIVE_P (stmt_info))
1336 {
1337 /* FORNOW: not yet supported. */
1338 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1339 fprintf (vect_dump, "not vectorized: value used after loop.");
1340 return false;
1341 }
1342
1343 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
1344 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
1345 {
1346 /* A scalar-dependence cycle that we don't support. */
1347 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1348 fprintf (vect_dump, "not vectorized: scalar dependence cycle.");
1349 return false;
1350 }
1351
1352 if (STMT_VINFO_RELEVANT_P (stmt_info))
1353 {
1354 need_to_vectorize = true;
1355 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
1356 ok = vectorizable_induction (phi, NULL, NULL);
1357 }
1358
1359 if (!ok)
1360 {
1361 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1362 {
1363 fprintf (vect_dump,
1364 "not vectorized: relevant phi not supported: ");
1365 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
1366 }
1367 return false;
1368 }
1369 }
1370
1371 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1372 {
1373 gimple stmt = gsi_stmt (si);
1374 if (!vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
1375 return false;
1376 }
1377 } /* bbs */
1378
1379 /* All operations in the loop are either irrelevant (deal with loop
1380 control, or dead), or only used outside the loop and can be moved
1381 out of the loop (e.g. invariants, inductions). The loop can be
1382 optimized away by scalar optimizations. We're better off not
1383 touching this loop. */
1384 if (!need_to_vectorize)
1385 {
1386 if (vect_print_dump_info (REPORT_DETAILS))
1387 fprintf (vect_dump,
1388 "All the computation can be taken out of the loop.");
1389 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1390 fprintf (vect_dump,
1391 "not vectorized: redundant loop. no profit to vectorize.");
1392 return false;
1393 }
1394
1395 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1396 && vect_print_dump_info (REPORT_DETAILS))
1397 fprintf (vect_dump,
1398 "vectorization_factor = %d, niters = " HOST_WIDE_INT_PRINT_DEC,
1399 vectorization_factor, LOOP_VINFO_INT_NITERS (loop_vinfo));
1400
1401 if ((LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1402 && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
1403 || ((max_niter = max_stmt_executions_int (loop)) != -1
1404 && (unsigned HOST_WIDE_INT) max_niter < vectorization_factor))
1405 {
1406 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1407 fprintf (vect_dump, "not vectorized: iteration count too small.");
1408 if (vect_print_dump_info (REPORT_DETAILS))
1409 fprintf (vect_dump,"not vectorized: iteration count smaller than "
1410 "vectorization factor.");
1411 return false;
1412 }
1413
1414 /* Analyze cost. Decide if worth while to vectorize. */
1415
1416 /* Once VF is set, SLP costs should be updated since the number of created
1417 vector stmts depends on VF. */
1418 vect_update_slp_costs_according_to_vf (loop_vinfo);
1419
1420 min_profitable_iters = vect_estimate_min_profitable_iters (loop_vinfo);
1421 LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters;
1422
1423 if (min_profitable_iters < 0)
1424 {
1425 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1426 fprintf (vect_dump, "not vectorized: vectorization not profitable.");
1427 if (vect_print_dump_info (REPORT_DETAILS))
1428 fprintf (vect_dump, "not vectorized: vector version will never be "
1429 "profitable.");
1430 return false;
1431 }
1432
1433 min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
1434 * vectorization_factor) - 1);
1435
1436 /* Use the cost model only if it is more conservative than user specified
1437 threshold. */
1438
1439 th = (unsigned) min_scalar_loop_bound;
1440 if (min_profitable_iters
1441 && (!min_scalar_loop_bound
1442 || min_profitable_iters > min_scalar_loop_bound))
1443 th = (unsigned) min_profitable_iters;
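/* Worked example (illustrative numbers): suppose
   PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND) is 2 and the vectorization factor
   is 4, so min_scalar_loop_bound is 7; if the cost model estimated
   min_profitable_iters as 10, the threshold TH becomes 10, and a loop with a
   known iteration count of at most 10 is rejected below.  */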
1444
1445 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1446 && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
1447 {
1448 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1449 fprintf (vect_dump, "not vectorized: vectorization not "
1450 "profitable.");
1451 if (vect_print_dump_info (REPORT_DETAILS))
1452 fprintf (vect_dump, "not vectorized: iteration count smaller than "
1453 "user specified loop bound parameter or minimum "
1454 "profitable iterations (whichever is more conservative).");
1455 return false;
1456 }
1457
1458 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1459 || LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0
1460 || LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
1461 {
1462 if (vect_print_dump_info (REPORT_DETAILS))
1463 fprintf (vect_dump, "epilog loop required.");
1464 if (!vect_can_advance_ivs_p (loop_vinfo))
1465 {
1466 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1467 fprintf (vect_dump,
1468 "not vectorized: can't create epilog loop 1.");
1469 return false;
1470 }
1471 if (!slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
1472 {
1473 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1474 fprintf (vect_dump,
1475 "not vectorized: can't create epilog loop 2.");
1476 return false;
1477 }
1478 }
1479
1480 return true;
1481 }
1482
1483
1484 /* Function vect_analyze_loop_2.
1485
1486 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1487 for it. The different analyses will record information in the
1488 loop_vec_info struct. */
1489 static bool
1490 vect_analyze_loop_2 (loop_vec_info loop_vinfo)
1491 {
1492 bool ok, slp = false;
1493 int max_vf = MAX_VECTORIZATION_FACTOR;
1494 int min_vf = 2;
1495
1496 /* Find all data references in the loop (which correspond to vdefs/vuses)
1497 and analyze their evolution in the loop. Also adjust the minimal
1498 vectorization factor according to the loads and stores.
1499
1500 FORNOW: Handle only simple array references whose
1501 alignment can be forced, and aligned pointer-references. */
1502
1503 ok = vect_analyze_data_refs (loop_vinfo, NULL, &min_vf);
1504 if (!ok)
1505 {
1506 if (vect_print_dump_info (REPORT_DETAILS))
1507 fprintf (vect_dump, "bad data references.");
1508 return false;
1509 }
1510
1511 /* Classify all cross-iteration scalar data-flow cycles.
1512 Cross-iteration cycles caused by virtual phis are analyzed separately. */
1513
1514 vect_analyze_scalar_cycles (loop_vinfo);
1515
1516 vect_pattern_recog (loop_vinfo, NULL);
1517
1518 /* Data-flow analysis to detect stmts that do not need to be vectorized. */
1519
1520 ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
1521 if (!ok)
1522 {
1523 if (vect_print_dump_info (REPORT_DETAILS))
1524 fprintf (vect_dump, "unexpected pattern.");
1525 return false;
1526 }
1527
1528 /* Analyze data dependences between the data-refs in the loop
1529 and adjust the maximum vectorization factor according to
1530 the dependences.
1531 FORNOW: fail at the first data dependence that we encounter. */
1532
1533 ok = vect_analyze_data_ref_dependences (loop_vinfo, NULL, &max_vf);
1534 if (!ok
1535 || max_vf < min_vf)
1536 {
1537 if (vect_print_dump_info (REPORT_DETAILS))
1538 fprintf (vect_dump, "bad data dependence.");
1539 return false;
1540 }
1541
1542 ok = vect_determine_vectorization_factor (loop_vinfo);
1543 if (!ok)
1544 {
1545 if (vect_print_dump_info (REPORT_DETAILS))
1546 fprintf (vect_dump, "can't determine vectorization factor.");
1547 return false;
1548 }
1549 if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
1550 {
1551 if (vect_print_dump_info (REPORT_DETAILS))
1552 fprintf (vect_dump, "bad data dependence.");
1553 return false;
1554 }
1555
1556 /* Analyze the alignment of the data-refs in the loop.
1557 Fail if a data reference is found that cannot be vectorized. */
1558
1559 ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL);
1560 if (!ok)
1561 {
1562 if (vect_print_dump_info (REPORT_DETAILS))
1563 fprintf (vect_dump, "bad data alignment.");
1564 return false;
1565 }
1566
1567 /* Analyze the access patterns of the data-refs in the loop (consecutive,
1568 complex, etc.). FORNOW: Only handle consecutive access pattern. */
1569
1570 ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
1571 if (!ok)
1572 {
1573 if (vect_print_dump_info (REPORT_DETAILS))
1574 fprintf (vect_dump, "bad data access.");
1575 return false;
1576 }
1577
1578 /* Prune the list of ddrs to be tested at run-time by versioning for alias.
1579 It is important to call pruning after vect_analyze_data_ref_accesses,
1580 since we use grouping information gathered by interleaving analysis. */
1581 ok = vect_prune_runtime_alias_test_list (loop_vinfo);
1582 if (!ok)
1583 {
1584 if (vect_print_dump_info (REPORT_DETAILS))
1585 fprintf (vect_dump, "too long list of versioning for alias "
1586 "run-time tests.");
1587 return false;
1588 }
1589
1590 /* This pass will decide on using loop versioning and/or loop peeling in
1591 order to enhance the alignment of data references in the loop. */
1592
1593 ok = vect_enhance_data_refs_alignment (loop_vinfo);
1594 if (!ok)
1595 {
1596 if (vect_print_dump_info (REPORT_DETAILS))
1597 fprintf (vect_dump, "bad data alignment.");
1598 return false;
1599 }
1600
1601 /* Check the SLP opportunities in the loop, analyze and build SLP trees. */
1602 ok = vect_analyze_slp (loop_vinfo, NULL);
1603 if (ok)
1604 {
1605 /* Decide which possible SLP instances to SLP. */
1606 slp = vect_make_slp_decision (loop_vinfo);
1607
1608 /* Find stmts that need to be both vectorized and SLPed. */
1609 vect_detect_hybrid_slp (loop_vinfo);
1610 }
1611 else
1612 return false;
1613
1614 /* Scan all the operations in the loop and make sure they are
1615 vectorizable. */
1616
1617 ok = vect_analyze_loop_operations (loop_vinfo, slp);
1618 if (!ok)
1619 {
1620 if (vect_print_dump_info (REPORT_DETAILS))
1621 fprintf (vect_dump, "bad operation or unsupported loop bound.");
1622 return false;
1623 }
1624
1625 return true;
1626 }
1627
1628 /* Function vect_analyze_loop.
1629
1630 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1631 for it. The different analyses will record information in the
1632 loop_vec_info struct. */
1633 loop_vec_info
1634 vect_analyze_loop (struct loop *loop)
1635 {
1636 loop_vec_info loop_vinfo;
1637 unsigned int vector_sizes;
1638
1639 /* Autodetect first vector size we try. */
1640 current_vector_size = 0;
1641 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
1642
1643 if (vect_print_dump_info (REPORT_DETAILS))
1644 fprintf (vect_dump, "===== analyze_loop_nest =====");
1645
1646 if (loop_outer (loop)
1647 && loop_vec_info_for_loop (loop_outer (loop))
1648 && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
1649 {
1650 if (vect_print_dump_info (REPORT_DETAILS))
1651 fprintf (vect_dump, "outer-loop already vectorized.");
1652 return NULL;
1653 }
1654
1655 while (1)
1656 {
1657 /* Check the CFG characteristics of the loop (nesting, entry/exit). */
1658 loop_vinfo = vect_analyze_loop_form (loop);
1659 if (!loop_vinfo)
1660 {
1661 if (vect_print_dump_info (REPORT_DETAILS))
1662 fprintf (vect_dump, "bad loop form.");
1663 return NULL;
1664 }
1665
1666 if (vect_analyze_loop_2 (loop_vinfo))
1667 {
1668 LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
1669
1670 return loop_vinfo;
1671 }
1672
1673 destroy_loop_vec_info (loop_vinfo, true);
1674
1675 vector_sizes &= ~current_vector_size;
1676 if (vector_sizes == 0
1677 || current_vector_size == 0)
1678 return NULL;
1679
1680 /* Try the next biggest vector size. */
1681 current_vector_size = 1 << floor_log2 (vector_sizes);
1682 if (vect_print_dump_info (REPORT_DETAILS))
1683 fprintf (vect_dump, "***** Re-trying analysis with "
1684 "vector size %d\n", current_vector_size);
1685 }
1686 }
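/* For instance (a sketch, hypothetical target): if
   targetm.vectorize.autovectorize_vector_sizes () returns the bitmask
   32 | 16, the first analysis above runs with the autodetected
   current_vector_size; if it fails, that size's bit is cleared from
   vector_sizes and the next attempt uses 1 << floor_log2 (vector_sizes),
   i.e. the largest remaining size.  */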
1687
1688
1689 /* Function reduction_code_for_scalar_code
1690
1691 Input:
1692 CODE - tree_code of a reduction operations.
1693
1694 Output:
1695 REDUC_CODE - the corresponding tree-code to be used to reduce the
1696 vector of partial results into a single scalar result (which
1697 will also reside in a vector) or ERROR_MARK if the operation is
1698 a supported reduction operation, but does not have such a tree-code.
1699
1700 Return FALSE if CODE currently cannot be vectorized as reduction. */
1701
1702 static bool
1703 reduction_code_for_scalar_code (enum tree_code code,
1704 enum tree_code *reduc_code)
1705 {
1706 switch (code)
1707 {
1708 case MAX_EXPR:
1709 *reduc_code = REDUC_MAX_EXPR;
1710 return true;
1711
1712 case MIN_EXPR:
1713 *reduc_code = REDUC_MIN_EXPR;
1714 return true;
1715
1716 case PLUS_EXPR:
1717 *reduc_code = REDUC_PLUS_EXPR;
1718 return true;
1719
1720 case MULT_EXPR:
1721 case MINUS_EXPR:
1722 case BIT_IOR_EXPR:
1723 case BIT_XOR_EXPR:
1724 case BIT_AND_EXPR:
1725 *reduc_code = ERROR_MARK;
1726 return true;
1727
1728 default:
1729 return false;
1730 }
1731 }
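/* Usage sketch (illustrative): for a summation reduction "sum += a[i]" the
   scalar code is PLUS_EXPR, so the function above returns true and sets
   *REDUC_CODE to REDUC_PLUS_EXPR, which folds the vector of partial sums
   into a single scalar; for MINUS_EXPR it also returns true but sets
   *REDUC_CODE to ERROR_MARK, meaning there is no single reduction tree code
   and the epilogue must reduce the vector result by other means.  */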
1732
1733
1734 /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
1735 STMT is printed with a message MSG. */
1736
1737 static void
1738 report_vect_op (gimple stmt, const char *msg)
1739 {
1740 fprintf (vect_dump, "%s", msg);
1741 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
1742 }
1743
1744
1745 /* Detect SLP reduction of the form:
1746
1747 #a1 = phi <a5, a0>
1748 a2 = operation (a1)
1749 a3 = operation (a2)
1750 a4 = operation (a3)
1751 a5 = operation (a4)
1752
1753 #a = phi <a5>
1754
1755 PHI is the reduction phi node (#a1 = phi <a5, a0> above)
1756 FIRST_STMT is the first reduction stmt in the chain
1757 (a2 = operation (a1)).
1758
1759 Return TRUE if a reduction chain was detected. */
1760
1761 static bool
1762 vect_is_slp_reduction (loop_vec_info loop_info, gimple phi, gimple first_stmt)
1763 {
1764 struct loop *loop = (gimple_bb (phi))->loop_father;
1765 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
1766 enum tree_code code;
1767 gimple current_stmt = NULL, loop_use_stmt = NULL, first, next_stmt;
1768 stmt_vec_info use_stmt_info, current_stmt_info;
1769 tree lhs;
1770 imm_use_iterator imm_iter;
1771 use_operand_p use_p;
1772 int nloop_uses, size = 0, n_out_of_loop_uses;
1773 bool found = false;
1774
1775 if (loop != vect_loop)
1776 return false;
1777
1778 lhs = PHI_RESULT (phi);
1779 code = gimple_assign_rhs_code (first_stmt);
1780 while (1)
1781 {
1782 nloop_uses = 0;
1783 n_out_of_loop_uses = 0;
1784 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
1785 {
1786 gimple use_stmt = USE_STMT (use_p);
1787 if (is_gimple_debug (use_stmt))
1788 continue;
1789
1792 /* Check if we got back to the reduction phi. */
1793 if (use_stmt == phi)
1794 {
1795 loop_use_stmt = use_stmt;
1796 found = true;
1797 break;
1798 }
1799
1800 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
1801 {
1802 if (vinfo_for_stmt (use_stmt)
1803 && !STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
1804 {
1805 loop_use_stmt = use_stmt;
1806 nloop_uses++;
1807 }
1808 }
1809 else
1810 n_out_of_loop_uses++;
1811
1812          /* There can be either a single use in the loop or two uses in
1813 phi nodes. */
1814 if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
1815 return false;
1816 }
1817
1818 if (found)
1819 break;
1820
1821 /* We reached a statement with no loop uses. */
1822 if (nloop_uses == 0)
1823 return false;
1824
1825 /* This is a loop exit phi, and we haven't reached the reduction phi. */
1826 if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
1827 return false;
1828
1829 if (!is_gimple_assign (loop_use_stmt)
1830 || code != gimple_assign_rhs_code (loop_use_stmt)
1831 || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
1832 return false;
1833
1834 /* Insert USE_STMT into reduction chain. */
1835 use_stmt_info = vinfo_for_stmt (loop_use_stmt);
1836 if (current_stmt)
1837 {
1838 current_stmt_info = vinfo_for_stmt (current_stmt);
1839 GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
1840 GROUP_FIRST_ELEMENT (use_stmt_info)
1841 = GROUP_FIRST_ELEMENT (current_stmt_info);
1842 }
1843 else
1844 GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;
1845
1846 lhs = gimple_assign_lhs (loop_use_stmt);
1847 current_stmt = loop_use_stmt;
1848 size++;
1849 }
1850
1851 if (!found || loop_use_stmt != phi || size < 2)
1852 return false;
1853
1854 /* Swap the operands, if needed, to make the reduction operand be the second
1855 operand. */
1856 lhs = PHI_RESULT (phi);
1857 next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
1858 while (next_stmt)
1859 {
1860 if (gimple_assign_rhs2 (next_stmt) == lhs)
1861 {
1862 tree op = gimple_assign_rhs1 (next_stmt);
1863 gimple def_stmt = NULL;
1864
1865 if (TREE_CODE (op) == SSA_NAME)
1866 def_stmt = SSA_NAME_DEF_STMT (op);
1867
1868 /* Check that the other def is either defined in the loop
1869 ("vect_internal_def"), or it's an induction (defined by a
1870 loop-header phi-node). */
1871 if (def_stmt
1872 && gimple_bb (def_stmt)
1873 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
1874 && (is_gimple_assign (def_stmt)
1875 || is_gimple_call (def_stmt)
1876 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
1877 == vect_induction_def
1878 || (gimple_code (def_stmt) == GIMPLE_PHI
1879 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
1880 == vect_internal_def
1881 && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
1882 {
1883 lhs = gimple_assign_lhs (next_stmt);
1884 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
1885 continue;
1886 }
1887
1888 return false;
1889 }
1890 else
1891 {
1892 tree op = gimple_assign_rhs2 (next_stmt);
1893 gimple def_stmt = NULL;
1894
1895 if (TREE_CODE (op) == SSA_NAME)
1896 def_stmt = SSA_NAME_DEF_STMT (op);
1897
1898 /* Check that the other def is either defined in the loop
1899 ("vect_internal_def"), or it's an induction (defined by a
1900 loop-header phi-node). */
1901 if (def_stmt
1902 && gimple_bb (def_stmt)
1903 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
1904 && (is_gimple_assign (def_stmt)
1905 || is_gimple_call (def_stmt)
1906 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
1907 == vect_induction_def
1908 || (gimple_code (def_stmt) == GIMPLE_PHI
1909 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
1910 == vect_internal_def
1911 && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
1912 {
1913 if (vect_print_dump_info (REPORT_DETAILS))
1914 {
1915 fprintf (vect_dump, "swapping oprnds: ");
1916 print_gimple_stmt (vect_dump, next_stmt, 0, TDF_SLIM);
1917 }
1918
1919 swap_tree_operands (next_stmt,
1920 gimple_assign_rhs1_ptr (next_stmt),
1921 gimple_assign_rhs2_ptr (next_stmt));
1922 update_stmt (next_stmt);
1923 }
1924 else
1925 return false;
1926 }
1927
1928 lhs = gimple_assign_lhs (next_stmt);
1929 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
1930 }
1931
1932 /* Save the chain for further analysis in SLP detection. */
1933 first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
1934 VEC_safe_push (gimple, heap, LOOP_VINFO_REDUCTION_CHAINS (loop_info), first);
1935 GROUP_SIZE (vinfo_for_stmt (first)) = size;
1936
1937 return true;
1938 }
1939
1940
1941 /* Function vect_is_simple_reduction_1
1942
1943 (1) Detect a cross-iteration def-use cycle that represents a simple
1944 reduction computation. We look for the following pattern:
1945
1946 loop_header:
1947 a1 = phi < a0, a2 >
1948 a3 = ...
1949 a2 = operation (a3, a1)
1950
1951 such that:
1952 1. operation is commutative and associative and it is safe to
1953 change the order of the computation (if CHECK_REDUCTION is true)
1954 2. no uses for a2 in the loop (a2 is used out of the loop)
1955 3. no uses of a1 in the loop besides the reduction operation
1956 4. no uses of a1 outside the loop.
1957
1958 Conditions 1,4 are tested here.
1959 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
1960
1961 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
1962 nested cycles, if CHECK_REDUCTION is false.
1963
1964 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
1965 reductions:
1966
1967 a1 = phi < a0, a2 >
1968 inner loop (def of a3)
1969 a2 = phi < a3 >
1970
1971 If MODIFY is true it tries also to rework the code in-place to enable
1972 detection of more reduction patterns. For the time being we rewrite
1973 "res -= RHS" into "rhs += -RHS" when it seems worthwhile.
1974 */
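
/* Case (3) arises, e.g., for a nested summation (an illustrative
   source-level sketch):

     for (j = 0; j < M; j++)
       for (i = 0; i < N; i++)
         sum += a[j][i];

   where, in loop-closed SSA form, the outer-loop phi of SUM takes its
   latch value from the inner loop's exit phi.  */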
1975
1976 static gimple
1977 vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
1978 bool check_reduction, bool *double_reduc,
1979 bool modify)
1980 {
1981 struct loop *loop = (gimple_bb (phi))->loop_father;
1982 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
1983 edge latch_e = loop_latch_edge (loop);
1984 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
1985 gimple def_stmt, def1 = NULL, def2 = NULL;
1986 enum tree_code orig_code, code;
1987 tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
1988 tree type;
1989 int nloop_uses;
1990 tree name;
1991 imm_use_iterator imm_iter;
1992 use_operand_p use_p;
1993 bool phi_def;
1994
1995 *double_reduc = false;
1996
1997 /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization,
1998 otherwise, we assume outer loop vectorization. */
1999 gcc_assert ((check_reduction && loop == vect_loop)
2000 || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));
2001
2002 name = PHI_RESULT (phi);
2003 nloop_uses = 0;
2004 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2005 {
2006 gimple use_stmt = USE_STMT (use_p);
2007 if (is_gimple_debug (use_stmt))
2008 continue;
2009
2010 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2011 {
2012 if (vect_print_dump_info (REPORT_DETAILS))
2013 fprintf (vect_dump, "intermediate value used outside loop.");
2014
2015 return NULL;
2016 }
2017
2018 if (vinfo_for_stmt (use_stmt)
2019 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
2020 nloop_uses++;
2021 if (nloop_uses > 1)
2022 {
2023 if (vect_print_dump_info (REPORT_DETAILS))
2024 fprintf (vect_dump, "reduction used in loop.");
2025 return NULL;
2026 }
2027 }
2028
2029 if (TREE_CODE (loop_arg) != SSA_NAME)
2030 {
2031 if (vect_print_dump_info (REPORT_DETAILS))
2032 {
2033 fprintf (vect_dump, "reduction: not ssa_name: ");
2034 print_generic_expr (vect_dump, loop_arg, TDF_SLIM);
2035 }
2036 return NULL;
2037 }
2038
2039 def_stmt = SSA_NAME_DEF_STMT (loop_arg);
2040 if (!def_stmt)
2041 {
2042 if (vect_print_dump_info (REPORT_DETAILS))
2043 fprintf (vect_dump, "reduction: no def_stmt.");
2044 return NULL;
2045 }
2046
2047 if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
2048 {
2049 if (vect_print_dump_info (REPORT_DETAILS))
2050 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
2051 return NULL;
2052 }
2053
2054 if (is_gimple_assign (def_stmt))
2055 {
2056 name = gimple_assign_lhs (def_stmt);
2057 phi_def = false;
2058 }
2059 else
2060 {
2061 name = PHI_RESULT (def_stmt);
2062 phi_def = true;
2063 }
2064
2065 nloop_uses = 0;
2066 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2067 {
2068 gimple use_stmt = USE_STMT (use_p);
2069 if (is_gimple_debug (use_stmt))
2070 continue;
2071 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
2072 && vinfo_for_stmt (use_stmt)
2073 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
2074 nloop_uses++;
2075 if (nloop_uses > 1)
2076 {
2077 if (vect_print_dump_info (REPORT_DETAILS))
2078 fprintf (vect_dump, "reduction used in loop.");
2079 return NULL;
2080 }
2081 }
2082
2083 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
2084 defined in the inner loop. */
2085 if (phi_def)
2086 {
2087 op1 = PHI_ARG_DEF (def_stmt, 0);
2088
2089 if (gimple_phi_num_args (def_stmt) != 1
2090 || TREE_CODE (op1) != SSA_NAME)
2091 {
2092 if (vect_print_dump_info (REPORT_DETAILS))
2093 fprintf (vect_dump, "unsupported phi node definition.");
2094
2095 return NULL;
2096 }
2097
2098 def1 = SSA_NAME_DEF_STMT (op1);
2099 if (flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2100 && loop->inner
2101 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
2102 && is_gimple_assign (def1))
2103 {
2104 if (vect_print_dump_info (REPORT_DETAILS))
2105 report_vect_op (def_stmt, "detected double reduction: ");
2106
2107 *double_reduc = true;
2108 return def_stmt;
2109 }
2110
2111 return NULL;
2112 }
2113
2114 code = orig_code = gimple_assign_rhs_code (def_stmt);
2115
2116   /* We can handle "res -= x[i]", which is non-associative, by simply
2117      rewriting it into "res += -x[i]".  Avoid changing the gimple
2118      instruction during the first simple tests and only do this
2119      if we're allowed to change code at all.  */
2120 if (code == MINUS_EXPR
2121 && modify
2122 && (op1 = gimple_assign_rhs1 (def_stmt))
2123 && TREE_CODE (op1) == SSA_NAME
2124 && SSA_NAME_DEF_STMT (op1) == phi)
2125 code = PLUS_EXPR;
2126
2127 if (check_reduction
2128 && (!commutative_tree_code (code) || !associative_tree_code (code)))
2129 {
2130 if (vect_print_dump_info (REPORT_DETAILS))
2131 report_vect_op (def_stmt, "reduction: not commutative/associative: ");
2132 return NULL;
2133 }
2134
2135 if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
2136 {
2137 if (code != COND_EXPR)
2138 {
2139 if (vect_print_dump_info (REPORT_DETAILS))
2140 report_vect_op (def_stmt, "reduction: not binary operation: ");
2141
2142 return NULL;
2143 }
2144
2145 op3 = gimple_assign_rhs1 (def_stmt);
2146 if (COMPARISON_CLASS_P (op3))
2147 {
2148 op4 = TREE_OPERAND (op3, 1);
2149 op3 = TREE_OPERAND (op3, 0);
2150 }
2151
2152 op1 = gimple_assign_rhs2 (def_stmt);
2153 op2 = gimple_assign_rhs3 (def_stmt);
2154
2155 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
2156 {
2157 if (vect_print_dump_info (REPORT_DETAILS))
2158 report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
2159
2160 return NULL;
2161 }
2162 }
2163 else
2164 {
2165 op1 = gimple_assign_rhs1 (def_stmt);
2166 op2 = gimple_assign_rhs2 (def_stmt);
2167
2168 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
2169 {
2170 if (vect_print_dump_info (REPORT_DETAILS))
2171 report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
2172
2173 return NULL;
2174 }
2175 }
2176
2177 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
2178 if ((TREE_CODE (op1) == SSA_NAME
2179 && !types_compatible_p (type,TREE_TYPE (op1)))
2180 || (TREE_CODE (op2) == SSA_NAME
2181 && !types_compatible_p (type, TREE_TYPE (op2)))
2182 || (op3 && TREE_CODE (op3) == SSA_NAME
2183 && !types_compatible_p (type, TREE_TYPE (op3)))
2184 || (op4 && TREE_CODE (op4) == SSA_NAME
2185 && !types_compatible_p (type, TREE_TYPE (op4))))
2186 {
2187 if (vect_print_dump_info (REPORT_DETAILS))
2188 {
2189 fprintf (vect_dump, "reduction: multiple types: operation type: ");
2190 print_generic_expr (vect_dump, type, TDF_SLIM);
2191 fprintf (vect_dump, ", operands types: ");
2192 print_generic_expr (vect_dump, TREE_TYPE (op1), TDF_SLIM);
2193 fprintf (vect_dump, ",");
2194 print_generic_expr (vect_dump, TREE_TYPE (op2), TDF_SLIM);
2195 if (op3)
2196 {
2197 fprintf (vect_dump, ",");
2198 print_generic_expr (vect_dump, TREE_TYPE (op3), TDF_SLIM);
2199 }
2200
2201 if (op4)
2202 {
2203 fprintf (vect_dump, ",");
2204 print_generic_expr (vect_dump, TREE_TYPE (op4), TDF_SLIM);
2205 }
2206 }
2207
2208 return NULL;
2209 }
2210
2211 /* Check that it's ok to change the order of the computation.
2212 Generally, when vectorizing a reduction we change the order of the
2213 computation. This may change the behavior of the program in some
2214 cases, so we need to check that this is ok. One exception is when
2215 vectorizing an outer-loop: the inner-loop is executed sequentially,
2216 and therefore vectorizing reductions in the inner-loop during
2217 outer-loop vectorization is safe. */
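
  /* For instance, in IEEE single precision (a numeric illustration only):
     (0.1f + 1e20f) - 1e20f evaluates to 0.0f whereas 0.1f + (1e20f - 1e20f)
     evaluates to 0.1f, which is why reordering a float summation requires
     -fassociative-math (checked below).  */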
2218
2219 /* CHECKME: check for !flag_finite_math_only too? */
2220 if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
2221 && check_reduction)
2222 {
2223 /* Changing the order of operations changes the semantics. */
2224 if (vect_print_dump_info (REPORT_DETAILS))
2225 report_vect_op (def_stmt, "reduction: unsafe fp math optimization: ");
2226 return NULL;
2227 }
2228 else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
2229 && check_reduction)
2230 {
2231 /* Changing the order of operations changes the semantics. */
2232 if (vect_print_dump_info (REPORT_DETAILS))
2233 report_vect_op (def_stmt, "reduction: unsafe int math optimization: ");
2234 return NULL;
2235 }
2236 else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction)
2237 {
2238 /* Changing the order of operations changes the semantics. */
2239 if (vect_print_dump_info (REPORT_DETAILS))
2240 report_vect_op (def_stmt,
2241 "reduction: unsafe fixed-point math optimization: ");
2242 return NULL;
2243 }
2244
2245 /* If we detected "res -= x[i]" earlier, rewrite it into
2246 "res += -x[i]" now. If this turns out to be useless reassoc
2247 will clean it up again. */
2248 if (orig_code == MINUS_EXPR)
2249 {
2250 tree rhs = gimple_assign_rhs2 (def_stmt);
2251 tree negrhs = make_ssa_name (SSA_NAME_VAR (rhs), NULL);
2252 gimple negate_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, negrhs,
2253 rhs, NULL);
2254 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
2255 set_vinfo_for_stmt (negate_stmt, new_stmt_vec_info (negate_stmt,
2256 loop_info, NULL));
2257 gsi_insert_before (&gsi, negate_stmt, GSI_NEW_STMT);
2258 gimple_assign_set_rhs2 (def_stmt, negrhs);
2259 gimple_assign_set_rhs_code (def_stmt, PLUS_EXPR);
2260 update_stmt (def_stmt);
2261 }
2262
2263 /* Reduction is safe. We're dealing with one of the following:
2264 1) integer arithmetic and no trapv
2265 2) floating point arithmetic, and special flags permit this optimization
2266 3) nested cycle (i.e., outer loop vectorization). */
2267 if (TREE_CODE (op1) == SSA_NAME)
2268 def1 = SSA_NAME_DEF_STMT (op1);
2269
2270 if (TREE_CODE (op2) == SSA_NAME)
2271 def2 = SSA_NAME_DEF_STMT (op2);
2272
2273 if (code != COND_EXPR
2274 && ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2))))
2275 {
2276 if (vect_print_dump_info (REPORT_DETAILS))
2277 report_vect_op (def_stmt, "reduction: no defs for operands: ");
2278 return NULL;
2279 }
2280
2281 /* Check that one def is the reduction def, defined by PHI,
2282 the other def is either defined in the loop ("vect_internal_def"),
2283 or it's an induction (defined by a loop-header phi-node). */
2284
2285 if (def2 && def2 == phi
2286 && (code == COND_EXPR
2287 || !def1 || gimple_nop_p (def1)
2288 || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
2289 && (is_gimple_assign (def1)
2290 || is_gimple_call (def1)
2291 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
2292 == vect_induction_def
2293 || (gimple_code (def1) == GIMPLE_PHI
2294 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
2295 == vect_internal_def
2296 && !is_loop_header_bb_p (gimple_bb (def1)))))))
2297 {
2298 if (vect_print_dump_info (REPORT_DETAILS))
2299 report_vect_op (def_stmt, "detected reduction: ");
2300 return def_stmt;
2301 }
2302
2303 if (def1 && def1 == phi
2304 && (code == COND_EXPR
2305 || !def2 || gimple_nop_p (def2)
2306 || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
2307 && (is_gimple_assign (def2)
2308 || is_gimple_call (def2)
2309 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
2310 == vect_induction_def
2311 || (gimple_code (def2) == GIMPLE_PHI
2312 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
2313 == vect_internal_def
2314 && !is_loop_header_bb_p (gimple_bb (def2)))))))
2315 {
2316 if (check_reduction)
2317 {
2318 /* Swap operands (just for simplicity - so that the rest of the code
2319 can assume that the reduction variable is always the last (second)
2320 argument). */
2321 if (vect_print_dump_info (REPORT_DETAILS))
2322 report_vect_op (def_stmt,
2323 "detected reduction: need to swap operands: ");
2324
2325 swap_tree_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
2326 gimple_assign_rhs2_ptr (def_stmt));
2327 }
2328 else
2329 {
2330 if (vect_print_dump_info (REPORT_DETAILS))
2331 report_vect_op (def_stmt, "detected reduction: ");
2332 }
2333
2334 return def_stmt;
2335 }
2336
2337 /* Try to find SLP reduction chain. */
2338 if (check_reduction && vect_is_slp_reduction (loop_info, phi, def_stmt))
2339 {
2340 if (vect_print_dump_info (REPORT_DETAILS))
2341 report_vect_op (def_stmt, "reduction: detected reduction chain: ");
2342
2343 return def_stmt;
2344 }
2345
2346 if (vect_print_dump_info (REPORT_DETAILS))
2347 report_vect_op (def_stmt, "reduction: unknown pattern: ");
2348
2349 return NULL;
2350 }
2351
2352 /* Wrapper around vect_is_simple_reduction_1, that won't modify code
2353 in-place. Arguments as there. */
2354
2355 static gimple
2356 vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
2357 bool check_reduction, bool *double_reduc)
2358 {
2359 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
2360 double_reduc, false);
2361 }
2362
2363 /* Wrapper around vect_is_simple_reduction_1, which will modify code
2364 in-place if it enables detection of more reductions. Arguments
2365 as there. */
2366
2367 gimple
2368 vect_force_simple_reduction (loop_vec_info loop_info, gimple phi,
2369 bool check_reduction, bool *double_reduc)
2370 {
2371 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
2372 double_reduc, true);
2373 }
2374
2375 /* Calculate the cost of one scalar iteration of the loop. */
2376 int
2377 vect_get_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
2378 {
2379 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2380 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
2381 int nbbs = loop->num_nodes, factor, scalar_single_iter_cost = 0;
2382 int innerloop_iters, i, stmt_cost;
2383
2384 /* Count statements in scalar loop. Using this as scalar cost for a single
2385 iteration for now.
2386
2387 TODO: Add outer loop support.
2388
2389 TODO: Consider assigning different costs to different scalar
2390 statements. */
2391
2392 /* FORNOW. */
2393 innerloop_iters = 1;
2394 if (loop->inner)
2395 innerloop_iters = 50; /* FIXME */
2396
2397 for (i = 0; i < nbbs; i++)
2398 {
2399 gimple_stmt_iterator si;
2400 basic_block bb = bbs[i];
2401
2402 if (bb->loop_father == loop->inner)
2403 factor = innerloop_iters;
2404 else
2405 factor = 1;
2406
2407 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2408 {
2409 gimple stmt = gsi_stmt (si);
2410 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2411
2412 if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
2413 continue;
2414
2415 /* Skip stmts that are not vectorized inside the loop. */
2416 if (stmt_info
2417 && !STMT_VINFO_RELEVANT_P (stmt_info)
2418 && (!STMT_VINFO_LIVE_P (stmt_info)
2419 || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
2420 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
2421 continue;
2422
2423 if (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt)))
2424 {
2425 if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
2426 stmt_cost = vect_get_stmt_cost (scalar_load);
2427 else
2428 stmt_cost = vect_get_stmt_cost (scalar_store);
2429 }
2430 else
2431 stmt_cost = vect_get_stmt_cost (scalar_stmt);
2432
2433 scalar_single_iter_cost += stmt_cost * factor;
2434 }
2435 }
2436 return scalar_single_iter_cost;
2437 }
2438
2439 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
2440 int
2441 vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
2442 int *peel_iters_epilogue,
2443 int scalar_single_iter_cost,
2444 stmt_vector_for_cost *prologue_cost_vec,
2445 stmt_vector_for_cost *epilogue_cost_vec)
2446 {
2447 int retval = 0;
2448 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2449
2450 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
2451 {
2452 *peel_iters_epilogue = vf/2;
2453 if (vect_print_dump_info (REPORT_COST))
2454 fprintf (vect_dump, "cost model: "
2455 "epilogue peel iters set to vf/2 because "
2456 "loop iterations are unknown .");
2457
2458       /* If peeled iterations are known but the number of scalar loop
2459          iterations is unknown, count a taken branch per peeled loop.  */
2460 retval = record_stmt_cost (prologue_cost_vec, 2, cond_branch_taken,
2461 NULL, 0, vect_prologue);
2462 }
2463 else
2464 {
2465 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
2466 peel_iters_prologue = niters < peel_iters_prologue ?
2467 niters : peel_iters_prologue;
2468 *peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
2469 /* If we need to peel for gaps, but no peeling is required, we have to
2470 peel VF iterations. */
2471 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
2472 *peel_iters_epilogue = vf;
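
      /* E.g. (an illustrative sketch): NITERS = 100, PEEL_ITERS_PROLOGUE = 3
         and VF = 4 give *PEEL_ITERS_EPILOGUE = (100 - 3) % 4 = 1; with
         peeling for gaps and an exact multiple the epilogue is forced up to
         a full VF = 4 iterations.  */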
2473 }
2474
2475 if (peel_iters_prologue)
2476 retval += record_stmt_cost (prologue_cost_vec,
2477 peel_iters_prologue * scalar_single_iter_cost,
2478 scalar_stmt, NULL, 0, vect_prologue);
2479 if (*peel_iters_epilogue)
2480 retval += record_stmt_cost (epilogue_cost_vec,
2481 *peel_iters_epilogue * scalar_single_iter_cost,
2482 scalar_stmt, NULL, 0, vect_epilogue);
2483 return retval;
2484 }
2485
2486 /* Function vect_estimate_min_profitable_iters
2487
2488 Return the number of iterations required for the vector version of the
2489 loop to be profitable relative to the cost of the scalar version of the
2490 loop.
2491
2492 TODO: Take profile info into account before making vectorization
2493 decisions, if available. */
2494
2495 int
2496 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
2497 {
2498 int min_profitable_iters;
2499 int peel_iters_prologue;
2500 int peel_iters_epilogue;
2501 unsigned vec_inside_cost = 0;
2502 int vec_outside_cost = 0;
2503 unsigned vec_prologue_cost = 0;
2504 unsigned vec_epilogue_cost = 0;
2505 int scalar_single_iter_cost = 0;
2506 int scalar_outside_cost = 0;
2507 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2508 int npeel = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
2509 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2510
2511 /* Cost model disabled. */
2512 if (!flag_vect_cost_model)
2513 {
2514 if (vect_print_dump_info (REPORT_COST))
2515 fprintf (vect_dump, "cost model disabled.");
2516 return 0;
2517 }
2518
2519 /* Requires loop versioning tests to handle misalignment. */
2520 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
2521 {
2522 /* FIXME: Make cost depend on complexity of individual check. */
2523 unsigned len = VEC_length (gimple,
2524 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
2525 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
2526 vect_prologue);
2527 if (vect_print_dump_info (REPORT_COST))
2528 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
2529 "versioning to treat misalignment.\n");
2530 }
2531
2532 /* Requires loop versioning with alias checks. */
2533 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2534 {
2535 /* FIXME: Make cost depend on complexity of individual check. */
2536 unsigned len = VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
2537 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
2538 vect_prologue);
2539 if (vect_print_dump_info (REPORT_COST))
2540 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
2541 "versioning aliasing.\n");
2542 }
2543
2544 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2545 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2546 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
2547 vect_prologue);
2548
2549 /* Count statements in scalar loop. Using this as scalar cost for a single
2550 iteration for now.
2551
2552 TODO: Add outer loop support.
2553
2554 TODO: Consider assigning different costs to different scalar
2555 statements. */
2556
2557 scalar_single_iter_cost = vect_get_single_scalar_iteration_cost (loop_vinfo);
2558
2559 /* Add additional cost for the peeled instructions in prologue and epilogue
2560 loop.
2561
2562 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
2563 at compile-time - we assume it's vf/2 (the worst would be vf-1).
2564
2565 TODO: Build an expression that represents peel_iters for prologue and
2566 epilogue to be used in a run-time test. */
2567
2568 if (npeel < 0)
2569 {
2570 peel_iters_prologue = vf/2;
2571 if (vect_print_dump_info (REPORT_COST))
2572 fprintf (vect_dump, "cost model: "
2573 "prologue peel iters set to vf/2.");
2574
2575       /* If peeling for alignment is unknown, the loop bound of the main
2576          loop becomes unknown.  */
2577 peel_iters_epilogue = vf/2;
2578 if (vect_print_dump_info (REPORT_COST))
2579 fprintf (vect_dump, "cost model: "
2580 "epilogue peel iters set to vf/2 because "
2581 "peeling for alignment is unknown .");
2582
2583 /* If peeled iterations are unknown, count a taken branch and a not taken
2584 branch per peeled loop. Even if scalar loop iterations are known,
2585 vector iterations are not known since peeled prologue iterations are
2586 not known. Hence guards remain the same. */
2587 (void) add_stmt_cost (target_cost_data, 2, cond_branch_taken,
2588 NULL, 0, vect_prologue);
2589 (void) add_stmt_cost (target_cost_data, 2, cond_branch_not_taken,
2590 NULL, 0, vect_prologue);
2591 /* FORNOW: Don't attempt to pass individual scalar instructions to
2592 the model; just assume linear cost for scalar iterations. */
2593 (void) add_stmt_cost (target_cost_data,
2594 peel_iters_prologue * scalar_single_iter_cost,
2595 scalar_stmt, NULL, 0, vect_prologue);
2596 (void) add_stmt_cost (target_cost_data,
2597 peel_iters_epilogue * scalar_single_iter_cost,
2598 scalar_stmt, NULL, 0, vect_epilogue);
2599 }
2600 else
2601 {
2602 stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
2603 stmt_info_for_cost *si;
2604 int j;
2605 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2606
2607 prologue_cost_vec = VEC_alloc (stmt_info_for_cost, heap, 2);
2608 epilogue_cost_vec = VEC_alloc (stmt_info_for_cost, heap, 2);
2609 peel_iters_prologue = npeel;
2610
2611 (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
2612 &peel_iters_epilogue,
2613 scalar_single_iter_cost,
2614 &prologue_cost_vec,
2615 &epilogue_cost_vec);
2616
2617 FOR_EACH_VEC_ELT (stmt_info_for_cost, prologue_cost_vec, j, si)
2618 {
2619 struct _stmt_vec_info *stmt_info
2620 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
2621 (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
2622 si->misalign, vect_prologue);
2623 }
2624
2625 FOR_EACH_VEC_ELT (stmt_info_for_cost, epilogue_cost_vec, j, si)
2626 {
2627 struct _stmt_vec_info *stmt_info
2628 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
2629 (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
2630 si->misalign, vect_epilogue);
2631 }
2632
2633 VEC_free (stmt_info_for_cost, heap, prologue_cost_vec);
2634 VEC_free (stmt_info_for_cost, heap, epilogue_cost_vec);
2635 }
2636
2637 /* FORNOW: The scalar outside cost is incremented in one of the
2638 following ways:
2639
2640 1. The vectorizer checks for alignment and aliasing and generates
2641 a condition that allows dynamic vectorization. A cost model
2642 check is ANDED with the versioning condition. Hence scalar code
2643 path now has the added cost of the versioning check.
2644
2645 if (cost > th & versioning_check)
2646 jmp to vector code
2647
2648 Hence run-time scalar is incremented by not-taken branch cost.
2649
2650 2. The vectorizer then checks if a prologue is required. If the
2651 cost model check was not done before during versioning, it has to
2652 be done before the prologue check.
2653
2654 if (cost <= th)
2655 prologue = scalar_iters
2656 if (prologue == 0)
2657 jmp to vector code
2658 else
2659 execute prologue
2660 if (prologue == num_iters)
2661 go to exit
2662
2663 Hence the run-time scalar cost is incremented by a taken branch,
2664 plus a not-taken branch, plus a taken branch cost.
2665
2666 3. The vectorizer then checks if an epilogue is required. If the
2667 cost model check was not done before during prologue check, it
2668 has to be done with the epilogue check.
2669
2670 if (prologue == 0)
2671 jmp to vector code
2672 else
2673 execute prologue
2674 if (prologue == num_iters)
2675 go to exit
2676 vector code:
2677 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
2678 jmp to epilogue
2679
2680 Hence the run-time scalar cost should be incremented by 2 taken
2681 branches.
2682
2683 TODO: The back end may reorder the BBS's differently and reverse
2684 conditions/branch directions. Change the estimates below to
2685 something more reasonable. */
2686
2687 /* If the number of iterations is known and we do not do versioning, we can
2688 decide whether to vectorize at compile time. Hence the scalar version
2689      does not carry cost model guard costs.  */
2690 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2691 || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2692 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2693 {
2694 /* Cost model check occurs at versioning. */
2695 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2696 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2697 scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
2698 else
2699 {
2700 /* Cost model check occurs at prologue generation. */
2701 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2702 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
2703 + vect_get_stmt_cost (cond_branch_not_taken);
2704 /* Cost model check occurs at epilogue generation. */
2705 else
2706 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
2707 }
2708 }
2709
2710 /* Complete the target-specific cost calculations. */
2711 finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
2712 &vec_inside_cost, &vec_epilogue_cost);
2713
2714 vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);
2715
2716 /* Calculate number of iterations required to make the vector version
2717 profitable, relative to the loop bodies only. The following condition
2718 must hold true:
2719 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
2720 where
2721 SIC = scalar iteration cost, VIC = vector iteration cost,
2722 VOC = vector outside cost, VF = vectorization factor,
2723 PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
2724 SOC = scalar outside cost for run time cost model check. */
2725
2726 if ((scalar_single_iter_cost * vf) > (int) vec_inside_cost)
2727 {
2728 if (vec_outside_cost <= 0)
2729 min_profitable_iters = 1;
2730 else
2731 {
2732 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
2733 - vec_inside_cost * peel_iters_prologue
2734 - vec_inside_cost * peel_iters_epilogue)
2735 / ((scalar_single_iter_cost * vf)
2736 - vec_inside_cost);
2737
2738 if ((scalar_single_iter_cost * vf * min_profitable_iters)
2739 <= (((int) vec_inside_cost * min_profitable_iters)
2740 + (((int) vec_outside_cost - scalar_outside_cost) * vf)))
2741 min_profitable_iters++;
2742 }
2743 }
2744 /* vector version will never be profitable. */
2745 else
2746 {
2747 if (vect_print_dump_info (REPORT_COST))
2748 fprintf (vect_dump, "cost model: the vector iteration cost = %d "
2749 "divided by the scalar iteration cost = %d "
2750 "is greater or equal to the vectorization factor = %d.",
2751 vec_inside_cost, scalar_single_iter_cost, vf);
2752 return -1;
2753 }
2754
2755 if (vect_print_dump_info (REPORT_COST))
2756 {
2757 fprintf (vect_dump, "Cost model analysis: \n");
2758 fprintf (vect_dump, " Vector inside of loop cost: %d\n",
2759 vec_inside_cost);
2760 fprintf (vect_dump, " Vector prologue cost: %d\n",
2761 vec_prologue_cost);
2762 fprintf (vect_dump, " Vector epilogue cost: %d\n",
2763 vec_epilogue_cost);
2764 fprintf (vect_dump, " Scalar iteration cost: %d\n",
2765 scalar_single_iter_cost);
2766 fprintf (vect_dump, " Scalar outside cost: %d\n", scalar_outside_cost);
2767 fprintf (vect_dump, " prologue iterations: %d\n",
2768 peel_iters_prologue);
2769 fprintf (vect_dump, " epilogue iterations: %d\n",
2770 peel_iters_epilogue);
2771 fprintf (vect_dump, " Calculated minimum iters for profitability: %d\n",
2772 min_profitable_iters);
2773 }
2774
2775 min_profitable_iters =
2776 min_profitable_iters < vf ? vf : min_profitable_iters;
2777
2778 /* Because the condition we create is:
2779 if (niters <= min_profitable_iters)
2780 then skip the vectorized loop. */
2781 min_profitable_iters--;
2782
2783 if (vect_print_dump_info (REPORT_COST))
2784 fprintf (vect_dump, " Profitability threshold = %d\n",
2785 min_profitable_iters);
2786
2787 return min_profitable_iters;
2788 }
2789
2790
2791 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
2792 functions. Design better to avoid maintenance issues. */
2793
2794 /* Function vect_model_reduction_cost.
2795
2796 Models cost for a reduction operation, including the vector ops
2797 generated within the strip-mine loop, the initial definition before
2798 the loop, and the epilogue code that must be generated. */
2799
2800 static bool
2801 vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
2802 int ncopies)
2803 {
2804 int prologue_cost = 0, epilogue_cost = 0;
2805 enum tree_code code;
2806 optab optab;
2807 tree vectype;
2808 gimple stmt, orig_stmt;
2809 tree reduction_op;
2810 enum machine_mode mode;
2811 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2812 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2813 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2814
2815 /* Cost of reduction op inside loop. */
2816 unsigned inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
2817 stmt_info, 0, vect_body);
2818 stmt = STMT_VINFO_STMT (stmt_info);
2819
2820 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
2821 {
2822 case GIMPLE_SINGLE_RHS:
2823 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
2824 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
2825 break;
2826 case GIMPLE_UNARY_RHS:
2827 reduction_op = gimple_assign_rhs1 (stmt);
2828 break;
2829 case GIMPLE_BINARY_RHS:
2830 reduction_op = gimple_assign_rhs2 (stmt);
2831 break;
2832 case GIMPLE_TERNARY_RHS:
2833 reduction_op = gimple_assign_rhs3 (stmt);
2834 break;
2835 default:
2836 gcc_unreachable ();
2837 }
2838
2839 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
2840 if (!vectype)
2841 {
2842 if (vect_print_dump_info (REPORT_COST))
2843 {
2844 fprintf (vect_dump, "unsupported data-type ");
2845 print_generic_expr (vect_dump, TREE_TYPE (reduction_op), TDF_SLIM);
2846 }
2847 return false;
2848 }
2849
2850 mode = TYPE_MODE (vectype);
2851 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2852
2853 if (!orig_stmt)
2854 orig_stmt = STMT_VINFO_STMT (stmt_info);
2855
2856 code = gimple_assign_rhs_code (orig_stmt);
2857
2858 /* Add in cost for initial definition. */
2859 prologue_cost += add_stmt_cost (target_cost_data, 1, scalar_to_vec,
2860 stmt_info, 0, vect_prologue);
2861
2862 /* Determine cost of epilogue code.
2863
2864 We have a reduction operator that will reduce the vector in one statement.
2865 Also requires scalar extract. */
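
  /* For instance, for a four-element vector (illustrative counts only):
     with a reduction optab (REDUC_CODE != ERROR_MARK) this costs
     1 vector_stmt + 1 vec_to_scalar; with only whole-vector shifts it
     costs exact_log2 (4) * 2 = 4 vector stmts + 1 vec_to_scalar;
     otherwise 4 + 4 - 1 = 7 vector stmts (extracts plus scalar
     reduction ops).  */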
2866
2867 if (!nested_in_vect_loop_p (loop, orig_stmt))
2868 {
2869 if (reduc_code != ERROR_MARK)
2870 {
2871 epilogue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
2872 stmt_info, 0, vect_epilogue);
2873 epilogue_cost += add_stmt_cost (target_cost_data, 1, vec_to_scalar,
2874 stmt_info, 0, vect_epilogue);
2875 }
2876 else
2877 {
2878 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
2879 tree bitsize =
2880 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
2881 int element_bitsize = tree_low_cst (bitsize, 1);
2882 int nelements = vec_size_in_bits / element_bitsize;
2883
2884 optab = optab_for_tree_code (code, vectype, optab_default);
2885
2886 /* We have a whole vector shift available. */
2887 if (VECTOR_MODE_P (mode)
2888 && optab_handler (optab, mode) != CODE_FOR_nothing
2889 && optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
2890 {
2891 /* Final reduction via vector shifts and the reduction operator.
2892 Also requires scalar extract. */
2893 epilogue_cost += add_stmt_cost (target_cost_data,
2894 exact_log2 (nelements) * 2,
2895 vector_stmt, stmt_info, 0,
2896 vect_epilogue);
2897 epilogue_cost += add_stmt_cost (target_cost_data, 1,
2898 vec_to_scalar, stmt_info, 0,
2899 vect_epilogue);
2900 }
2901 else
2902 /* Use extracts and reduction op for final reduction. For N
2903 elements, we have N extracts and N-1 reduction ops. */
2904 epilogue_cost += add_stmt_cost (target_cost_data,
2905 nelements + nelements - 1,
2906 vector_stmt, stmt_info, 0,
2907 vect_epilogue);
2908 }
2909 }
2910
2911 if (vect_print_dump_info (REPORT_COST))
2912 fprintf (vect_dump, "vect_model_reduction_cost: inside_cost = %d, "
2913 "prologue_cost = %d, epilogue_cost = %d .", inside_cost,
2914 prologue_cost, epilogue_cost);
2915
2916 return true;
2917 }
2918
2919
2920 /* Function vect_model_induction_cost.
2921
2922 Models cost for induction operations. */
2923
2924 static void
2925 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
2926 {
2927 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2928 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2929 unsigned inside_cost, prologue_cost;
2930
2931 /* loop cost for vec_loop. */
2932 inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
2933 stmt_info, 0, vect_body);
2934
2935 /* prologue cost for vec_init and vec_step. */
2936 prologue_cost = add_stmt_cost (target_cost_data, 2, scalar_to_vec,
2937 stmt_info, 0, vect_prologue);
2938
2939 if (vect_print_dump_info (REPORT_COST))
2940 fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, "
2941 "prologue_cost = %d .", inside_cost, prologue_cost);
2942 }
2943
2944
2945 /* Function get_initial_def_for_induction
2946
2947 Input:
2948 STMT - a stmt that performs an induction operation in the loop.
2949 IV_PHI - the initial value of the induction variable
2950
2951 Output:
2952 Return a vector variable, initialized with the first VF values of
2953 the induction variable. E.g., for an iv with IV_PHI='X' and
2954 evolution S, for a vector of 4 units, we want to return:
2955 [X, X + S, X + 2*S, X + 3*S]. */
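
/* A concrete instance (an illustrative sketch): for X = 3, S = 2 and a
   4-unit vector this returns [3, 5, 7, 9]; inside the loop the vector IV
   is then advanced each vector iteration by
   vec_step = [VF*S, VF*S, VF*S, VF*S] = [8, 8, 8, 8], as set up below.  */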
2956
2957 static tree
2958 get_initial_def_for_induction (gimple iv_phi)
2959 {
2960 stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
2961 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2962 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2963 tree scalar_type;
2964 tree vectype;
2965 int nunits;
2966 edge pe = loop_preheader_edge (loop);
2967 struct loop *iv_loop;
2968 basic_block new_bb;
2969 tree vec, vec_init, vec_step, t;
2970 tree access_fn;
2971 tree new_var;
2972 tree new_name;
2973 gimple init_stmt, induction_phi, new_stmt;
2974 tree induc_def, vec_def, vec_dest;
2975 tree init_expr, step_expr;
2976 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2977 int i;
2978 bool ok;
2979 int ncopies;
2980 tree expr;
2981 stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
2982 bool nested_in_vect_loop = false;
2983 gimple_seq stmts = NULL;
2984 imm_use_iterator imm_iter;
2985 use_operand_p use_p;
2986 gimple exit_phi;
2987 edge latch_e;
2988 tree loop_arg;
2989 gimple_stmt_iterator si;
2990 basic_block bb = gimple_bb (iv_phi);
2991 tree stepvectype;
2992 tree resvectype;
2993
2994 /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop? */
2995 if (nested_in_vect_loop_p (loop, iv_phi))
2996 {
2997 nested_in_vect_loop = true;
2998 iv_loop = loop->inner;
2999 }
3000 else
3001 iv_loop = loop;
3002 gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);
3003
3004 latch_e = loop_latch_edge (iv_loop);
3005 loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);
3006
3007 access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi));
3008 gcc_assert (access_fn);
3009 STRIP_NOPS (access_fn);
3010 ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn,
3011 &init_expr, &step_expr);
3012 gcc_assert (ok);
3013 pe = loop_preheader_edge (iv_loop);
3014
3015 scalar_type = TREE_TYPE (init_expr);
3016 vectype = get_vectype_for_scalar_type (scalar_type);
3017 resvectype = get_vectype_for_scalar_type (TREE_TYPE (PHI_RESULT (iv_phi)));
3018 gcc_assert (vectype);
3019 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3020 ncopies = vf / nunits;
3021
3022 gcc_assert (phi_info);
3023 gcc_assert (ncopies >= 1);
3024
3025 /* Find the first insertion point in the BB. */
3026 si = gsi_after_labels (bb);
3027
3028 /* Create the vector that holds the initial_value of the induction. */
3029 if (nested_in_vect_loop)
3030 {
3031       /* iv_loop is nested in the loop to be vectorized.  init_expr has already
3032 been created during vectorization of previous stmts. We obtain it
3033 from the STMT_VINFO_VEC_STMT of the defining stmt. */
3034 tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi,
3035 loop_preheader_edge (iv_loop));
3036 vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL);
3037 }
3038 else
3039 {
3040 VEC(constructor_elt,gc) *v;
3041
3042 /* iv_loop is the loop to be vectorized. Create:
3043 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
3044 new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_");
3045 new_name = force_gimple_operand (init_expr, &stmts, false, new_var);
3046 if (stmts)
3047 {
3048 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3049 gcc_assert (!new_bb);
3050 }
3051
3052 v = VEC_alloc (constructor_elt, gc, nunits);
3053 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
3054 for (i = 1; i < nunits; i++)
3055 {
3056 /* Create: new_name_i = new_name + step_expr */
3057 enum tree_code code = POINTER_TYPE_P (scalar_type)
3058 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3059 init_stmt = gimple_build_assign_with_ops (code, new_var,
3060 new_name, step_expr);
3061 new_name = make_ssa_name (new_var, init_stmt);
3062 gimple_assign_set_lhs (init_stmt, new_name);
3063
3064 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
3065 gcc_assert (!new_bb);
3066
3067 if (vect_print_dump_info (REPORT_DETAILS))
3068 {
3069 fprintf (vect_dump, "created new init_stmt: ");
3070 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
3071 }
3072 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
3073 }
3074 /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */
3075 vec = build_constructor (vectype, v);
3076 vec_init = vect_init_vector (iv_phi, vec, vectype, NULL);
3077 }
3078
3079
3080 /* Create the vector that holds the step of the induction. */
3081 if (nested_in_vect_loop)
3082 /* iv_loop is nested in the loop to be vectorized. Generate:
3083 vec_step = [S, S, S, S] */
3084 new_name = step_expr;
3085 else
3086 {
3087 /* iv_loop is the loop to be vectorized. Generate:
3088 vec_step = [VF*S, VF*S, VF*S, VF*S] */
3089 expr = build_int_cst (TREE_TYPE (step_expr), vf);
3090 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
3091 expr, step_expr);
3092 }
3093
3094 t = unshare_expr (new_name);
3095 gcc_assert (CONSTANT_CLASS_P (new_name));
3096 stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
3097 gcc_assert (stepvectype);
3098 vec = build_vector_from_val (stepvectype, t);
3099 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
3100
3101
3102 /* Create the following def-use cycle:
3103 loop prolog:
3104 vec_init = ...
3105 vec_step = ...
3106 loop:
3107 vec_iv = PHI <vec_init, vec_loop>
3108 ...
3109 STMT
3110 ...
3111 vec_loop = vec_iv + vec_step; */
3112
3113 /* Create the induction-phi that defines the induction-operand. */
3114 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
3115 induction_phi = create_phi_node (vec_dest, iv_loop->header);
3116 set_vinfo_for_stmt (induction_phi,
3117 new_stmt_vec_info (induction_phi, loop_vinfo, NULL));
3118 induc_def = PHI_RESULT (induction_phi);
3119
3120 /* Create the iv update inside the loop */
3121 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
3122 induc_def, vec_step);
3123 vec_def = make_ssa_name (vec_dest, new_stmt);
3124 gimple_assign_set_lhs (new_stmt, vec_def);
3125 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3126 set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
3127 NULL));
3128
3129 /* Set the arguments of the phi node: */
3130 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
3131 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
3132 UNKNOWN_LOCATION);
3133
3134
3135   /* In case the vectorization factor (VF) is bigger than the number
3136      of elements that we can fit in a vectype (nunits), we have to generate
3137      more than one vector stmt - i.e., we need to "unroll" the
3138 vector stmt by a factor VF/nunits. For more details see documentation
3139 in vectorizable_operation. */
3140
3141 if (ncopies > 1)
3142 {
3143 stmt_vec_info prev_stmt_vinfo;
3144 /* FORNOW. This restriction should be relaxed. */
3145 gcc_assert (!nested_in_vect_loop);
3146
3147 /* Create the vector that holds the step of the induction. */
3148 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
3149 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
3150 expr, step_expr);
3151 t = unshare_expr (new_name);
3152 gcc_assert (CONSTANT_CLASS_P (new_name));
3153 vec = build_vector_from_val (stepvectype, t);
3154 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
3155
3156 vec_def = induc_def;
3157 prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
3158 for (i = 1; i < ncopies; i++)
3159 {
3160 /* vec_i = vec_prev + vec_step */
3161 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
3162 vec_def, vec_step);
3163 vec_def = make_ssa_name (vec_dest, new_stmt);
3164 gimple_assign_set_lhs (new_stmt, vec_def);
3165
3166 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3167 if (!useless_type_conversion_p (resvectype, vectype))
3168 {
3169 new_stmt = gimple_build_assign_with_ops
3170 (VIEW_CONVERT_EXPR,
3171 vect_get_new_vect_var (resvectype, vect_simple_var,
3172 "vec_iv_"),
3173 build1 (VIEW_CONVERT_EXPR, resvectype,
3174 gimple_assign_lhs (new_stmt)), NULL_TREE);
3175 gimple_assign_set_lhs (new_stmt,
3176 make_ssa_name
3177 (gimple_assign_lhs (new_stmt), new_stmt));
3178 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3179 }
3180 set_vinfo_for_stmt (new_stmt,
3181 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
3182 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
3183 prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
3184 }
3185 }
3186
3187 if (nested_in_vect_loop)
3188 {
3189 /* Find the loop-closed exit-phi of the induction, and record
3190 the final vector of induction results: */
3191 exit_phi = NULL;
3192 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
3193 {
3194 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (USE_STMT (use_p))))
3195 {
3196 exit_phi = USE_STMT (use_p);
3197 break;
3198 }
3199 }
3200 if (exit_phi)
3201 {
3202 stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
3203 /* FORNOW. Currently not supporting the case that an inner-loop induction
3204 is not used in the outer-loop (i.e. only outside the outer-loop). */
3205 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
3206 && !STMT_VINFO_LIVE_P (stmt_vinfo));
3207
3208 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
3209 if (vect_print_dump_info (REPORT_DETAILS))
3210 {
3211 fprintf (vect_dump, "vector of inductions after inner-loop:");
3212 print_gimple_stmt (vect_dump, new_stmt, 0, TDF_SLIM);
3213 }
3214 }
3215 }
3216
3217
3218 if (vect_print_dump_info (REPORT_DETAILS))
3219 {
3220 fprintf (vect_dump, "transform induction: created def-use cycle: ");
3221 print_gimple_stmt (vect_dump, induction_phi, 0, TDF_SLIM);
3222 fprintf (vect_dump, "\n");
3223 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (vec_def), 0, TDF_SLIM);
3224 }
3225
3226 STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
3227 if (!useless_type_conversion_p (resvectype, vectype))
3228 {
3229 new_stmt = gimple_build_assign_with_ops
3230 (VIEW_CONVERT_EXPR,
3231 vect_get_new_vect_var (resvectype, vect_simple_var, "vec_iv_"),
3232 build1 (VIEW_CONVERT_EXPR, resvectype, induc_def), NULL_TREE);
3233 induc_def = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt);
3234 gimple_assign_set_lhs (new_stmt, induc_def);
3235 si = gsi_start_bb (bb);
3236 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3237 set_vinfo_for_stmt (new_stmt,
3238 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
3239 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_stmt))
3240 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (induction_phi));
3241 }
3242
3243 return induc_def;
3244 }
3245
3246
3247 /* Function get_initial_def_for_reduction
3248
3249 Input:
3250 STMT - a stmt that performs a reduction operation in the loop.
3251 INIT_VAL - the initial value of the reduction variable
3252
3253 Output:
3254 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
3255 of the reduction (used for adjusting the epilog - see below).
3256 Return a vector variable, initialized according to the operation that STMT
3257 performs. This vector will be used as the initial value of the
3258 vector of partial results.
3259
3260 Option1 (adjust in epilog): Initialize the vector as follows:
3261 add/bit or/xor: [0,0,...,0,0]
3262 mult/bit and: [1,1,...,1,1]
3263 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
3264 and when necessary (e.g. add/mult case) let the caller know
3265 that it needs to adjust the result by init_val.
3266
3267 Option2: Initialize the vector as follows:
3268 add/bit or/xor: [init_val,0,0,...,0]
3269 mult/bit and: [init_val,1,1,...,1]
3270 min/max/cond_expr: [init_val,init_val,...,init_val]
3271 and no adjustments are needed.
3272
3273 For example, for the following code:
3274
3275 s = init_val;
3276 for (i=0;i<n;i++)
3277 s = s + a[i];
3278
3279 STMT is 's = s + a[i]', and the reduction variable is 's'.
3280 For a vector of 4 units, we want to return either [0,0,0,init_val],
3281 or [0,0,0,0] and let the caller know that it needs to adjust
3282 the result at the end by 'init_val'.
3283
3284 FORNOW, we are using the 'adjust in epilog' scheme, because this way the
3285 initialization vector is simpler (same element in all entries), if
3286 ADJUSTMENT_DEF is not NULL, and Option2 otherwise.
3287
3288 A cost model should help decide between these two schemes. */
3289
3290 tree
3291 get_initial_def_for_reduction (gimple stmt, tree init_val,
3292 tree *adjustment_def)
3293 {
3294 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
3295 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
3296 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3297 tree scalar_type = TREE_TYPE (init_val);
3298 tree vectype = get_vectype_for_scalar_type (scalar_type);
3299 int nunits;
3300 enum tree_code code = gimple_assign_rhs_code (stmt);
3301 tree def_for_init;
3302 tree init_def;
3303 tree *elts;
3304 int i;
3305 bool nested_in_vect_loop = false;
3306 tree init_value;
3307 REAL_VALUE_TYPE real_init_val = dconst0;
3308 int int_init_val = 0;
3309 gimple def_stmt = NULL;
3310
3311 gcc_assert (vectype);
3312 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3313
3314 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
3315 || SCALAR_FLOAT_TYPE_P (scalar_type));
3316
3317 if (nested_in_vect_loop_p (loop, stmt))
3318 nested_in_vect_loop = true;
3319 else
3320 gcc_assert (loop == (gimple_bb (stmt))->loop_father);
3321
3322 /* In case of double reduction we only create a vector variable to be put
3323 in the reduction phi node. The actual statement creation is done in
3324 vect_create_epilog_for_reduction. */
3325 if (adjustment_def && nested_in_vect_loop
3326 && TREE_CODE (init_val) == SSA_NAME
3327 && (def_stmt = SSA_NAME_DEF_STMT (init_val))
3328 && gimple_code (def_stmt) == GIMPLE_PHI
3329 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
3330 && vinfo_for_stmt (def_stmt)
3331 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
3332 == vect_double_reduction_def)
3333 {
3334 *adjustment_def = NULL;
3335 return vect_create_destination_var (init_val, vectype);
3336 }
3337
3338 if (TREE_CONSTANT (init_val))
3339 {
3340 if (SCALAR_FLOAT_TYPE_P (scalar_type))
3341 init_value = build_real (scalar_type, TREE_REAL_CST (init_val));
3342 else
3343 init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val));
3344 }
3345 else
3346 init_value = init_val;
3347
3348 switch (code)
3349 {
3350 case WIDEN_SUM_EXPR:
3351 case DOT_PROD_EXPR:
3352 case PLUS_EXPR:
3353 case MINUS_EXPR:
3354 case BIT_IOR_EXPR:
3355 case BIT_XOR_EXPR:
3356 case MULT_EXPR:
3357 case BIT_AND_EXPR:
3358       /* ADJUSTMENT_DEF is NULL when called from
3359 vect_create_epilog_for_reduction to vectorize double reduction. */
3360 if (adjustment_def)
3361 {
3362 if (nested_in_vect_loop)
3363 *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt,
3364 NULL);
3365 else
3366 *adjustment_def = init_val;
3367 }
3368
3369 if (code == MULT_EXPR)
3370 {
3371 real_init_val = dconst1;
3372 int_init_val = 1;
3373 }
3374
3375 if (code == BIT_AND_EXPR)
3376 int_init_val = -1;
3377
3378 if (SCALAR_FLOAT_TYPE_P (scalar_type))
3379 def_for_init = build_real (scalar_type, real_init_val);
3380 else
3381 def_for_init = build_int_cst (scalar_type, int_init_val);
3382
3383 /* Create a vector of '0' or '1' except the first element. */
3384 elts = XALLOCAVEC (tree, nunits);
3385 for (i = nunits - 2; i >= 0; --i)
3386 elts[i + 1] = def_for_init;
3387
3388 /* Option1: the first element is '0' or '1' as well. */
3389 if (adjustment_def)
3390 {
3391 elts[0] = def_for_init;
3392 init_def = build_vector (vectype, elts);
3393 break;
3394 }
3395
3396 /* Option2: the first element is INIT_VAL. */
3397 elts[0] = init_val;
3398 if (TREE_CONSTANT (init_val))
3399 init_def = build_vector (vectype, elts);
3400 else
3401 {
3402 VEC(constructor_elt,gc) *v;
3403 v = VEC_alloc (constructor_elt, gc, nunits);
3404 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init_val);
3405 for (i = 1; i < nunits; ++i)
3406 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[i]);
3407 init_def = build_constructor (vectype, v);
3408 }
3409
3410 break;
3411
3412 case MIN_EXPR:
3413 case MAX_EXPR:
3414 case COND_EXPR:
3415 if (adjustment_def)
3416 {
3417 *adjustment_def = NULL_TREE;
3418 init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
3419 break;
3420 }
3421
3422 init_def = build_vector_from_val (vectype, init_value);
3423 break;
3424
3425 default:
3426 gcc_unreachable ();
3427 }
3428
3429 return init_def;
3430 }
3431
3432
3433 /* Function vect_create_epilog_for_reduction
3434
3435 Create code at the loop-epilog to finalize the result of a reduction
3436 computation.
3437
3438 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
3439 reduction statements.
3440 STMT is the scalar reduction stmt that is being vectorized.
3441 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
3442 number of elements that we can fit in a vectype (nunits). In this case
3443    we have to generate more than one vector stmt - i.e., we need to "unroll"
3444 the vector stmt by a factor VF/nunits. For more details see documentation
3445 in vectorizable_operation.
3446 REDUC_CODE is the tree-code for the epilog reduction.
3447 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
3448 computation.
3449 REDUC_INDEX is the index of the operand in the right hand side of the
3450 statement that is defined by REDUCTION_PHI.
3451 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
3452 SLP_NODE is an SLP node containing a group of reduction statements. The
3453 first one in this group is STMT.
3454
3455 This function:
3456 1. Creates the reduction def-use cycles: sets the arguments for
3457 REDUCTION_PHIS:
3458 The loop-entry argument is the vectorized initial-value of the reduction.
3459 The loop-latch argument is taken from VECT_DEFS - the vector of partial
3460 sums.
3461 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
3462 by applying the operation specified by REDUC_CODE if available, or by
3463 other means (whole-vector shifts or a scalar loop).
3464 The function also creates a new phi node at the loop exit to preserve
3465 loop-closed form, as illustrated below.
3466
3467 The flow at the entry to this function:
3468
3469 loop:
3470 vec_def = phi <null, null> # REDUCTION_PHI
3471 VECT_DEF = vector_stmt # vectorized form of STMT
3472 s_loop = scalar_stmt # (scalar) STMT
3473 loop_exit:
3474 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3475 use <s_out0>
3476 use <s_out0>
3477
3478 The above is transformed by this function into:
3479
3480 loop:
3481 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3482 VECT_DEF = vector_stmt # vectorized form of STMT
3483 s_loop = scalar_stmt # (scalar) STMT
3484 loop_exit:
3485 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3486 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3487 v_out2 = reduce <v_out1>
3488 s_out3 = extract_field <v_out2, 0>
3489 s_out4 = adjust_result <s_out3>
3490 use <s_out4>
3491 use <s_out4>
3492 */
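/* Editorial illustration (a hedged sketch under the assumptions below, not
   part of the original source): for a plain integer summation

       int sum = init;
       for (i = 0; i < N; i++)
         sum += a[i];

   vectorized with a V4SI vectype, VECT_DEF holds four partial sums
   {s0, s1, s2, s3} when the loop exits.  The epilog built below then
   conceptually performs

       v_out2 = reduce <v_out1>            # some element now holds s0+s1+s2+s3
       s_out3 = extract_field <v_out2, 0>  # read that element out
       s_out4 = s_out3 + init              # add back the initial value

   and rewrites every use of the original scalar result to use s_out4.  */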
3493
3494 static void
3495 vect_create_epilog_for_reduction (VEC (tree, heap) *vect_defs, gimple stmt,
3496 int ncopies, enum tree_code reduc_code,
3497 VEC (gimple, heap) *reduction_phis,
3498 int reduc_index, bool double_reduc,
3499 slp_tree slp_node)
3500 {
3501 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3502 stmt_vec_info prev_phi_info;
3503 tree vectype;
3504 enum machine_mode mode;
3505 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3506 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
3507 basic_block exit_bb;
3508 tree scalar_dest;
3509 tree scalar_type;
3510 gimple new_phi = NULL, phi;
3511 gimple_stmt_iterator exit_gsi;
3512 tree vec_dest;
3513 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
3514 gimple epilog_stmt = NULL;
3515 enum tree_code code = gimple_assign_rhs_code (stmt);
3516 gimple exit_phi;
3517 tree bitsize, bitpos;
3518 tree adjustment_def = NULL;
3519 tree vec_initial_def = NULL;
3520 tree reduction_op, expr, def;
3521 tree orig_name, scalar_result;
3522 imm_use_iterator imm_iter, phi_imm_iter;
3523 use_operand_p use_p, phi_use_p;
3524 bool extract_scalar_result = false;
3525 gimple use_stmt, orig_stmt, reduction_phi = NULL;
3526 bool nested_in_vect_loop = false;
3527 VEC (gimple, heap) *new_phis = NULL;
3528 VEC (gimple, heap) *inner_phis = NULL;
3529 enum vect_def_type dt = vect_unknown_def_type;
3530 int j, i;
3531 VEC (tree, heap) *scalar_results = NULL;
3532 unsigned int group_size = 1, k, ratio;
3533 VEC (tree, heap) *vec_initial_defs = NULL;
3534 VEC (gimple, heap) *phis;
3535 bool slp_reduc = false;
3536 tree new_phi_result;
3537 gimple inner_phi = NULL;
3538
3539 if (slp_node)
3540 group_size = VEC_length (gimple, SLP_TREE_SCALAR_STMTS (slp_node));
3541
3542 if (nested_in_vect_loop_p (loop, stmt))
3543 {
3544 outer_loop = loop;
3545 loop = loop->inner;
3546 nested_in_vect_loop = true;
3547 gcc_assert (!slp_node);
3548 }
3549
3550 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3551 {
3552 case GIMPLE_SINGLE_RHS:
3553 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
3554 == ternary_op);
3555 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
3556 break;
3557 case GIMPLE_UNARY_RHS:
3558 reduction_op = gimple_assign_rhs1 (stmt);
3559 break;
3560 case GIMPLE_BINARY_RHS:
3561 reduction_op = reduc_index ?
3562 gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
3563 break;
3564 case GIMPLE_TERNARY_RHS:
3565 reduction_op = gimple_op (stmt, reduc_index + 1);
3566 break;
3567 default:
3568 gcc_unreachable ();
3569 }
3570
3571 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
3572 gcc_assert (vectype);
3573 mode = TYPE_MODE (vectype);
3574
3575 /* 1. Create the reduction def-use cycle:
3576 Set the arguments of REDUCTION_PHIS, i.e., transform
3577
3578 loop:
3579 vec_def = phi <null, null> # REDUCTION_PHI
3580 VECT_DEF = vector_stmt # vectorized form of STMT
3581 ...
3582
3583 into:
3584
3585 loop:
3586 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3587 VECT_DEF = vector_stmt # vectorized form of STMT
3588 ...
3589
3590 (in case of SLP, do it for all the phis). */
3591
3592 /* Get the loop-entry arguments. */
3593 if (slp_node)
3594 vect_get_vec_defs (reduction_op, NULL_TREE, stmt, &vec_initial_defs,
3595 NULL, slp_node, reduc_index);
3596 else
3597 {
3598 vec_initial_defs = VEC_alloc (tree, heap, 1);
3599 /* For the case of reduction, vect_get_vec_def_for_operand returns
3600 the scalar def before the loop, which defines the initial value
3601 of the reduction variable. */
3602 vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
3603 &adjustment_def);
3604 VEC_quick_push (tree, vec_initial_defs, vec_initial_def);
3605 }
3606
3607 /* Set phi nodes arguments. */
3608 FOR_EACH_VEC_ELT (gimple, reduction_phis, i, phi)
3609 {
3610 tree vec_init_def = VEC_index (tree, vec_initial_defs, i);
3611 tree def = VEC_index (tree, vect_defs, i);
3612 for (j = 0; j < ncopies; j++)
3613 {
3614 /* Set the loop-entry arg of the reduction-phi. */
3615 add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
3616 UNKNOWN_LOCATION);
3617
3618 /* Set the loop-latch arg for the reduction-phi. */
3619 if (j > 0)
3620 def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);
3621
3622 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
3623
3624 if (vect_print_dump_info (REPORT_DETAILS))
3625 {
3626 fprintf (vect_dump, "transform reduction: created def-use"
3627 " cycle: ");
3628 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
3629 fprintf (vect_dump, "\n");
3630 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (def), 0,
3631 TDF_SLIM);
3632 }
3633
3634 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
3635 }
3636 }
3637
3638 VEC_free (tree, heap, vec_initial_defs);
3639
3640 /* 2. Create epilog code.
3641 The reduction epilog code operates across the elements of the vector
3642 of partial results computed by the vectorized loop.
3643 The reduction epilog code consists of:
3644
3645 step 1: compute the scalar result in a vector (v_out2)
3646 step 2: extract the scalar result (s_out3) from the vector (v_out2)
3647 step 3: adjust the scalar result (s_out3) if needed.
3648
3649 Step 1 can be accomplished using one of the following three schemes:
3650 (scheme 1) using reduc_code, if available.
3651 (scheme 2) using whole-vector shifts, if available.
3652 (scheme 3) using a scalar loop. In this case steps 1+2 above are
3653 combined.
3654
3655 The overall epilog code looks like this:
3656
3657 s_out0 = phi <s_loop> # original EXIT_PHI
3658 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3659 v_out2 = reduce <v_out1> # step 1
3660 s_out3 = extract_field <v_out2, 0> # step 2
3661 s_out4 = adjust_result <s_out3> # step 3
3662
3663 (step 3 is optional, and steps 1 and 2 may be combined).
3664 Lastly, the uses of s_out0 are replaced by s_out4. */
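/* Editorial illustration of the three schemes (a hedged sketch, assuming a
   PLUS_EXPR reduction and a 4-element vector of partial sums in v_out1):

     scheme 1:  v_out2 = reduc_expr <v_out1>
                # one target reduction instruction; the sum ends up in one
                # element of v_out2 and step 2 extracts it.

     scheme 2:  shift v_out1 by half of the vector size and add it to
                itself, then by a quarter, and so on; after log2(nunits)
                shift/add pairs one element holds the full sum, which
                step 2 then extracts.

     scheme 3:  extract each element with BIT_FIELD_REF and accumulate
                them with scalar additions, so steps 1 and 2 are combined
                (in the SLP case the extracted values are kept separate
                instead of being accumulated).  */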
3665
3666
3667 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
3668 v_out1 = phi <VECT_DEF>
3669 Store them in NEW_PHIS. */
3670
3671 exit_bb = single_exit (loop)->dest;
3672 prev_phi_info = NULL;
3673 new_phis = VEC_alloc (gimple, heap, VEC_length (tree, vect_defs));
3674 FOR_EACH_VEC_ELT (tree, vect_defs, i, def)
3675 {
3676 for (j = 0; j < ncopies; j++)
3677 {
3678 phi = create_phi_node (SSA_NAME_VAR (def), exit_bb);
3679 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL));
3680 if (j == 0)
3681 VEC_quick_push (gimple, new_phis, phi);
3682 else
3683 {
3684 def = vect_get_vec_def_for_stmt_copy (dt, def);
3685 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
3686 }
3687
3688 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
3689 prev_phi_info = vinfo_for_stmt (phi);
3690 }
3691 }
3692
3693 /* The epilogue is created for the outer-loop, i.e., for the loop being
3694 vectorized. Create exit phis for the outer loop. */
3695 if (double_reduc)
3696 {
3697 loop = outer_loop;
3698 exit_bb = single_exit (loop)->dest;
3699 inner_phis = VEC_alloc (gimple, heap, VEC_length (tree, vect_defs));
3700 FOR_EACH_VEC_ELT (gimple, new_phis, i, phi)
3701 {
3702 gimple outer_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (phi)),
3703 exit_bb);
3704 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
3705 PHI_RESULT (phi));
3706 set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
3707 loop_vinfo, NULL));
3708 VEC_quick_push (gimple, inner_phis, phi);
3709 VEC_replace (gimple, new_phis, i, outer_phi);
3710 prev_phi_info = vinfo_for_stmt (outer_phi);
3711 while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)))
3712 {
3713 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
3714 outer_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (phi)),
3715 exit_bb);
3716 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
3717 PHI_RESULT (phi));
3718 set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
3719 loop_vinfo, NULL));
3720 STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi;
3721 prev_phi_info = vinfo_for_stmt (outer_phi);
3722 }
3723 }
3724 }
3725
3726 exit_gsi = gsi_after_labels (exit_bb);
3727
3728 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
3729 (i.e. when reduc_code is not available) and in the final adjustment
3730 code (if needed). Also get the original scalar reduction variable as
3731 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
3732 represents a reduction pattern), the tree-code and scalar-def are
3733 taken from the original stmt that the pattern-stmt (STMT) replaces.
3734 Otherwise (it is a regular reduction) - the tree-code and scalar-def
3735 are taken from STMT. */
3736
3737 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3738 if (!orig_stmt)
3739 {
3740 /* Regular reduction */
3741 orig_stmt = stmt;
3742 }
3743 else
3744 {
3745 /* Reduction pattern */
3746 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
3747 gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
3748 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
3749 }
3750
3751 code = gimple_assign_rhs_code (orig_stmt);
3752 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
3753 partial results are added and not subtracted. */
3754 if (code == MINUS_EXPR)
3755 code = PLUS_EXPR;
3756
3757 scalar_dest = gimple_assign_lhs (orig_stmt);
3758 scalar_type = TREE_TYPE (scalar_dest);
3759 scalar_results = VEC_alloc (tree, heap, group_size);
3760 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
3761 bitsize = TYPE_SIZE (scalar_type);
3762
3763 /* In case this is a reduction in an inner-loop while vectorizing an outer
3764 loop - we don't need to extract a single scalar result at the end of the
3765 inner-loop (unless it is a double reduction, i.e., the use of the reduction is
3766 outside the outer-loop). The final vector of partial results will be used
3767 in the vectorized outer-loop, or reduced to a scalar result at the end of
3768 the outer-loop. */
3769 if (nested_in_vect_loop && !double_reduc)
3770 goto vect_finalize_reduction;
3771
3772 /* SLP reduction without reduction chain, e.g.,
3773 # a1 = phi <a2, a0>
3774 # b1 = phi <b2, b0>
3775 a2 = operation (a1)
3776 b2 = operation (b1) */
3777 slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
3778
3779 /* In case of reduction chain, e.g.,
3780 # a1 = phi <a3, a0>
3781 a2 = operation (a1)
3782 a3 = operation (a2),
3783
3784 we may end up with more than one vector result. Here we reduce them to
3785 one vector. */
3786 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
3787 {
3788 tree first_vect = PHI_RESULT (VEC_index (gimple, new_phis, 0));
3789 tree tmp;
3790 gimple new_vec_stmt = NULL;
3791
3792 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3793 for (k = 1; k < VEC_length (gimple, new_phis); k++)
3794 {
3795 gimple next_phi = VEC_index (gimple, new_phis, k);
3796 tree second_vect = PHI_RESULT (next_phi);
3797
3798 tmp = build2 (code, vectype, first_vect, second_vect);
3799 new_vec_stmt = gimple_build_assign (vec_dest, tmp);
3800 first_vect = make_ssa_name (vec_dest, new_vec_stmt);
3801 gimple_assign_set_lhs (new_vec_stmt, first_vect);
3802 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
3803 }
3804
3805 new_phi_result = first_vect;
3806 if (new_vec_stmt)
3807 {
3808 VEC_truncate (gimple, new_phis, 0);
3809 VEC_safe_push (gimple, heap, new_phis, new_vec_stmt);
3810 }
3811 }
3812 else
3813 new_phi_result = PHI_RESULT (VEC_index (gimple, new_phis, 0));
3814
3815 /* 2.3 Create the reduction code, using one of the three schemes described
3816 above. In SLP we simply need to extract all the elements from the
3817 vector (without reducing them), so we extract them using scalar code.
3818 if (reduc_code != ERROR_MARK && !slp_reduc)
3819 {
3820 tree tmp;
3821
3822 /*** Case 1: Create:
3823 v_out2 = reduc_expr <v_out1> */
3824
3825 if (vect_print_dump_info (REPORT_DETAILS))
3826 fprintf (vect_dump, "Reduce using direct vector reduction.");
3827
3828 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3829 tmp = build1 (reduc_code, vectype, new_phi_result);
3830 epilog_stmt = gimple_build_assign (vec_dest, tmp);
3831 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3832 gimple_assign_set_lhs (epilog_stmt, new_temp);
3833 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3834
3835 extract_scalar_result = true;
3836 }
3837 else
3838 {
3839 enum tree_code shift_code = ERROR_MARK;
3840 bool have_whole_vector_shift = true;
3841 int bit_offset;
3842 int element_bitsize = tree_low_cst (bitsize, 1);
3843 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3844 tree vec_temp;
3845
3846 if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
3847 shift_code = VEC_RSHIFT_EXPR;
3848 else
3849 have_whole_vector_shift = false;
3850
3851 /* Regardless of whether we have a whole vector shift, if we're
3852 emulating the operation via tree-vect-generic, we don't want
3853 to use it. Only the first round of the reduction is likely
3854 to still be profitable via emulation. */
3855 /* ??? It might be better to emit a reduction tree code here, so that
3856 tree-vect-generic can expand the first round via bit tricks. */
3857 if (!VECTOR_MODE_P (mode))
3858 have_whole_vector_shift = false;
3859 else
3860 {
3861 optab optab = optab_for_tree_code (code, vectype, optab_default);
3862 if (optab_handler (optab, mode) == CODE_FOR_nothing)
3863 have_whole_vector_shift = false;
3864 }
3865
3866 if (have_whole_vector_shift && !slp_reduc)
3867 {
3868 /*** Case 2: Create:
3869 for (offset = VS/2; offset >= element_size; offset/=2)
3870 {
3871 Create: va' = vec_shift <va, offset>
3872 Create: va = vop <va, va'>
3873 } */
3874
3875 if (vect_print_dump_info (REPORT_DETAILS))
3876 fprintf (vect_dump, "Reduce using vector shifts");
3877
3878 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3879 new_temp = new_phi_result;
3880 for (bit_offset = vec_size_in_bits/2;
3881 bit_offset >= element_bitsize;
3882 bit_offset /= 2)
3883 {
3884 tree bitpos = size_int (bit_offset);
3885
3886 epilog_stmt = gimple_build_assign_with_ops (shift_code,
3887 vec_dest, new_temp, bitpos);
3888 new_name = make_ssa_name (vec_dest, epilog_stmt);
3889 gimple_assign_set_lhs (epilog_stmt, new_name);
3890 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3891
3892 epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
3893 new_name, new_temp);
3894 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3895 gimple_assign_set_lhs (epilog_stmt, new_temp);
3896 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3897 }
3898
3899 extract_scalar_result = true;
3900 }
3901 else
3902 {
3903 tree rhs;
3904
3905 /*** Case 3: Create:
3906 s = extract_field <v_out2, 0>
3907 for (offset = element_size;
3908 offset < vector_size;
3909 offset += element_size;)
3910 {
3911 Create: s' = extract_field <v_out2, offset>
3912 Create: s = op <s, s'> // For non SLP cases
3913 } */
3914
3915 if (vect_print_dump_info (REPORT_DETAILS))
3916 fprintf (vect_dump, "Reduce using scalar code. ");
3917
3918 vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3919 FOR_EACH_VEC_ELT (gimple, new_phis, i, new_phi)
3920 {
3921 if (gimple_code (new_phi) == GIMPLE_PHI)
3922 vec_temp = PHI_RESULT (new_phi);
3923 else
3924 vec_temp = gimple_assign_lhs (new_phi);
3925 rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
3926 bitsize_zero_node);
3927 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3928 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3929 gimple_assign_set_lhs (epilog_stmt, new_temp);
3930 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3931
3932 /* In SLP we don't need to apply the reduction operation, so we just
3933 collect s' values in SCALAR_RESULTS. */
3934 if (slp_reduc)
3935 VEC_safe_push (tree, heap, scalar_results, new_temp);
3936
3937 for (bit_offset = element_bitsize;
3938 bit_offset < vec_size_in_bits;
3939 bit_offset += element_bitsize)
3940 {
3941 tree bitpos = bitsize_int (bit_offset);
3942 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
3943 bitsize, bitpos);
3944
3945 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3946 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
3947 gimple_assign_set_lhs (epilog_stmt, new_name);
3948 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3949
3950 if (slp_reduc)
3951 {
3952 /* In SLP we don't need to apply the reduction operation, so
3953 we just collect s' values in SCALAR_RESULTS. */
3954 new_temp = new_name;
3955 VEC_safe_push (tree, heap, scalar_results, new_name);
3956 }
3957 else
3958 {
3959 epilog_stmt = gimple_build_assign_with_ops (code,
3960 new_scalar_dest, new_name, new_temp);
3961 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3962 gimple_assign_set_lhs (epilog_stmt, new_temp);
3963 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3964 }
3965 }
3966 }
3967
3968 /* The only case where we need to reduce scalar results in SLP is
3969 unrolling. If the size of SCALAR_RESULTS is greater than
3970 GROUP_SIZE, we reduce them by combining elements modulo
3971 GROUP_SIZE. */
3972 if (slp_reduc)
3973 {
3974 tree res, first_res, new_res;
3975 gimple new_stmt;
3976
3977 /* Reduce multiple scalar results in case of SLP unrolling. */
3978 for (j = group_size; VEC_iterate (tree, scalar_results, j, res);
3979 j++)
3980 {
3981 first_res = VEC_index (tree, scalar_results, j % group_size);
3982 new_stmt = gimple_build_assign_with_ops (code,
3983 new_scalar_dest, first_res, res);
3984 new_res = make_ssa_name (new_scalar_dest, new_stmt);
3985 gimple_assign_set_lhs (new_stmt, new_res);
3986 gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
3987 VEC_replace (tree, scalar_results, j % group_size, new_res);
3988 }
3989 }
3990 else
3991 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
3992 VEC_safe_push (tree, heap, scalar_results, new_temp);
3993
3994 extract_scalar_result = false;
3995 }
3996 }
3997
3998 /* 2.4 Extract the final scalar result. Create:
3999 s_out3 = extract_field <v_out2, bitpos> */
4000
4001 if (extract_scalar_result)
4002 {
4003 tree rhs;
4004
4005 if (vect_print_dump_info (REPORT_DETAILS))
4006 fprintf (vect_dump, "extract scalar result");
4007
4008 if (BYTES_BIG_ENDIAN)
4009 bitpos = size_binop (MULT_EXPR,
4010 bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
4011 TYPE_SIZE (scalar_type));
4012 else
4013 bitpos = bitsize_zero_node;
4014
4015 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
4016 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
4017 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
4018 gimple_assign_set_lhs (epilog_stmt, new_temp);
4019 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4020 VEC_safe_push (tree, heap, scalar_results, new_temp);
4021 }
4022
4023 vect_finalize_reduction:
4024
4025 if (double_reduc)
4026 loop = loop->inner;
4027
4028 /* 2.5 Adjust the final result by the initial value of the reduction
4029 variable. (When such adjustment is not needed, then
4030 'adjustment_def' is zero). For example, if code is PLUS we create:
4031 new_temp = loop_exit_def + adjustment_def */
4032
4033 if (adjustment_def)
4034 {
4035 gcc_assert (!slp_reduc);
4036 if (nested_in_vect_loop)
4037 {
4038 new_phi = VEC_index (gimple, new_phis, 0);
4039 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
4040 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
4041 new_dest = vect_create_destination_var (scalar_dest, vectype);
4042 }
4043 else
4044 {
4045 new_temp = VEC_index (tree, scalar_results, 0);
4046 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
4047 expr = build2 (code, scalar_type, new_temp, adjustment_def);
4048 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
4049 }
4050
4051 epilog_stmt = gimple_build_assign (new_dest, expr);
4052 new_temp = make_ssa_name (new_dest, epilog_stmt);
4053 gimple_assign_set_lhs (epilog_stmt, new_temp);
4054 SSA_NAME_DEF_STMT (new_temp) = epilog_stmt;
4055 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4056 if (nested_in_vect_loop)
4057 {
4058 set_vinfo_for_stmt (epilog_stmt,
4059 new_stmt_vec_info (epilog_stmt, loop_vinfo,
4060 NULL));
4061 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
4062 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
4063
4064 if (!double_reduc)
4065 VEC_quick_push (tree, scalar_results, new_temp);
4066 else
4067 VEC_replace (tree, scalar_results, 0, new_temp);
4068 }
4069 else
4070 VEC_replace (tree, scalar_results, 0, new_temp);
4071
4072 VEC_replace (gimple, new_phis, 0, epilog_stmt);
4073 }
4074
4075 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
4076 phis with new adjusted scalar results, i.e., replace use <s_out0>
4077 with use <s_out4>.
4078
4079 Transform:
4080 loop_exit:
4081 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4082 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4083 v_out2 = reduce <v_out1>
4084 s_out3 = extract_field <v_out2, 0>
4085 s_out4 = adjust_result <s_out3>
4086 use <s_out0>
4087 use <s_out0>
4088
4089 into:
4090
4091 loop_exit:
4092 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4093 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4094 v_out2 = reduce <v_out1>
4095 s_out3 = extract_field <v_out2, 0>
4096 s_out4 = adjust_result <s_out3>
4097 use <s_out4>
4098 use <s_out4> */
4099
4100
4101 /* In an SLP reduction chain we reduce the vector results into one vector if
4102 necessary, hence we set GROUP_SIZE to 1 here. SCALAR_DEST is the LHS of
4103 the last stmt in the reduction chain, since we are looking for the loop
4104 exit phi node. */
4105 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
4106 {
4107 scalar_dest = gimple_assign_lhs (VEC_index (gimple,
4108 SLP_TREE_SCALAR_STMTS (slp_node),
4109 group_size - 1));
4110 group_size = 1;
4111 }
4112
4113 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
4114 case GROUP_SIZE is greater than the vectorization factor). Therefore, we
4115 need to match SCALAR_RESULTS with corresponding statements. The first
4116 (GROUP_SIZE / number of new vector stmts) scalar results correspond to
4117 the first vector stmt, etc.
4118 (RATIO is equal to (GROUP_SIZE / number of new vector stmts)). */
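/* Worked example (editorial): with GROUP_SIZE == 4 and two vector stmts in
   NEW_PHIS, RATIO is 4 / 2 == 2, so scalar results 0 and 1 are matched with
   the first vector stmt and reduction phi, and scalar results 2 and 3 with
   the second -- which is what the K / RATIO indexing below implements.  */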
4119 if (group_size > VEC_length (gimple, new_phis))
4120 {
4121 ratio = group_size / VEC_length (gimple, new_phis);
4122 gcc_assert (!(group_size % VEC_length (gimple, new_phis)));
4123 }
4124 else
4125 ratio = 1;
4126
4127 for (k = 0; k < group_size; k++)
4128 {
4129 if (k % ratio == 0)
4130 {
4131 epilog_stmt = VEC_index (gimple, new_phis, k / ratio);
4132 reduction_phi = VEC_index (gimple, reduction_phis, k / ratio);
4133 if (double_reduc)
4134 inner_phi = VEC_index (gimple, inner_phis, k / ratio);
4135 }
4136
4137 if (slp_reduc)
4138 {
4139 gimple current_stmt = VEC_index (gimple,
4140 SLP_TREE_SCALAR_STMTS (slp_node), k);
4141
4142 orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
4143 /* SLP statements can't participate in patterns. */
4144 gcc_assert (!orig_stmt);
4145 scalar_dest = gimple_assign_lhs (current_stmt);
4146 }
4147
4148 phis = VEC_alloc (gimple, heap, 3);
4149 /* Find the loop-closed-use at the loop exit of the original scalar
4150 result. (The reduction result is expected to have two immediate uses -
4151 one at the latch block, and one at the loop exit). */
4152 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4153 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
4154 VEC_safe_push (gimple, heap, phis, USE_STMT (use_p));
4155
4156 /* We expect to have found an exit_phi because of loop-closed-ssa
4157 form. */
4158 gcc_assert (!VEC_empty (gimple, phis));
4159
4160 FOR_EACH_VEC_ELT (gimple, phis, i, exit_phi)
4161 {
4162 if (outer_loop)
4163 {
4164 stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
4165 gimple vect_phi;
4166
4167 /* FORNOW. Currently not supporting the case where an inner-loop
4168 reduction is not used in the outer-loop (but only outside the
4169 outer-loop), unless it is a double reduction. */
4170 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
4171 && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
4172 || double_reduc);
4173
4174 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
4175 if (!double_reduc
4176 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
4177 != vect_double_reduction_def)
4178 continue;
4179
4180 /* Handle double reduction:
4181
4182 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
4183 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
4184 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
4185 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
4186
4187 At that point the regular reduction (stmt2 and stmt3) is
4188 already vectorized, as well as the exit phi node, stmt4.
4189 Here we vectorize the phi node of double reduction, stmt1, and
4190 update all relevant statements. */
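/* Editorial note (a hedged sketch, not from the original source): such a
   double reduction typically corresponds to a loop nest of the form

       s = s0;
       for (j = 0; j < M; j++)
         for (i = 0; i < N; i++)
           s += a[j][i];

   where the outer loop is the one being vectorized: the inner loop carries
   the regular reduction (stmt2/stmt3 above) and the outer loop carries the
   double reduction phi (stmt1).  */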
4191
4192 /* Go through all the uses of s2 to find double reduction phi
4193 node, i.e., stmt1 above. */
4194 orig_name = PHI_RESULT (exit_phi);
4195 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
4196 {
4197 stmt_vec_info use_stmt_vinfo;
4198 stmt_vec_info new_phi_vinfo;
4199 tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
4200 basic_block bb = gimple_bb (use_stmt);
4201 gimple use;
4202
4203 /* Check that USE_STMT is really a double reduction phi
4204 node. */
4205 if (gimple_code (use_stmt) != GIMPLE_PHI
4206 || gimple_phi_num_args (use_stmt) != 2
4207 || bb->loop_father != outer_loop)
4208 continue;
4209 use_stmt_vinfo = vinfo_for_stmt (use_stmt);
4210 if (!use_stmt_vinfo
4211 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
4212 != vect_double_reduction_def)
4213 continue;
4214
4215 /* Create vector phi node for double reduction:
4216 vs1 = phi <vs0, vs2>
4217 vs1 was created previously in this function by a call to
4218 vect_get_vec_def_for_operand and is stored in
4219 vec_initial_def;
4220 vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
4221 vs0 is created here. */
4222
4223 /* Create vector phi node. */
4224 vect_phi = create_phi_node (vec_initial_def, bb);
4225 new_phi_vinfo = new_stmt_vec_info (vect_phi,
4226 loop_vec_info_for_loop (outer_loop), NULL);
4227 set_vinfo_for_stmt (vect_phi, new_phi_vinfo);
4228
4229 /* Create vs0 - initial def of the double reduction phi. */
4230 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
4231 loop_preheader_edge (outer_loop));
4232 init_def = get_initial_def_for_reduction (stmt,
4233 preheader_arg, NULL);
4234 vect_phi_init = vect_init_vector (use_stmt, init_def,
4235 vectype, NULL);
4236
4237 /* Update phi node arguments with vs0 and vs2. */
4238 add_phi_arg (vect_phi, vect_phi_init,
4239 loop_preheader_edge (outer_loop),
4240 UNKNOWN_LOCATION);
4241 add_phi_arg (vect_phi, PHI_RESULT (inner_phi),
4242 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
4243 if (vect_print_dump_info (REPORT_DETAILS))
4244 {
4245 fprintf (vect_dump, "created double reduction phi "
4246 "node: ");
4247 print_gimple_stmt (vect_dump, vect_phi, 0, TDF_SLIM);
4248 }
4249
4250 vect_phi_res = PHI_RESULT (vect_phi);
4251
4252 /* Replace the use, i.e., set the correct vs1 in the regular
4253 reduction phi node. FORNOW, NCOPIES is always 1, so the
4254 loop is redundant. */
4255 use = reduction_phi;
4256 for (j = 0; j < ncopies; j++)
4257 {
4258 edge pr_edge = loop_preheader_edge (loop);
4259 SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
4260 use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
4261 }
4262 }
4263 }
4264 }
4265
4266 VEC_free (gimple, heap, phis);
4267 if (nested_in_vect_loop)
4268 {
4269 if (double_reduc)
4270 loop = outer_loop;
4271 else
4272 continue;
4273 }
4274
4275 phis = VEC_alloc (gimple, heap, 3);
4276 /* Find the loop-closed-use at the loop exit of the original scalar
4277 result. (The reduction result is expected to have two immediate uses,
4278 one at the latch block, and one at the loop exit). For double
4279 reductions we are looking for exit phis of the outer loop. */
4280 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4281 {
4282 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
4283 VEC_safe_push (gimple, heap, phis, USE_STMT (use_p));
4284 else
4285 {
4286 if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
4287 {
4288 tree phi_res = PHI_RESULT (USE_STMT (use_p));
4289
4290 FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
4291 {
4292 if (!flow_bb_inside_loop_p (loop,
4293 gimple_bb (USE_STMT (phi_use_p))))
4294 VEC_safe_push (gimple, heap, phis,
4295 USE_STMT (phi_use_p));
4296 }
4297 }
4298 }
4299 }
4300
4301 FOR_EACH_VEC_ELT (gimple, phis, i, exit_phi)
4302 {
4303 /* Replace the uses: */
4304 orig_name = PHI_RESULT (exit_phi);
4305 scalar_result = VEC_index (tree, scalar_results, k);
4306 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
4307 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
4308 SET_USE (use_p, scalar_result);
4309 }
4310
4311 VEC_free (gimple, heap, phis);
4312 }
4313
4314 VEC_free (tree, heap, scalar_results);
4315 VEC_free (gimple, heap, new_phis);
4316 }
4317
4318
4319 /* Function vectorizable_reduction.
4320
4321 Check if STMT performs a reduction operation that can be vectorized.
4322 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4323 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4324 Return FALSE if not a vectorizable STMT, TRUE otherwise.
4325
4326 This function also handles reduction idioms (patterns) that have been
4327 recognized in advance during vect_pattern_recog. In this case, STMT may be
4328 of this form:
4329 X = pattern_expr (arg0, arg1, ..., X)
4330 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
4331 sequence that had been detected and replaced by the pattern-stmt (STMT).
4332
4333 In some cases of reduction patterns, the type of the reduction variable X is
4334 different than the type of the other arguments of STMT.
4335 In such cases, the vectype that is used when transforming STMT into a vector
4336 stmt is different than the vectype that is used to determine the
4337 vectorization factor, because it consists of a different number of elements
4338 than the actual number of elements that are being operated upon in parallel.
4339
4340 For example, consider an accumulation of shorts into an int accumulator.
4341 On some targets it's possible to vectorize this pattern operating on 8
4342 shorts at a time (hence, the vectype for purposes of determining the
4343 vectorization factor should be V8HI); on the other hand, the vectype that
4344 is used to create the vector form is actually V4SI (the type of the result).
4345
4346 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
4347 indicates the actual level of parallelism (V8HI in the example), so
4348 that the right vectorization factor would be derived. This vectype
4349 corresponds to the type of arguments to the reduction stmt, and should *NOT*
4350 be used to create the vectorized stmt. The right vectype for the vectorized
4351 stmt is obtained from the type of the result X:
4352 get_vectype_for_scalar_type (TREE_TYPE (X))
4353
4354 This means that, contrary to "regular" reductions (or "regular" stmts in
4355 general), the following equation:
4356 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
4357 does *NOT* necessarily hold for reduction patterns. */
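/* Editorial illustration (a hedged sketch of the example described above):
   the short-into-int accumulation typically originates from source such as

       short b[N];
       int sum = 0;
       for (i = 0; i < N; i++)
         sum += b[i];

   After pattern recognition STMT is a widening summation whose
   STMT_VINFO_VECTYPE (vectype_in) is V8HI -- eight shorts per vector
   iteration, which determines the vectorization factor -- while the vectype
   used to create the vectorized stmt (vectype_out) is V4SI, obtained from
   the type of the result 'sum'.  */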
4358
4359 bool
4360 vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
4361 gimple *vec_stmt, slp_tree slp_node)
4362 {
4363 tree vec_dest;
4364 tree scalar_dest;
4365 tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
4366 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4367 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4368 tree vectype_in = NULL_TREE;
4369 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4370 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4371 enum tree_code code, orig_code, epilog_reduc_code;
4372 enum machine_mode vec_mode;
4373 int op_type;
4374 optab optab, reduc_optab;
4375 tree new_temp = NULL_TREE;
4376 tree def;
4377 gimple def_stmt;
4378 enum vect_def_type dt;
4379 gimple new_phi = NULL;
4380 tree scalar_type;
4381 bool is_simple_use;
4382 gimple orig_stmt;
4383 stmt_vec_info orig_stmt_info;
4384 tree expr = NULL_TREE;
4385 int i;
4386 int ncopies;
4387 int epilog_copies;
4388 stmt_vec_info prev_stmt_info, prev_phi_info;
4389 bool single_defuse_cycle = false;
4390 tree reduc_def = NULL_TREE;
4391 gimple new_stmt = NULL;
4392 int j;
4393 tree ops[3];
4394 bool nested_cycle = false, found_nested_cycle_def = false;
4395 gimple reduc_def_stmt = NULL;
4396 /* The default is that the reduction variable is the last operand in the statement. */
4397 int reduc_index = 2;
4398 bool double_reduc = false, dummy;
4399 basic_block def_bb;
4400 struct loop * def_stmt_loop, *outer_loop = NULL;
4401 tree def_arg;
4402 gimple def_arg_stmt;
4403 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vect_defs = NULL;
4404 VEC (gimple, heap) *phis = NULL;
4405 int vec_num;
4406 tree def0, def1, tem, op0, op1 = NULL_TREE;
4407
4408 /* In case of reduction chain we switch to the first stmt in the chain, but
4409 we don't update STMT_INFO, since only the last stmt is marked as reduction
4410 and has reduction properties. */
4411 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
4412 stmt = GROUP_FIRST_ELEMENT (stmt_info);
4413
4414 if (nested_in_vect_loop_p (loop, stmt))
4415 {
4416 outer_loop = loop;
4417 loop = loop->inner;
4418 nested_cycle = true;
4419 }
4420
4421 /* 1. Is vectorizable reduction? */
4422 /* Not supportable if the reduction variable is used in the loop, unless
4423 it's a reduction chain. */
4424 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
4425 && !GROUP_FIRST_ELEMENT (stmt_info))
4426 return false;
4427
4428 /* Reductions that are not used even in an enclosing outer-loop
4429 are expected to be "live" (used out of the loop). */
4430 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
4431 && !STMT_VINFO_LIVE_P (stmt_info))
4432 return false;
4433
4434 /* Make sure it was already recognized as a reduction computation. */
4435 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
4436 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
4437 return false;
4438
4439 /* 2. Has this been recognized as a reduction pattern?
4440
4441 Check if STMT represents a pattern that has been recognized
4442 in earlier analysis stages. For stmts that represent a pattern,
4443 the STMT_VINFO_RELATED_STMT field records the last stmt in
4444 the original sequence that constitutes the pattern. */
4445
4446 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
4447 if (orig_stmt)
4448 {
4449 orig_stmt_info = vinfo_for_stmt (orig_stmt);
4450 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt);
4451 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
4452 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
4453 }
4454
4455 /* 3. Check the operands of the operation. The first operands are defined
4456 inside the loop body. The last operand is the reduction variable,
4457 which is defined by the loop-header-phi. */
4458
4459 gcc_assert (is_gimple_assign (stmt));
4460
4461 /* Flatten RHS. */
4462 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
4463 {
4464 case GIMPLE_SINGLE_RHS:
4465 op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
4466 if (op_type == ternary_op)
4467 {
4468 tree rhs = gimple_assign_rhs1 (stmt);
4469 ops[0] = TREE_OPERAND (rhs, 0);
4470 ops[1] = TREE_OPERAND (rhs, 1);
4471 ops[2] = TREE_OPERAND (rhs, 2);
4472 code = TREE_CODE (rhs);
4473 }
4474 else
4475 return false;
4476 break;
4477
4478 case GIMPLE_BINARY_RHS:
4479 code = gimple_assign_rhs_code (stmt);
4480 op_type = TREE_CODE_LENGTH (code);
4481 gcc_assert (op_type == binary_op);
4482 ops[0] = gimple_assign_rhs1 (stmt);
4483 ops[1] = gimple_assign_rhs2 (stmt);
4484 break;
4485
4486 case GIMPLE_TERNARY_RHS:
4487 code = gimple_assign_rhs_code (stmt);
4488 op_type = TREE_CODE_LENGTH (code);
4489 gcc_assert (op_type == ternary_op);
4490 ops[0] = gimple_assign_rhs1 (stmt);
4491 ops[1] = gimple_assign_rhs2 (stmt);
4492 ops[2] = gimple_assign_rhs3 (stmt);
4493 break;
4494
4495 case GIMPLE_UNARY_RHS:
4496 return false;
4497
4498 default:
4499 gcc_unreachable ();
4500 }
4501
4502 if (code == COND_EXPR && slp_node)
4503 return false;
4504
4505 scalar_dest = gimple_assign_lhs (stmt);
4506 scalar_type = TREE_TYPE (scalar_dest);
4507 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
4508 && !SCALAR_FLOAT_TYPE_P (scalar_type))
4509 return false;
4510
4511 /* Do not try to vectorize bit-precision reductions. */
4512 if ((TYPE_PRECISION (scalar_type)
4513 != GET_MODE_PRECISION (TYPE_MODE (scalar_type))))
4514 return false;
4515
4516 /* All uses but the last are expected to be defined in the loop.
4517 The last use is the reduction variable. In case of a nested cycle this
4518 assumption is not true: we use reduc_index to record the index of the
4519 reduction variable. */
4520 for (i = 0; i < op_type-1; i++)
4521 {
4522 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
4523 if (i == 0 && code == COND_EXPR)
4524 continue;
4525
4526 is_simple_use = vect_is_simple_use_1 (ops[i], stmt, loop_vinfo, NULL,
4527 &def_stmt, &def, &dt, &tem);
4528 if (!vectype_in)
4529 vectype_in = tem;
4530 gcc_assert (is_simple_use);
4531
4532 if (dt != vect_internal_def
4533 && dt != vect_external_def
4534 && dt != vect_constant_def
4535 && dt != vect_induction_def
4536 && !(dt == vect_nested_cycle && nested_cycle))
4537 return false;
4538
4539 if (dt == vect_nested_cycle)
4540 {
4541 found_nested_cycle_def = true;
4542 reduc_def_stmt = def_stmt;
4543 reduc_index = i;
4544 }
4545 }
4546
4547 is_simple_use = vect_is_simple_use_1 (ops[i], stmt, loop_vinfo, NULL,
4548 &def_stmt, &def, &dt, &tem);
4549 if (!vectype_in)
4550 vectype_in = tem;
4551 gcc_assert (is_simple_use);
4552 gcc_assert (dt == vect_reduction_def
4553 || dt == vect_nested_cycle
4554 || ((dt == vect_internal_def || dt == vect_external_def
4555 || dt == vect_constant_def || dt == vect_induction_def)
4556 && nested_cycle && found_nested_cycle_def));
4557 if (!found_nested_cycle_def)
4558 reduc_def_stmt = def_stmt;
4559
4560 gcc_assert (gimple_code (reduc_def_stmt) == GIMPLE_PHI);
4561 if (orig_stmt)
4562 gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo,
4563 reduc_def_stmt,
4564 !nested_cycle,
4565 &dummy));
4566 else
4567 {
4568 gimple tmp = vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
4569 !nested_cycle, &dummy);
4570 /* We changed STMT to be the first stmt in reduction chain, hence we
4571 check that in this case the first element in the chain is STMT. */
4572 gcc_assert (stmt == tmp
4573 || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt);
4574 }
4575
4576 if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
4577 return false;
4578
4579 if (slp_node || PURE_SLP_STMT (stmt_info))
4580 ncopies = 1;
4581 else
4582 ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4583 / TYPE_VECTOR_SUBPARTS (vectype_in));
4584
4585 gcc_assert (ncopies >= 1);
4586
4587 vec_mode = TYPE_MODE (vectype_in);
4588
4589 if (code == COND_EXPR)
4590 {
4591 if (!vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0, NULL))
4592 {
4593 if (vect_print_dump_info (REPORT_DETAILS))
4594 fprintf (vect_dump, "unsupported condition in reduction");
4595
4596 return false;
4597 }
4598 }
4599 else
4600 {
4601 /* 4. Supportable by target? */
4602
4603 /* 4.1. check support for the operation in the loop */
4604 optab = optab_for_tree_code (code, vectype_in, optab_default);
4605 if (!optab)
4606 {
4607 if (vect_print_dump_info (REPORT_DETAILS))
4608 fprintf (vect_dump, "no optab.");
4609
4610 return false;
4611 }
4612
4613 if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
4614 {
4615 if (vect_print_dump_info (REPORT_DETAILS))
4616 fprintf (vect_dump, "op not supported by target.");
4617
4618 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4619 || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4620 < vect_min_worthwhile_factor (code))
4621 return false;
4622
4623 if (vect_print_dump_info (REPORT_DETAILS))
4624 fprintf (vect_dump, "proceeding using word mode.");
4625 }
4626
4627 /* Worthwhile without SIMD support? */
4628 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
4629 && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4630 < vect_min_worthwhile_factor (code))
4631 {
4632 if (vect_print_dump_info (REPORT_DETAILS))
4633 fprintf (vect_dump, "not worthwhile without SIMD support.");
4634
4635 return false;
4636 }
4637 }
4638
4639 /* 4.2. Check support for the epilog operation.
4640
4641 If STMT represents a reduction pattern, then the type of the
4642 reduction variable may be different than the type of the rest
4643 of the arguments. For example, consider the case of accumulation
4644 of shorts into an int accumulator. The original code:
4645 S1: int_a = (int) short_a;
4646 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
4647
4648 was replaced with:
4649 STMT: int_acc = widen_sum <short_a, int_acc>
4650
4651 This means that:
4652 1. The tree-code that is used to create the vector operation in the
4653 epilog code (that reduces the partial results) is not the
4654 tree-code of STMT, but is rather the tree-code of the original
4655 stmt from the pattern that STMT is replacing. I.e, in the example
4656 above we want to use 'widen_sum' in the loop, but 'plus' in the
4657 epilog.
4658 2. The type (mode) we use to check available target support
4659 for the vector operation to be created in the *epilog*, is
4660 determined by the type of the reduction variable (in the example
4661 above we'd check this: optab_handler (plus_optab, vect_int_mode])).
4662 However the type (mode) we use to check available target support
4663 for the vector operation to be created *inside the loop*, is
4664 determined by the type of the other arguments to STMT (in the
4665 example we'd check this: optab_handler (widen_sum_optab,
4666 vect_short_mode)).
4667
4668 This is contrary to "regular" reductions, in which the types of all
4669 the arguments are the same as the type of the reduction variable.
4670 For "regular" reductions we can therefore use the same vector type
4671 (and also the same tree-code) when generating the epilog code and
4672 when generating the code inside the loop. */
4673
4674 if (orig_stmt)
4675 {
4676 /* This is a reduction pattern: get the vectype from the type of the
4677 reduction variable, and get the tree-code from orig_stmt. */
4678 orig_code = gimple_assign_rhs_code (orig_stmt);
4679 gcc_assert (vectype_out);
4680 vec_mode = TYPE_MODE (vectype_out);
4681 }
4682 else
4683 {
4684 /* Regular reduction: the same vectype and tree-code that are used for
4685 the vector code inside the loop can also be used for the epilog code. */
4686 orig_code = code;
4687 }
4688
4689 if (nested_cycle)
4690 {
4691 def_bb = gimple_bb (reduc_def_stmt);
4692 def_stmt_loop = def_bb->loop_father;
4693 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
4694 loop_preheader_edge (def_stmt_loop));
4695 if (TREE_CODE (def_arg) == SSA_NAME
4696 && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
4697 && gimple_code (def_arg_stmt) == GIMPLE_PHI
4698 && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
4699 && vinfo_for_stmt (def_arg_stmt)
4700 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
4701 == vect_double_reduction_def)
4702 double_reduc = true;
4703 }
4704
4705 epilog_reduc_code = ERROR_MARK;
4706 if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
4707 {
4708 reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out,
4709 optab_default);
4710 if (!reduc_optab)
4711 {
4712 if (vect_print_dump_info (REPORT_DETAILS))
4713 fprintf (vect_dump, "no optab for reduction.");
4714
4715 epilog_reduc_code = ERROR_MARK;
4716 }
4717
4718 if (reduc_optab
4719 && optab_handler (reduc_optab, vec_mode) == CODE_FOR_nothing)
4720 {
4721 if (vect_print_dump_info (REPORT_DETAILS))
4722 fprintf (vect_dump, "reduc op not supported by target.");
4723
4724 epilog_reduc_code = ERROR_MARK;
4725 }
4726 }
4727 else
4728 {
4729 if (!nested_cycle || double_reduc)
4730 {
4731 if (vect_print_dump_info (REPORT_DETAILS))
4732 fprintf (vect_dump, "no reduc code for scalar code.");
4733
4734 return false;
4735 }
4736 }
4737
4738 if (double_reduc && ncopies > 1)
4739 {
4740 if (vect_print_dump_info (REPORT_DETAILS))
4741 fprintf (vect_dump, "multiple types in double reduction");
4742
4743 return false;
4744 }
4745
4746 /* In case of widening multiplication by a constant, we update the type
4747 of the constant to be the type of the other operand. We check that the
4748 constant fits the type in the pattern recognition pass. */
4749 if (code == DOT_PROD_EXPR
4750 && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
4751 {
4752 if (TREE_CODE (ops[0]) == INTEGER_CST)
4753 ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
4754 else if (TREE_CODE (ops[1]) == INTEGER_CST)
4755 ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
4756 else
4757 {
4758 if (vect_print_dump_info (REPORT_DETAILS))
4759 fprintf (vect_dump, "invalid types in dot-prod");
4760
4761 return false;
4762 }
4763 }
4764
4765 if (!vec_stmt) /* transformation not required. */
4766 {
4767 if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
4768 return false;
4769 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
4770 return true;
4771 }
4772
4773 /** Transform. **/
4774
4775 if (vect_print_dump_info (REPORT_DETAILS))
4776 fprintf (vect_dump, "transform reduction.");
4777
4778 /* FORNOW: Multiple types are not supported for condition. */
4779 if (code == COND_EXPR)
4780 gcc_assert (ncopies == 1);
4781
4782 /* Create the destination vector */
4783 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
4784
4785 /* In case the vectorization factor (VF) is bigger than the number
4786 of elements that we can fit in a vectype (nunits), we have to generate
4787 more than one vector stmt - i.e - we need to "unroll" the
4788 vector stmt by a factor VF/nunits. For more details see documentation
4789 in vectorizable_operation. */
4790
4791 /* If the reduction is used in an outer loop we need to generate
4792 VF intermediate results, like so (e.g. for ncopies=2):
4793 r0 = phi (init, r0)
4794 r1 = phi (init, r1)
4795 r0 = x0 + r0;
4796 r1 = x1 + r1;
4797 (i.e. we generate VF results in 2 registers).
4798 In this case we have a separate def-use cycle for each copy, and therefore
4799 for each copy we get the vector def for the reduction variable from the
4800 respective phi node created for this copy.
4801
4802 Otherwise (the reduction is unused in the loop nest), we can combine
4803 together intermediate results, like so (e.g. for ncopies=2):
4804 r = phi (init, r)
4805 r = x0 + r;
4806 r = x1 + r;
4807 (i.e. we generate VF/2 results in a single register).
4808 In this case for each copy we get the vector def for the reduction variable
4809 from the vectorized reduction operation generated in the previous iteration.
4810 */
4811
4812 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope)
4813 {
4814 single_defuse_cycle = true;
4815 epilog_copies = 1;
4816 }
4817 else
4818 epilog_copies = ncopies;
4819
4820 prev_stmt_info = NULL;
4821 prev_phi_info = NULL;
4822 if (slp_node)
4823 {
4824 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
4825 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype_out)
4826 == TYPE_VECTOR_SUBPARTS (vectype_in));
4827 }
4828 else
4829 {
4830 vec_num = 1;
4831 vec_oprnds0 = VEC_alloc (tree, heap, 1);
4832 if (op_type == ternary_op)
4833 vec_oprnds1 = VEC_alloc (tree, heap, 1);
4834 }
4835
4836 phis = VEC_alloc (gimple, heap, vec_num);
4837 vect_defs = VEC_alloc (tree, heap, vec_num);
4838 if (!slp_node)
4839 VEC_quick_push (tree, vect_defs, NULL_TREE);
4840
4841 for (j = 0; j < ncopies; j++)
4842 {
4843 if (j == 0 || !single_defuse_cycle)
4844 {
4845 for (i = 0; i < vec_num; i++)
4846 {
4847 /* Create the reduction-phi that defines the reduction
4848 operand. */
4849 new_phi = create_phi_node (vec_dest, loop->header);
4850 set_vinfo_for_stmt (new_phi,
4851 new_stmt_vec_info (new_phi, loop_vinfo,
4852 NULL));
4853 if (j == 0 || slp_node)
4854 VEC_quick_push (gimple, phis, new_phi);
4855 }
4856 }
4857
4858 if (code == COND_EXPR)
4859 {
4860 gcc_assert (!slp_node);
4861 vectorizable_condition (stmt, gsi, vec_stmt,
4862 PHI_RESULT (VEC_index (gimple, phis, 0)),
4863 reduc_index, NULL);
4864 /* Multiple types are not supported for condition. */
4865 break;
4866 }
4867
4868 /* Handle uses. */
4869 if (j == 0)
4870 {
4871 op0 = ops[!reduc_index];
4872 if (op_type == ternary_op)
4873 {
4874 if (reduc_index == 0)
4875 op1 = ops[2];
4876 else
4877 op1 = ops[1];
4878 }
4879
4880 if (slp_node)
4881 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4882 slp_node, -1);
4883 else
4884 {
4885 loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
4886 stmt, NULL);
4887 VEC_quick_push (tree, vec_oprnds0, loop_vec_def0);
4888 if (op_type == ternary_op)
4889 {
4890 loop_vec_def1 = vect_get_vec_def_for_operand (op1, stmt,
4891 NULL);
4892 VEC_quick_push (tree, vec_oprnds1, loop_vec_def1);
4893 }
4894 }
4895 }
4896 else
4897 {
4898 if (!slp_node)
4899 {
4900 enum vect_def_type dt;
4901 gimple dummy_stmt;
4902 tree dummy;
4903
4904 vect_is_simple_use (ops[!reduc_index], stmt, loop_vinfo, NULL,
4905 &dummy_stmt, &dummy, &dt);
4906 loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt,
4907 loop_vec_def0);
4908 VEC_replace (tree, vec_oprnds0, 0, loop_vec_def0);
4909 if (op_type == ternary_op)
4910 {
4911 vect_is_simple_use (op1, stmt, loop_vinfo, NULL, &dummy_stmt,
4912 &dummy, &dt);
4913 loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt,
4914 loop_vec_def1);
4915 VEC_replace (tree, vec_oprnds1, 0, loop_vec_def1);
4916 }
4917 }
4918
4919 if (single_defuse_cycle)
4920 reduc_def = gimple_assign_lhs (new_stmt);
4921
4922 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
4923 }
4924
4925 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, def0)
4926 {
4927 if (slp_node)
4928 reduc_def = PHI_RESULT (VEC_index (gimple, phis, i));
4929 else
4930 {
4931 if (!single_defuse_cycle || j == 0)
4932 reduc_def = PHI_RESULT (new_phi);
4933 }
4934
4935 def1 = ((op_type == ternary_op)
4936 ? VEC_index (tree, vec_oprnds1, i) : NULL);
4937 if (op_type == binary_op)
4938 {
4939 if (reduc_index == 0)
4940 expr = build2 (code, vectype_out, reduc_def, def0);
4941 else
4942 expr = build2 (code, vectype_out, def0, reduc_def);
4943 }
4944 else
4945 {
4946 if (reduc_index == 0)
4947 expr = build3 (code, vectype_out, reduc_def, def0, def1);
4948 else
4949 {
4950 if (reduc_index == 1)
4951 expr = build3 (code, vectype_out, def0, reduc_def, def1);
4952 else
4953 expr = build3 (code, vectype_out, def0, def1, reduc_def);
4954 }
4955 }
4956
4957 new_stmt = gimple_build_assign (vec_dest, expr);
4958 new_temp = make_ssa_name (vec_dest, new_stmt);
4959 gimple_assign_set_lhs (new_stmt, new_temp);
4960 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4961
4962 if (slp_node)
4963 {
4964 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
4965 VEC_quick_push (tree, vect_defs, new_temp);
4966 }
4967 else
4968 VEC_replace (tree, vect_defs, 0, new_temp);
4969 }
4970
4971 if (slp_node)
4972 continue;
4973
4974 if (j == 0)
4975 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4976 else
4977 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4978
4979 prev_stmt_info = vinfo_for_stmt (new_stmt);
4980 prev_phi_info = vinfo_for_stmt (new_phi);
4981 }
4982
4983 /* Finalize the reduction-phi (set its arguments) and create the
4984 epilog reduction code. */
4985 if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
4986 {
4987 new_temp = gimple_assign_lhs (*vec_stmt);
4988 VEC_replace (tree, vect_defs, 0, new_temp);
4989 }
4990
4991 vect_create_epilog_for_reduction (vect_defs, stmt, epilog_copies,
4992 epilog_reduc_code, phis, reduc_index,
4993 double_reduc, slp_node);
4994
4995 VEC_free (gimple, heap, phis);
4996 VEC_free (tree, heap, vec_oprnds0);
4997 if (vec_oprnds1)
4998 VEC_free (tree, heap, vec_oprnds1);
4999
5000 return true;
5001 }
5002
5003 /* Function vect_min_worthwhile_factor.
5004
5005 For a loop where we could vectorize the operation indicated by CODE,
5006 return the minimum vectorization factor that makes it worthwhile
5007 to use generic vectors. */
5008 int
5009 vect_min_worthwhile_factor (enum tree_code code)
5010 {
5011 switch (code)
5012 {
5013 case PLUS_EXPR:
5014 case MINUS_EXPR:
5015 case NEGATE_EXPR:
5016 return 4;
5017
5018 case BIT_AND_EXPR:
5019 case BIT_IOR_EXPR:
5020 case BIT_XOR_EXPR:
5021 case BIT_NOT_EXPR:
5022 return 2;
5023
5024 default:
5025 return INT_MAX;
5026 }
5027 }
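/* Editorial note on vect_min_worthwhile_factor above: vectorizable_reduction
   uses it when the target has no vector instruction for the operation; the
   word-mode (generic vector) fallback is taken only if
   LOOP_VINFO_VECT_FACTOR (loop_vinfo) is at least this value and the vector
   size equals the word size, e.g. at least 4 for an addition and at least 2
   for a bitwise operation.  */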
5028
5029
5030 /* Function vectorizable_induction
5031
5032 Check if PHI performs an induction computation that can be vectorized.
5033 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
5034 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
5035 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5036
5037 bool
5038 vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
5039 gimple *vec_stmt)
5040 {
5041 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
5042 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5043 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5044 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5045 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5046 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5047 tree vec_def;
5048
5049 gcc_assert (ncopies >= 1);
5050 /* FORNOW. These restrictions should be relaxed. */
5051 if (nested_in_vect_loop_p (loop, phi))
5052 {
5053 imm_use_iterator imm_iter;
5054 use_operand_p use_p;
5055 gimple exit_phi;
5056 edge latch_e;
5057 tree loop_arg;
5058
5059 if (ncopies > 1)
5060 {
5061 if (vect_print_dump_info (REPORT_DETAILS))
5062 fprintf (vect_dump, "multiple types in nested loop.");
5063 return false;
5064 }
5065
5066 exit_phi = NULL;
5067 latch_e = loop_latch_edge (loop->inner);
5068 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
5069 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
5070 {
5071 if (!flow_bb_inside_loop_p (loop->inner,
5072 gimple_bb (USE_STMT (use_p))))
5073 {
5074 exit_phi = USE_STMT (use_p);
5075 break;
5076 }
5077 }
5078 if (exit_phi)
5079 {
5080 stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
5081 if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
5082 && !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
5083 {
5084 if (vect_print_dump_info (REPORT_DETAILS))
5085 fprintf (vect_dump, "inner-loop induction only used outside "
5086 "of the outer vectorized loop.");
5087 return false;
5088 }
5089 }
5090 }
5091
5092 if (!STMT_VINFO_RELEVANT_P (stmt_info))
5093 return false;
5094
5095 /* FORNOW: SLP not supported. */
5096 if (STMT_SLP_TYPE (stmt_info))
5097 return false;
5098
5099 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
5100
5101 if (gimple_code (phi) != GIMPLE_PHI)
5102 return false;
5103
5104 if (!vec_stmt) /* transformation not required. */
5105 {
5106 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
5107 if (vect_print_dump_info (REPORT_DETAILS))
5108 fprintf (vect_dump, "=== vectorizable_induction ===");
5109 vect_model_induction_cost (stmt_info, ncopies);
5110 return true;
5111 }
5112
5113 /** Transform. **/
5114
5115 if (vect_print_dump_info (REPORT_DETAILS))
5116 fprintf (vect_dump, "transform induction phi.");
5117
5118 vec_def = get_initial_def_for_induction (phi);
5119 *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
5120 return true;
5121 }
5122
5123 /* Function vectorizable_live_operation.
5124
5125 STMT computes a value that is used outside the loop. Check if
5126 it can be supported. */
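/* An illustrative (hypothetical) example of a statement this routine
   accepts: the value is computed from loop-invariant operands only and is
   used only after the loop, so the scalar statement can stay in place
   unvectorized and its last scalar value is used:

     for (i = 0; i < n; i++)
       {
         x[i] = y[i] + 1;
         t = a + b;        (a and b loop-invariant; t used only after
       }                    the loop, i.e. "live")
     use (t);

   Reductions, nested statements and statements with non-invariant operands
   are rejected below.  */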
5127
5128 bool
5129 vectorizable_live_operation (gimple stmt,
5130 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
5131 gimple *vec_stmt ATTRIBUTE_UNUSED)
5132 {
5133 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5134 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5135 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5136 int i;
5137 int op_type;
5138 tree op;
5139 tree def;
5140 gimple def_stmt;
5141 enum vect_def_type dt;
5142 enum tree_code code;
5143 enum gimple_rhs_class rhs_class;
5144
5145 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
5146
5147 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
5148 return false;
5149
5150 if (!is_gimple_assign (stmt))
5151 return false;
5152
5153 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5154 return false;
5155
5156 /* FORNOW. CHECKME. */
5157 if (nested_in_vect_loop_p (loop, stmt))
5158 return false;
5159
5160 code = gimple_assign_rhs_code (stmt);
5161 op_type = TREE_CODE_LENGTH (code);
5162 rhs_class = get_gimple_rhs_class (code);
5163 gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
5164 gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);
5165
5166 /* FORNOW: support only if all uses are invariant. This means
5167 that the scalar operations can remain in place, unvectorized.
5168 The original last scalar value that they compute will be used. */
5169
5170 for (i = 0; i < op_type; i++)
5171 {
5172 if (rhs_class == GIMPLE_SINGLE_RHS)
5173 op = TREE_OPERAND (gimple_op (stmt, 1), i);
5174 else
5175 op = gimple_op (stmt, i + 1);
5176 if (op
5177 && !vect_is_simple_use (op, stmt, loop_vinfo, NULL, &def_stmt, &def,
5178 &dt))
5179 {
5180 if (vect_print_dump_info (REPORT_DETAILS))
5181 fprintf (vect_dump, "use not simple.");
5182 return false;
5183 }
5184
5185 if (dt != vect_external_def && dt != vect_constant_def)
5186 return false;
5187 }
5188
5189 /* No transformation is required for the cases we currently support. */
5190 return true;
5191 }
5192
5193 /* Kill any debug uses outside LOOP of SSA names defined in STMT. */
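/* For illustration only (hypothetical names): if the loop defines x_3 and
   a debug bind such as

     # DEBUG x => x_3

   survives outside the loop, the bind's value is reset so the debug
   statement no longer references an SSA name that the transformation may
   remove or invalidate.  */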
5194
5195 static void
5196 vect_loop_kill_debug_uses (struct loop *loop, gimple stmt)
5197 {
5198 ssa_op_iter op_iter;
5199 imm_use_iterator imm_iter;
5200 def_operand_p def_p;
5201 gimple ustmt;
5202
5203 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
5204 {
5205 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
5206 {
5207 basic_block bb;
5208
5209 if (!is_gimple_debug (ustmt))
5210 continue;
5211
5212 bb = gimple_bb (ustmt);
5213
5214 if (!flow_bb_inside_loop_p (loop, bb))
5215 {
5216 if (gimple_debug_bind_p (ustmt))
5217 {
5218 if (vect_print_dump_info (REPORT_DETAILS))
5219 fprintf (vect_dump, "killing debug use");
5220
5221 gimple_debug_bind_reset_value (ustmt);
5222 update_stmt (ustmt);
5223 }
5224 else
5225 gcc_unreachable ();
5226 }
5227 }
5228 }
5229 }
5230
5231 /* Function vect_transform_loop.
5232
5233 The analysis phase has determined that the loop is vectorizable.
5234 Vectorize the loop - create vectorized stmts to replace the scalar
5235 stmts in the loop, and update the loop exit condition. */
5236
5237 void
5238 vect_transform_loop (loop_vec_info loop_vinfo)
5239 {
5240 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5241 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
5242 int nbbs = loop->num_nodes;
5243 gimple_stmt_iterator si;
5244 int i;
5245 tree ratio = NULL;
5246 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5247 bool grouped_store;
5248 bool slp_scheduled = false;
5249 unsigned int nunits;
5250 gimple stmt, pattern_stmt;
5251 gimple_seq pattern_def_seq = NULL;
5252 gimple_stmt_iterator pattern_def_si = gsi_none ();
5253 bool transform_pattern_stmt = false;
5254 bool check_profitability = false;
5255 int th;
5256
5257 if (vect_print_dump_info (REPORT_DETAILS))
5258 fprintf (vect_dump, "=== vec_transform_loop ===");
5259
5260 /* Use the more conservative vectorization threshold. If the number
5261 of iterations is constant, assume the cost check has been performed
5262 by our caller. If the threshold makes all loops profitable that
5263 run at least the vectorization factor number of times, checking
5264 is pointless, too. */
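/* A worked example with hypothetical numbers: for
   PARAM_MIN_VECT_LOOP_BOUND == 0, a vectorization factor of 4 and a
   cost-model minimum of 10 iterations, the first statement below gives
   th == -1, the MAX raises it to 10, and because 10 >= 4 - 1 and the
   iteration count is not known at compile time, a runtime profitability
   check for at least th iterations is requested from the peeling and
   versioning code via CHECK_PROFITABILITY.  */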
5265 th = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
5266 * LOOP_VINFO_VECT_FACTOR (loop_vinfo)) - 1);
5267 th = MAX (th, LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo));
5268 if (th >= LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1
5269 && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
5270 {
5271 if (vect_print_dump_info (REPORT_COST))
5272 fprintf (vect_dump,
5273 "Profitability threshold is %d loop iterations.", th);
5274 check_profitability = true;
5275 }
5276
5277 /* Peel the loop if there are data refs with unknown alignment.
5278 Only one data ref with unknown alignment is allowed. */
5279
5280 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
5281 {
5282 vect_do_peeling_for_alignment (loop_vinfo, th, check_profitability);
5283 check_profitability = false;
5284 }
5285
5286 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
5287 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
5288 {
5289 vect_loop_versioning (loop_vinfo, th, check_profitability);
5290 check_profitability = false;
5291 }
5292
5293 /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
5294 compile-time constant), or it is a constant that is not a multiple of the
5295 vectorization factor, then an epilog loop needs to be created.
5296 We therefore duplicate the loop: the original loop will be vectorized,
5297 and will compute the first (n/VF) iterations. The second copy of the loop
5298 will remain scalar and will compute the remaining (n%VF) iterations.
5299 (VF is the vectorization factor). */
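/* Illustrative numbers (not from the sources): for n == 13 and VF == 4 the
   vectorized loop executes n/VF == 3 iterations, covering 12 of the
   scalar iterations, and the scalar epilog loop executes the remaining
   n%VF == 1 iteration.  The same path is also taken when peeling for
   gaps is required.  */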
5300
5301 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
5302 || (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
5303 && LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0)
5304 || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
5305 vect_do_peeling_for_loop_bound (loop_vinfo, &ratio,
5306 th, check_profitability);
5307 else
5308 ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
5309 LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);
5310
5311 /* 1) Make sure the loop header has exactly two entries
5312 2) Make sure we have a preheader basic block. */
5313
5314 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
5315
5316 split_edge (loop_preheader_edge (loop));
5317
5318 /* FORNOW: the vectorizer supports only loops whose body consists
5319 of one basic block (header + empty latch). When the vectorizer
5320 supports more involved loop forms, the order in which the BBs are
5321 traversed will need to be reconsidered. */
5322
5323 for (i = 0; i < nbbs; i++)
5324 {
5325 basic_block bb = bbs[i];
5326 stmt_vec_info stmt_info;
5327 gimple phi;
5328
5329 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5330 {
5331 phi = gsi_stmt (si);
5332 if (vect_print_dump_info (REPORT_DETAILS))
5333 {
5334 fprintf (vect_dump, "------>vectorizing phi: ");
5335 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
5336 }
5337 stmt_info = vinfo_for_stmt (phi);
5338 if (!stmt_info)
5339 continue;
5340
5341 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
5342 vect_loop_kill_debug_uses (loop, phi);
5343
5344 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5345 && !STMT_VINFO_LIVE_P (stmt_info))
5346 continue;
5347
5348 if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
5349 != (unsigned HOST_WIDE_INT) vectorization_factor)
5350 && vect_print_dump_info (REPORT_DETAILS))
5351 fprintf (vect_dump, "multiple-types.");
5352
5353 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
5354 {
5355 if (vect_print_dump_info (REPORT_DETAILS))
5356 fprintf (vect_dump, "transform phi.");
5357 vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
5358 }
5359 }
5360
5361 pattern_stmt = NULL;
5362 for (si = gsi_start_bb (bb); !gsi_end_p (si) || transform_pattern_stmt;)
5363 {
5364 bool is_store;
5365
5366 if (transform_pattern_stmt)
5367 stmt = pattern_stmt;
5368 else
5369 stmt = gsi_stmt (si);
5370
5371 if (vect_print_dump_info (REPORT_DETAILS))
5372 {
5373 fprintf (vect_dump, "------>vectorizing statement: ");
5374 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
5375 }
5376
5377 stmt_info = vinfo_for_stmt (stmt);
5378
5379 /* vector stmts created in the outer-loop during vectorization of
5380 stmts in an inner-loop may not have a stmt_info, and do not
5381 need to be vectorized. */
5382 if (!stmt_info)
5383 {
5384 gsi_next (&si);
5385 continue;
5386 }
5387
5388 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
5389 vect_loop_kill_debug_uses (loop, stmt);
5390
5391 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5392 && !STMT_VINFO_LIVE_P (stmt_info))
5393 {
5394 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
5395 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
5396 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
5397 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
5398 {
5399 stmt = pattern_stmt;
5400 stmt_info = vinfo_for_stmt (stmt);
5401 }
5402 else
5403 {
5404 gsi_next (&si);
5405 continue;
5406 }
5407 }
5408 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
5409 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
5410 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
5411 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
5412 transform_pattern_stmt = true;
5413
5414 /* If pattern statement has def stmts, vectorize them too. */
5415 if (is_pattern_stmt_p (stmt_info))
5416 {
5417 if (pattern_def_seq == NULL)
5418 {
5419 pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
5420 pattern_def_si = gsi_start (pattern_def_seq);
5421 }
5422 else if (!gsi_end_p (pattern_def_si))
5423 gsi_next (&pattern_def_si);
5424 if (pattern_def_seq != NULL)
5425 {
5426 gimple pattern_def_stmt = NULL;
5427 stmt_vec_info pattern_def_stmt_info = NULL;
5428
5429 while (!gsi_end_p (pattern_def_si))
5430 {
5431 pattern_def_stmt = gsi_stmt (pattern_def_si);
5432 pattern_def_stmt_info
5433 = vinfo_for_stmt (pattern_def_stmt);
5434 if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
5435 || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
5436 break;
5437 gsi_next (&pattern_def_si);
5438 }
5439
5440 if (!gsi_end_p (pattern_def_si))
5441 {
5442 if (vect_print_dump_info (REPORT_DETAILS))
5443 {
5444 fprintf (vect_dump, "==> vectorizing pattern def"
5445 " stmt: ");
5446 print_gimple_stmt (vect_dump, pattern_def_stmt, 0,
5447 TDF_SLIM);
5448 }
5449
5450 stmt = pattern_def_stmt;
5451 stmt_info = pattern_def_stmt_info;
5452 }
5453 else
5454 {
5455 pattern_def_si = gsi_none ();
5456 transform_pattern_stmt = false;
5457 }
5458 }
5459 else
5460 transform_pattern_stmt = false;
5461 }
5462
5463 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
5464 nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (
5465 STMT_VINFO_VECTYPE (stmt_info));
5466 if (!STMT_SLP_TYPE (stmt_info)
5467 && nunits != (unsigned int) vectorization_factor
5468 && vect_print_dump_info (REPORT_DETAILS))
5469 /* For SLP, VF is set according to the unrolling factor, and not to the
5470 vector size; hence this diagnostic is not valid for SLP. */
5471 fprintf (vect_dump, "multiple-types.");
5472
5473 /* SLP. Schedule all the SLP instances when the first SLP stmt is
5474 reached. */
5475 if (STMT_SLP_TYPE (stmt_info))
5476 {
5477 if (!slp_scheduled)
5478 {
5479 slp_scheduled = true;
5480
5481 if (vect_print_dump_info (REPORT_DETAILS))
5482 fprintf (vect_dump, "=== scheduling SLP instances ===");
5483
5484 vect_schedule_slp (loop_vinfo, NULL);
5485 }
5486
5487 /* Hybrid SLP stmts must be vectorized in addition to SLP. */
5488 if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
5489 {
5490 if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
5491 {
5492 pattern_def_seq = NULL;
5493 gsi_next (&si);
5494 }
5495 continue;
5496 }
5497 }
5498
5499 /* -------- vectorize statement ------------ */
5500 if (vect_print_dump_info (REPORT_DETAILS))
5501 fprintf (vect_dump, "transform statement.");
5502
5503 grouped_store = false;
5504 is_store = vect_transform_stmt (stmt, &si, &grouped_store, NULL, NULL);
5505 if (is_store)
5506 {
5507 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
5508 {
5509 /* Interleaving. If IS_STORE is TRUE, the vectorization of the
5510 interleaving chain was completed - free all the stores in
5511 the chain. */
5512 gsi_next (&si);
5513 vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info));
5514 continue;
5515 }
5516 else
5517 {
5518 /* Free the attached stmt_vec_info and remove the stmt. */
5519 gimple store = gsi_stmt (si);
5520 free_stmt_vec_info (store);
5521 unlink_stmt_vdef (store);
5522 gsi_remove (&si, true);
5523 release_defs (store);
5524 continue;
5525 }
5526 }
5527
5528 if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
5529 {
5530 pattern_def_seq = NULL;
5531 gsi_next (&si);
5532 }
5533 } /* stmts in BB */
5534 } /* BBs in loop */
5535
5536 slpeel_make_loop_iterate_ntimes (loop, ratio);
5537
5538 /* The memory tags and pointers in vectorized statements need to
5539 have their SSA forms updated. FIXME, why can't this be delayed
5540 until all the loops have been transformed? */
5541 update_ssa (TODO_update_ssa);
5542
5543 if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
5544 fprintf (vect_dump, "LOOP VECTORIZED.");
5545 if (loop->inner && vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
5546 fprintf (vect_dump, "OUTER LOOP VECTORIZED.");
5547 }