gcc/tree-vect-loop.c
1 /* Loop Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com> and
5 Ira Rosen <irar@il.ibm.com>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "basic-block.h"
30 #include "tree-pretty-print.h"
31 #include "gimple-pretty-print.h"
32 #include "tree-flow.h"
33 #include "tree-dump.h"
34 #include "cfgloop.h"
35 #include "cfglayout.h"
36 #include "expr.h"
37 #include "recog.h"
38 #include "optabs.h"
39 #include "params.h"
40 #include "toplev.h"
41 #include "tree-chrec.h"
42 #include "tree-scalar-evolution.h"
43 #include "tree-vectorizer.h"
44 #include "target.h"
45
46 /* Loop Vectorization Pass.
47
48 This pass tries to vectorize loops.
49
50 For example, the vectorizer transforms the following simple loop:
51
52 short a[N]; short b[N]; short c[N]; int i;
53
54 for (i=0; i<N; i++){
55 a[i] = b[i] + c[i];
56 }
57
58 as if it were manually vectorized by rewriting the source code into:
59
60 typedef int __attribute__((mode(V8HI))) v8hi;
61 short a[N]; short b[N]; short c[N]; int i;
62 v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
63 v8hi va, vb, vc;
64
65 for (i=0; i<N/8; i++){
66 vb = pb[i];
67 vc = pc[i];
68 va = vb + vc;
69 pa[i] = va;
70 }
71
72 The main entry to this pass is vectorize_loops(), in which
73 the vectorizer applies a set of analyses on a given set of loops,
74 followed by the actual vectorization transformation for the loops that
75 had successfully passed the analysis phase.
76 Throughout this pass we make a distinction between two types of
77 data: scalars (which are represented by SSA_NAMES), and memory references
78 ("data-refs"). These two types of data require different handling both
79 during analysis and transformation. The types of data-refs that the
80 vectorizer currently supports are ARRAY_REFS whose base is an array DECL
81 (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
82 accesses are required to have a simple (consecutive) access pattern.
83
84 Analysis phase:
85 ===============
86 The driver for the analysis phase is vect_analyze_loop().
87 It applies a set of analyses, some of which rely on the scalar evolution
88 analyzer (scev) developed by Sebastian Pop.
89
90 During the analysis phase the vectorizer records some information
91 per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
92 loop, as well as general information about the loop as a whole, which is
93 recorded in a "loop_vec_info" struct attached to each loop.
94
95 Transformation phase:
96 =====================
97 The loop transformation phase scans all the stmts in the loop, and
98 creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
99 the loop that needs to be vectorized. It inserts the vector code sequence
100 just before the scalar stmt S, and records a pointer to the vector code
101 in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
102 attached to S). This pointer will be used for the vectorization of following
103 stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
104 otherwise, we rely on dead code elimination for removing it.
105
106 For example, say stmt S1 was vectorized into stmt VS1:
107
108 VS1: vb = px[i];
109 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
110 S2: a = b;
111
112 To vectorize stmt S2, the vectorizer first finds the stmt that defines
113 the operand 'b' (S1), and gets the relevant vector def 'vb' from the
114 vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
115 resulting sequence would be:
116
117 VS1: vb = px[i];
118 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
119 VS2: va = vb;
120 S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
121
122 Operands that are not SSA_NAMEs are data-refs that appear in
123 load/store operations (like 'x[i]' in S1), and are handled differently.
124
125 Target modeling:
126 =================
127 Currently the only target specific information that is used is the
128 size of the vector (in bytes) - "UNITS_PER_SIMD_WORD". Targets that can
129 support different sizes of vectors will, for now, need to specify one value
130 for "UNITS_PER_SIMD_WORD". More flexibility will be added in the future.
131
132 Since we only vectorize operations whose vector form can be
133 expressed using existing tree codes, to verify that an operation is
134 supported, the vectorizer checks the relevant optab at the relevant
135 machine_mode (e.g., optab_handler (add_optab, V8HImode)->insn_code). If
136 the value found is CODE_FOR_nothing, then there's no target support, and
137 we can't vectorize the stmt.
138
139 For additional information on this project see:
140 http://gcc.gnu.org/projects/tree-ssa/vectorization.html
141 */
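/* Illustration only (a sketch of the target-support check described in the
   "Target modeling" section above, not code used at this point): support
   for, say, a V8HImode addition is queried roughly as

     optab_handler (add_optab, V8HImode)->insn_code != CODE_FOR_nothing

   and a stmt whose operation/mode combination yields CODE_FOR_nothing is
   rejected as not vectorizable.  */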
142
143 /* Function vect_determine_vectorization_factor
144
145 Determine the vectorization factor (VF). VF is the number of data elements
146 that are operated upon in parallel in a single iteration of the vectorized
147 loop. For example, when vectorizing a loop that operates on 4-byte elements,
148 on a target with a vector size (VS) of 16 bytes, the VF is set to 4, since 4
149 elements can fit in a single vector register.
150
151 We currently support vectorization of loops in which all types operated upon
152 are of the same size. Therefore this function currently sets VF according to
153 the size of the types operated upon, and fails if there are multiple sizes
154 in the loop.
155
156 VF is also the factor by which the loop iterations are strip-mined, e.g.:
157 original loop:
158 for (i=0; i<N; i++){
159 a[i] = b[i] + c[i];
160 }
161
162 vectorized loop:
163 for (i=0; i<N; i+=VF){
164 a[i:VF] = b[i:VF] + c[i:VF];
165 }
166 */
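/* A worked example (illustration only): assuming 16-byte vector registers,
   a loop in which all stmts operate on 4-byte "int" elements gets
   VF = 16/4 = 4, while one operating on 2-byte "short" elements gets
   VF = 16/2 = 8, matching the v8hi example in the file header comment.  */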
167
168 static bool
169 vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
170 {
171 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
172 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
173 int nbbs = loop->num_nodes;
174 gimple_stmt_iterator si;
175 unsigned int vectorization_factor = 0;
176 tree scalar_type;
177 gimple phi;
178 tree vectype;
179 unsigned int nunits;
180 stmt_vec_info stmt_info;
181 int i;
182 HOST_WIDE_INT dummy;
183
184 if (vect_print_dump_info (REPORT_DETAILS))
185 fprintf (vect_dump, "=== vect_determine_vectorization_factor ===");
186
187 for (i = 0; i < nbbs; i++)
188 {
189 basic_block bb = bbs[i];
190
191 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
192 {
193 phi = gsi_stmt (si);
194 stmt_info = vinfo_for_stmt (phi);
195 if (vect_print_dump_info (REPORT_DETAILS))
196 {
197 fprintf (vect_dump, "==> examining phi: ");
198 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
199 }
200
201 gcc_assert (stmt_info);
202
203 if (STMT_VINFO_RELEVANT_P (stmt_info))
204 {
205 gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
206 scalar_type = TREE_TYPE (PHI_RESULT (phi));
207
208 if (vect_print_dump_info (REPORT_DETAILS))
209 {
210 fprintf (vect_dump, "get vectype for scalar type: ");
211 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
212 }
213
214 vectype = get_vectype_for_scalar_type (scalar_type);
215 if (!vectype)
216 {
217 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
218 {
219 fprintf (vect_dump,
220 "not vectorized: unsupported data-type ");
221 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
222 }
223 return false;
224 }
225 STMT_VINFO_VECTYPE (stmt_info) = vectype;
226
227 if (vect_print_dump_info (REPORT_DETAILS))
228 {
229 fprintf (vect_dump, "vectype: ");
230 print_generic_expr (vect_dump, vectype, TDF_SLIM);
231 }
232
233 nunits = TYPE_VECTOR_SUBPARTS (vectype);
234 if (vect_print_dump_info (REPORT_DETAILS))
235 fprintf (vect_dump, "nunits = %d", nunits);
236
237 if (!vectorization_factor
238 || (nunits > vectorization_factor))
239 vectorization_factor = nunits;
240 }
241 }
242
243 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
244 {
245 tree vf_vectype;
246 gimple stmt = gsi_stmt (si);
247 stmt_info = vinfo_for_stmt (stmt);
248
249 if (vect_print_dump_info (REPORT_DETAILS))
250 {
251 fprintf (vect_dump, "==> examining statement: ");
252 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
253 }
254
255 gcc_assert (stmt_info);
256
257 /* skip stmts which do not need to be vectorized. */
258 if (!STMT_VINFO_RELEVANT_P (stmt_info)
259 && !STMT_VINFO_LIVE_P (stmt_info))
260 {
261 if (vect_print_dump_info (REPORT_DETAILS))
262 fprintf (vect_dump, "skip.");
263 continue;
264 }
265
266 if (gimple_get_lhs (stmt) == NULL_TREE)
267 {
268 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
269 {
270 fprintf (vect_dump, "not vectorized: irregular stmt.");
271 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
272 }
273 return false;
274 }
275
276 if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
277 {
278 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
279 {
280 fprintf (vect_dump, "not vectorized: vector stmt in loop:");
281 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
282 }
283 return false;
284 }
285
286 if (STMT_VINFO_VECTYPE (stmt_info))
287 {
288 /* The only case when a vectype has already been set is for stmts
289 that contain a dataref, or for "pattern-stmts" (stmts generated
290 by the vectorizer to represent/replace a certain idiom). */
291 gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
292 || is_pattern_stmt_p (stmt_info));
293 vectype = STMT_VINFO_VECTYPE (stmt_info);
294 }
295 else
296 {
297 gcc_assert (!STMT_VINFO_DATA_REF (stmt_info)
298 && !is_pattern_stmt_p (stmt_info));
299
300 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
301 if (vect_print_dump_info (REPORT_DETAILS))
302 {
303 fprintf (vect_dump, "get vectype for scalar type: ");
304 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
305 }
306 vectype = get_vectype_for_scalar_type (scalar_type);
307 if (!vectype)
308 {
309 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
310 {
311 fprintf (vect_dump,
312 "not vectorized: unsupported data-type ");
313 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
314 }
315 return false;
316 }
317
318 STMT_VINFO_VECTYPE (stmt_info) = vectype;
319 }
320
321 /* The vectorization factor is determined by the smallest
322 scalar type (or the largest vector size, but we only
323 support one vector size per loop). */
324 scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
325 &dummy);
326 if (vect_print_dump_info (REPORT_DETAILS))
327 {
328 fprintf (vect_dump, "get vectype for scalar type: ");
329 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
330 }
331 vf_vectype = get_vectype_for_scalar_type (scalar_type);
332 if (!vf_vectype)
333 {
334 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
335 {
336 fprintf (vect_dump,
337 "not vectorized: unsupported data-type ");
338 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
339 }
340 return false;
341 }
342
343 if ((GET_MODE_SIZE (TYPE_MODE (vectype))
344 != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
345 {
346 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
347 {
348 fprintf (vect_dump,
349 "not vectorized: different sized vector "
350 "types in statement, ");
351 print_generic_expr (vect_dump, vectype, TDF_SLIM);
352 fprintf (vect_dump, " and ");
353 print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
354 }
355 return false;
356 }
357
358 if (vect_print_dump_info (REPORT_DETAILS))
359 {
360 fprintf (vect_dump, "vectype: ");
361 print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
362 }
363
364 nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
365 if (vect_print_dump_info (REPORT_DETAILS))
366 fprintf (vect_dump, "nunits = %d", nunits);
367
368 if (!vectorization_factor
369 || (nunits > vectorization_factor))
370 vectorization_factor = nunits;
371 }
372 }
373
374 /* TODO: Analyze cost. Decide if worthwhile to vectorize. */
375 if (vect_print_dump_info (REPORT_DETAILS))
376 fprintf (vect_dump, "vectorization factor = %d", vectorization_factor);
377 if (vectorization_factor <= 1)
378 {
379 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
380 fprintf (vect_dump, "not vectorized: unsupported data-type");
381 return false;
382 }
383 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
384
385 return true;
386 }
387
388
389 /* Function vect_is_simple_iv_evolution.
390
391 FORNOW: A simple evolution of an induction variable in the loop is
392 considered a polynomial evolution with constant step. */
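/* Illustration: for an induction variable of the form "i = phi (i0, i + 3)"
   the scalar evolution analyzer describes the value as the chrec
   {i0, +, 3}_loop, and this function returns INIT = i0 and STEP = 3.  An
   evolution whose step is itself a chrec (a polynomial of degree >= 2) is
   rejected as not "simple".  */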
393
394 static bool
395 vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
396 tree * step)
397 {
398 tree init_expr;
399 tree step_expr;
400 tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
401
402 /* When there is no evolution in this loop, the evolution function
403 is not "simple". */
404 if (evolution_part == NULL_TREE)
405 return false;
406
407 /* When the evolution is a polynomial of degree >= 2
408 the evolution function is not "simple". */
409 if (tree_is_chrec (evolution_part))
410 return false;
411
412 step_expr = evolution_part;
413 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
414
415 if (vect_print_dump_info (REPORT_DETAILS))
416 {
417 fprintf (vect_dump, "step: ");
418 print_generic_expr (vect_dump, step_expr, TDF_SLIM);
419 fprintf (vect_dump, ", init: ");
420 print_generic_expr (vect_dump, init_expr, TDF_SLIM);
421 }
422
423 *init = init_expr;
424 *step = step_expr;
425
426 if (TREE_CODE (step_expr) != INTEGER_CST)
427 {
428 if (vect_print_dump_info (REPORT_DETAILS))
429 fprintf (vect_dump, "step unknown.");
430 return false;
431 }
432
433 return true;
434 }
435
436 /* Function vect_analyze_scalar_cycles_1.
437
438 Examine the cross iteration def-use cycles of scalar variables
439 in LOOP. LOOP_VINFO represents the loop that is now being
440 considered for vectorization (can be LOOP, or an outer-loop
441 enclosing LOOP). */
442
443 static void
444 vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
445 {
446 basic_block bb = loop->header;
447 tree dumy;
448 VEC(gimple,heap) *worklist = VEC_alloc (gimple, heap, 64);
449 gimple_stmt_iterator gsi;
450 bool double_reduc;
451
452 if (vect_print_dump_info (REPORT_DETAILS))
453 fprintf (vect_dump, "=== vect_analyze_scalar_cycles ===");
454
455 /* First - identify all inductions. Reduction detection assumes that all the
456 inductions have been identified; therefore, this order must not be
457 changed. */
458 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
459 {
460 gimple phi = gsi_stmt (gsi);
461 tree access_fn = NULL;
462 tree def = PHI_RESULT (phi);
463 stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
464
465 if (vect_print_dump_info (REPORT_DETAILS))
466 {
467 fprintf (vect_dump, "Analyze phi: ");
468 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
469 }
470
471 /* Skip virtual phi's. The data dependences that are associated with
472 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
473 if (!is_gimple_reg (SSA_NAME_VAR (def)))
474 continue;
475
476 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;
477
478 /* Analyze the evolution function. */
479 access_fn = analyze_scalar_evolution (loop, def);
480 if (access_fn && vect_print_dump_info (REPORT_DETAILS))
481 {
482 fprintf (vect_dump, "Access function of PHI: ");
483 print_generic_expr (vect_dump, access_fn, TDF_SLIM);
484 }
485
486 if (!access_fn
487 || !vect_is_simple_iv_evolution (loop->num, access_fn, &dumy, &dumy))
488 {
489 VEC_safe_push (gimple, heap, worklist, phi);
490 continue;
491 }
492
493 if (vect_print_dump_info (REPORT_DETAILS))
494 fprintf (vect_dump, "Detected induction.");
495 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
496 }
497
498
499 /* Second - identify all reductions and nested cycles. */
500 while (VEC_length (gimple, worklist) > 0)
501 {
502 gimple phi = VEC_pop (gimple, worklist);
503 tree def = PHI_RESULT (phi);
504 stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
505 gimple reduc_stmt;
506 bool nested_cycle;
507
508 if (vect_print_dump_info (REPORT_DETAILS))
509 {
510 fprintf (vect_dump, "Analyze phi: ");
511 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
512 }
513
514 gcc_assert (is_gimple_reg (SSA_NAME_VAR (def)));
515 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
516
517 nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
518 reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, !nested_cycle,
519 &double_reduc);
520 if (reduc_stmt)
521 {
522 if (double_reduc)
523 {
524 if (vect_print_dump_info (REPORT_DETAILS))
525 fprintf (vect_dump, "Detected double reduction.");
526
527 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
528 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
529 vect_double_reduction_def;
530 }
531 else
532 {
533 if (nested_cycle)
534 {
535 if (vect_print_dump_info (REPORT_DETAILS))
536 fprintf (vect_dump, "Detected vectorizable nested cycle.");
537
538 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
539 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
540 vect_nested_cycle;
541 }
542 else
543 {
544 if (vect_print_dump_info (REPORT_DETAILS))
545 fprintf (vect_dump, "Detected reduction.");
546
547 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
548 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
549 vect_reduction_def;
550 /* Store the reduction cycles for possible vectorization in
551 loop-aware SLP. */
552 VEC_safe_push (gimple, heap,
553 LOOP_VINFO_REDUCTIONS (loop_vinfo),
554 reduc_stmt);
555 }
556 }
557 }
558 else
559 if (vect_print_dump_info (REPORT_DETAILS))
560 fprintf (vect_dump, "Unknown def-use cycle pattern.");
561 }
562
563 VEC_free (gimple, heap, worklist);
564 }
565
566
567 /* Function vect_analyze_scalar_cycles.
568
569 Examine the cross iteration def-use cycles of scalar variables, by
570 analyzing the loop-header PHIs of scalar variables; classify each
571 cycle as one of the following: invariant, induction, reduction, unknown.
572 We do that for the loop represented by LOOP_VINFO, and also for its
573 inner-loop, if it exists.
574 Examples for scalar cycles:
575
576 Example1: reduction:
577
578 loop1:
579 for (i=0; i<N; i++)
580 sum += a[i];
581
582 Example2: induction:
583
584 loop2:
585 for (i=0; i<N; i++)
586 a[i] = i; */
587
588 static void
589 vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
590 {
591 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
592
593 vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
594
595 /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
596 Reductions in such an inner-loop therefore have different properties than
597 the reductions in the nest that gets vectorized:
598 1. When vectorized, they are executed in the same order as in the original
599 scalar loop, so we can't change the order of computation when
600 vectorizing them.
601 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
602 current checks are too strict. */
603
604 if (loop->inner)
605 vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
606 }
607
608 /* Function vect_get_loop_niters.
609
610 Determine the number of iterations the loop executes.
611 If an expression that represents the number of iterations
612 can be constructed, place it in NUMBER_OF_ITERATIONS.
613 Return the loop exit condition. */
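/* Illustration: for a simple counted loop such as "for (i = 0; i < N; i++)"
   (in the do-while form this pass requires), the expression stored in
   NUMBER_OF_ITERATIONS is the loop's iteration count (N in this sketch),
   provided the scalar evolution framework can compute it; otherwise it is
   left untouched and only the exit condition is returned.  */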
614
615 static gimple
616 vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
617 {
618 tree niters;
619
620 if (vect_print_dump_info (REPORT_DETAILS))
621 fprintf (vect_dump, "=== get_loop_niters ===");
622
623 niters = number_of_exit_cond_executions (loop);
624
625 if (niters != NULL_TREE
626 && niters != chrec_dont_know)
627 {
628 *number_of_iterations = niters;
629
630 if (vect_print_dump_info (REPORT_DETAILS))
631 {
632 fprintf (vect_dump, "==> get_loop_niters:" );
633 print_generic_expr (vect_dump, *number_of_iterations, TDF_SLIM);
634 }
635 }
636
637 return get_loop_exit_condition (loop);
638 }
639
640
641 /* Function bb_in_loop_p
642
643 Used as predicate for dfs order traversal of the loop bbs. */
644
645 static bool
646 bb_in_loop_p (const_basic_block bb, const void *data)
647 {
648 const struct loop *const loop = (const struct loop *)data;
649 if (flow_bb_inside_loop_p (loop, bb))
650 return true;
651 return false;
652 }
653
654
655 /* Function new_loop_vec_info.
656
657 Create and initialize a new loop_vec_info struct for LOOP, as well as
658 stmt_vec_info structs for all the stmts in LOOP. */
659
660 static loop_vec_info
661 new_loop_vec_info (struct loop *loop)
662 {
663 loop_vec_info res;
664 basic_block *bbs;
665 gimple_stmt_iterator si;
666 unsigned int i, nbbs;
667
668 res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
669 LOOP_VINFO_LOOP (res) = loop;
670
671 bbs = get_loop_body (loop);
672
673 /* Create/Update stmt_info for all stmts in the loop. */
674 for (i = 0; i < loop->num_nodes; i++)
675 {
676 basic_block bb = bbs[i];
677
678 /* BBs in a nested inner-loop will have been already processed (because
679 we will have called vect_analyze_loop_form for any nested inner-loop).
680 Therefore, for stmts in an inner-loop we just want to update the
681 STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
682 loop_info of the outer-loop we are currently considering to vectorize
683 (instead of the loop_info of the inner-loop).
684 For stmts in other BBs we need to create a stmt_info from scratch. */
685 if (bb->loop_father != loop)
686 {
687 /* Inner-loop bb. */
688 gcc_assert (loop->inner && bb->loop_father == loop->inner);
689 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
690 {
691 gimple phi = gsi_stmt (si);
692 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
693 loop_vec_info inner_loop_vinfo =
694 STMT_VINFO_LOOP_VINFO (stmt_info);
695 gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
696 STMT_VINFO_LOOP_VINFO (stmt_info) = res;
697 }
698 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
699 {
700 gimple stmt = gsi_stmt (si);
701 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
702 loop_vec_info inner_loop_vinfo =
703 STMT_VINFO_LOOP_VINFO (stmt_info);
704 gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
705 STMT_VINFO_LOOP_VINFO (stmt_info) = res;
706 }
707 }
708 else
709 {
710 /* bb in current nest. */
711 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
712 {
713 gimple phi = gsi_stmt (si);
714 gimple_set_uid (phi, 0);
715 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL));
716 }
717
718 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
719 {
720 gimple stmt = gsi_stmt (si);
721 gimple_set_uid (stmt, 0);
722 set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL));
723 }
724 }
725 }
726
727 /* CHECKME: We want to visit all BBs before their successors (except for
728 latch blocks, for which this assertion wouldn't hold). In the simple
729 case of the loop forms we allow, a dfs order of the BBs would be the same
730 as reversed postorder traversal, so we are safe. */
731
732 free (bbs);
733 bbs = XCNEWVEC (basic_block, loop->num_nodes);
734 nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
735 bbs, loop->num_nodes, loop);
736 gcc_assert (nbbs == loop->num_nodes);
737
738 LOOP_VINFO_BBS (res) = bbs;
739 LOOP_VINFO_NITERS (res) = NULL;
740 LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
741 LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
742 LOOP_VINFO_VECTORIZABLE_P (res) = 0;
743 LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
744 LOOP_VINFO_VECT_FACTOR (res) = 0;
745 LOOP_VINFO_DATAREFS (res) = VEC_alloc (data_reference_p, heap, 10);
746 LOOP_VINFO_DDRS (res) = VEC_alloc (ddr_p, heap, 10 * 10);
747 LOOP_VINFO_UNALIGNED_DR (res) = NULL;
748 LOOP_VINFO_MAY_MISALIGN_STMTS (res) =
749 VEC_alloc (gimple, heap,
750 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
751 LOOP_VINFO_MAY_ALIAS_DDRS (res) =
752 VEC_alloc (ddr_p, heap,
753 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
754 LOOP_VINFO_STRIDED_STORES (res) = VEC_alloc (gimple, heap, 10);
755 LOOP_VINFO_REDUCTIONS (res) = VEC_alloc (gimple, heap, 10);
756 LOOP_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 10);
757 LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;
758
759 return res;
760 }
761
762
763 /* Function destroy_loop_vec_info.
764
765 Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the
766 stmts in the loop. */
767
768 void
769 destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
770 {
771 struct loop *loop;
772 basic_block *bbs;
773 int nbbs;
774 gimple_stmt_iterator si;
775 int j;
776 VEC (slp_instance, heap) *slp_instances;
777 slp_instance instance;
778
779 if (!loop_vinfo)
780 return;
781
782 loop = LOOP_VINFO_LOOP (loop_vinfo);
783
784 bbs = LOOP_VINFO_BBS (loop_vinfo);
785 nbbs = loop->num_nodes;
786
787 if (!clean_stmts)
788 {
789 free (LOOP_VINFO_BBS (loop_vinfo));
790 free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
791 free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
792 VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
793
794 free (loop_vinfo);
795 loop->aux = NULL;
796 return;
797 }
798
799 for (j = 0; j < nbbs; j++)
800 {
801 basic_block bb = bbs[j];
802 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
803 free_stmt_vec_info (gsi_stmt (si));
804
805 for (si = gsi_start_bb (bb); !gsi_end_p (si); )
806 {
807 gimple stmt = gsi_stmt (si);
808 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
809
810 if (stmt_info)
811 {
812 /* Check if this is a "pattern stmt" (introduced by the
813 vectorizer during the pattern recognition pass). */
814 bool remove_stmt_p = false;
815 gimple orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
816 if (orig_stmt)
817 {
818 stmt_vec_info orig_stmt_info = vinfo_for_stmt (orig_stmt);
819 if (orig_stmt_info
820 && STMT_VINFO_IN_PATTERN_P (orig_stmt_info))
821 remove_stmt_p = true;
822 }
823
824 /* Free stmt_vec_info. */
825 free_stmt_vec_info (stmt);
826
827 /* Remove dead "pattern stmts". */
828 if (remove_stmt_p)
829 gsi_remove (&si, true);
830 }
831 gsi_next (&si);
832 }
833 }
834
835 free (LOOP_VINFO_BBS (loop_vinfo));
836 free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
837 free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
838 VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
839 VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
840 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
841 for (j = 0; VEC_iterate (slp_instance, slp_instances, j, instance); j++)
842 vect_free_slp_instance (instance);
843
844 VEC_free (slp_instance, heap, LOOP_VINFO_SLP_INSTANCES (loop_vinfo));
845 VEC_free (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo));
846 VEC_free (gimple, heap, LOOP_VINFO_REDUCTIONS (loop_vinfo));
847
848 free (loop_vinfo);
849 loop->aux = NULL;
850 }
851
852
853 /* Function vect_analyze_loop_1.
854
855 Apply a set of analyses on LOOP, and create a loop_vec_info struct
856 for it. The different analyses will record information in the
857 loop_vec_info struct. This is a subset of the analyses applied in
858 vect_analyze_loop, to be applied on an inner-loop nested in the loop
859 that is now considered for (outer-loop) vectorization. */
860
861 static loop_vec_info
862 vect_analyze_loop_1 (struct loop *loop)
863 {
864 loop_vec_info loop_vinfo;
865
866 if (vect_print_dump_info (REPORT_DETAILS))
867 fprintf (vect_dump, "===== analyze_loop_nest_1 =====");
868
869 /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.). */
870
871 loop_vinfo = vect_analyze_loop_form (loop);
872 if (!loop_vinfo)
873 {
874 if (vect_print_dump_info (REPORT_DETAILS))
875 fprintf (vect_dump, "bad inner-loop form.");
876 return NULL;
877 }
878
879 return loop_vinfo;
880 }
881
882
883 /* Function vect_analyze_loop_form.
884
885 Verify that certain CFG restrictions hold, including:
886 - the loop has a pre-header
887 - the loop has a single entry and exit
888 - the loop exit condition is simple enough, and the number of iterations
889 can be analyzed (a countable loop). */
890
891 loop_vec_info
892 vect_analyze_loop_form (struct loop *loop)
893 {
894 loop_vec_info loop_vinfo;
895 gimple loop_cond;
896 tree number_of_iterations = NULL;
897 loop_vec_info inner_loop_vinfo = NULL;
898
899 if (vect_print_dump_info (REPORT_DETAILS))
900 fprintf (vect_dump, "=== vect_analyze_loop_form ===");
901
902 /* Different restrictions apply when we are considering an inner-most loop,
903 vs. an outer (nested) loop.
904 (FORNOW. May want to relax some of these restrictions in the future). */
905
906 if (!loop->inner)
907 {
908 /* Inner-most loop. We currently require that the number of BBs is
909 exactly 2 (the header and latch). Vectorizable inner-most loops
910 look like this:
911
912 (pre-header)
913 |
914 header <--------+
915 | | |
916 | +--> latch --+
917 |
918 (exit-bb) */
919
920 if (loop->num_nodes != 2)
921 {
922 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
923 fprintf (vect_dump, "not vectorized: control flow in loop.");
924 return NULL;
925 }
926
927 if (empty_block_p (loop->header))
928 {
929 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
930 fprintf (vect_dump, "not vectorized: empty loop.");
931 return NULL;
932 }
933 }
934 else
935 {
936 struct loop *innerloop = loop->inner;
937 edge entryedge;
938
939 /* Nested loop. We currently require that the loop is doubly-nested,
940 contains a single inner loop, and the number of BBs is exactly 5.
941 Vectorizable outer-loops look like this:
942
943 (pre-header)
944 |
945 header <---+
946 | |
947 inner-loop |
948 | |
949 tail ------+
950 |
951 (exit-bb)
952
953 The inner-loop has the properties expected of inner-most loops
954 as described above. */
955
956 if ((loop->inner)->inner || (loop->inner)->next)
957 {
958 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
959 fprintf (vect_dump, "not vectorized: multiple nested loops.");
960 return NULL;
961 }
962
963 /* Analyze the inner-loop. */
964 inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
965 if (!inner_loop_vinfo)
966 {
967 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
968 fprintf (vect_dump, "not vectorized: Bad inner loop.");
969 return NULL;
970 }
971
972 if (!expr_invariant_in_loop_p (loop,
973 LOOP_VINFO_NITERS (inner_loop_vinfo)))
974 {
975 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
976 fprintf (vect_dump,
977 "not vectorized: inner-loop count not invariant.");
978 destroy_loop_vec_info (inner_loop_vinfo, true);
979 return NULL;
980 }
981
982 if (loop->num_nodes != 5)
983 {
984 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
985 fprintf (vect_dump, "not vectorized: control flow in loop.");
986 destroy_loop_vec_info (inner_loop_vinfo, true);
987 return NULL;
988 }
989
990 gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
991 entryedge = EDGE_PRED (innerloop->header, 0);
992 if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
993 entryedge = EDGE_PRED (innerloop->header, 1);
994
995 if (entryedge->src != loop->header
996 || !single_exit (innerloop)
997 || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
998 {
999 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1000 fprintf (vect_dump, "not vectorized: unsupported outerloop form.");
1001 destroy_loop_vec_info (inner_loop_vinfo, true);
1002 return NULL;
1003 }
1004
1005 if (vect_print_dump_info (REPORT_DETAILS))
1006 fprintf (vect_dump, "Considering outer-loop vectorization.");
1007 }
1008
1009 if (!single_exit (loop)
1010 || EDGE_COUNT (loop->header->preds) != 2)
1011 {
1012 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1013 {
1014 if (!single_exit (loop))
1015 fprintf (vect_dump, "not vectorized: multiple exits.");
1016 else if (EDGE_COUNT (loop->header->preds) != 2)
1017 fprintf (vect_dump, "not vectorized: too many incoming edges.");
1018 }
1019 if (inner_loop_vinfo)
1020 destroy_loop_vec_info (inner_loop_vinfo, true);
1021 return NULL;
1022 }
1023
1024 /* We assume that the loop exit condition is at the end of the loop, i.e.,
1025 that the loop is represented as a do-while (with a proper if-guard
1026 before the loop if needed), where the loop header contains all the
1027 executable statements, and the latch is empty. */
1028 if (!empty_block_p (loop->latch)
1029 || !gimple_seq_empty_p (phi_nodes (loop->latch)))
1030 {
1031 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1032 fprintf (vect_dump, "not vectorized: unexpected loop form.");
1033 if (inner_loop_vinfo)
1034 destroy_loop_vec_info (inner_loop_vinfo, true);
1035 return NULL;
1036 }
1037
1038 /* Make sure there exists a single-predecessor exit bb: */
1039 if (!single_pred_p (single_exit (loop)->dest))
1040 {
1041 edge e = single_exit (loop);
1042 if (!(e->flags & EDGE_ABNORMAL))
1043 {
1044 split_loop_exit_edge (e);
1045 if (vect_print_dump_info (REPORT_DETAILS))
1046 fprintf (vect_dump, "split exit edge.");
1047 }
1048 else
1049 {
1050 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1051 fprintf (vect_dump, "not vectorized: abnormal loop exit edge.");
1052 if (inner_loop_vinfo)
1053 destroy_loop_vec_info (inner_loop_vinfo, true);
1054 return NULL;
1055 }
1056 }
1057
1058 loop_cond = vect_get_loop_niters (loop, &number_of_iterations);
1059 if (!loop_cond)
1060 {
1061 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1062 fprintf (vect_dump, "not vectorized: complicated exit condition.");
1063 if (inner_loop_vinfo)
1064 destroy_loop_vec_info (inner_loop_vinfo, true);
1065 return NULL;
1066 }
1067
1068 if (!number_of_iterations)
1069 {
1070 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1071 fprintf (vect_dump,
1072 "not vectorized: number of iterations cannot be computed.");
1073 if (inner_loop_vinfo)
1074 destroy_loop_vec_info (inner_loop_vinfo, true);
1075 return NULL;
1076 }
1077
1078 if (chrec_contains_undetermined (number_of_iterations))
1079 {
1080 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
1081 fprintf (vect_dump, "Infinite number of iterations.");
1082 if (inner_loop_vinfo)
1083 destroy_loop_vec_info (inner_loop_vinfo, true);
1084 return NULL;
1085 }
1086
1087 if (!NITERS_KNOWN_P (number_of_iterations))
1088 {
1089 if (vect_print_dump_info (REPORT_DETAILS))
1090 {
1091 fprintf (vect_dump, "Symbolic number of iterations is ");
1092 print_generic_expr (vect_dump, number_of_iterations, TDF_DETAILS);
1093 }
1094 }
1095 else if (TREE_INT_CST_LOW (number_of_iterations) == 0)
1096 {
1097 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1098 fprintf (vect_dump, "not vectorized: number of iterations = 0.");
1099 if (inner_loop_vinfo)
1100 destroy_loop_vec_info (inner_loop_vinfo, false);
1101 return NULL;
1102 }
1103
1104 loop_vinfo = new_loop_vec_info (loop);
1105 LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
1106 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
1107
1108 STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;
1109
1110 /* CHECKME: May want to keep it around in the future. */
1111 if (inner_loop_vinfo)
1112 destroy_loop_vec_info (inner_loop_vinfo, false);
1113
1114 gcc_assert (!loop->aux);
1115 loop->aux = loop_vinfo;
1116 return loop_vinfo;
1117 }
1118
1119
1120 /* Get cost by calling the target's vectorization cost builtin. */
1121
1122 static inline
1123 int vect_get_cost (enum vect_cost_for_stmt type_of_cost)
1124 {
1125 return targetm.vectorize.builtin_vectorization_cost (type_of_cost);
1126 }
1127
1128
1129 /* Function vect_analyze_loop_operations.
1130
1131 Scan the loop stmts and make sure they are all vectorizable. */
1132
1133 static bool
1134 vect_analyze_loop_operations (loop_vec_info loop_vinfo)
1135 {
1136 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1137 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1138 int nbbs = loop->num_nodes;
1139 gimple_stmt_iterator si;
1140 unsigned int vectorization_factor = 0;
1141 int i;
1142 gimple phi;
1143 stmt_vec_info stmt_info;
1144 bool need_to_vectorize = false;
1145 int min_profitable_iters;
1146 int min_scalar_loop_bound;
1147 unsigned int th;
1148 bool only_slp_in_loop = true, ok;
1149
1150 if (vect_print_dump_info (REPORT_DETAILS))
1151 fprintf (vect_dump, "=== vect_analyze_loop_operations ===");
1152
1153 gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
1154 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1155
1156 for (i = 0; i < nbbs; i++)
1157 {
1158 basic_block bb = bbs[i];
1159
1160 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
1161 {
1162 phi = gsi_stmt (si);
1163 ok = true;
1164
1165 stmt_info = vinfo_for_stmt (phi);
1166 if (vect_print_dump_info (REPORT_DETAILS))
1167 {
1168 fprintf (vect_dump, "examining phi: ");
1169 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
1170 }
1171
1172 if (! is_loop_header_bb_p (bb))
1173 {
1174 /* inner-loop loop-closed exit phi in outer-loop vectorization
1175 (i.e. a phi in the tail of the outer-loop).
1176 FORNOW: we currently don't support the case that these phis
1177 are not used in the outerloop (unless it is double reduction,
1178 i.e., this phi is vect_reduction_def), because this case
1179 would require us to actually do something here. */
1180 if ((!STMT_VINFO_RELEVANT_P (stmt_info)
1181 || STMT_VINFO_LIVE_P (stmt_info))
1182 && STMT_VINFO_DEF_TYPE (stmt_info)
1183 != vect_double_reduction_def)
1184 {
1185 if (vect_print_dump_info (REPORT_DETAILS))
1186 fprintf (vect_dump,
1187 "Unsupported loop-closed phi in outer-loop.");
1188 return false;
1189 }
1190 continue;
1191 }
1192
1193 gcc_assert (stmt_info);
1194
1195 if (STMT_VINFO_LIVE_P (stmt_info))
1196 {
1197 /* FORNOW: not yet supported. */
1198 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1199 fprintf (vect_dump, "not vectorized: value used after loop.");
1200 return false;
1201 }
1202
1203 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
1204 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
1205 {
1206 /* A scalar-dependence cycle that we don't support. */
1207 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1208 fprintf (vect_dump, "not vectorized: scalar dependence cycle.");
1209 return false;
1210 }
1211
1212 if (STMT_VINFO_RELEVANT_P (stmt_info))
1213 {
1214 need_to_vectorize = true;
1215 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
1216 ok = vectorizable_induction (phi, NULL, NULL);
1217 }
1218
1219 if (!ok)
1220 {
1221 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1222 {
1223 fprintf (vect_dump,
1224 "not vectorized: relevant phi not supported: ");
1225 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
1226 }
1227 return false;
1228 }
1229 }
1230
1231 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1232 {
1233 gimple stmt = gsi_stmt (si);
1234 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1235
1236 gcc_assert (stmt_info);
1237
1238 if (!vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
1239 return false;
1240
1241 if ((STMT_VINFO_RELEVANT_P (stmt_info)
1242 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1243 && !PURE_SLP_STMT (stmt_info))
1244 /* STMT needs both SLP and loop-based vectorization. */
1245 only_slp_in_loop = false;
1246 }
1247 } /* bbs */
1248
1249 /* All operations in the loop are either irrelevant (deal with loop
1250 control, or dead), or only used outside the loop and can be moved
1251 out of the loop (e.g. invariants, inductions). The loop can be
1252 optimized away by scalar optimizations. We're better off not
1253 touching this loop. */
1254 if (!need_to_vectorize)
1255 {
1256 if (vect_print_dump_info (REPORT_DETAILS))
1257 fprintf (vect_dump,
1258 "All the computation can be taken out of the loop.");
1259 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1260 fprintf (vect_dump,
1261 "not vectorized: redundant loop. no profit to vectorize.");
1262 return false;
1263 }
1264
1265 /* If all the stmts in the loop can be SLPed, we perform only SLP, and
1266 the vectorization factor of the loop is the unrolling factor required by
1267 the SLP instances. If that unrolling factor is 1, we say that we perform
1268 pure SLP on the loop; cross-iteration parallelism is not exploited.
1269 if (only_slp_in_loop)
1270 vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
1271 else
1272 vectorization_factor = least_common_multiple (vectorization_factor,
1273 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
1274
1275 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
1276
1277 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1278 && vect_print_dump_info (REPORT_DETAILS))
1279 fprintf (vect_dump,
1280 "vectorization_factor = %d, niters = " HOST_WIDE_INT_PRINT_DEC,
1281 vectorization_factor, LOOP_VINFO_INT_NITERS (loop_vinfo));
1282
1283 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1284 && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
1285 {
1286 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1287 fprintf (vect_dump, "not vectorized: iteration count too small.");
1288 if (vect_print_dump_info (REPORT_DETAILS))
1289 fprintf (vect_dump,"not vectorized: iteration count smaller than "
1290 "vectorization factor.");
1291 return false;
1292 }
1293
1294 /* Analyze cost. Decide if worthwhile to vectorize. */
1295
1296 /* Once VF is set, SLP costs should be updated since the number of created
1297 vector stmts depends on VF. */
1298 vect_update_slp_costs_according_to_vf (loop_vinfo);
1299
1300 min_profitable_iters = vect_estimate_min_profitable_iters (loop_vinfo);
1301 LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters;
1302
1303 if (min_profitable_iters < 0)
1304 {
1305 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1306 fprintf (vect_dump, "not vectorized: vectorization not profitable.");
1307 if (vect_print_dump_info (REPORT_DETAILS))
1308 fprintf (vect_dump, "not vectorized: vector version will never be "
1309 "profitable.");
1310 return false;
1311 }
1312
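/* Descriptive note (added for clarity): the --param min-vect-loop-bound
   value is expressed in vectorized iterations; it is scaled by the
   vectorization factor to obtain the corresponding threshold on the
   scalar iteration count used below.  */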
1313 min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
1314 * vectorization_factor) - 1);
1315
1316 /* Use the cost model only if it is more conservative than the
1317 user-specified threshold. */
1318
1319 th = (unsigned) min_scalar_loop_bound;
1320 if (min_profitable_iters
1321 && (!min_scalar_loop_bound
1322 || min_profitable_iters > min_scalar_loop_bound))
1323 th = (unsigned) min_profitable_iters;
1324
1325 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1326 && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
1327 {
1328 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1329 fprintf (vect_dump, "not vectorized: vectorization not "
1330 "profitable.");
1331 if (vect_print_dump_info (REPORT_DETAILS))
1332 fprintf (vect_dump, "not vectorized: iteration count smaller than "
1333 "user specified loop bound parameter or minimum "
1334 "profitable iterations (whichever is more conservative).");
1335 return false;
1336 }
1337
1338 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1339 || LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0
1340 || LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
1341 {
1342 if (vect_print_dump_info (REPORT_DETAILS))
1343 fprintf (vect_dump, "epilog loop required.");
1344 if (!vect_can_advance_ivs_p (loop_vinfo))
1345 {
1346 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1347 fprintf (vect_dump,
1348 "not vectorized: can't create epilog loop 1.");
1349 return false;
1350 }
1351 if (!slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
1352 {
1353 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1354 fprintf (vect_dump,
1355 "not vectorized: can't create epilog loop 2.");
1356 return false;
1357 }
1358 }
1359
1360 return true;
1361 }
1362
1363
1364 /* Function vect_analyze_loop.
1365
1366 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1367 for it. The different analyses will record information in the
1368 loop_vec_info struct. */
1369 loop_vec_info
1370 vect_analyze_loop (struct loop *loop)
1371 {
1372 bool ok;
1373 loop_vec_info loop_vinfo;
1374 int max_vf = MAX_VECTORIZATION_FACTOR;
1375 int min_vf = 2;
1376
1377 if (vect_print_dump_info (REPORT_DETAILS))
1378 fprintf (vect_dump, "===== analyze_loop_nest =====");
1379
1380 if (loop_outer (loop)
1381 && loop_vec_info_for_loop (loop_outer (loop))
1382 && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
1383 {
1384 if (vect_print_dump_info (REPORT_DETAILS))
1385 fprintf (vect_dump, "outer-loop already vectorized.");
1386 return NULL;
1387 }
1388
1389 /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.). */
1390
1391 loop_vinfo = vect_analyze_loop_form (loop);
1392 if (!loop_vinfo)
1393 {
1394 if (vect_print_dump_info (REPORT_DETAILS))
1395 fprintf (vect_dump, "bad loop form.");
1396 return NULL;
1397 }
1398
1399 /* Find all data references in the loop (which correspond to vdefs/vuses)
1400 and analyze their evolution in the loop. Also adjust the minimal
1401 vectorization factor according to the loads and stores.
1402
1403 FORNOW: Handle only simple array references whose
1404 alignment can be forced, and aligned pointer-references. */
1405
1406 ok = vect_analyze_data_refs (loop_vinfo, NULL, &min_vf);
1407 if (!ok)
1408 {
1409 if (vect_print_dump_info (REPORT_DETAILS))
1410 fprintf (vect_dump, "bad data references.");
1411 destroy_loop_vec_info (loop_vinfo, true);
1412 return NULL;
1413 }
1414
1415 /* Classify all cross-iteration scalar data-flow cycles.
1416 Cross-iteration cycles caused by virtual phis are analyzed separately. */
1417
1418 vect_analyze_scalar_cycles (loop_vinfo);
1419
1420 vect_pattern_recog (loop_vinfo);
1421
1422 /* Data-flow analysis to detect stmts that do not need to be vectorized. */
1423
1424 ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
1425 if (!ok)
1426 {
1427 if (vect_print_dump_info (REPORT_DETAILS))
1428 fprintf (vect_dump, "unexpected pattern.");
1429 destroy_loop_vec_info (loop_vinfo, true);
1430 return NULL;
1431 }
1432
1433 /* Analyze data dependences between the data-refs in the loop
1434 and adjust the maximum vectorization factor according to
1435 the dependences.
1436 FORNOW: fail at the first data dependence that we encounter. */
1437
1438 ok = vect_analyze_data_ref_dependences (loop_vinfo, NULL, &max_vf);
1439 if (!ok
1440 || max_vf < min_vf)
1441 {
1442 if (vect_print_dump_info (REPORT_DETAILS))
1443 fprintf (vect_dump, "bad data dependence.");
1444 destroy_loop_vec_info (loop_vinfo, true);
1445 return NULL;
1446 }
1447
1448 ok = vect_determine_vectorization_factor (loop_vinfo);
1449 if (!ok)
1450 {
1451 if (vect_print_dump_info (REPORT_DETAILS))
1452 fprintf (vect_dump, "can't determine vectorization factor.");
1453 destroy_loop_vec_info (loop_vinfo, true);
1454 return NULL;
1455 }
1456 if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
1457 {
1458 if (vect_print_dump_info (REPORT_DETAILS))
1459 fprintf (vect_dump, "bad data dependence.");
1460 destroy_loop_vec_info (loop_vinfo, true);
1461 return NULL;
1462 }
1463
1464 /* Analyze the alignment of the data-refs in the loop.
1465 Fail if a data reference is found that cannot be vectorized. */
1466
1467 ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL);
1468 if (!ok)
1469 {
1470 if (vect_print_dump_info (REPORT_DETAILS))
1471 fprintf (vect_dump, "bad data alignment.");
1472 destroy_loop_vec_info (loop_vinfo, true);
1473 return NULL;
1474 }
1475
1476 /* Analyze the access patterns of the data-refs in the loop (consecutive,
1477 complex, etc.). FORNOW: Only handle consecutive access pattern. */
1478
1479 ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
1480 if (!ok)
1481 {
1482 if (vect_print_dump_info (REPORT_DETAILS))
1483 fprintf (vect_dump, "bad data access.");
1484 destroy_loop_vec_info (loop_vinfo, true);
1485 return NULL;
1486 }
1487
1488 /* Prune the list of ddrs to be tested at run-time by versioning for alias.
1489 It is important to call pruning after vect_analyze_data_ref_accesses,
1490 since we use grouping information gathered by interleaving analysis. */
1491 ok = vect_prune_runtime_alias_test_list (loop_vinfo);
1492 if (!ok)
1493 {
1494 if (vect_print_dump_info (REPORT_DETAILS))
1495 fprintf (vect_dump, "too long list of versioning for alias "
1496 "run-time tests.");
1497 destroy_loop_vec_info (loop_vinfo, true);
1498 return NULL;
1499 }
1500
1501 /* Check the SLP opportunities in the loop, analyze and build SLP trees. */
1502 ok = vect_analyze_slp (loop_vinfo, NULL);
1503 if (ok)
1504 {
1505 /* Decide which possible SLP instances to SLP. */
1506 vect_make_slp_decision (loop_vinfo);
1507
1508 /* Find stmts that need to be both vectorized and SLPed. */
1509 vect_detect_hybrid_slp (loop_vinfo);
1510 }
1511
1512 /* This pass will decide on using loop versioning and/or loop peeling in
1513 order to enhance the alignment of data references in the loop. */
1514
1515 ok = vect_enhance_data_refs_alignment (loop_vinfo);
1516 if (!ok)
1517 {
1518 if (vect_print_dump_info (REPORT_DETAILS))
1519 fprintf (vect_dump, "bad data alignment.");
1520 destroy_loop_vec_info (loop_vinfo, true);
1521 return NULL;
1522 }
1523
1524 /* Scan all the operations in the loop and make sure they are
1525 vectorizable. */
1526
1527 ok = vect_analyze_loop_operations (loop_vinfo);
1528 if (!ok)
1529 {
1530 if (vect_print_dump_info (REPORT_DETAILS))
1531 fprintf (vect_dump, "bad operation or unsupported loop bound.");
1532 destroy_loop_vec_info (loop_vinfo, true);
1533 return NULL;
1534 }
1535
1536 LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
1537
1538 return loop_vinfo;
1539 }
1540
1541
1542 /* Function reduction_code_for_scalar_code
1543
1544 Input:
1545 CODE - tree_code of a reduction operation.
1546
1547 Output:
1548 REDUC_CODE - the corresponding tree-code to be used to reduce the
1549 vector of partial results into a single scalar result (which
1550 will also reside in a vector) or ERROR_MARK if the operation is
1551 a supported reduction operation, but does not have such a tree-code.
1552
1553 Return FALSE if CODE currently cannot be vectorized as a reduction. */
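/* For illustration: a summation reduction such as "sum += a[i]" has
   CODE == PLUS_EXPR and maps to REDUC_PLUS_EXPR, which folds the vector of
   partial sums into a single scalar; MAX_EXPR and MIN_EXPR map to
   REDUC_MAX_EXPR and REDUC_MIN_EXPR in the same way.  Operations such as
   MULT_EXPR are still accepted as reductions but get ERROR_MARK here, so
   their final reduction cannot use a single reduction tree code.  */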
1554
1555 static bool
1556 reduction_code_for_scalar_code (enum tree_code code,
1557 enum tree_code *reduc_code)
1558 {
1559 switch (code)
1560 {
1561 case MAX_EXPR:
1562 *reduc_code = REDUC_MAX_EXPR;
1563 return true;
1564
1565 case MIN_EXPR:
1566 *reduc_code = REDUC_MIN_EXPR;
1567 return true;
1568
1569 case PLUS_EXPR:
1570 *reduc_code = REDUC_PLUS_EXPR;
1571 return true;
1572
1573 case MULT_EXPR:
1574 case MINUS_EXPR:
1575 case BIT_IOR_EXPR:
1576 case BIT_XOR_EXPR:
1577 case BIT_AND_EXPR:
1578 *reduc_code = ERROR_MARK;
1579 return true;
1580
1581 default:
1582 return false;
1583 }
1584 }
1585
1586
1587 /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
1588 STMT is printed with a message MSG. */
1589
1590 static void
1591 report_vect_op (gimple stmt, const char *msg)
1592 {
1593 fprintf (vect_dump, "%s", msg);
1594 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
1595 }
1596
1597
1598 /* Function vect_is_simple_reduction_1
1599
1600 (1) Detect a cross-iteration def-use cycle that represents a simple
1601 reduction computation. We look for the following pattern:
1602
1603 loop_header:
1604 a1 = phi < a0, a2 >
1605 a3 = ...
1606 a2 = operation (a3, a1)
1607
1608 such that:
1609 1. operation is commutative and associative and it is safe to
1610 change the order of the computation (if CHECK_REDUCTION is true)
1611 2. no uses for a2 in the loop (a2 is used out of the loop)
1612 3. no uses of a1 in the loop besides the reduction operation.
1613
1614 Condition 1 is tested here.
1615 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
1616
1617 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
1618 nested cycles, if CHECK_REDUCTION is false.
1619
1620 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
1621 reductions:
1622
1623 a1 = phi < a0, a2 >
1624 inner loop (def of a3)
1625 a2 = phi < a3 >
1626
1627 If MODIFY is true, it also tries to rework the code in-place to enable
1628 detection of more reduction patterns. For the time being we rewrite
1629 "res -= RHS" into "res += -RHS" when it seems worthwhile.
1630 */
1631
1632 static gimple
1633 vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
1634 bool check_reduction, bool *double_reduc,
1635 bool modify)
1636 {
1637 struct loop *loop = (gimple_bb (phi))->loop_father;
1638 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
1639 edge latch_e = loop_latch_edge (loop);
1640 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
1641 gimple def_stmt, def1 = NULL, def2 = NULL;
1642 enum tree_code orig_code, code;
1643 tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
1644 tree type;
1645 int nloop_uses;
1646 tree name;
1647 imm_use_iterator imm_iter;
1648 use_operand_p use_p;
1649 bool phi_def;
1650
1651 *double_reduc = false;
1652
1653 /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization;
1654 otherwise, we assume outer loop vectorization. */
1655 gcc_assert ((check_reduction && loop == vect_loop)
1656 || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));
1657
1658 name = PHI_RESULT (phi);
1659 nloop_uses = 0;
1660 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
1661 {
1662 gimple use_stmt = USE_STMT (use_p);
1663 if (is_gimple_debug (use_stmt))
1664 continue;
1665 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
1666 && vinfo_for_stmt (use_stmt)
1667 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
1668 nloop_uses++;
1669 if (nloop_uses > 1)
1670 {
1671 if (vect_print_dump_info (REPORT_DETAILS))
1672 fprintf (vect_dump, "reduction used in loop.");
1673 return NULL;
1674 }
1675 }
1676
1677 if (TREE_CODE (loop_arg) != SSA_NAME)
1678 {
1679 if (vect_print_dump_info (REPORT_DETAILS))
1680 {
1681 fprintf (vect_dump, "reduction: not ssa_name: ");
1682 print_generic_expr (vect_dump, loop_arg, TDF_SLIM);
1683 }
1684 return NULL;
1685 }
1686
1687 def_stmt = SSA_NAME_DEF_STMT (loop_arg);
1688 if (!def_stmt)
1689 {
1690 if (vect_print_dump_info (REPORT_DETAILS))
1691 fprintf (vect_dump, "reduction: no def_stmt.");
1692 return NULL;
1693 }
1694
1695 if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
1696 {
1697 if (vect_print_dump_info (REPORT_DETAILS))
1698 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
1699 return NULL;
1700 }
1701
1702 if (is_gimple_assign (def_stmt))
1703 {
1704 name = gimple_assign_lhs (def_stmt);
1705 phi_def = false;
1706 }
1707 else
1708 {
1709 name = PHI_RESULT (def_stmt);
1710 phi_def = true;
1711 }
1712
1713 nloop_uses = 0;
1714 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
1715 {
1716 gimple use_stmt = USE_STMT (use_p);
1717 if (is_gimple_debug (use_stmt))
1718 continue;
1719 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
1720 && vinfo_for_stmt (use_stmt)
1721 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
1722 nloop_uses++;
1723 if (nloop_uses > 1)
1724 {
1725 if (vect_print_dump_info (REPORT_DETAILS))
1726 fprintf (vect_dump, "reduction used in loop.");
1727 return NULL;
1728 }
1729 }
1730
1731 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
1732 defined in the inner loop. */
1733 if (phi_def)
1734 {
1735 op1 = PHI_ARG_DEF (def_stmt, 0);
1736
1737 if (gimple_phi_num_args (def_stmt) != 1
1738 || TREE_CODE (op1) != SSA_NAME)
1739 {
1740 if (vect_print_dump_info (REPORT_DETAILS))
1741 fprintf (vect_dump, "unsupported phi node definition.");
1742
1743 return NULL;
1744 }
1745
1746 def1 = SSA_NAME_DEF_STMT (op1);
1747 if (flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
1748 && loop->inner
1749 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
1750 && is_gimple_assign (def1))
1751 {
1752 if (vect_print_dump_info (REPORT_DETAILS))
1753 report_vect_op (def_stmt, "detected double reduction: ");
1754
1755 *double_reduc = true;
1756 return def_stmt;
1757 }
1758
1759 return NULL;
1760 }
1761
1762 code = orig_code = gimple_assign_rhs_code (def_stmt);
1763
1764 /* We can handle "res -= x[i]", which is non-associative, by
1765 simply rewriting this into "res += -x[i]". Avoid changing the
1766 gimple instruction for the first simple tests and only do this
1767 if we're allowed to change code at all. */
1768 if (code == MINUS_EXPR && modify)
1769 code = PLUS_EXPR;
1770
1771 if (check_reduction
1772 && (!commutative_tree_code (code) || !associative_tree_code (code)))
1773 {
1774 if (vect_print_dump_info (REPORT_DETAILS))
1775 report_vect_op (def_stmt, "reduction: not commutative/associative: ");
1776 return NULL;
1777 }
1778
1779 if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
1780 {
1781 if (code != COND_EXPR)
1782 {
1783 if (vect_print_dump_info (REPORT_DETAILS))
1784 report_vect_op (def_stmt, "reduction: not binary operation: ");
1785
1786 return NULL;
1787 }
1788
1789 op3 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
1790 if (COMPARISON_CLASS_P (op3))
1791 {
1792 op4 = TREE_OPERAND (op3, 1);
1793 op3 = TREE_OPERAND (op3, 0);
1794 }
1795
1796 op1 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 1);
1797 op2 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 2);
1798
1799 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
1800 {
1801 if (vect_print_dump_info (REPORT_DETAILS))
1802 report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
1803
1804 return NULL;
1805 }
1806 }
1807 else
1808 {
1809 op1 = gimple_assign_rhs1 (def_stmt);
1810 op2 = gimple_assign_rhs2 (def_stmt);
1811
1812 if (TREE_CODE (op1) != SSA_NAME || TREE_CODE (op2) != SSA_NAME)
1813 {
1814 if (vect_print_dump_info (REPORT_DETAILS))
1815 report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
1816
1817 return NULL;
1818 }
1819 }
1820
1821 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
1822 if ((TREE_CODE (op1) == SSA_NAME
1823 && !types_compatible_p (type, TREE_TYPE (op1)))
1824 || (TREE_CODE (op2) == SSA_NAME
1825 && !types_compatible_p (type, TREE_TYPE (op2)))
1826 || (op3 && TREE_CODE (op3) == SSA_NAME
1827 && !types_compatible_p (type, TREE_TYPE (op3)))
1828 || (op4 && TREE_CODE (op4) == SSA_NAME
1829 && !types_compatible_p (type, TREE_TYPE (op4))))
1830 {
1831 if (vect_print_dump_info (REPORT_DETAILS))
1832 {
1833 fprintf (vect_dump, "reduction: multiple types: operation type: ");
1834 print_generic_expr (vect_dump, type, TDF_SLIM);
1835 fprintf (vect_dump, ", operands types: ");
1836 print_generic_expr (vect_dump, TREE_TYPE (op1), TDF_SLIM);
1837 fprintf (vect_dump, ",");
1838 print_generic_expr (vect_dump, TREE_TYPE (op2), TDF_SLIM);
1839 if (op3)
1840 {
1841 fprintf (vect_dump, ",");
1842 print_generic_expr (vect_dump, TREE_TYPE (op3), TDF_SLIM);
1843 }
1844
1845 if (op4)
1846 {
1847 fprintf (vect_dump, ",");
1848 print_generic_expr (vect_dump, TREE_TYPE (op4), TDF_SLIM);
1849 }
1850 }
1851
1852 return NULL;
1853 }
1854
1855 /* Check that it's ok to change the order of the computation.
1856 Generally, when vectorizing a reduction we change the order of the
1857 computation. This may change the behavior of the program in some
1858 cases, so we need to check that this is ok. One exception is when
1859 vectorizing an outer-loop: the inner-loop is executed sequentially,
1860 and therefore vectorizing reductions in the inner-loop during
1861 outer-loop vectorization is safe. */
1862
1863 /* CHECKME: check for !flag_finite_math_only too? */
1864 if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
1865 && check_reduction)
1866 {
1867 /* Changing the order of operations changes the semantics. */
1868 if (vect_print_dump_info (REPORT_DETAILS))
1869 report_vect_op (def_stmt, "reduction: unsafe fp math optimization: ");
1870 return NULL;
1871 }
1872 else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
1873 && check_reduction)
1874 {
1875 /* Changing the order of operations changes the semantics. */
1876 if (vect_print_dump_info (REPORT_DETAILS))
1877 report_vect_op (def_stmt, "reduction: unsafe int math optimization: ");
1878 return NULL;
1879 }
1880 else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction)
1881 {
1882 /* Changing the order of operations changes the semantics. */
1883 if (vect_print_dump_info (REPORT_DETAILS))
1884 report_vect_op (def_stmt,
1885 "reduction: unsafe fixed-point math optimization: ");
1886 return NULL;
1887 }
1888
1889 /* If we detected "res -= x[i]" earlier, rewrite it into
1890 "res += -x[i]" now. If this turns out to be useless reassoc
1891 will clean it up again. */
1892 if (orig_code == MINUS_EXPR)
1893 {
1894 tree rhs = gimple_assign_rhs2 (def_stmt);
1895 tree negrhs = make_ssa_name (SSA_NAME_VAR (rhs), NULL);
1896 gimple negate_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, negrhs,
1897 rhs, NULL);
1898 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
1899 set_vinfo_for_stmt (negate_stmt, new_stmt_vec_info (negate_stmt,
1900 loop_info, NULL));
1901 gsi_insert_before (&gsi, negate_stmt, GSI_NEW_STMT);
1902 gimple_assign_set_rhs2 (def_stmt, negrhs);
1903 gimple_assign_set_rhs_code (def_stmt, PLUS_EXPR);
1904 update_stmt (def_stmt);
1905 }
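/* For example (an illustrative sketch of the rewrite just performed,
   with made-up SSA names):

     res_2 = res_1 - x_3;

   becomes

     x_4 = -x_3;
     res_2 = res_1 + x_4;

   which is commutative and associative, so the rest of the analysis can
   treat it as an ordinary PLUS_EXPR reduction.  */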
1906
1907 /* Reduction is safe. We're dealing with one of the following:
1908 1) integer arithmetic and no trapv
1909 2) floating point arithmetic, and special flags permit this optimization
1910 3) nested cycle (i.e., outer loop vectorization). */
1911 if (TREE_CODE (op1) == SSA_NAME)
1912 def1 = SSA_NAME_DEF_STMT (op1);
1913
1914 if (TREE_CODE (op2) == SSA_NAME)
1915 def2 = SSA_NAME_DEF_STMT (op2);
1916
1917 if (code != COND_EXPR
1918 && (!def1 || !def2 || gimple_nop_p (def1) || gimple_nop_p (def2)))
1919 {
1920 if (vect_print_dump_info (REPORT_DETAILS))
1921 report_vect_op (def_stmt, "reduction: no defs for operands: ");
1922 return NULL;
1923 }
1924
1925 /* Check that one def is the reduction def, defined by PHI,
1926 the other def is either defined in the loop ("vect_internal_def"),
1927 or it's an induction (defined by a loop-header phi-node). */
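/* For instance (an illustrative inner-most loop reduction, with made-up
   SSA names):

     sum_1 = PHI <sum_init_4 (preheader), sum_2 (latch)>
     ...
     sum_2 = sum_1 + val_3;          <-- DEF_STMT

   One operand of DEF_STMT is defined by the loop-header PHI and the other
   by a statement inside the loop (e.g. the load val_3 = a[i_5]), which is
   what the checks below verify.  */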
1928
1929 if (def2 && def2 == phi
1930 && (code == COND_EXPR
1931 || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
1932 && (is_gimple_assign (def1)
1933 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
1934 == vect_induction_def
1935 || (gimple_code (def1) == GIMPLE_PHI
1936 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
1937 == vect_internal_def
1938 && !is_loop_header_bb_p (gimple_bb (def1)))))))
1939 {
1940 if (vect_print_dump_info (REPORT_DETAILS))
1941 report_vect_op (def_stmt, "detected reduction: ");
1942 return def_stmt;
1943 }
1944 else if (def1 && def1 == phi
1945 && (code == COND_EXPR
1946 || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
1947 && (is_gimple_assign (def2)
1948 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
1949 == vect_induction_def
1950 || (gimple_code (def2) == GIMPLE_PHI
1951 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
1952 == vect_internal_def
1953 && !is_loop_header_bb_p (gimple_bb (def2)))))))
1954 {
1955 if (check_reduction)
1956 {
1957 /* Swap operands (just for simplicity - so that the rest of the code
1958 can assume that the reduction variable is always the last (second)
1959 argument). */
1960 if (vect_print_dump_info (REPORT_DETAILS))
1961 report_vect_op (def_stmt,
1962 "detected reduction: need to swap operands: ");
1963
1964 swap_tree_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
1965 gimple_assign_rhs2_ptr (def_stmt));
1966 }
1967 else
1968 {
1969 if (vect_print_dump_info (REPORT_DETAILS))
1970 report_vect_op (def_stmt, "detected reduction: ");
1971 }
1972
1973 return def_stmt;
1974 }
1975 else
1976 {
1977 if (vect_print_dump_info (REPORT_DETAILS))
1978 report_vect_op (def_stmt, "reduction: unknown pattern: ");
1979
1980 return NULL;
1981 }
1982 }
1983
1984 /* Wrapper around vect_is_simple_reduction_1, which does not modify code
1985 in place.  Takes the same arguments as that function.  */
1986
1987 static gimple
1988 vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
1989 bool check_reduction, bool *double_reduc)
1990 {
1991 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
1992 double_reduc, false);
1993 }
1994
1995 /* Wrapper around vect_is_simple_reduction_1, which will modify code
1996 in place if doing so enables the detection of more reductions.
1997 Takes the same arguments as that function.  */
1998
1999 gimple
2000 vect_force_simple_reduction (loop_vec_info loop_info, gimple phi,
2001 bool check_reduction, bool *double_reduc)
2002 {
2003 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
2004 double_reduc, true);
2005 }
2006
2007 /* Function vect_estimate_min_profitable_iters
2008
2009 Return the number of iterations required for the vector version of the
2010 loop to be profitable relative to the cost of the scalar version of the
2011 loop.
2012
2013 TODO: Take profile info into account before making vectorization
2014 decisions, if available. */
2015
2016 int
2017 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
2018 {
2019 int i;
2020 int min_profitable_iters;
2021 int peel_iters_prologue;
2022 int peel_iters_epilogue;
2023 int vec_inside_cost = 0;
2024 int vec_outside_cost = 0;
2025 int scalar_single_iter_cost = 0;
2026 int scalar_outside_cost = 0;
2027 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2028 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2029 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
2030 int nbbs = loop->num_nodes;
2031 int byte_misalign = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
2032 int peel_guard_costs = 0;
2033 int innerloop_iters = 0, factor;
2034 VEC (slp_instance, heap) *slp_instances;
2035 slp_instance instance;
2036
2037 /* Cost model disabled. */
2038 if (!flag_vect_cost_model)
2039 {
2040 if (vect_print_dump_info (REPORT_COST))
2041 fprintf (vect_dump, "cost model disabled.");
2042 return 0;
2043 }
2044
2045 /* Requires loop versioning tests to handle misalignment. */
2046 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
2047 {
2048 /* FIXME: Make cost depend on complexity of individual check. */
2049 vec_outside_cost +=
2050 VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
2051 if (vect_print_dump_info (REPORT_COST))
2052 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
2053 "versioning to treat misalignment.\n");
2054 }
2055
2056 /* Requires loop versioning with alias checks. */
2057 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2058 {
2059 /* FIXME: Make cost depend on complexity of individual check. */
2060 vec_outside_cost +=
2061 VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
2062 if (vect_print_dump_info (REPORT_COST))
2063 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
2064 "versioning aliasing.\n");
2065 }
2066
2067 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2068 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2069 vec_outside_cost += vect_get_cost (cond_branch_taken);
2070
2071 /* Count statements in scalar loop. Using this as scalar cost for a single
2072 iteration for now.
2073
2074 TODO: Add outer loop support.
2075
2076 TODO: Consider assigning different costs to different scalar
2077 statements. */
2078
2079 /* FORNOW. */
2080 if (loop->inner)
2081 innerloop_iters = 50; /* FIXME */
2082
2083 for (i = 0; i < nbbs; i++)
2084 {
2085 gimple_stmt_iterator si;
2086 basic_block bb = bbs[i];
2087
2088 if (bb->loop_father == loop->inner)
2089 factor = innerloop_iters;
2090 else
2091 factor = 1;
2092
2093 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2094 {
2095 gimple stmt = gsi_stmt (si);
2096 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2097 /* Skip stmts that are not vectorized inside the loop. */
2098 if (!STMT_VINFO_RELEVANT_P (stmt_info)
2099 && (!STMT_VINFO_LIVE_P (stmt_info)
2100 || STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def))
2101 continue;
2102 scalar_single_iter_cost += cost_for_stmt (stmt) * factor;
2103 vec_inside_cost += STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) * factor;
2104 /* FIXME: for stmts in the inner-loop in outer-loop vectorization,
2105 some of the "outside" costs are generated inside the outer-loop. */
2106 vec_outside_cost += STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info);
2107 }
2108 }
2109
2110 /* Add additional cost for the peeled instructions in prologue and epilogue
2111 loop.
2112
2113 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
2114 at compile-time - we assume it's vf/2 (the worst would be vf-1).
2115
2116 TODO: Build an expression that represents peel_iters for prologue and
2117 epilogue to be used in a run-time test. */
2118
2119 if (byte_misalign < 0)
2120 {
2121 peel_iters_prologue = vf/2;
2122 if (vect_print_dump_info (REPORT_COST))
2123 fprintf (vect_dump, "cost model: "
2124 "prologue peel iters set to vf/2.");
2125
2126 /* If peeling for alignment is unknown, loop bound of main loop becomes
2127 unknown. */
2128 peel_iters_epilogue = vf/2;
2129 if (vect_print_dump_info (REPORT_COST))
2130 fprintf (vect_dump, "cost model: "
2131 "epilogue peel iters set to vf/2 because "
2132 "peeling for alignment is unknown .");
2133
2134 /* If peeled iterations are unknown, count a taken branch and a not taken
2135 branch per peeled loop. Even if scalar loop iterations are known,
2136 vector iterations are not known since peeled prologue iterations are
2137 not known. Hence guards remain the same. */
2138 peel_guard_costs += 2 * (vect_get_cost (cond_branch_taken)
2139 + vect_get_cost (cond_branch_not_taken));
2140 }
2141 else
2142 {
2143 if (byte_misalign)
2144 {
2145 struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
2146 int element_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
2147 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)));
2148 int nelements = TYPE_VECTOR_SUBPARTS (vectype);
2149
2150 peel_iters_prologue = nelements - (byte_misalign / element_size);
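/* E.g. (illustrative): for a V4SI access (element_size == 4,
   nelements == 4) that is misaligned by 8 bytes, the prologue peels
   4 - 8/4 == 2 scalar iterations to reach an aligned address.  */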
2151 }
2152 else
2153 peel_iters_prologue = 0;
2154
2155 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
2156 {
2157 peel_iters_epilogue = vf/2;
2158 if (vect_print_dump_info (REPORT_COST))
2159 fprintf (vect_dump, "cost model: "
2160 "epilogue peel iters set to vf/2 because "
2161 "loop iterations are unknown .");
2162
2163 /* If peeled iterations are known but the number of scalar loop
2164 iterations is unknown, count a taken branch per peeled loop.  */
2165 peel_guard_costs += 2 * vect_get_cost (cond_branch_taken);
2166 }
2167 else
2168 {
2169 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
2170 peel_iters_prologue = niters < peel_iters_prologue ?
2171 niters : peel_iters_prologue;
2172 peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
2173 }
2174 }
2175
2176 vec_outside_cost += (peel_iters_prologue * scalar_single_iter_cost)
2177 + (peel_iters_epilogue * scalar_single_iter_cost)
2178 + peel_guard_costs;
2179
2180 /* FORNOW: The scalar outside cost is incremented in one of the
2181 following ways:
2182
2183 1. The vectorizer checks for alignment and aliasing and generates
2184 a condition that allows dynamic vectorization. A cost model
2185 check is ANDED with the versioning condition. Hence scalar code
2186 path now has the added cost of the versioning check.
2187
2188 if (cost > th & versioning_check)
2189 jmp to vector code
2190
2191 Hence run-time scalar is incremented by not-taken branch cost.
2192
2193 2. The vectorizer then checks if a prologue is required. If the
2194 cost model check was not done before during versioning, it has to
2195 be done before the prologue check.
2196
2197 if (cost <= th)
2198 prologue = scalar_iters
2199 if (prologue == 0)
2200 jmp to vector code
2201 else
2202 execute prologue
2203 if (prologue == num_iters)
2204 go to exit
2205
2206 Hence the run-time scalar cost is incremented by a taken branch,
2207 plus a not-taken branch, plus a taken branch cost.
2208
2209 3. The vectorizer then checks if an epilogue is required. If the
2210 cost model check was not done before during prologue check, it
2211 has to be done with the epilogue check.
2212
2213 if (prologue == 0)
2214 jmp to vector code
2215 else
2216 execute prologue
2217 if (prologue == num_iters)
2218 go to exit
2219 vector code:
2220 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
2221 jmp to epilogue
2222
2223 Hence the run-time scalar cost should be incremented by 2 taken
2224 branches.
2225
2226 TODO: The back end may reorder the BBs differently and reverse
2227 conditions/branch directions. Change the estimates below to
2228 something more reasonable. */
2229
2230 /* If the number of iterations is known and we do not do versioning, we can
2231 decide whether to vectorize at compile time. Hence the scalar version
2232 does not carry cost model guard costs. */
2233 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2234 || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2235 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2236 {
2237 /* Cost model check occurs at versioning. */
2238 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2239 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2240 scalar_outside_cost += vect_get_cost (cond_branch_not_taken);
2241 else
2242 {
2243 /* Cost model check occurs at prologue generation. */
2244 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2245 scalar_outside_cost += 2 * vect_get_cost (cond_branch_taken)
2246 + vect_get_cost (cond_branch_not_taken);
2247 /* Cost model check occurs at epilogue generation. */
2248 else
2249 scalar_outside_cost += 2 * vect_get_cost (cond_branch_taken);
2250 }
2251 }
2252
2253 /* Add SLP costs. */
2254 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2255 for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++)
2256 {
2257 vec_outside_cost += SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (instance);
2258 vec_inside_cost += SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance);
2259 }
2260
2261 /* Calculate number of iterations required to make the vector version
2262 profitable, relative to the loop bodies only. The following condition
2263 must hold true:
2264 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
2265 where
2266 SIC = scalar iteration cost, VIC = vector iteration cost,
2267 VOC = vector outside cost, VF = vectorization factor,
2268 PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
2269 SOC = scalar outside cost for run time cost model check. */
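/* An illustrative rearrangement of the condition above (same notation):
   multiplying both sides by VF and collecting the niters terms gives

     niters * (SIC * VF - VIC) > (VOC - SOC) * VF - VIC * (PL_ITERS + EP_ITERS)

   which is the ratio computed below when SIC * VF > VIC; otherwise the
   vector version can never be profitable.  */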
2270
2271 if ((scalar_single_iter_cost * vf) > vec_inside_cost)
2272 {
2273 if (vec_outside_cost <= 0)
2274 min_profitable_iters = 1;
2275 else
2276 {
2277 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
2278 - vec_inside_cost * peel_iters_prologue
2279 - vec_inside_cost * peel_iters_epilogue)
2280 / ((scalar_single_iter_cost * vf)
2281 - vec_inside_cost);
2282
2283 if ((scalar_single_iter_cost * vf * min_profitable_iters)
2284 <= ((vec_inside_cost * min_profitable_iters)
2285 + ((vec_outside_cost - scalar_outside_cost) * vf)))
2286 min_profitable_iters++;
2287 }
2288 }
2289 /* The vector version will never be profitable. */
2290 else
2291 {
2292 if (vect_print_dump_info (REPORT_COST))
2293 fprintf (vect_dump, "cost model: the vector iteration cost = %d "
2294 "divided by the scalar iteration cost = %d "
2295 "is greater or equal to the vectorization factor = %d.",
2296 vec_inside_cost, scalar_single_iter_cost, vf);
2297 return -1;
2298 }
2299
2300 if (vect_print_dump_info (REPORT_COST))
2301 {
2302 fprintf (vect_dump, "Cost model analysis: \n");
2303 fprintf (vect_dump, " Vector inside of loop cost: %d\n",
2304 vec_inside_cost);
2305 fprintf (vect_dump, " Vector outside of loop cost: %d\n",
2306 vec_outside_cost);
2307 fprintf (vect_dump, " Scalar iteration cost: %d\n",
2308 scalar_single_iter_cost);
2309 fprintf (vect_dump, " Scalar outside cost: %d\n", scalar_outside_cost);
2310 fprintf (vect_dump, " prologue iterations: %d\n",
2311 peel_iters_prologue);
2312 fprintf (vect_dump, " epilogue iterations: %d\n",
2313 peel_iters_epilogue);
2314 fprintf (vect_dump, " Calculated minimum iters for profitability: %d\n",
2315 min_profitable_iters);
2316 }
2317
2318 min_profitable_iters =
2319 min_profitable_iters < vf ? vf : min_profitable_iters;
2320
2321 /* Because the condition we create is:
2322 if (niters <= min_profitable_iters)
2323 then skip the vectorized loop. */
2324 min_profitable_iters--;
2325
2326 if (vect_print_dump_info (REPORT_COST))
2327 fprintf (vect_dump, " Profitability threshold = %d\n",
2328 min_profitable_iters);
2329
2330 return min_profitable_iters;
2331 }
2332
2333
2334 /* TODO: There is a close dependency between the vect_model_*_cost and the
2335 vectorizable_* functions.  Improve the design to avoid maintenance issues.  */
2336
2337 /* Function vect_model_reduction_cost.
2338
2339 Models cost for a reduction operation, including the vector ops
2340 generated within the strip-mine loop, the initial definition before
2341 the loop, and the epilogue code that must be generated. */
2342
2343 static bool
2344 vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
2345 int ncopies)
2346 {
2347 int outer_cost = 0;
2348 enum tree_code code;
2349 optab optab;
2350 tree vectype;
2351 gimple stmt, orig_stmt;
2352 tree reduction_op;
2353 enum machine_mode mode;
2354 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2355 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2356
2357
2358 /* Cost of reduction op inside loop. */
2359 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info)
2360 += ncopies * vect_get_cost (vector_stmt);
2361
2362 stmt = STMT_VINFO_STMT (stmt_info);
2363
2364 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
2365 {
2366 case GIMPLE_SINGLE_RHS:
2367 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
2368 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
2369 break;
2370 case GIMPLE_UNARY_RHS:
2371 reduction_op = gimple_assign_rhs1 (stmt);
2372 break;
2373 case GIMPLE_BINARY_RHS:
2374 reduction_op = gimple_assign_rhs2 (stmt);
2375 break;
2376 default:
2377 gcc_unreachable ();
2378 }
2379
2380 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
2381 if (!vectype)
2382 {
2383 if (vect_print_dump_info (REPORT_COST))
2384 {
2385 fprintf (vect_dump, "unsupported data-type ");
2386 print_generic_expr (vect_dump, TREE_TYPE (reduction_op), TDF_SLIM);
2387 }
2388 return false;
2389 }
2390
2391 mode = TYPE_MODE (vectype);
2392 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2393
2394 if (!orig_stmt)
2395 orig_stmt = STMT_VINFO_STMT (stmt_info);
2396
2397 code = gimple_assign_rhs_code (orig_stmt);
2398
2399 /* Add in cost for initial definition. */
2400 outer_cost += vect_get_cost (scalar_to_vec);
2401
2402 /* Determine cost of epilogue code.
2403
2404 We have a reduction operator that will reduce the vector in one statement.
2405 Also requires scalar extract. */
2406
2407 if (!nested_in_vect_loop_p (loop, orig_stmt))
2408 {
2409 if (reduc_code != ERROR_MARK)
2410 outer_cost += vect_get_cost (vector_stmt)
2411 + vect_get_cost (vec_to_scalar);
2412 else
2413 {
2414 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
2415 tree bitsize =
2416 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
2417 int element_bitsize = tree_low_cst (bitsize, 1);
2418 int nelements = vec_size_in_bits / element_bitsize;
2419
2420 optab = optab_for_tree_code (code, vectype, optab_default);
2421
2422 /* We have a whole vector shift available. */
2423 if (VECTOR_MODE_P (mode)
2424 && optab_handler (optab, mode)->insn_code != CODE_FOR_nothing
2425 && optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing)
2426 /* Final reduction via vector shifts and the reduction operator. Also
2427 requires scalar extract. */
2428 outer_cost += ((exact_log2 (nelements) * 2)
2429 * vect_get_cost (vector_stmt)
2430 + vect_get_cost (vec_to_scalar));
2431 else
2432 /* Use extracts and reduction op for final reduction. For N elements,
2433 we have N extracts and N-1 reduction ops. */
2434 outer_cost += ((nelements + nelements - 1)
2435 * vect_get_cost (vector_stmt));
2436 }
2437 }
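/* E.g. (illustrative): for a 4-element vector with no direct reduction
   optab, the shift-based scheme above costs exact_log2 (4) * 2 == 4
   vector stmts plus one vec_to_scalar, while the extract-based fallback
   costs 4 + 3 == 7 vector stmts.  */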
2438
2439 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost;
2440
2441 if (vect_print_dump_info (REPORT_COST))
2442 fprintf (vect_dump, "vect_model_reduction_cost: inside_cost = %d, "
2443 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
2444 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
2445
2446 return true;
2447 }
2448
2449
2450 /* Function vect_model_induction_cost.
2451
2452 Models cost for induction operations. */
2453
2454 static void
2455 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
2456 {
2457 /* loop cost for vec_loop. */
2458 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info)
2459 = ncopies * vect_get_cost (vector_stmt);
2460 /* prologue cost for vec_init and vec_step. */
2461 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info)
2462 = 2 * vect_get_cost (scalar_to_vec);
2463
2464 if (vect_print_dump_info (REPORT_COST))
2465 fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, "
2466 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
2467 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
2468 }
2469
2470
2471 /* Function get_initial_def_for_induction
2472
2473 Input:
2474 STMT - a stmt that performs an induction operation in the loop.
2475 IV_PHI - the initial value of the induction variable
2476
2477 Output:
2478 Return a vector variable, initialized with the first VF values of
2479 the induction variable. E.g., for an iv with IV_PHI='X' and
2480 evolution S, for a vector of 4 units, we want to return:
2481 [X, X + S, X + 2*S, X + 3*S]. */
2482
2483 static tree
2484 get_initial_def_for_induction (gimple iv_phi)
2485 {
2486 stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
2487 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2488 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2489 tree scalar_type = TREE_TYPE (gimple_phi_result (iv_phi));
2490 tree vectype;
2491 int nunits;
2492 edge pe = loop_preheader_edge (loop);
2493 struct loop *iv_loop;
2494 basic_block new_bb;
2495 tree vec, vec_init, vec_step, t;
2496 tree access_fn;
2497 tree new_var;
2498 tree new_name;
2499 gimple init_stmt, induction_phi, new_stmt;
2500 tree induc_def, vec_def, vec_dest;
2501 tree init_expr, step_expr;
2502 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2503 int i;
2504 bool ok;
2505 int ncopies;
2506 tree expr;
2507 stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
2508 bool nested_in_vect_loop = false;
2509 gimple_seq stmts = NULL;
2510 imm_use_iterator imm_iter;
2511 use_operand_p use_p;
2512 gimple exit_phi;
2513 edge latch_e;
2514 tree loop_arg;
2515 gimple_stmt_iterator si;
2516 basic_block bb = gimple_bb (iv_phi);
2517 tree stepvectype;
2518
2519 vectype = get_vectype_for_scalar_type (scalar_type);
2520 gcc_assert (vectype);
2521 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2522 ncopies = vf / nunits;
2523
2524 gcc_assert (phi_info);
2525 gcc_assert (ncopies >= 1);
2526
2527 /* Find the first insertion point in the BB. */
2528 si = gsi_after_labels (bb);
2529
2530 if (INTEGRAL_TYPE_P (scalar_type))
2531 step_expr = build_int_cst (scalar_type, 0);
2532 else if (POINTER_TYPE_P (scalar_type))
2533 step_expr = build_int_cst (sizetype, 0);
2534 else
2535 step_expr = build_real (scalar_type, dconst0);
2536
2537 /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop? */
2538 if (nested_in_vect_loop_p (loop, iv_phi))
2539 {
2540 nested_in_vect_loop = true;
2541 iv_loop = loop->inner;
2542 }
2543 else
2544 iv_loop = loop;
2545 gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);
2546
2547 latch_e = loop_latch_edge (iv_loop);
2548 loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);
2549
2550 access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi));
2551 gcc_assert (access_fn);
2552 ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn,
2553 &init_expr, &step_expr);
2554 gcc_assert (ok);
2555 pe = loop_preheader_edge (iv_loop);
2556
2557 /* Create the vector that holds the initial_value of the induction. */
2558 if (nested_in_vect_loop)
2559 {
2560 /* iv_loop is nested in the loop to be vectorized.  init_expr has already
2561 been created during vectorization of previous stmts; we obtain it from
2562 the STMT_VINFO_VEC_STMT of the defining stmt. */
2563 tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi,
2564 loop_preheader_edge (iv_loop));
2565 vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL);
2566 }
2567 else
2568 {
2569 /* iv_loop is the loop to be vectorized. Create:
2570 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
2571 new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_");
2572 add_referenced_var (new_var);
2573
2574 new_name = force_gimple_operand (init_expr, &stmts, false, new_var);
2575 if (stmts)
2576 {
2577 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2578 gcc_assert (!new_bb);
2579 }
2580
2581 t = NULL_TREE;
2582 t = tree_cons (NULL_TREE, init_expr, t);
2583 for (i = 1; i < nunits; i++)
2584 {
2585 /* Create: new_name_i = new_name + step_expr */
2586 enum tree_code code = POINTER_TYPE_P (scalar_type)
2587 ? POINTER_PLUS_EXPR : PLUS_EXPR;
2588 init_stmt = gimple_build_assign_with_ops (code, new_var,
2589 new_name, step_expr);
2590 new_name = make_ssa_name (new_var, init_stmt);
2591 gimple_assign_set_lhs (init_stmt, new_name);
2592
2593 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
2594 gcc_assert (!new_bb);
2595
2596 if (vect_print_dump_info (REPORT_DETAILS))
2597 {
2598 fprintf (vect_dump, "created new init_stmt: ");
2599 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
2600 }
2601 t = tree_cons (NULL_TREE, new_name, t);
2602 }
2603 /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */
2604 vec = build_constructor_from_list (vectype, nreverse (t));
2605 vec_init = vect_init_vector (iv_phi, vec, vectype, NULL);
2606 }
2607
2608
2609 /* Create the vector that holds the step of the induction. */
2610 if (nested_in_vect_loop)
2611 /* iv_loop is nested in the loop to be vectorized. Generate:
2612 vec_step = [S, S, S, S] */
2613 new_name = step_expr;
2614 else
2615 {
2616 /* iv_loop is the loop to be vectorized. Generate:
2617 vec_step = [VF*S, VF*S, VF*S, VF*S] */
2618 expr = build_int_cst (TREE_TYPE (step_expr), vf);
2619 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
2620 expr, step_expr);
2621 }
2622
2623 t = NULL_TREE;
2624 for (i = 0; i < nunits; i++)
2625 t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
2626 gcc_assert (CONSTANT_CLASS_P (new_name));
2627 stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
2628 gcc_assert (stepvectype);
2629 vec = build_vector (stepvectype, t);
2630 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
2631
2632
2633 /* Create the following def-use cycle:
2634 loop prolog:
2635 vec_init = ...
2636 vec_step = ...
2637 loop:
2638 vec_iv = PHI <vec_init, vec_loop>
2639 ...
2640 STMT
2641 ...
2642 vec_loop = vec_iv + vec_step; */
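/* E.g. (illustrative): for an IV with initial value 0 and step 1,
   vectorized with VF == nunits == 4, the prolog computes
     vec_init = {0, 1, 2, 3}   and   vec_step = {4, 4, 4, 4}
   so each vector-loop iteration advances the whole vector of IV values
   by VF scalar iterations.  */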
2643
2644 /* Create the induction-phi that defines the induction-operand. */
2645 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
2646 add_referenced_var (vec_dest);
2647 induction_phi = create_phi_node (vec_dest, iv_loop->header);
2648 set_vinfo_for_stmt (induction_phi,
2649 new_stmt_vec_info (induction_phi, loop_vinfo, NULL));
2650 induc_def = PHI_RESULT (induction_phi);
2651
2652 /* Create the iv update inside the loop */
2653 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
2654 induc_def, vec_step);
2655 vec_def = make_ssa_name (vec_dest, new_stmt);
2656 gimple_assign_set_lhs (new_stmt, vec_def);
2657 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
2658 set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
2659 NULL));
2660
2661 /* Set the arguments of the phi node: */
2662 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
2663 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
2664 UNKNOWN_LOCATION);
2665
2666
2667 /* In case the vectorization factor (VF) is bigger than the number
2668 of elements that we can fit in a vectype (nunits), we have to generate
2669 more than one vector stmt - i.e., we need to "unroll" the
2670 vector stmt by a factor VF/nunits. For more details see documentation
2671 in vectorizable_operation. */
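/* E.g. (illustrative): with VF == 8 and nunits == 4 we need ncopies == 2
   vector IVs; the first holds {X, X+S, X+2*S, X+3*S} and the second is
   obtained by adding {4*S, 4*S, 4*S, 4*S} (i.e. nunits * S) to it.  */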
2672
2673 if (ncopies > 1)
2674 {
2675 stmt_vec_info prev_stmt_vinfo;
2676 /* FORNOW. This restriction should be relaxed. */
2677 gcc_assert (!nested_in_vect_loop);
2678
2679 /* Create the vector that holds the step of the induction. */
2680 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
2681 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
2682 expr, step_expr);
2683 t = NULL_TREE;
2684 for (i = 0; i < nunits; i++)
2685 t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
2686 gcc_assert (CONSTANT_CLASS_P (new_name));
2687 vec = build_vector (stepvectype, t);
2688 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
2689
2690 vec_def = induc_def;
2691 prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
2692 for (i = 1; i < ncopies; i++)
2693 {
2694 /* vec_i = vec_prev + vec_step */
2695 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
2696 vec_def, vec_step);
2697 vec_def = make_ssa_name (vec_dest, new_stmt);
2698 gimple_assign_set_lhs (new_stmt, vec_def);
2699
2700 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
2701 set_vinfo_for_stmt (new_stmt,
2702 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
2703 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
2704 prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
2705 }
2706 }
2707
2708 if (nested_in_vect_loop)
2709 {
2710 /* Find the loop-closed exit-phi of the induction, and record
2711 the final vector of induction results: */
2712 exit_phi = NULL;
2713 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
2714 {
2715 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (USE_STMT (use_p))))
2716 {
2717 exit_phi = USE_STMT (use_p);
2718 break;
2719 }
2720 }
2721 if (exit_phi)
2722 {
2723 stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
2724 /* FORNOW. Currently not supporting the case that an inner-loop induction
2725 is not used in the outer-loop (i.e. only outside the outer-loop). */
2726 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
2727 && !STMT_VINFO_LIVE_P (stmt_vinfo));
2728
2729 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
2730 if (vect_print_dump_info (REPORT_DETAILS))
2731 {
2732 fprintf (vect_dump, "vector of inductions after inner-loop:");
2733 print_gimple_stmt (vect_dump, new_stmt, 0, TDF_SLIM);
2734 }
2735 }
2736 }
2737
2738
2739 if (vect_print_dump_info (REPORT_DETAILS))
2740 {
2741 fprintf (vect_dump, "transform induction: created def-use cycle: ");
2742 print_gimple_stmt (vect_dump, induction_phi, 0, TDF_SLIM);
2743 fprintf (vect_dump, "\n");
2744 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (vec_def), 0, TDF_SLIM);
2745 }
2746
2747 STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
2748 return induc_def;
2749 }
2750
2751
2752 /* Function get_initial_def_for_reduction
2753
2754 Input:
2755 STMT - a stmt that performs a reduction operation in the loop.
2756 INIT_VAL - the initial value of the reduction variable
2757
2758 Output:
2759 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
2760 of the reduction (used for adjusting the epilog - see below).
2761 Return a vector variable, initialized according to the operation that STMT
2762 performs. This vector will be used as the initial value of the
2763 vector of partial results.
2764
2765 Option1 (adjust in epilog): Initialize the vector as follows:
2766 add/bit or/xor: [0,0,...,0,0]
2767 mult/bit and: [1,1,...,1,1]
2768 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
2769 and when necessary (e.g. add/mult case) let the caller know
2770 that it needs to adjust the result by init_val.
2771
2772 Option2: Initialize the vector as follows:
2773 add/bit or/xor: [init_val,0,0,...,0]
2774 mult/bit and: [init_val,1,1,...,1]
2775 min/max/cond_expr: [init_val,init_val,...,init_val]
2776 and no adjustments are needed.
2777
2778 For example, for the following code:
2779
2780 s = init_val;
2781 for (i=0;i<n;i++)
2782 s = s + a[i];
2783
2784 STMT is 's = s + a[i]', and the reduction variable is 's'.
2785 For a vector of 4 units, we want to return either [0,0,0,init_val],
2786 or [0,0,0,0] and let the caller know that it needs to adjust
2787 the result at the end by 'init_val'.
2788
2789 FORNOW, we use the 'adjust in epilog' scheme (Option1) when ADJUSTMENT_DEF
2790 is not NULL, because this way the initialization vector is simpler (the
2791 same element in all entries); otherwise we use Option2.
2792
2793 A cost model should help decide between these two schemes. */
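/* Illustrative example (a MULT_EXPR reduction, vector of 4 units):

     s = init_val;
     for (i = 0; i < n; i++)
       s = s * a[i];

   Option1 returns [1,1,1,1] and sets ADJUSTMENT_DEF to init_val, so the
   epilog multiplies the reduced scalar by init_val; Option2 would return
   [init_val,1,1,1] and need no adjustment.  */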
2794
2795 tree
2796 get_initial_def_for_reduction (gimple stmt, tree init_val,
2797 tree *adjustment_def)
2798 {
2799 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2800 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2801 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2802 tree scalar_type = TREE_TYPE (init_val);
2803 tree vectype = get_vectype_for_scalar_type (scalar_type);
2804 int nunits;
2805 enum tree_code code = gimple_assign_rhs_code (stmt);
2806 tree def_for_init;
2807 tree init_def;
2808 tree t = NULL_TREE;
2809 int i;
2810 bool nested_in_vect_loop = false;
2811 tree init_value;
2812 REAL_VALUE_TYPE real_init_val = dconst0;
2813 int int_init_val = 0;
2814 gimple def_stmt = NULL;
2815
2816 gcc_assert (vectype);
2817 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2818
2819 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
2820 || SCALAR_FLOAT_TYPE_P (scalar_type));
2821
2822 if (nested_in_vect_loop_p (loop, stmt))
2823 nested_in_vect_loop = true;
2824 else
2825 gcc_assert (loop == (gimple_bb (stmt))->loop_father);
2826
2827 /* In case of double reduction we only create a vector variable to be put
2828 in the reduction phi node. The actual statement creation is done in
2829 vect_create_epilog_for_reduction. */
2830 if (adjustment_def && nested_in_vect_loop
2831 && TREE_CODE (init_val) == SSA_NAME
2832 && (def_stmt = SSA_NAME_DEF_STMT (init_val))
2833 && gimple_code (def_stmt) == GIMPLE_PHI
2834 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2835 && vinfo_for_stmt (def_stmt)
2836 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2837 == vect_double_reduction_def)
2838 {
2839 *adjustment_def = NULL;
2840 return vect_create_destination_var (init_val, vectype);
2841 }
2842
2843 if (TREE_CONSTANT (init_val))
2844 {
2845 if (SCALAR_FLOAT_TYPE_P (scalar_type))
2846 init_value = build_real (scalar_type, TREE_REAL_CST (init_val));
2847 else
2848 init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val));
2849 }
2850 else
2851 init_value = init_val;
2852
2853 switch (code)
2854 {
2855 case WIDEN_SUM_EXPR:
2856 case DOT_PROD_EXPR:
2857 case PLUS_EXPR:
2858 case MINUS_EXPR:
2859 case BIT_IOR_EXPR:
2860 case BIT_XOR_EXPR:
2861 case MULT_EXPR:
2862 case BIT_AND_EXPR:
2863 /* ADJUSTMENT_DEF is NULL when called from
2864 vect_create_epilog_for_reduction to vectorize double reduction. */
2865 if (adjustment_def)
2866 {
2867 if (nested_in_vect_loop)
2868 *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt,
2869 NULL);
2870 else
2871 *adjustment_def = init_val;
2872 }
2873
2874 if (code == MULT_EXPR)
2875 {
2876 real_init_val = dconst1;
2877 int_init_val = 1;
2878 }
2879
2880 if (code == BIT_AND_EXPR)
2881 int_init_val = -1;
2882
2883 if (SCALAR_FLOAT_TYPE_P (scalar_type))
2884 def_for_init = build_real (scalar_type, real_init_val);
2885 else
2886 def_for_init = build_int_cst (scalar_type, int_init_val);
2887
2888 /* Create a vector of '0' or '1' for all but the first element. */
2889 for (i = nunits - 2; i >= 0; --i)
2890 t = tree_cons (NULL_TREE, def_for_init, t);
2891
2892 /* Option1: the first element is '0' or '1' as well. */
2893 if (adjustment_def)
2894 {
2895 t = tree_cons (NULL_TREE, def_for_init, t);
2896 init_def = build_vector (vectype, t);
2897 break;
2898 }
2899
2900 /* Option2: the first element is INIT_VAL. */
2901 t = tree_cons (NULL_TREE, init_value, t);
2902 if (TREE_CONSTANT (init_val))
2903 init_def = build_vector (vectype, t);
2904 else
2905 init_def = build_constructor_from_list (vectype, t);
2906
2907 break;
2908
2909 case MIN_EXPR:
2910 case MAX_EXPR:
2911 case COND_EXPR:
2912 if (adjustment_def)
2913 {
2914 *adjustment_def = NULL_TREE;
2915 init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
2916 break;
2917 }
2918
2919 for (i = nunits - 1; i >= 0; --i)
2920 t = tree_cons (NULL_TREE, init_value, t);
2921
2922 if (TREE_CONSTANT (init_val))
2923 init_def = build_vector (vectype, t);
2924 else
2925 init_def = build_constructor_from_list (vectype, t);
2926
2927 break;
2928
2929 default:
2930 gcc_unreachable ();
2931 }
2932
2933 return init_def;
2934 }
2935
2936
2937 /* Function vect_create_epilog_for_reduction
2938
2939 Create code at the loop-epilog to finalize the result of a reduction
2940 computation.
2941
2942 VECT_DEFS is a list of vectors of partial results, i.e., the lhs's of the
2943 vector reduction statements.
2944 STMT is the scalar reduction stmt that is being vectorized.
2945 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
2946 number of elements that we can fit in a vectype (nunits). In this case
2947 we have to generate more than one vector stmt - i.e - we need to "unroll"
2948 the vector stmt by a factor VF/nunits. For more details see documentation
2949 in vectorizable_operation.
2950 REDUC_CODE is the tree-code for the epilog reduction.
2951 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
2952 computation.
2953 REDUC_INDEX is the index of the operand in the right hand side of the
2954 statement that is defined by REDUCTION_PHI.
2955 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
2956 SLP_NODE is an SLP node containing a group of reduction statements. The
2957 first one in this group is STMT.
2958
2959 This function:
2960 1. Creates the reduction def-use cycles: sets the arguments for
2961 REDUCTION_PHIS:
2962 The loop-entry argument is the vectorized initial-value of the reduction.
2963 The loop-latch argument is taken from VECT_DEFS - the vector of partial
2964 sums.
2965 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
2966 by applying the operation specified by REDUC_CODE if available, or by
2967 other means (whole-vector shifts or a scalar loop).
2968 The function also creates a new phi node at the loop exit to preserve
2969 loop-closed form, as illustrated below.
2970
2971 The flow at the entry to this function:
2972
2973 loop:
2974 vec_def = phi <null, null> # REDUCTION_PHI
2975 VECT_DEF = vector_stmt # vectorized form of STMT
2976 s_loop = scalar_stmt # (scalar) STMT
2977 loop_exit:
2978 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
2979 use <s_out0>
2980 use <s_out0>
2981
2982 The above is transformed by this function into:
2983
2984 loop:
2985 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
2986 VECT_DEF = vector_stmt # vectorized form of STMT
2987 s_loop = scalar_stmt # (scalar) STMT
2988 loop_exit:
2989 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
2990 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
2991 v_out2 = reduce <v_out1>
2992 s_out3 = extract_field <v_out2, 0>
2993 s_out4 = adjust_result <s_out3>
2994 use <s_out4>
2995 use <s_out4>
2996 */
2997
2998 static void
2999 vect_create_epilog_for_reduction (VEC (tree, heap) *vect_defs, gimple stmt,
3000 int ncopies, enum tree_code reduc_code,
3001 VEC (gimple, heap) *reduction_phis,
3002 int reduc_index, bool double_reduc,
3003 slp_tree slp_node)
3004 {
3005 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3006 stmt_vec_info prev_phi_info;
3007 tree vectype;
3008 enum machine_mode mode;
3009 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3010 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
3011 basic_block exit_bb;
3012 tree scalar_dest;
3013 tree scalar_type;
3014 gimple new_phi = NULL, phi;
3015 gimple_stmt_iterator exit_gsi;
3016 tree vec_dest;
3017 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
3018 gimple epilog_stmt = NULL;
3019 enum tree_code code = gimple_assign_rhs_code (stmt);
3020 gimple exit_phi;
3021 tree bitsize, bitpos;
3022 tree adjustment_def = NULL;
3023 tree vec_initial_def = NULL;
3024 tree reduction_op, expr, def;
3025 tree orig_name, scalar_result;
3026 imm_use_iterator imm_iter;
3027 use_operand_p use_p;
3028 bool extract_scalar_result = false;
3029 gimple use_stmt, orig_stmt, reduction_phi = NULL;
3030 bool nested_in_vect_loop = false;
3031 VEC (gimple, heap) *new_phis = NULL;
3032 enum vect_def_type dt = vect_unknown_def_type;
3033 int j, i;
3034 VEC (tree, heap) *scalar_results = NULL;
3035 unsigned int group_size = 1, k, ratio;
3036 VEC (tree, heap) *vec_initial_defs = NULL;
3037 VEC (gimple, heap) *phis;
3038
3039 if (slp_node)
3040 group_size = VEC_length (gimple, SLP_TREE_SCALAR_STMTS (slp_node));
3041
3042 if (nested_in_vect_loop_p (loop, stmt))
3043 {
3044 outer_loop = loop;
3045 loop = loop->inner;
3046 nested_in_vect_loop = true;
3047 gcc_assert (!slp_node);
3048 }
3049
3050 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3051 {
3052 case GIMPLE_SINGLE_RHS:
3053 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
3054 == ternary_op);
3055 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
3056 break;
3057 case GIMPLE_UNARY_RHS:
3058 reduction_op = gimple_assign_rhs1 (stmt);
3059 break;
3060 case GIMPLE_BINARY_RHS:
3061 reduction_op = reduc_index ?
3062 gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
3063 break;
3064 default:
3065 gcc_unreachable ();
3066 }
3067
3068 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
3069 gcc_assert (vectype);
3070 mode = TYPE_MODE (vectype);
3071
3072 /* 1. Create the reduction def-use cycle:
3073 Set the arguments of REDUCTION_PHIS, i.e., transform
3074
3075 loop:
3076 vec_def = phi <null, null> # REDUCTION_PHI
3077 VECT_DEF = vector_stmt # vectorized form of STMT
3078 ...
3079
3080 into:
3081
3082 loop:
3083 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3084 VECT_DEF = vector_stmt # vectorized form of STMT
3085 ...
3086
3087 (in case of SLP, do it for all the phis). */
3088
3089 /* Get the loop-entry arguments. */
3090 if (slp_node)
3091 vect_get_slp_defs (slp_node, &vec_initial_defs, NULL, reduc_index);
3092 else
3093 {
3094 vec_initial_defs = VEC_alloc (tree, heap, 1);
3095 /* For the case of reduction, vect_get_vec_def_for_operand returns
3096 the scalar def before the loop, that defines the initial value
3097 of the reduction variable. */
3098 vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
3099 &adjustment_def);
3100 VEC_quick_push (tree, vec_initial_defs, vec_initial_def);
3101 }
3102
3103 /* Set phi nodes arguments. */
3104 for (i = 0; VEC_iterate (gimple, reduction_phis, i, phi); i++)
3105 {
3106 tree vec_init_def = VEC_index (tree, vec_initial_defs, i);
3107 tree def = VEC_index (tree, vect_defs, i);
3108 for (j = 0; j < ncopies; j++)
3109 {
3110 /* Set the loop-entry arg of the reduction-phi. */
3111 add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
3112 UNKNOWN_LOCATION);
3113
3114 /* Set the loop-latch arg for the reduction-phi. */
3115 if (j > 0)
3116 def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);
3117
3118 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
3119
3120 if (vect_print_dump_info (REPORT_DETAILS))
3121 {
3122 fprintf (vect_dump, "transform reduction: created def-use"
3123 " cycle: ");
3124 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
3125 fprintf (vect_dump, "\n");
3126 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (def), 0,
3127 TDF_SLIM);
3128 }
3129
3130 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
3131 }
3132 }
3133
3134 VEC_free (tree, heap, vec_initial_defs);
3135
3136 /* 2. Create epilog code.
3137 The reduction epilog code operates across the elements of the vector
3138 of partial results computed by the vectorized loop.
3139 The reduction epilog code consists of:
3140
3141 step 1: compute the scalar result in a vector (v_out2)
3142 step 2: extract the scalar result (s_out3) from the vector (v_out2)
3143 step 3: adjust the scalar result (s_out3) if needed.
3144
3145 Step 1 can be accomplished using one of the following three schemes:
3146 (scheme 1) using reduc_code, if available.
3147 (scheme 2) using whole-vector shifts, if available.
3148 (scheme 3) using a scalar loop. In this case steps 1+2 above are
3149 combined.
3150
3151 The overall epilog code looks like this:
3152
3153 s_out0 = phi <s_loop> # original EXIT_PHI
3154 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3155 v_out2 = reduce <v_out1> # step 1
3156 s_out3 = extract_field <v_out2, 0> # step 2
3157 s_out4 = adjust_result <s_out3> # step 3
3158
3159 (step 3 is optional, and steps 1 and 2 may be combined).
3160 Lastly, the uses of s_out0 are replaced by s_out4. */
3161
3162
3163 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
3164 v_out1 = phi <VECT_DEF>
3165 Store them in NEW_PHIS. */
3166
3167 exit_bb = single_exit (loop)->dest;
3168 prev_phi_info = NULL;
3169 new_phis = VEC_alloc (gimple, heap, VEC_length (tree, vect_defs));
3170 for (i = 0; VEC_iterate (tree, vect_defs, i, def); i++)
3171 {
3172 for (j = 0; j < ncopies; j++)
3173 {
3174 phi = create_phi_node (SSA_NAME_VAR (def), exit_bb);
3175 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL));
3176 if (j == 0)
3177 VEC_quick_push (gimple, new_phis, phi);
3178 else
3179 {
3180 def = vect_get_vec_def_for_stmt_copy (dt, def);
3181 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
3182 }
3183
3184 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
3185 prev_phi_info = vinfo_for_stmt (phi);
3186 }
3187 }
3188
3189 exit_gsi = gsi_after_labels (exit_bb);
3190
3191 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
3192 (i.e. when reduc_code is not available) and in the final adjustment
3193 code (if needed). Also get the original scalar reduction variable as
3194 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
3195 represents a reduction pattern), the tree-code and scalar-def are
3196 taken from the original stmt that the pattern-stmt (STMT) replaces.
3197 Otherwise (it is a regular reduction) - the tree-code and scalar-def
3198 are taken from STMT. */
3199
3200 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3201 if (!orig_stmt)
3202 {
3203 /* Regular reduction */
3204 orig_stmt = stmt;
3205 }
3206 else
3207 {
3208 /* Reduction pattern */
3209 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
3210 gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
3211 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
3212 }
3213
3214 code = gimple_assign_rhs_code (orig_stmt);
3215 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
3216 partial results are added and not subtracted. */
3217 if (code == MINUS_EXPR)
3218 code = PLUS_EXPR;
3219
3220 scalar_dest = gimple_assign_lhs (orig_stmt);
3221 scalar_type = TREE_TYPE (scalar_dest);
3222 scalar_results = VEC_alloc (tree, heap, group_size);
3223 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
3224 bitsize = TYPE_SIZE (scalar_type);
3225
3226 /* In case this is a reduction in an inner-loop while vectorizing an outer
3227 loop - we don't need to extract a single scalar result at the end of the
3228 inner-loop (unless it is a double reduction, i.e., the use of the reduction is
3229 outside the outer-loop). The final vector of partial results will be used
3230 in the vectorized outer-loop, or reduced to a scalar result at the end of
3231 the outer-loop. */
3232 if (nested_in_vect_loop && !double_reduc)
3233 goto vect_finalize_reduction;
3234
3235 /* 2.3 Create the reduction code, using one of the three schemes described
3236 above. In SLP we simply need to extract all the elements from the
3237 vector (without reducing them), so we use scalar shifts. */
3238 if (reduc_code != ERROR_MARK && !slp_node)
3239 {
3240 tree tmp;
3241
3242 /*** Case 1: Create:
3243 v_out2 = reduc_expr <v_out1> */
3244
3245 if (vect_print_dump_info (REPORT_DETAILS))
3246 fprintf (vect_dump, "Reduce using direct vector reduction.");
3247
3248 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3249 new_phi = VEC_index (gimple, new_phis, 0);
3250 tmp = build1 (reduc_code, vectype, PHI_RESULT (new_phi));
3251 epilog_stmt = gimple_build_assign (vec_dest, tmp);
3252 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3253 gimple_assign_set_lhs (epilog_stmt, new_temp);
3254 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3255
3256 extract_scalar_result = true;
3257 }
3258 else
3259 {
3260 enum tree_code shift_code = ERROR_MARK;
3261 bool have_whole_vector_shift = true;
3262 int bit_offset;
3263 int element_bitsize = tree_low_cst (bitsize, 1);
3264 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3265 tree vec_temp;
3266
3267 if (optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing)
3268 shift_code = VEC_RSHIFT_EXPR;
3269 else
3270 have_whole_vector_shift = false;
3271
3272 /* Regardless of whether we have a whole vector shift, if we're
3273 emulating the operation via tree-vect-generic, we don't want
3274 to use it. Only the first round of the reduction is likely
3275 to still be profitable via emulation. */
3276 /* ??? It might be better to emit a reduction tree code here, so that
3277 tree-vect-generic can expand the first round via bit tricks. */
3278 if (!VECTOR_MODE_P (mode))
3279 have_whole_vector_shift = false;
3280 else
3281 {
3282 optab optab = optab_for_tree_code (code, vectype, optab_default);
3283 if (optab_handler (optab, mode)->insn_code == CODE_FOR_nothing)
3284 have_whole_vector_shift = false;
3285 }
3286
3287 if (have_whole_vector_shift && !slp_node)
3288 {
3289 /*** Case 2: Create:
3290 for (offset = VS/2; offset >= element_size; offset/=2)
3291 {
3292 Create: va' = vec_shift <va, offset>
3293 Create: va = vop <va, va'>
3294 } */
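/* An illustrative run for a 4-element plus-reduction, ignoring
   endianness and the don't-care elements shifted in:

     v  = {a0, a1, a2, a3}
     v' = vec_shift <v, 2 elements>  = {a2, a3, _, _}
     v  = v + v'                     = {a0+a2, a1+a3, _, _}
     v' = vec_shift <v, 1 element>   = {a1+a3, _, _, _}
     v  = v + v'                     = {a0+a1+a2+a3, _, _, _}

   after which the scalar result is extracted from a single element.  */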
3295
3296 if (vect_print_dump_info (REPORT_DETAILS))
3297 fprintf (vect_dump, "Reduce using vector shifts");
3298
3299 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3300 new_phi = VEC_index (gimple, new_phis, 0);
3301 new_temp = PHI_RESULT (new_phi);
3302 for (bit_offset = vec_size_in_bits/2;
3303 bit_offset >= element_bitsize;
3304 bit_offset /= 2)
3305 {
3306 tree bitpos = size_int (bit_offset);
3307
3308 epilog_stmt = gimple_build_assign_with_ops (shift_code,
3309 vec_dest, new_temp, bitpos);
3310 new_name = make_ssa_name (vec_dest, epilog_stmt);
3311 gimple_assign_set_lhs (epilog_stmt, new_name);
3312 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3313
3314 epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
3315 new_name, new_temp);
3316 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3317 gimple_assign_set_lhs (epilog_stmt, new_temp);
3318 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3319 }
3320
3321 extract_scalar_result = true;
3322 }
3323 else
3324 {
3325 tree rhs;
3326
3327 /*** Case 3: Create:
3328 s = extract_field <v_out2, 0>
3329 for (offset = element_size;
3330 offset < vector_size;
3331 offset += element_size;)
3332 {
3333 Create: s' = extract_field <v_out2, offset>
3334 Create: s = op <s, s'> // For non SLP cases
3335 } */
3336
3337 if (vect_print_dump_info (REPORT_DETAILS))
3338 fprintf (vect_dump, "Reduce using scalar code. ");
3339
3340 vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3341 for (i = 0; VEC_iterate (gimple, new_phis, i, new_phi); i++)
3342 {
3343 vec_temp = PHI_RESULT (new_phi);
3344 rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
3345 bitsize_zero_node);
3346 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3347 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3348 gimple_assign_set_lhs (epilog_stmt, new_temp);
3349 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3350
3351 /* In SLP we don't need to apply the reduction operation, so we just
3352 collect s' values in SCALAR_RESULTS. */
3353 if (slp_node)
3354 VEC_safe_push (tree, heap, scalar_results, new_temp);
3355
3356 for (bit_offset = element_bitsize;
3357 bit_offset < vec_size_in_bits;
3358 bit_offset += element_bitsize)
3359 {
3360 tree bitpos = bitsize_int (bit_offset);
3361 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
3362 bitsize, bitpos);
3363
3364 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3365 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
3366 gimple_assign_set_lhs (epilog_stmt, new_name);
3367 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3368
3369 if (slp_node)
3370 {
3371 /* In SLP we don't need to apply the reduction operation, so
3372 we just collect s' values in SCALAR_RESULTS. */
3373 new_temp = new_name;
3374 VEC_safe_push (tree, heap, scalar_results, new_name);
3375 }
3376 else
3377 {
3378 epilog_stmt = gimple_build_assign_with_ops (code,
3379 new_scalar_dest, new_name, new_temp);
3380 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3381 gimple_assign_set_lhs (epilog_stmt, new_temp);
3382 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3383 }
3384 }
3385 }
3386
3387 /* The only case where we need to reduce scalar results in SLP is
3388 unrolling.  If the size of SCALAR_RESULTS is greater than
3389 GROUP_SIZE, we reduce them by combining elements modulo
3390 GROUP_SIZE. */
3391 if (slp_node)
3392 {
3393 tree res, first_res, new_res;
3394 gimple new_stmt;
3395
3396 /* Reduce multiple scalar results in case of SLP unrolling. */
3397 for (j = group_size; VEC_iterate (tree, scalar_results, j, res);
3398 j++)
3399 {
3400 first_res = VEC_index (tree, scalar_results, j % group_size);
3401 new_stmt = gimple_build_assign_with_ops (code,
3402 new_scalar_dest, first_res, res);
3403 new_res = make_ssa_name (new_scalar_dest, new_stmt);
3404 gimple_assign_set_lhs (new_stmt, new_res);
3405 gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
3406 VEC_replace (tree, scalar_results, j % group_size, new_res);
3407 }
3408 }
3409 else
3410 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
3411 VEC_safe_push (tree, heap, scalar_results, new_temp);
3412
3413 extract_scalar_result = false;
3414 }
3415 }
3416
3417 /* 2.4 Extract the final scalar result. Create:
3418 s_out3 = extract_field <v_out2, bitpos> */
3419
3420 if (extract_scalar_result)
3421 {
3422 tree rhs;
3423
3424 if (vect_print_dump_info (REPORT_DETAILS))
3425 fprintf (vect_dump, "extract scalar result");
3426
3427 if (BYTES_BIG_ENDIAN)
3428 bitpos = size_binop (MULT_EXPR,
3429 bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
3430 TYPE_SIZE (scalar_type));
3431 else
3432 bitpos = bitsize_zero_node;
3433
3434 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
3435 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3436 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3437 gimple_assign_set_lhs (epilog_stmt, new_temp);
3438 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3439 VEC_safe_push (tree, heap, scalar_results, new_temp);
3440 }
3441
3442 vect_finalize_reduction:
3443
3444 /* 2.5 Adjust the final result by the initial value of the reduction
3445 variable. (When such adjustment is not needed, then
3446 'adjustment_def' is zero). For example, if code is PLUS we create:
3447 new_temp = loop_exit_def + adjustment_def */
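/* Illustrative sketch (hypothetical scalar source):

     s = 5;
     for (i = 0; i < N; i++)
       s += a[i];

   Here the vector accumulator is initialized with zeros and
   ADJUSTMENT_DEF holds the original initial value 5, so after reducing
   the partial sums we create:
     s = loop_exit_def + 5  */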
3448
3449 if (adjustment_def)
3450 {
3451 gcc_assert (!slp_node);
3452 if (nested_in_vect_loop)
3453 {
3454 new_phi = VEC_index (gimple, new_phis, 0);
3455 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
3456 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
3457 new_dest = vect_create_destination_var (scalar_dest, vectype);
3458 }
3459 else
3460 {
3461 new_temp = VEC_index (tree, scalar_results, 0);
3462 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
3463 expr = build2 (code, scalar_type, new_temp, adjustment_def);
3464 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
3465 }
3466
3467 epilog_stmt = gimple_build_assign (new_dest, expr);
3468 new_temp = make_ssa_name (new_dest, epilog_stmt);
3469 gimple_assign_set_lhs (epilog_stmt, new_temp);
3470 SSA_NAME_DEF_STMT (new_temp) = epilog_stmt;
3471 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3472 if (nested_in_vect_loop)
3473 {
3474 set_vinfo_for_stmt (epilog_stmt,
3475 new_stmt_vec_info (epilog_stmt, loop_vinfo,
3476 NULL));
3477 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
3478 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
3479
3480 if (!double_reduc)
3481 VEC_quick_push (tree, scalar_results, new_temp);
3482 else
3483 VEC_replace (tree, scalar_results, 0, new_temp);
3484 }
3485 else
3486 VEC_replace (tree, scalar_results, 0, new_temp);
3487
3488 VEC_replace (gimple, new_phis, 0, epilog_stmt);
3489 }
3490
3491 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
3492 phis with new adjusted scalar results, i.e., replace use <s_out0>
3493 with use <s_out4>.
3494
3495 Transform:
3496 loop_exit:
3497 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3498 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3499 v_out2 = reduce <v_out1>
3500 s_out3 = extract_field <v_out2, 0>
3501 s_out4 = adjust_result <s_out3>
3502 use <s_out0>
3503 use <s_out0>
3504
3505 into:
3506
3507 loop_exit:
3508 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3509 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3510 v_out2 = reduce <v_out1>
3511 s_out3 = extract_field <v_out2, 0>
3512 s_out4 = adjust_result <s_out3>
3513 use <s_out4>
3514 use <s_out4> */
3515
3516 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
3517 case GROUP_SIZE is greater than the vectorization factor). Therefore, we
3518 need to match SCALAR_RESULTS with corresponding statements. The first
3519 (GROUP_SIZE / number of new vector stmts) scalar results correspond to
3520 the first vector stmt, etc.
3521 (RATIO is equal to (GROUP_SIZE / number of new vector stmts)). */
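/* For instance (a sketch): with GROUP_SIZE == 4 and two new vector
   stmts, RATIO == 2, so scalar_results[0..1] correspond to the first
   vector stmt and scalar_results[2..3] to the second one.  */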
3522 if (group_size > VEC_length (gimple, new_phis))
3523 {
3524 ratio = group_size / VEC_length (gimple, new_phis);
3525 gcc_assert (!(group_size % VEC_length (gimple, new_phis)));
3526 }
3527 else
3528 ratio = 1;
3529
3530 for (k = 0; k < group_size; k++)
3531 {
3532 if (k % ratio == 0)
3533 {
3534 epilog_stmt = VEC_index (gimple, new_phis, k / ratio);
3535 reduction_phi = VEC_index (gimple, reduction_phis, k / ratio);
3536 }
3537
3538 if (slp_node)
3539 {
3540 gimple current_stmt = VEC_index (gimple,
3541 SLP_TREE_SCALAR_STMTS (slp_node), k);
3542
3543 orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
3544 /* SLP statements can't participate in patterns. */
3545 gcc_assert (!orig_stmt);
3546 scalar_dest = gimple_assign_lhs (current_stmt);
3547 }
3548
3549 phis = VEC_alloc (gimple, heap, 3);
3550 /* Find the loop-closed-use at the loop exit of the original scalar
3551 result. (The reduction result is expected to have two immediate uses -
3552 one at the latch block, and one at the loop exit). */
3553 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
3554 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
3555 VEC_safe_push (gimple, heap, phis, USE_STMT (use_p));
3556
3557 /* We expect to have found an exit_phi because of loop-closed-ssa
3558 form. */
3559 gcc_assert (!VEC_empty (gimple, phis));
3560
3561 for (i = 0; VEC_iterate (gimple, phis, i, exit_phi); i++)
3562 {
3563 if (outer_loop)
3564 {
3565 stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
3566 gimple vect_phi;
3567
3568 /* FORNOW. Currently not supporting the case in which an inner-loop
3569 reduction is not used in the outer-loop (but only outside the
3570 outer-loop), unless it is a double reduction. */
3571 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
3572 && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
3573 || double_reduc);
3574
3575 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
3576 if (!double_reduc
3577 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
3578 != vect_double_reduction_def)
3579 continue;
3580
3581 /* Handle double reduction:
3582
3583 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
3584 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
3585 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
3586 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
3587
3588 At that point the regular reduction (stmt2 and stmt3) is
3589 already vectorized, as well as the exit phi node, stmt4.
3590 Here we vectorize the phi node of double reduction, stmt1, and
3591 update all relevant statements. */
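/* An illustrative source form of such a double reduction (sketch):

     s = 0;
     for (i = 0; i < N; i++)         outer loop (stmt1, stmt4)
       for (j = 0; j < M; j++)       inner loop (stmt2, stmt3)
         s += a[i][j];  */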
3592
3593 /* Go through all the uses of s2 to find double reduction phi
3594 node, i.e., stmt1 above. */
3595 orig_name = PHI_RESULT (exit_phi);
3596 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
3597 {
3598 stmt_vec_info use_stmt_vinfo = vinfo_for_stmt (use_stmt);
3599 stmt_vec_info new_phi_vinfo;
3600 tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
3601 basic_block bb = gimple_bb (use_stmt);
3602 gimple use;
3603
3604 /* Check that USE_STMT is really a double reduction phi
3605 node. */
3606 if (gimple_code (use_stmt) != GIMPLE_PHI
3607 || gimple_phi_num_args (use_stmt) != 2
3608 || !use_stmt_vinfo
3609 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
3610 != vect_double_reduction_def
3611 || bb->loop_father != outer_loop)
3612 continue;
3613
3614 /* Create vector phi node for double reduction:
3615 vs1 = phi <vs0, vs2>
3616 vs1 was created previously in this function by a call to
3617 vect_get_vec_def_for_operand and is stored in
3618 vec_initial_def;
3619 vs2 is defined by EPILOG_STMT, the vectorized EXIT_PHI;
3620 vs0 is created here. */
3621
3622 /* Create vector phi node. */
3623 vect_phi = create_phi_node (vec_initial_def, bb);
3624 new_phi_vinfo = new_stmt_vec_info (vect_phi,
3625 loop_vec_info_for_loop (outer_loop), NULL);
3626 set_vinfo_for_stmt (vect_phi, new_phi_vinfo);
3627
3628 /* Create vs0 - initial def of the double reduction phi. */
3629 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
3630 loop_preheader_edge (outer_loop));
3631 init_def = get_initial_def_for_reduction (stmt,
3632 preheader_arg, NULL);
3633 vect_phi_init = vect_init_vector (use_stmt, init_def,
3634 vectype, NULL);
3635
3636 /* Update phi node arguments with vs0 and vs2. */
3637 add_phi_arg (vect_phi, vect_phi_init,
3638 loop_preheader_edge (outer_loop),
3639 UNKNOWN_LOCATION);
3640 add_phi_arg (vect_phi, PHI_RESULT (epilog_stmt),
3641 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
3642 if (vect_print_dump_info (REPORT_DETAILS))
3643 {
3644 fprintf (vect_dump, "created double reduction phi "
3645 "node: ");
3646 print_gimple_stmt (vect_dump, vect_phi, 0, TDF_SLIM);
3647 }
3648
3649 vect_phi_res = PHI_RESULT (vect_phi);
3650
3651 /* Replace the use, i.e., set the correct vs1 in the regular
3652 reduction phi node. FORNOW, NCOPIES is always 1, so the
3653 loop is redundant. */
3654 use = reduction_phi;
3655 for (j = 0; j < ncopies; j++)
3656 {
3657 edge pr_edge = loop_preheader_edge (loop);
3658 SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
3659 use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
3660 }
3661 }
3662 }
3663
3664 /* Replace the uses: */
3665 orig_name = PHI_RESULT (exit_phi);
3666 scalar_result = VEC_index (tree, scalar_results, k);
3667 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
3668 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
3669 SET_USE (use_p, scalar_result);
3670 }
3671
3672 VEC_free (gimple, heap, phis);
3673 }
3674
3675 VEC_free (tree, heap, scalar_results);
3676 VEC_free (gimple, heap, new_phis);
3677 }
3678
3679
3680 /* Function vectorizable_reduction.
3681
3682 Check if STMT performs a reduction operation that can be vectorized.
3683 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3684 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3685 Return FALSE if not a vectorizable STMT, TRUE otherwise.
3686
3687 This function also handles reduction idioms (patterns) that have been
3688 recognized in advance during vect_pattern_recog. In this case, STMT may be
3689 of this form:
3690 X = pattern_expr (arg0, arg1, ..., X)
3691 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
3692 sequence that had been detected and replaced by the pattern-stmt (STMT).
3693
3694 In some cases of reduction patterns, the type of the reduction variable X is
3695 different than the type of the other arguments of STMT.
3696 In such cases, the vectype that is used when transforming STMT into a vector
3697 stmt is different than the vectype that is used to determine the
3698 vectorization factor, because it consists of a different number of elements
3699 than the actual number of elements that are being operated upon in parallel.
3700
3701 For example, consider an accumulation of shorts into an int accumulator.
3702 On some targets it's possible to vectorize this pattern operating on 8
3703 shorts at a time (hence, the vectype for purposes of determining the
3704 vectorization factor should be V8HI); on the other hand, the vectype that
3705 is used to create the vector form is actually V4SI (the type of the result).
3706
3707 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
3708 indicates the actual level of parallelism (V8HI in the example), so
3709 that the right vectorization factor can be derived. This vectype
3710 corresponds to the type of arguments to the reduction stmt, and should *NOT*
3711 be used to create the vectorized stmt. The right vectype for the vectorized
3712 stmt is obtained from the type of the result X:
3713 get_vectype_for_scalar_type (TREE_TYPE (X))
3714
3715 This means that, contrary to "regular" reductions (or "regular" stmts in
3716 general), the following equation:
3717 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
3718 does *NOT* necessarily hold for reduction patterns. */
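/* For instance, the pattern discussed above may originate from scalar
   code like the following (illustrative sketch only):

     short s[N];
     int acc = 0;
     for (i = 0; i < N; i++)
       acc += s[i];       detected as: acc = widen_sum <s[i], acc>

   Here STMT_VINFO_VECTYPE is V8HI (eight shorts are consumed per vector
   iteration), while the vectorized stmt itself produces a V4SI value,
   whose type is obtained from TREE_TYPE (acc).  */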
3719
3720 bool
3721 vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
3722 gimple *vec_stmt, slp_tree slp_node)
3723 {
3724 tree vec_dest;
3725 tree scalar_dest;
3726 tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
3727 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3728 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3729 tree vectype_in = NULL_TREE;
3730 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3731 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3732 enum tree_code code, orig_code, epilog_reduc_code;
3733 enum machine_mode vec_mode;
3734 int op_type;
3735 optab optab, reduc_optab;
3736 tree new_temp = NULL_TREE;
3737 tree def;
3738 gimple def_stmt;
3739 enum vect_def_type dt;
3740 gimple new_phi = NULL;
3741 tree scalar_type;
3742 bool is_simple_use;
3743 gimple orig_stmt;
3744 stmt_vec_info orig_stmt_info;
3745 tree expr = NULL_TREE;
3746 int i;
3747 int ncopies;
3748 int epilog_copies;
3749 stmt_vec_info prev_stmt_info, prev_phi_info;
3750 bool single_defuse_cycle = false;
3751 tree reduc_def = NULL_TREE;
3752 gimple new_stmt = NULL;
3753 int j;
3754 tree ops[3];
3755 bool nested_cycle = false, found_nested_cycle_def = false;
3756 gimple reduc_def_stmt = NULL;
3757 /* The default is that the reduction variable is the last operand of the statement. */
3758 int reduc_index = 2;
3759 bool double_reduc = false, dummy;
3760 basic_block def_bb;
3761 struct loop * def_stmt_loop, *outer_loop = NULL;
3762 tree def_arg;
3763 gimple def_arg_stmt;
3764 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vect_defs = NULL;
3765 VEC (gimple, heap) *phis = NULL;
3766 int vec_num;
3767 tree def0, def1;
3768
3769 if (nested_in_vect_loop_p (loop, stmt))
3770 {
3771 outer_loop = loop;
3772 loop = loop->inner;
3773 nested_cycle = true;
3774 }
3775
3776 /* 1. Is vectorizable reduction? */
3777 /* Not supportable if the reduction variable is used in the loop. */
3778 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer)
3779 return false;
3780
3781 /* Reductions that are not used even in an enclosing outer-loop
3782 are expected to be "live" (used outside the loop). */
3783 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
3784 && !STMT_VINFO_LIVE_P (stmt_info))
3785 return false;
3786
3787 /* Make sure it was already recognized as a reduction computation. */
3788 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
3789 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
3790 return false;
3791
3792 /* 2. Has this been recognized as a reduction pattern?
3793
3794 Check if STMT represents a pattern that has been recognized
3795 in earlier analysis stages. For stmts that represent a pattern,
3796 the STMT_VINFO_RELATED_STMT field records the last stmt in
3797 the original sequence that constitutes the pattern. */
3798
3799 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3800 if (orig_stmt)
3801 {
3802 orig_stmt_info = vinfo_for_stmt (orig_stmt);
3803 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt);
3804 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
3805 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
3806 }
3807
3808 /* 3. Check the operands of the operation. The first operands are defined
3809 inside the loop body. The last operand is the reduction variable,
3810 which is defined by the loop-header-phi. */
3811
3812 gcc_assert (is_gimple_assign (stmt));
3813
3814 /* Flatten RHS */
3815 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3816 {
3817 case GIMPLE_SINGLE_RHS:
3818 op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
3819 if (op_type == ternary_op)
3820 {
3821 tree rhs = gimple_assign_rhs1 (stmt);
3822 ops[0] = TREE_OPERAND (rhs, 0);
3823 ops[1] = TREE_OPERAND (rhs, 1);
3824 ops[2] = TREE_OPERAND (rhs, 2);
3825 code = TREE_CODE (rhs);
3826 }
3827 else
3828 return false;
3829 break;
3830
3831 case GIMPLE_BINARY_RHS:
3832 code = gimple_assign_rhs_code (stmt);
3833 op_type = TREE_CODE_LENGTH (code);
3834 gcc_assert (op_type == binary_op);
3835 ops[0] = gimple_assign_rhs1 (stmt);
3836 ops[1] = gimple_assign_rhs2 (stmt);
3837 break;
3838
3839 case GIMPLE_UNARY_RHS:
3840 return false;
3841
3842 default:
3843 gcc_unreachable ();
3844 }
3845
3846 scalar_dest = gimple_assign_lhs (stmt);
3847 scalar_type = TREE_TYPE (scalar_dest);
3848 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
3849 && !SCALAR_FLOAT_TYPE_P (scalar_type))
3850 return false;
3851
3852 /* All uses but the last are expected to be defined in the loop.
3853 The last use is the reduction variable. In case of nested cycle this
3854 assumption is not true: we use reduc_index to record the index of the
3855 reduction variable. */
3856 for (i = 0; i < op_type-1; i++)
3857 {
3858 tree tem;
3859
3860 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
3861 if (i == 0 && code == COND_EXPR)
3862 continue;
3863
3864 is_simple_use = vect_is_simple_use_1 (ops[i], loop_vinfo, NULL,
3865 &def_stmt, &def, &dt, &tem);
3866 if (!vectype_in)
3867 vectype_in = tem;
3868 gcc_assert (is_simple_use);
3869 if (dt != vect_internal_def
3870 && dt != vect_external_def
3871 && dt != vect_constant_def
3872 && dt != vect_induction_def
3873 && !(dt == vect_nested_cycle && nested_cycle))
3874 return false;
3875
3876 if (dt == vect_nested_cycle)
3877 {
3878 found_nested_cycle_def = true;
3879 reduc_def_stmt = def_stmt;
3880 reduc_index = i;
3881 }
3882 }
3883
3884 is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, NULL, &def_stmt,
3885 &def, &dt);
3886 gcc_assert (is_simple_use);
3887 gcc_assert (dt == vect_reduction_def
3888 || dt == vect_nested_cycle
3889 || ((dt == vect_internal_def || dt == vect_external_def
3890 || dt == vect_constant_def || dt == vect_induction_def)
3891 && nested_cycle && found_nested_cycle_def));
3892 if (!found_nested_cycle_def)
3893 reduc_def_stmt = def_stmt;
3894
3895 gcc_assert (gimple_code (reduc_def_stmt) == GIMPLE_PHI);
3896 if (orig_stmt)
3897 gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo,
3898 reduc_def_stmt,
3899 !nested_cycle,
3900 &dummy));
3901 else
3902 gcc_assert (stmt == vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
3903 !nested_cycle, &dummy));
3904
3905 if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
3906 return false;
3907
3908 if (slp_node)
3909 ncopies = 1;
3910 else
3911 ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3912 / TYPE_VECTOR_SUBPARTS (vectype_in));
3913
3914 gcc_assert (ncopies >= 1);
3915
3916 vec_mode = TYPE_MODE (vectype_in);
3917
3918 if (code == COND_EXPR)
3919 {
3920 if (!vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0))
3921 {
3922 if (vect_print_dump_info (REPORT_DETAILS))
3923 fprintf (vect_dump, "unsupported condition in reduction");
3924
3925 return false;
3926 }
3927 }
3928 else
3929 {
3930 /* 4. Supportable by target? */
3931
3932 /* 4.1. check support for the operation in the loop */
3933 optab = optab_for_tree_code (code, vectype_in, optab_default);
3934 if (!optab)
3935 {
3936 if (vect_print_dump_info (REPORT_DETAILS))
3937 fprintf (vect_dump, "no optab.");
3938
3939 return false;
3940 }
3941
3942 if (optab_handler (optab, vec_mode)->insn_code == CODE_FOR_nothing)
3943 {
3944 if (vect_print_dump_info (REPORT_DETAILS))
3945 fprintf (vect_dump, "op not supported by target.");
3946
3947 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
3948 || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3949 < vect_min_worthwhile_factor (code))
3950 return false;
3951
3952 if (vect_print_dump_info (REPORT_DETAILS))
3953 fprintf (vect_dump, "proceeding using word mode.");
3954 }
3955
3956 /* Worthwhile without SIMD support? */
3957 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
3958 && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3959 < vect_min_worthwhile_factor (code))
3960 {
3961 if (vect_print_dump_info (REPORT_DETAILS))
3962 fprintf (vect_dump, "not worthwhile without SIMD support.");
3963
3964 return false;
3965 }
3966 }
3967
3968 /* 4.2. Check support for the epilog operation.
3969
3970 If STMT represents a reduction pattern, then the type of the
3971 reduction variable may be different than the type of the rest
3972 of the arguments. For example, consider the case of accumulation
3973 of shorts into an int accumulator. The original code:
3974 S1: int_a = (int) short_a;
3975 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
3976
3977 was replaced with:
3978 STMT: int_acc = widen_sum <short_a, int_acc>
3979
3980 This means that:
3981 1. The tree-code that is used to create the vector operation in the
3982 epilog code (that reduces the partial results) is not the
3983 tree-code of STMT, but is rather the tree-code of the original
3984 stmt from the pattern that STMT is replacing. I.e., in the example
3985 above we want to use 'widen_sum' in the loop, but 'plus' in the
3986 epilog.
3987 2. The type (mode) we use to check available target support
3988 for the vector operation to be created in the *epilog*, is
3989 determined by the type of the reduction variable (in the example
3990 above we'd check this: plus_optab[vect_int_mode]).
3991 However the type (mode) we use to check available target support
3992 for the vector operation to be created *inside the loop*, is
3993 determined by the type of the other arguments to STMT (in the
3994 example we'd check this: widen_sum_optab[vect_short_mode]).
3995
3996 This is contrary to "regular" reductions, in which the types of all
3997 the arguments are the same as the type of the reduction variable.
3998 For "regular" reductions we can therefore use the same vector type
3999 (and also the same tree-code) when generating the epilog code and
4000 when generating the code inside the loop. */
4001
4002 if (orig_stmt)
4003 {
4004 /* This is a reduction pattern: get the vectype from the type of the
4005 reduction variable, and get the tree-code from orig_stmt. */
4006 orig_code = gimple_assign_rhs_code (orig_stmt);
4007 gcc_assert (vectype_out);
4008 vec_mode = TYPE_MODE (vectype_out);
4009 }
4010 else
4011 {
4012 /* Regular reduction: the same vectype and tree-code that are used for
4013 the vector code inside the loop can also be used for the epilog code. */
4014 orig_code = code;
4015 }
4016
4017 if (nested_cycle)
4018 {
4019 def_bb = gimple_bb (reduc_def_stmt);
4020 def_stmt_loop = def_bb->loop_father;
4021 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
4022 loop_preheader_edge (def_stmt_loop));
4023 if (TREE_CODE (def_arg) == SSA_NAME
4024 && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
4025 && gimple_code (def_arg_stmt) == GIMPLE_PHI
4026 && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
4027 && vinfo_for_stmt (def_arg_stmt)
4028 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
4029 == vect_double_reduction_def)
4030 double_reduc = true;
4031 }
4032
4033 epilog_reduc_code = ERROR_MARK;
4034 if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
4035 {
4036 reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out,
4037 optab_default);
4038 if (!reduc_optab)
4039 {
4040 if (vect_print_dump_info (REPORT_DETAILS))
4041 fprintf (vect_dump, "no optab for reduction.");
4042
4043 epilog_reduc_code = ERROR_MARK;
4044 }
4045
4046 if (reduc_optab
4047 && optab_handler (reduc_optab, vec_mode)->insn_code
4048 == CODE_FOR_nothing)
4049 {
4050 if (vect_print_dump_info (REPORT_DETAILS))
4051 fprintf (vect_dump, "reduc op not supported by target.");
4052
4053 epilog_reduc_code = ERROR_MARK;
4054 }
4055 }
4056 else
4057 {
4058 if (!nested_cycle || double_reduc)
4059 {
4060 if (vect_print_dump_info (REPORT_DETAILS))
4061 fprintf (vect_dump, "no reduc code for scalar code.");
4062
4063 return false;
4064 }
4065 }
4066
4067 if (double_reduc && ncopies > 1)
4068 {
4069 if (vect_print_dump_info (REPORT_DETAILS))
4070 fprintf (vect_dump, "multiple types in double reduction");
4071
4072 return false;
4073 }
4074
4075 if (!vec_stmt) /* transformation not required. */
4076 {
4077 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
4078 if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
4079 return false;
4080 return true;
4081 }
4082
4083 /** Transform. **/
4084
4085 if (vect_print_dump_info (REPORT_DETAILS))
4086 fprintf (vect_dump, "transform reduction.");
4087
4088 /* FORNOW: Multiple types are not supported for condition. */
4089 if (code == COND_EXPR)
4090 gcc_assert (ncopies == 1);
4091
4092 /* Create the destination vector */
4093 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
4094
4095 /* In case the vectorization factor (VF) is bigger than the number
4096 of elements that we can fit in a vectype (nunits), we have to generate
4097 more than one vector stmt - i.e., we need to "unroll" the
4098 vector stmt by a factor VF/nunits. For more details see documentation
4099 in vectorizable_operation. */
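/* E.g. (a sketch): with VF == 8 and a V4SI vectype (nunits == 4), the
   reduction stmt is "unrolled" into ncopies == 8/4 == 2 vector stmts.  */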
4100
4101 /* If the reduction is used in an outer loop we need to generate
4102 VF intermediate results, like so (e.g. for ncopies=2):
4103 r0 = phi (init, r0)
4104 r1 = phi (init, r1)
4105 r0 = x0 + r0;
4106 r1 = x1 + r1;
4107 (i.e. we generate VF results in 2 registers).
4108 In this case we have a separate def-use cycle for each copy, and therefore
4109 for each copy we get the vector def for the reduction variable from the
4110 respective phi node created for this copy.
4111
4112 Otherwise (the reduction is unused in the loop nest), we can combine
4113 together intermediate results, like so (e.g. for ncopies=2):
4114 r = phi (init, r)
4115 r = x0 + r;
4116 r = x1 + r;
4117 (i.e. we generate VF/2 results in a single register).
4118 In this case for each copy we get the vector def for the reduction variable
4119 from the vectorized reduction operation generated in the previous iteration.
4120 */
4121
4122 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope)
4123 {
4124 single_defuse_cycle = true;
4125 epilog_copies = 1;
4126 }
4127 else
4128 epilog_copies = ncopies;
4129
4130 prev_stmt_info = NULL;
4131 prev_phi_info = NULL;
4132 if (slp_node)
4133 {
4134 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
4135 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype_out)
4136 == TYPE_VECTOR_SUBPARTS (vectype_in));
4137 }
4138 else
4139 {
4140 vec_num = 1;
4141 vec_oprnds0 = VEC_alloc (tree, heap, 1);
4142 if (op_type == ternary_op)
4143 vec_oprnds1 = VEC_alloc (tree, heap, 1);
4144 }
4145
4146 phis = VEC_alloc (gimple, heap, vec_num);
4147 vect_defs = VEC_alloc (tree, heap, vec_num);
4148 if (!slp_node)
4149 VEC_quick_push (tree, vect_defs, NULL_TREE);
4150
4151 for (j = 0; j < ncopies; j++)
4152 {
4153 if (j == 0 || !single_defuse_cycle)
4154 {
4155 for (i = 0; i < vec_num; i++)
4156 {
4157 /* Create the reduction-phi that defines the reduction
4158 operand. */
4159 new_phi = create_phi_node (vec_dest, loop->header);
4160 set_vinfo_for_stmt (new_phi,
4161 new_stmt_vec_info (new_phi, loop_vinfo,
4162 NULL));
4163 if (j == 0 || slp_node)
4164 VEC_quick_push (gimple, phis, new_phi);
4165 }
4166 }
4167
4168 if (code == COND_EXPR)
4169 {
4170 gcc_assert (!slp_node);
4171 vectorizable_condition (stmt, gsi, vec_stmt,
4172 PHI_RESULT (VEC_index (gimple, phis, 0)),
4173 reduc_index);
4174 /* Multiple types are not supported for condition. */
4175 break;
4176 }
4177
4178 /* Handle uses. */
4179 if (j == 0)
4180 {
4181 if (slp_node)
4182 vect_get_slp_defs (slp_node, &vec_oprnds0, &vec_oprnds1, -1);
4183 else
4184 {
4185 loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
4186 stmt, NULL);
4187 VEC_quick_push (tree, vec_oprnds0, loop_vec_def0);
4188 if (op_type == ternary_op)
4189 {
4190 if (reduc_index == 0)
4191 loop_vec_def1 = vect_get_vec_def_for_operand (ops[2], stmt,
4192 NULL);
4193 else
4194 loop_vec_def1 = vect_get_vec_def_for_operand (ops[1], stmt,
4195 NULL);
4196
4197 VEC_quick_push (tree, vec_oprnds1, loop_vec_def1);
4198 }
4199 }
4200 }
4201 else
4202 {
4203 if (!slp_node)
4204 {
4205 enum vect_def_type dt = vect_unknown_def_type; /* Dummy */
4206 loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def0);
4207 VEC_replace (tree, vec_oprnds0, 0, loop_vec_def0);
4208 if (op_type == ternary_op)
4209 {
4210 loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt,
4211 loop_vec_def1);
4212 VEC_replace (tree, vec_oprnds1, 0, loop_vec_def1);
4213 }
4214 }
4215
4216 if (single_defuse_cycle)
4217 reduc_def = gimple_assign_lhs (new_stmt);
4218
4219 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
4220 }
4221
4222 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, def0); i++)
4223 {
4224 if (slp_node)
4225 reduc_def = PHI_RESULT (VEC_index (gimple, phis, i));
4226 else
4227 {
4228 if (!single_defuse_cycle || j == 0)
4229 reduc_def = PHI_RESULT (new_phi);
4230 }
4231
4232 def1 = ((op_type == ternary_op)
4233 ? VEC_index (tree, vec_oprnds1, i) : NULL);
4234 if (op_type == binary_op)
4235 {
4236 if (reduc_index == 0)
4237 expr = build2 (code, vectype_out, reduc_def, def0);
4238 else
4239 expr = build2 (code, vectype_out, def0, reduc_def);
4240 }
4241 else
4242 {
4243 if (reduc_index == 0)
4244 expr = build3 (code, vectype_out, reduc_def, def0, def1);
4245 else
4246 {
4247 if (reduc_index == 1)
4248 expr = build3 (code, vectype_out, def0, reduc_def, def1);
4249 else
4250 expr = build3 (code, vectype_out, def0, def1, reduc_def);
4251 }
4252 }
4253
4254 new_stmt = gimple_build_assign (vec_dest, expr);
4255 new_temp = make_ssa_name (vec_dest, new_stmt);
4256 gimple_assign_set_lhs (new_stmt, new_temp);
4257 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4258 if (slp_node)
4259 {
4260 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
4261 VEC_quick_push (tree, vect_defs, new_temp);
4262 }
4263 else
4264 VEC_replace (tree, vect_defs, 0, new_temp);
4265 }
4266
4267 if (slp_node)
4268 continue;
4269
4270 if (j == 0)
4271 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4272 else
4273 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4274
4275 prev_stmt_info = vinfo_for_stmt (new_stmt);
4276 prev_phi_info = vinfo_for_stmt (new_phi);
4277 }
4278
4279 /* Finalize the reduction-phi (set its arguments) and create the
4280 epilog reduction code. */
4281 if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
4282 {
4283 new_temp = gimple_assign_lhs (*vec_stmt);
4284 VEC_replace (tree, vect_defs, 0, new_temp);
4285 }
4286
4287 vect_create_epilog_for_reduction (vect_defs, stmt, epilog_copies,
4288 epilog_reduc_code, phis, reduc_index,
4289 double_reduc, slp_node);
4290
4291 VEC_free (gimple, heap, phis);
4292 VEC_free (tree, heap, vec_oprnds0);
4293 if (vec_oprnds1)
4294 VEC_free (tree, heap, vec_oprnds1);
4295
4296 return true;
4297 }
4298
4299 /* Function vect_min_worthwhile_factor.
4300
4301 For a loop where we could vectorize the operation indicated by CODE,
4302 return the minimum vectorization factor that makes it worthwhile
4303 to use generic vectors. */
4304 int
4305 vect_min_worthwhile_factor (enum tree_code code)
4306 {
4307 switch (code)
4308 {
4309 case PLUS_EXPR:
4310 case MINUS_EXPR:
4311 case NEGATE_EXPR:
4312 return 4;
4313
4314 case BIT_AND_EXPR:
4315 case BIT_IOR_EXPR:
4316 case BIT_XOR_EXPR:
4317 case BIT_NOT_EXPR:
4318 return 2;
4319
4320 default:
4321 return INT_MAX;
4322 }
4323 }
4324
4325
4326 /* Function vectorizable_induction
4327
4328 Check if PHI performs an induction computation that can be vectorized.
4329 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
4330 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
4331 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
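/* For example (a sketch): in

     for (i = 0; i < N; i++)
       a[i] = i;

   the loop-header phi of 'i' is an induction. Its vector form is a phi
   whose initial value is the vector {init, init+1, ..., init+VF-1} and
   which is incremented by the constant vector {VF, VF, ..., VF} in each
   iteration (see get_initial_def_for_induction).  */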
4332
4333 bool
4334 vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
4335 gimple *vec_stmt)
4336 {
4337 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
4338 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4339 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4340 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4341 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4342 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4343 tree vec_def;
4344
4345 gcc_assert (ncopies >= 1);
4346 /* FORNOW. This restriction should be relaxed. */
4347 if (nested_in_vect_loop_p (loop, phi) && ncopies > 1)
4348 {
4349 if (vect_print_dump_info (REPORT_DETAILS))
4350 fprintf (vect_dump, "multiple types in nested loop.");
4351 return false;
4352 }
4353
4354 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4355 return false;
4356
4357 /* FORNOW: SLP not supported. */
4358 if (STMT_SLP_TYPE (stmt_info))
4359 return false;
4360
4361 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
4362
4363 if (gimple_code (phi) != GIMPLE_PHI)
4364 return false;
4365
4366 if (!vec_stmt) /* transformation not required. */
4367 {
4368 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
4369 if (vect_print_dump_info (REPORT_DETAILS))
4370 fprintf (vect_dump, "=== vectorizable_induction ===");
4371 vect_model_induction_cost (stmt_info, ncopies);
4372 return true;
4373 }
4374
4375 /** Transform. **/
4376
4377 if (vect_print_dump_info (REPORT_DETAILS))
4378 fprintf (vect_dump, "transform induction phi.");
4379
4380 vec_def = get_initial_def_for_induction (phi);
4381 *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
4382 return true;
4383 }
4384
4385 /* Function vectorizable_live_operation.
4386
4387 STMT computes a value that is used outside the loop. Check if
4388 it can be supported. */
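/* An example of a supported case (a sketch): a computation whose
   operands are all loop-invariant and whose result is used only after
   the loop, e.g.

     for (i = 0; i < N; i++)
       {
         t = n * 4;
         a[i] = b[i];
       }
     ... = t;

   As explained below, such a stmt is left in place unvectorized; the
   last scalar value it computes is used after the loop.  */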
4389
4390 bool
4391 vectorizable_live_operation (gimple stmt,
4392 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
4393 gimple *vec_stmt ATTRIBUTE_UNUSED)
4394 {
4395 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4396 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4397 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4398 int i;
4399 int op_type;
4400 tree op;
4401 tree def;
4402 gimple def_stmt;
4403 enum vect_def_type dt;
4404 enum tree_code code;
4405 enum gimple_rhs_class rhs_class;
4406
4407 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
4408
4409 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
4410 return false;
4411
4412 if (!is_gimple_assign (stmt))
4413 return false;
4414
4415 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4416 return false;
4417
4418 /* FORNOW. CHECKME. */
4419 if (nested_in_vect_loop_p (loop, stmt))
4420 return false;
4421
4422 code = gimple_assign_rhs_code (stmt);
4423 op_type = TREE_CODE_LENGTH (code);
4424 rhs_class = get_gimple_rhs_class (code);
4425 gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
4426 gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);
4427
4428 /* FORNOW: support only if all uses are invariant. This means
4429 that the scalar operations can remain in place, unvectorized.
4430 The original last scalar value that they compute will be used. */
4431
4432 for (i = 0; i < op_type; i++)
4433 {
4434 if (rhs_class == GIMPLE_SINGLE_RHS)
4435 op = TREE_OPERAND (gimple_op (stmt, 1), i);
4436 else
4437 op = gimple_op (stmt, i + 1);
4438 if (op
4439 && !vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def, &dt))
4440 {
4441 if (vect_print_dump_info (REPORT_DETAILS))
4442 fprintf (vect_dump, "use not simple.");
4443 return false;
4444 }
4445
4446 if (dt != vect_external_def && dt != vect_constant_def)
4447 return false;
4448 }
4449
4450 /* No transformation is required for the cases we currently support. */
4451 return true;
4452 }
4453
4454 /* Kill any debug uses outside LOOP of SSA names defined in STMT. */
4455
4456 static void
4457 vect_loop_kill_debug_uses (struct loop *loop, gimple stmt)
4458 {
4459 ssa_op_iter op_iter;
4460 imm_use_iterator imm_iter;
4461 def_operand_p def_p;
4462 gimple ustmt;
4463
4464 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
4465 {
4466 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
4467 {
4468 basic_block bb;
4469
4470 if (!is_gimple_debug (ustmt))
4471 continue;
4472
4473 bb = gimple_bb (ustmt);
4474
4475 if (!flow_bb_inside_loop_p (loop, bb))
4476 {
4477 if (gimple_debug_bind_p (ustmt))
4478 {
4479 if (vect_print_dump_info (REPORT_DETAILS))
4480 fprintf (vect_dump, "killing debug use");
4481
4482 gimple_debug_bind_reset_value (ustmt);
4483 update_stmt (ustmt);
4484 }
4485 else
4486 gcc_unreachable ();
4487 }
4488 }
4489 }
4490 }
4491
4492 /* Function vect_transform_loop.
4493
4494 The analysis phase has determined that the loop is vectorizable.
4495 Vectorize the loop - created vectorized stmts to replace the scalar
4496 stmts in the loop, and update the loop exit condition. */
4497
4498 void
4499 vect_transform_loop (loop_vec_info loop_vinfo)
4500 {
4501 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4502 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
4503 int nbbs = loop->num_nodes;
4504 gimple_stmt_iterator si;
4505 int i;
4506 tree ratio = NULL;
4507 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4508 bool strided_store;
4509 bool slp_scheduled = false;
4510 unsigned int nunits;
4511 tree cond_expr = NULL_TREE;
4512 gimple_seq cond_expr_stmt_list = NULL;
4513 bool do_peeling_for_loop_bound;
4514
4515 if (vect_print_dump_info (REPORT_DETAILS))
4516 fprintf (vect_dump, "=== vec_transform_loop ===");
4517
4518 /* Peel the loop if there are data refs with unknown alignment.
4519 Only one data ref (a store) with unknown alignment is allowed. */
4520
4521 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
4522 vect_do_peeling_for_alignment (loop_vinfo);
4523
4524 do_peeling_for_loop_bound
4525 = (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
4526 || (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
4527 && LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0));
4528
4529 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
4530 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
4531 vect_loop_versioning (loop_vinfo,
4532 !do_peeling_for_loop_bound,
4533 &cond_expr, &cond_expr_stmt_list);
4534
4535 /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
4536 compile-time constant), or it is a constant that is not divisible by the
4537 vectorization factor, then an epilog loop needs to be created.
4538 We therefore duplicate the loop: the original loop will be vectorized,
4539 and will compute the first (n/VF) iterations. The second copy of the loop
4540 will remain scalar and will compute the remaining (n%VF) iterations.
4541 (VF is the vectorization factor). */
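/* For instance (a sketch): with n == 103 and VF == 4, the vectorized
   loop executes 25 iterations (covering 100 elements) and the scalar
   epilog loop executes the remaining 3 iterations.  */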
4542
4543 if (do_peeling_for_loop_bound)
4544 vect_do_peeling_for_loop_bound (loop_vinfo, &ratio,
4545 cond_expr, cond_expr_stmt_list);
4546 else
4547 ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
4548 LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);
4549
4550 /* 1) Make sure the loop header has exactly two entries
4551 2) Make sure we have a preheader basic block. */
4552
4553 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
4554
4555 split_edge (loop_preheader_edge (loop));
4556
4557 /* FORNOW: the vectorizer supports only loops whose body consists
4558 of one basic block (header + empty latch). When the vectorizer
4559 supports more involved loop forms, the order in which the BBs are
4560 traversed will need to be reconsidered. */
4561
4562 for (i = 0; i < nbbs; i++)
4563 {
4564 basic_block bb = bbs[i];
4565 stmt_vec_info stmt_info;
4566 gimple phi;
4567
4568 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
4569 {
4570 phi = gsi_stmt (si);
4571 if (vect_print_dump_info (REPORT_DETAILS))
4572 {
4573 fprintf (vect_dump, "------>vectorizing phi: ");
4574 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
4575 }
4576 stmt_info = vinfo_for_stmt (phi);
4577 if (!stmt_info)
4578 continue;
4579
4580 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
4581 vect_loop_kill_debug_uses (loop, phi);
4582
4583 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4584 && !STMT_VINFO_LIVE_P (stmt_info))
4585 continue;
4586
4587 if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
4588 != (unsigned HOST_WIDE_INT) vectorization_factor)
4589 && vect_print_dump_info (REPORT_DETAILS))
4590 fprintf (vect_dump, "multiple-types.");
4591
4592 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
4593 {
4594 if (vect_print_dump_info (REPORT_DETAILS))
4595 fprintf (vect_dump, "transform phi.");
4596 vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
4597 }
4598 }
4599
4600 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
4601 {
4602 gimple stmt = gsi_stmt (si);
4603 bool is_store;
4604
4605 if (vect_print_dump_info (REPORT_DETAILS))
4606 {
4607 fprintf (vect_dump, "------>vectorizing statement: ");
4608 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4609 }
4610
4611 stmt_info = vinfo_for_stmt (stmt);
4612
4613 /* vector stmts created in the outer-loop during vectorization of
4614 stmts in an inner-loop may not have a stmt_info, and do not
4615 need to be vectorized. */
4616 if (!stmt_info)
4617 {
4618 gsi_next (&si);
4619 continue;
4620 }
4621
4622 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
4623 vect_loop_kill_debug_uses (loop, stmt);
4624
4625 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4626 && !STMT_VINFO_LIVE_P (stmt_info))
4627 {
4628 gsi_next (&si);
4629 continue;
4630 }
4631
4632 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4633 nunits =
4634 (unsigned int) TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
4635 if (!STMT_SLP_TYPE (stmt_info)
4636 && nunits != (unsigned int) vectorization_factor
4637 && vect_print_dump_info (REPORT_DETAILS))
4638 /* For SLP, VF is set according to the unrolling factor, and not to
4639 the vector size, hence for SLP this print is not valid. */
4640 fprintf (vect_dump, "multiple-types.");
4641
4642 /* SLP. Schedule all the SLP instances when the first SLP stmt is
4643 reached. */
4644 if (STMT_SLP_TYPE (stmt_info))
4645 {
4646 if (!slp_scheduled)
4647 {
4648 slp_scheduled = true;
4649
4650 if (vect_print_dump_info (REPORT_DETAILS))
4651 fprintf (vect_dump, "=== scheduling SLP instances ===");
4652
4653 vect_schedule_slp (loop_vinfo, NULL);
4654 }
4655
4656 /* Hybrid SLP stmts must be vectorized in addition to SLP. */
4657 if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
4658 {
4659 gsi_next (&si);
4660 continue;
4661 }
4662 }
4663
4664 /* -------- vectorize statement ------------ */
4665 if (vect_print_dump_info (REPORT_DETAILS))
4666 fprintf (vect_dump, "transform statement.");
4667
4668 strided_store = false;
4669 is_store = vect_transform_stmt (stmt, &si, &strided_store, NULL, NULL);
4670 if (is_store)
4671 {
4672 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
4673 {
4674 /* Interleaving. If IS_STORE is TRUE, the vectorization of the
4675 interleaving chain was completed - free all the stores in
4676 the chain. */
4677 vect_remove_stores (DR_GROUP_FIRST_DR (stmt_info));
4678 gsi_remove (&si, true);
4679 continue;
4680 }
4681 else
4682 {
4683 /* Free the attached stmt_vec_info and remove the stmt. */
4684 free_stmt_vec_info (stmt);
4685 gsi_remove (&si, true);
4686 continue;
4687 }
4688 }
4689 gsi_next (&si);
4690 } /* stmts in BB */
4691 } /* BBs in loop */
4692
4693 slpeel_make_loop_iterate_ntimes (loop, ratio);
4694
4695 /* The memory tags and pointers in vectorized statements need to
4696 have their SSA forms updated. FIXME, why can't this be delayed
4697 until all the loops have been transformed? */
4698 update_ssa (TODO_update_ssa);
4699
4700 if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
4701 fprintf (vect_dump, "LOOP VECTORIZED.");
4702 if (loop->inner && vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
4703 fprintf (vect_dump, "OUTER LOOP VECTORIZED.");
4704 }